Diffstat (limited to 'bdb')
-rw-r--r--bdb/LICENSE102
-rw-r--r--bdb/btree/bt_compare.c211
-rw-r--r--bdb/btree/bt_conv.c98
-rw-r--r--bdb/btree/bt_curadj.c573
-rw-r--r--bdb/btree/bt_cursor.c2131
-rw-r--r--bdb/btree/bt_delete.c530
-rw-r--r--bdb/btree/bt_method.c387
-rw-r--r--bdb/btree/bt_open.c468
-rw-r--r--bdb/btree/bt_put.c859
-rw-r--r--bdb/btree/bt_rec.c1219
-rw-r--r--bdb/btree/bt_reclaim.c53
-rw-r--r--bdb/btree/bt_recno.c1369
-rw-r--r--bdb/btree/bt_rsearch.c429
-rw-r--r--bdb/btree/bt_search.c471
-rw-r--r--bdb/btree/bt_split.c1126
-rw-r--r--bdb/btree/bt_stat.c480
-rw-r--r--bdb/btree/bt_upgrade.c164
-rw-r--r--bdb/btree/bt_verify.c2237
-rw-r--r--bdb/btree/btree.src296
-rw-r--r--bdb/btree/btree_auto.c2284
-rw-r--r--bdb/build_unix/.IGNORE_ME3
-rw-r--r--bdb/build_vxworks/BerkeleyDB.wpj6066
-rw-r--r--bdb/build_vxworks/BerkeleyDB.wsp24
-rw-r--r--bdb/build_vxworks/db_config.h264
-rw-r--r--bdb/build_vxworks/db_int.h398
-rw-r--r--bdb/build_vxworks/ex_access/ex_access.wpj244
-rw-r--r--bdb/build_vxworks/ex_btrec/ex_btrec.wpj250
-rw-r--r--bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj266
-rw-r--r--bdb/build_vxworks/ex_env/ex_env.wpj248
-rw-r--r--bdb/build_vxworks/ex_mpool/ex_mpool.wpj248
-rw-r--r--bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj261
-rw-r--r--bdb/build_win32/Berkeley_DB.dsw569
-rw-r--r--bdb/build_win32/app_dsp.src148
-rw-r--r--bdb/build_win32/db_archive.dsp151
-rw-r--r--bdb/build_win32/db_buildall.dsp128
-rw-r--r--bdb/build_win32/db_checkpoint.dsp151
-rw-r--r--bdb/build_win32/db_config.h275
-rw-r--r--bdb/build_win32/db_deadlock.dsp151
-rw-r--r--bdb/build_win32/db_dll.dsp753
-rw-r--r--bdb/build_win32/db_dump.dsp151
-rw-r--r--bdb/build_win32/db_int.h398
-rw-r--r--bdb/build_win32/db_java.dsp174
-rw-r--r--bdb/build_win32/db_load.dsp151
-rw-r--r--bdb/build_win32/db_printlog.dsp151
-rw-r--r--bdb/build_win32/db_recover.dsp151
-rw-r--r--bdb/build_win32/db_stat.dsp151
-rw-r--r--bdb/build_win32/db_static.dsp714
-rw-r--r--bdb/build_win32/db_tcl.dsp135
-rw-r--r--bdb/build_win32/db_test.dsp99
-rw-r--r--bdb/build_win32/db_upgrade.dsp151
-rw-r--r--bdb/build_win32/db_verify.dsp151
-rw-r--r--bdb/build_win32/dbkill.cpp131
-rw-r--r--bdb/build_win32/dllmain.c97
-rw-r--r--bdb/build_win32/dynamic_dsp.src154
-rw-r--r--bdb/build_win32/ex_access.dsp151
-rw-r--r--bdb/build_win32/ex_btrec.dsp151
-rw-r--r--bdb/build_win32/ex_env.dsp151
-rw-r--r--bdb/build_win32/ex_lock.dsp151
-rw-r--r--bdb/build_win32/ex_mpool.dsp151
-rw-r--r--bdb/build_win32/ex_tpcb.dsp151
-rw-r--r--bdb/build_win32/excxx_access.dsp151
-rw-r--r--bdb/build_win32/excxx_btrec.dsp151
-rw-r--r--bdb/build_win32/excxx_env.dsp151
-rw-r--r--bdb/build_win32/excxx_lock.dsp151
-rw-r--r--bdb/build_win32/excxx_mpool.dsp151
-rw-r--r--bdb/build_win32/excxx_tpcb.dsp151
-rw-r--r--bdb/build_win32/include.tcl16
-rw-r--r--bdb/build_win32/java_dsp.src135
-rw-r--r--bdb/build_win32/libdb.def151
-rw-r--r--bdb/build_win32/libdb.rc33
-rw-r--r--bdb/build_win32/libdb_tcl.def35
-rw-r--r--bdb/build_win32/libdbrc.src33
-rw-r--r--bdb/build_win32/srcfile_dsp.src4
-rw-r--r--bdb/build_win32/static_dsp.src127
-rw-r--r--bdb/build_win32/tcl_dsp.src92
-rw-r--r--bdb/clib/getcwd.c272
-rw-r--r--bdb/clib/getopt.c139
-rw-r--r--bdb/clib/memcmp.c67
-rw-r--r--bdb/clib/memmove.c155
-rw-r--r--bdb/clib/raise.c32
-rw-r--r--bdb/clib/snprintf.c61
-rw-r--r--bdb/clib/strcasecmp.c102
-rw-r--r--bdb/clib/strerror.c77
-rw-r--r--bdb/clib/vsnprintf.c47
-rw-r--r--bdb/common/db_byteorder.c62
-rw-r--r--bdb/common/db_err.c544
-rw-r--r--bdb/common/db_getlong.c159
-rw-r--r--bdb/common/db_log2.c65
-rw-r--r--bdb/common/util_log.c63
-rw-r--r--bdb/common/util_sig.c87
-rw-r--r--bdb/cxx/cxx_app.cpp671
-rw-r--r--bdb/cxx/cxx_except.cpp132
-rw-r--r--bdb/cxx/cxx_lock.cpp125
-rw-r--r--bdb/cxx/cxx_log.cpp125
-rw-r--r--bdb/cxx/cxx_mpool.cpp180
-rw-r--r--bdb/cxx/cxx_table.cpp808
-rw-r--r--bdb/cxx/cxx_txn.cpp136
-rw-r--r--bdb/cxx/namemap.txt21
-rw-r--r--bdb/db/Design.fileop452
-rw-r--r--bdb/db/crdel.src103
-rw-r--r--bdb/db/crdel_auto.c900
-rw-r--r--bdb/db/crdel_rec.c646
-rw-r--r--bdb/db/db.c2325
-rw-r--r--bdb/db/db.src178
-rw-r--r--bdb/db/db_am.c511
-rw-r--r--bdb/db/db_auto.c1270
-rw-r--r--bdb/db/db_cam.c974
-rw-r--r--bdb/db/db_conv.c348
-rw-r--r--bdb/db/db_dispatch.c983
-rw-r--r--bdb/db/db_dup.c275
-rw-r--r--bdb/db/db_iface.c687
-rw-r--r--bdb/db/db_join.c730
-rw-r--r--bdb/db/db_meta.c309
-rw-r--r--bdb/db/db_method.c629
-rw-r--r--bdb/db/db_overflow.c681
-rw-r--r--bdb/db/db_pr.c1284
-rw-r--r--bdb/db/db_rec.c529
-rw-r--r--bdb/db/db_reclaim.c134
-rw-r--r--bdb/db/db_ret.c160
-rw-r--r--bdb/db/db_upg.c338
-rw-r--r--bdb/db/db_upg_opd.c353
-rw-r--r--bdb/db/db_vrfy.c2340
-rw-r--r--bdb/db/db_vrfyutil.c830
-rw-r--r--bdb/db185/db185.c593
-rw-r--r--bdb/db185/db185_int.h129
-rw-r--r--bdb/db_archive/db_archive.c164
-rw-r--r--bdb/db_checkpoint/db_checkpoint.c237
-rw-r--r--bdb/db_deadlock/db_deadlock.c222
-rw-r--r--bdb/db_dump/db_dump.c517
-rw-r--r--bdb/db_dump185/db_dump185.c353
-rw-r--r--bdb/db_load/db_load.c998
-rw-r--r--bdb/db_printlog/README25
-rw-r--r--bdb/db_printlog/commit.awk7
-rw-r--r--bdb/db_printlog/count.awk9
-rw-r--r--bdb/db_printlog/db_printlog.c200
-rw-r--r--bdb/db_printlog/dbname.awk79
-rw-r--r--bdb/db_printlog/fileid.awk37
-rw-r--r--bdb/db_printlog/pgno.awk47
-rw-r--r--bdb/db_printlog/range.awk27
-rw-r--r--bdb/db_printlog/rectype.awk27
-rw-r--r--bdb/db_printlog/status.awk26
-rw-r--r--bdb/db_printlog/txn.awk34
-rw-r--r--bdb/db_recover/db_recover.c288
-rw-r--r--bdb/db_stat/db_stat.c989
-rw-r--r--bdb/db_upgrade/db_upgrade.c173
-rw-r--r--bdb/db_verify/db_verify.c182
-rw-r--r--bdb/dbm/dbm.c489
-rw-r--r--bdb/dist/Makefile.in999
-rw-r--r--bdb/dist/RELEASE8
-rw-r--r--bdb/dist/acconfig.h108
-rw-r--r--bdb/dist/aclocal/mutex.m4395
-rw-r--r--bdb/dist/aclocal/options.m4121
-rw-r--r--bdb/dist/aclocal/programs.m448
-rw-r--r--bdb/dist/aclocal/tcl.m4126
-rw-r--r--bdb/dist/aclocal/types.m4139
-rwxr-xr-xbdb/dist/build/chk.def50
-rwxr-xr-xbdb/dist/build/chk.define55
-rwxr-xr-xbdb/dist/build/chk.offt19
-rwxr-xr-xbdb/dist/build/chk.srcfiles29
-rwxr-xr-xbdb/dist/build/chk.tags39
-rw-r--r--bdb/dist/build/script94
-rwxr-xr-xbdb/dist/config.guess1289
-rw-r--r--bdb/dist/config.hin231
-rwxr-xr-xbdb/dist/config.sub1328
-rw-r--r--bdb/dist/configure.in501
-rw-r--r--bdb/dist/gen_rec.awk475
-rw-r--r--bdb/dist/gen_rpc.awk1482
-rwxr-xr-xbdb/dist/install-sh251
-rw-r--r--bdb/dist/ltconfig3136
-rw-r--r--bdb/dist/ltmain.sh4029
-rw-r--r--bdb/dist/rec_ctemp62
-rw-r--r--bdb/dist/s_all16
-rwxr-xr-xbdb/dist/s_config37
-rwxr-xr-xbdb/dist/s_include33
-rwxr-xr-xbdb/dist/s_java31
-rwxr-xr-xbdb/dist/s_perm37
-rwxr-xr-xbdb/dist/s_readme18
-rwxr-xr-xbdb/dist/s_recover56
-rw-r--r--bdb/dist/s_rpc70
-rwxr-xr-xbdb/dist/s_symlink91
-rwxr-xr-xbdb/dist/s_tags47
-rwxr-xr-xbdb/dist/s_tcl53
-rw-r--r--bdb/dist/s_vxworks48
-rwxr-xr-xbdb/dist/s_win3267
-rw-r--r--bdb/dist/s_win32_dsp98
-rw-r--r--bdb/dist/srcfiles.in269
-rw-r--r--bdb/dist/template/db_server_proc1057
-rw-r--r--bdb/dist/template/gen_client_ret522
-rw-r--r--bdb/dist/template/rec_btree943
-rw-r--r--bdb/dist/template/rec_crdel385
-rw-r--r--bdb/dist/template/rec_db509
-rw-r--r--bdb/dist/template/rec_hash881
-rw-r--r--bdb/dist/template/rec_log137
-rw-r--r--bdb/dist/template/rec_qam509
-rw-r--r--bdb/dist/template/rec_txn509
-rw-r--r--bdb/docs/api_c/c_index.html172
-rw-r--r--bdb/docs/api_c/c_pindex.html530
-rw-r--r--bdb/docs/api_c/db_close.html119
-rw-r--r--bdb/docs/api_c/db_create.html107
-rw-r--r--bdb/docs/api_c/db_cursor.html103
-rw-r--r--bdb/docs/api_c/db_del.html101
-rw-r--r--bdb/docs/api_c/db_err.html93
-rw-r--r--bdb/docs/api_c/db_fd.html92
-rw-r--r--bdb/docs/api_c/db_get.html156
-rw-r--r--bdb/docs/api_c/db_get_byteswapped.html84
-rw-r--r--bdb/docs/api_c/db_get_type.html81
-rw-r--r--bdb/docs/api_c/db_join.html151
-rw-r--r--bdb/docs/api_c/db_key_range.html106
-rw-r--r--bdb/docs/api_c/db_lsn.html36
-rw-r--r--bdb/docs/api_c/db_open.html182
-rw-r--r--bdb/docs/api_c/db_put.html136
-rw-r--r--bdb/docs/api_c/db_remove.html108
-rw-r--r--bdb/docs/api_c/db_rename.html109
-rw-r--r--bdb/docs/api_c/db_set_append_recno.html66
-rw-r--r--bdb/docs/api_c/db_set_bt_compare.html105
-rw-r--r--bdb/docs/api_c/db_set_bt_minkey.html92
-rw-r--r--bdb/docs/api_c/db_set_bt_prefix.html106
-rw-r--r--bdb/docs/api_c/db_set_cachesize.html107
-rw-r--r--bdb/docs/api_c/db_set_dup_compare.html102
-rw-r--r--bdb/docs/api_c/db_set_errcall.html76
-rw-r--r--bdb/docs/api_c/db_set_errfile.html73
-rw-r--r--bdb/docs/api_c/db_set_errpfx.html62
-rw-r--r--bdb/docs/api_c/db_set_feedback.html95
-rw-r--r--bdb/docs/api_c/db_set_flags.html181
-rw-r--r--bdb/docs/api_c/db_set_h_ffactor.html93
-rw-r--r--bdb/docs/api_c/db_set_h_hash.html97
-rw-r--r--bdb/docs/api_c/db_set_h_nelem.html88
-rw-r--r--bdb/docs/api_c/db_set_lorder.html94
-rw-r--r--bdb/docs/api_c/db_set_malloc.html98
-rw-r--r--bdb/docs/api_c/db_set_pagesize.html90
-rw-r--r--bdb/docs/api_c/db_set_paniccall.html70
-rw-r--r--bdb/docs/api_c/db_set_q_extentsize.html90
-rw-r--r--bdb/docs/api_c/db_set_re_delim.html90
-rw-r--r--bdb/docs/api_c/db_set_re_len.html94
-rw-r--r--bdb/docs/api_c/db_set_re_pad.html88
-rw-r--r--bdb/docs/api_c/db_set_re_source.html130
-rw-r--r--bdb/docs/api_c/db_set_realloc.html99
-rw-r--r--bdb/docs/api_c/db_stat.html195
-rw-r--r--bdb/docs/api_c/db_sync.html98
-rw-r--r--bdb/docs/api_c/db_upgrade.html135
-rw-r--r--bdb/docs/api_c/db_verify.html150
-rw-r--r--bdb/docs/api_c/dbc_close.html64
-rw-r--r--bdb/docs/api_c/dbc_count.html55
-rw-r--r--bdb/docs/api_c/dbc_del.html68
-rw-r--r--bdb/docs/api_c/dbc_dup.html72
-rw-r--r--bdb/docs/api_c/dbc_get.html167
-rw-r--r--bdb/docs/api_c/dbc_put.html154
-rw-r--r--bdb/docs/api_c/dbm.html220
-rw-r--r--bdb/docs/api_c/dbt.html158
-rw-r--r--bdb/docs/api_c/env_close.html84
-rw-r--r--bdb/docs/api_c/env_create.html74
-rw-r--r--bdb/docs/api_c/env_open.html205
-rw-r--r--bdb/docs/api_c/env_remove.html125
-rw-r--r--bdb/docs/api_c/env_set_cachesize.html87
-rw-r--r--bdb/docs/api_c/env_set_data_dir.html77
-rw-r--r--bdb/docs/api_c/env_set_errcall.html73
-rw-r--r--bdb/docs/api_c/env_set_errfile.html70
-rw-r--r--bdb/docs/api_c/env_set_errpfx.html59
-rw-r--r--bdb/docs/api_c/env_set_feedback.html69
-rw-r--r--bdb/docs/api_c/env_set_flags.html84
-rw-r--r--bdb/docs/api_c/env_set_lg_bsize.html68
-rw-r--r--bdb/docs/api_c/env_set_lg_dir.html73
-rw-r--r--bdb/docs/api_c/env_set_lg_max.html68
-rw-r--r--bdb/docs/api_c/env_set_lk_conflicts.html69
-rw-r--r--bdb/docs/api_c/env_set_lk_detect.html72
-rw-r--r--bdb/docs/api_c/env_set_lk_max.html72
-rw-r--r--bdb/docs/api_c/env_set_lk_max_lockers.html68
-rw-r--r--bdb/docs/api_c/env_set_lk_max_locks.html67
-rw-r--r--bdb/docs/api_c/env_set_lk_max_objects.html68
-rw-r--r--bdb/docs/api_c/env_set_mp_mmapsize.html71
-rw-r--r--bdb/docs/api_c/env_set_mutexlocks.html59
-rw-r--r--bdb/docs/api_c/env_set_pageyield.html68
-rw-r--r--bdb/docs/api_c/env_set_paniccall.html67
-rw-r--r--bdb/docs/api_c/env_set_panicstate.html64
-rw-r--r--bdb/docs/api_c/env_set_rec_init.html71
-rw-r--r--bdb/docs/api_c/env_set_region_init.html77
-rw-r--r--bdb/docs/api_c/env_set_server.html77
-rw-r--r--bdb/docs/api_c/env_set_shm_key.html87
-rw-r--r--bdb/docs/api_c/env_set_tas_spins.html70
-rw-r--r--bdb/docs/api_c/env_set_tmp_dir.html89
-rw-r--r--bdb/docs/api_c/env_set_tx_max.html67
-rw-r--r--bdb/docs/api_c/env_set_tx_recover.html75
-rw-r--r--bdb/docs/api_c/env_set_tx_timestamp.html63
-rw-r--r--bdb/docs/api_c/env_set_verbose.html78
-rw-r--r--bdb/docs/api_c/env_strerror.html60
-rw-r--r--bdb/docs/api_c/env_version.html57
-rw-r--r--bdb/docs/api_c/hsearch.html107
-rw-r--r--bdb/docs/api_c/lock_detect.html73
-rw-r--r--bdb/docs/api_c/lock_get.html91
-rw-r--r--bdb/docs/api_c/lock_id.html57
-rw-r--r--bdb/docs/api_c/lock_put.html59
-rw-r--r--bdb/docs/api_c/lock_stat.html92
-rw-r--r--bdb/docs/api_c/lock_vec.html123
-rw-r--r--bdb/docs/api_c/log_archive.html102
-rw-r--r--bdb/docs/api_c/log_compare.html51
-rw-r--r--bdb/docs/api_c/log_file.html76
-rw-r--r--bdb/docs/api_c/log_flush.html62
-rw-r--r--bdb/docs/api_c/log_get.html114
-rw-r--r--bdb/docs/api_c/log_put.html81
-rw-r--r--bdb/docs/api_c/log_register.html64
-rw-r--r--bdb/docs/api_c/log_stat.html90
-rw-r--r--bdb/docs/api_c/log_unregister.html59
-rw-r--r--bdb/docs/api_c/memp_fclose.html61
-rw-r--r--bdb/docs/api_c/memp_fget.html98
-rw-r--r--bdb/docs/api_c/memp_fopen.html157
-rw-r--r--bdb/docs/api_c/memp_fput.html79
-rw-r--r--bdb/docs/api_c/memp_fset.html72
-rw-r--r--bdb/docs/api_c/memp_fsync.html59
-rw-r--r--bdb/docs/api_c/memp_register.html93
-rw-r--r--bdb/docs/api_c/memp_stat.html118
-rw-r--r--bdb/docs/api_c/memp_sync.html83
-rw-r--r--bdb/docs/api_c/memp_trickle.html66
-rw-r--r--bdb/docs/api_c/pindex.src301
-rw-r--r--bdb/docs/api_c/set_func_close.html66
-rw-r--r--bdb/docs/api_c/set_func_dirfree.html75
-rw-r--r--bdb/docs/api_c/set_func_dirlist.html78
-rw-r--r--bdb/docs/api_c/set_func_exists.html75
-rw-r--r--bdb/docs/api_c/set_func_free.html67
-rw-r--r--bdb/docs/api_c/set_func_fsync.html66
-rw-r--r--bdb/docs/api_c/set_func_ioinfo.html83
-rw-r--r--bdb/docs/api_c/set_func_malloc.html67
-rw-r--r--bdb/docs/api_c/set_func_map.html86
-rw-r--r--bdb/docs/api_c/set_func_open.html66
-rw-r--r--bdb/docs/api_c/set_func_read.html66
-rw-r--r--bdb/docs/api_c/set_func_realloc.html67
-rw-r--r--bdb/docs/api_c/set_func_rename.html66
-rw-r--r--bdb/docs/api_c/set_func_seek.html81
-rw-r--r--bdb/docs/api_c/set_func_sleep.html76
-rw-r--r--bdb/docs/api_c/set_func_unlink.html66
-rw-r--r--bdb/docs/api_c/set_func_unmap.html75
-rw-r--r--bdb/docs/api_c/set_func_write.html67
-rw-r--r--bdb/docs/api_c/set_func_yield.html84
-rw-r--r--bdb/docs/api_c/txn_abort.html63
-rw-r--r--bdb/docs/api_c/txn_begin.html93
-rw-r--r--bdb/docs/api_c/txn_checkpoint.html75
-rw-r--r--bdb/docs/api_c/txn_commit.html83
-rw-r--r--bdb/docs/api_c/txn_id.html50
-rw-r--r--bdb/docs/api_c/txn_prepare.html63
-rw-r--r--bdb/docs/api_c/txn_stat.html94
-rw-r--r--bdb/docs/api_cxx/cxx_index.html148
-rw-r--r--bdb/docs/api_cxx/cxx_pindex.html516
-rw-r--r--bdb/docs/api_cxx/db_class.html109
-rw-r--r--bdb/docs/api_cxx/db_close.html123
-rw-r--r--bdb/docs/api_cxx/db_cursor.html105
-rw-r--r--bdb/docs/api_cxx/db_del.html104
-rw-r--r--bdb/docs/api_cxx/db_err.html94
-rw-r--r--bdb/docs/api_cxx/db_fd.html95
-rw-r--r--bdb/docs/api_cxx/db_get.html158
-rw-r--r--bdb/docs/api_cxx/db_get_byteswapped.html85
-rw-r--r--bdb/docs/api_cxx/db_get_type.html82
-rw-r--r--bdb/docs/api_cxx/db_join.html153
-rw-r--r--bdb/docs/api_cxx/db_key_range.html109
-rw-r--r--bdb/docs/api_cxx/db_open.html185
-rw-r--r--bdb/docs/api_cxx/db_put.html138
-rw-r--r--bdb/docs/api_cxx/db_remove.html110
-rw-r--r--bdb/docs/api_cxx/db_rename.html112
-rw-r--r--bdb/docs/api_cxx/db_set_append_recno.html69
-rw-r--r--bdb/docs/api_cxx/db_set_bt_compare.html109
-rw-r--r--bdb/docs/api_cxx/db_set_bt_minkey.html94
-rw-r--r--bdb/docs/api_cxx/db_set_bt_prefix.html110
-rw-r--r--bdb/docs/api_cxx/db_set_cachesize.html108
-rw-r--r--bdb/docs/api_cxx/db_set_dup_compare.html106
-rw-r--r--bdb/docs/api_cxx/db_set_errcall.html79
-rw-r--r--bdb/docs/api_cxx/db_set_errfile.html80
-rw-r--r--bdb/docs/api_cxx/db_set_errpfx.html63
-rw-r--r--bdb/docs/api_cxx/db_set_feedback.html97
-rw-r--r--bdb/docs/api_cxx/db_set_flags.html183
-rw-r--r--bdb/docs/api_cxx/db_set_h_ffactor.html95
-rw-r--r--bdb/docs/api_cxx/db_set_h_hash.html102
-rw-r--r--bdb/docs/api_cxx/db_set_h_nelem.html90
-rw-r--r--bdb/docs/api_cxx/db_set_lorder.html96
-rw-r--r--bdb/docs/api_cxx/db_set_malloc.html103
-rw-r--r--bdb/docs/api_cxx/db_set_pagesize.html92
-rw-r--r--bdb/docs/api_cxx/db_set_paniccall.html76
-rw-r--r--bdb/docs/api_cxx/db_set_q_extentsize.html92
-rw-r--r--bdb/docs/api_cxx/db_set_re_delim.html92
-rw-r--r--bdb/docs/api_cxx/db_set_re_len.html96
-rw-r--r--bdb/docs/api_cxx/db_set_re_pad.html90
-rw-r--r--bdb/docs/api_cxx/db_set_re_source.html132
-rw-r--r--bdb/docs/api_cxx/db_set_realloc.html103
-rw-r--r--bdb/docs/api_cxx/db_stat.html201
-rw-r--r--bdb/docs/api_cxx/db_sync.html101
-rw-r--r--bdb/docs/api_cxx/db_upgrade.html135
-rw-r--r--bdb/docs/api_cxx/db_verify.html150
-rw-r--r--bdb/docs/api_cxx/dbc_class.html49
-rw-r--r--bdb/docs/api_cxx/dbc_close.html68
-rw-r--r--bdb/docs/api_cxx/dbc_count.html59
-rw-r--r--bdb/docs/api_cxx/dbc_del.html72
-rw-r--r--bdb/docs/api_cxx/dbc_dup.html76
-rw-r--r--bdb/docs/api_cxx/dbc_get.html170
-rw-r--r--bdb/docs/api_cxx/dbc_put.html158
-rw-r--r--bdb/docs/api_cxx/dbenv_class.html76
-rw-r--r--bdb/docs/api_cxx/dbt_class.html230
-rw-r--r--bdb/docs/api_cxx/env_close.html87
-rw-r--r--bdb/docs/api_cxx/env_open.html209
-rw-r--r--bdb/docs/api_cxx/env_remove.html129
-rw-r--r--bdb/docs/api_cxx/env_set_cachesize.html89
-rw-r--r--bdb/docs/api_cxx/env_set_data_dir.html80
-rw-r--r--bdb/docs/api_cxx/env_set_errcall.html76
-rw-r--r--bdb/docs/api_cxx/env_set_errfile.html77
-rw-r--r--bdb/docs/api_cxx/env_set_error_stream.html74
-rw-r--r--bdb/docs/api_cxx/env_set_errpfx.html60
-rw-r--r--bdb/docs/api_cxx/env_set_feedback.html72
-rw-r--r--bdb/docs/api_cxx/env_set_flags.html87
-rw-r--r--bdb/docs/api_cxx/env_set_lg_bsize.html71
-rw-r--r--bdb/docs/api_cxx/env_set_lg_dir.html76
-rw-r--r--bdb/docs/api_cxx/env_set_lg_max.html71
-rw-r--r--bdb/docs/api_cxx/env_set_lk_conflicts.html71
-rw-r--r--bdb/docs/api_cxx/env_set_lk_detect.html75
-rw-r--r--bdb/docs/api_cxx/env_set_lk_max.html75
-rw-r--r--bdb/docs/api_cxx/env_set_lk_max_lockers.html71
-rw-r--r--bdb/docs/api_cxx/env_set_lk_max_locks.html70
-rw-r--r--bdb/docs/api_cxx/env_set_lk_max_objects.html71
-rw-r--r--bdb/docs/api_cxx/env_set_mp_mmapsize.html74
-rw-r--r--bdb/docs/api_cxx/env_set_mutexlocks.html62
-rw-r--r--bdb/docs/api_cxx/env_set_pageyield.html71
-rw-r--r--bdb/docs/api_cxx/env_set_paniccall.html72
-rw-r--r--bdb/docs/api_cxx/env_set_panicstate.html67
-rw-r--r--bdb/docs/api_cxx/env_set_rec_init.html73
-rw-r--r--bdb/docs/api_cxx/env_set_region_init.html80
-rw-r--r--bdb/docs/api_cxx/env_set_server.html80
-rw-r--r--bdb/docs/api_cxx/env_set_shm_key.html90
-rw-r--r--bdb/docs/api_cxx/env_set_tas_spins.html73
-rw-r--r--bdb/docs/api_cxx/env_set_tmp_dir.html92
-rw-r--r--bdb/docs/api_cxx/env_set_tx_max.html70
-rw-r--r--bdb/docs/api_cxx/env_set_tx_recover.html77
-rw-r--r--bdb/docs/api_cxx/env_set_tx_timestamp.html66
-rw-r--r--bdb/docs/api_cxx/env_set_verbose.html81
-rw-r--r--bdb/docs/api_cxx/env_strerror.html62
-rw-r--r--bdb/docs/api_cxx/env_version.html59
-rw-r--r--bdb/docs/api_cxx/except_class.html64
-rw-r--r--bdb/docs/api_cxx/get_errno.html43
-rw-r--r--bdb/docs/api_cxx/lock_class.html61
-rw-r--r--bdb/docs/api_cxx/lock_detect.html73
-rw-r--r--bdb/docs/api_cxx/lock_get.html94
-rw-r--r--bdb/docs/api_cxx/lock_id.html61
-rw-r--r--bdb/docs/api_cxx/lock_put.html63
-rw-r--r--bdb/docs/api_cxx/lock_stat.html98
-rw-r--r--bdb/docs/api_cxx/lock_vec.html127
-rw-r--r--bdb/docs/api_cxx/log_archive.html106
-rw-r--r--bdb/docs/api_cxx/log_compare.html53
-rw-r--r--bdb/docs/api_cxx/log_file.html79
-rw-r--r--bdb/docs/api_cxx/log_flush.html66
-rw-r--r--bdb/docs/api_cxx/log_get.html118
-rw-r--r--bdb/docs/api_cxx/log_put.html84
-rw-r--r--bdb/docs/api_cxx/log_register.html68
-rw-r--r--bdb/docs/api_cxx/log_stat.html96
-rw-r--r--bdb/docs/api_cxx/log_unregister.html63
-rw-r--r--bdb/docs/api_cxx/lsn_class.html38
-rw-r--r--bdb/docs/api_cxx/memp_fclose.html65
-rw-r--r--bdb/docs/api_cxx/memp_fget.html101
-rw-r--r--bdb/docs/api_cxx/memp_fopen.html160
-rw-r--r--bdb/docs/api_cxx/memp_fput.html83
-rw-r--r--bdb/docs/api_cxx/memp_fset.html76
-rw-r--r--bdb/docs/api_cxx/memp_fsync.html63
-rw-r--r--bdb/docs/api_cxx/memp_register.html102
-rw-r--r--bdb/docs/api_cxx/memp_stat.html125
-rw-r--r--bdb/docs/api_cxx/memp_sync.html87
-rw-r--r--bdb/docs/api_cxx/memp_trickle.html70
-rw-r--r--bdb/docs/api_cxx/mempfile_class.html62
-rw-r--r--bdb/docs/api_cxx/pindex.src287
-rw-r--r--bdb/docs/api_cxx/txn_abort.html67
-rw-r--r--bdb/docs/api_cxx/txn_begin.html96
-rw-r--r--bdb/docs/api_cxx/txn_checkpoint.html75
-rw-r--r--bdb/docs/api_cxx/txn_class.html59
-rw-r--r--bdb/docs/api_cxx/txn_commit.html87
-rw-r--r--bdb/docs/api_cxx/txn_id.html52
-rw-r--r--bdb/docs/api_cxx/txn_prepare.html67
-rw-r--r--bdb/docs/api_cxx/txn_stat.html100
-rw-r--r--bdb/docs/api_cxx/what.html43
-rw-r--r--bdb/docs/api_java/db_class.html92
-rw-r--r--bdb/docs/api_java/db_close.html113
-rw-r--r--bdb/docs/api_java/db_cursor.html94
-rw-r--r--bdb/docs/api_java/db_del.html94
-rw-r--r--bdb/docs/api_java/db_fd.html79
-rw-r--r--bdb/docs/api_java/db_get.html149
-rw-r--r--bdb/docs/api_java/db_get_byteswapped.html75
-rw-r--r--bdb/docs/api_java/db_get_type.html72
-rw-r--r--bdb/docs/api_java/db_join.html142
-rw-r--r--bdb/docs/api_java/db_key_range.html99
-rw-r--r--bdb/docs/api_java/db_open.html179
-rw-r--r--bdb/docs/api_java/db_put.html128
-rw-r--r--bdb/docs/api_java/db_remove.html104
-rw-r--r--bdb/docs/api_java/db_rename.html105
-rw-r--r--bdb/docs/api_java/db_set_append_recno.html75
-rw-r--r--bdb/docs/api_java/db_set_bt_compare.html105
-rw-r--r--bdb/docs/api_java/db_set_bt_minkey.html85
-rw-r--r--bdb/docs/api_java/db_set_bt_prefix.html106
-rw-r--r--bdb/docs/api_java/db_set_cachesize.html99
-rw-r--r--bdb/docs/api_java/db_set_dup_compare.html102
-rw-r--r--bdb/docs/api_java/db_set_errcall.html81
-rw-r--r--bdb/docs/api_java/db_set_errpfx.html55
-rw-r--r--bdb/docs/api_java/db_set_feedback.html95
-rw-r--r--bdb/docs/api_java/db_set_flags.html170
-rw-r--r--bdb/docs/api_java/db_set_h_ffactor.html86
-rw-r--r--bdb/docs/api_java/db_set_h_hash.html97
-rw-r--r--bdb/docs/api_java/db_set_h_nelem.html81
-rw-r--r--bdb/docs/api_java/db_set_lorder.html87
-rw-r--r--bdb/docs/api_java/db_set_pagesize.html83
-rw-r--r--bdb/docs/api_java/db_set_q_extentsize.html83
-rw-r--r--bdb/docs/api_java/db_set_re_delim.html83
-rw-r--r--bdb/docs/api_java/db_set_re_len.html87
-rw-r--r--bdb/docs/api_java/db_set_re_pad.html81
-rw-r--r--bdb/docs/api_java/db_set_re_source.html123
-rw-r--r--bdb/docs/api_java/db_stat.html185
-rw-r--r--bdb/docs/api_java/db_sync.html91
-rw-r--r--bdb/docs/api_java/db_upgrade.html125
-rw-r--r--bdb/docs/api_java/db_verify.html140
-rw-r--r--bdb/docs/api_java/dbc_class.html49
-rw-r--r--bdb/docs/api_java/dbc_close.html67
-rw-r--r--bdb/docs/api_java/dbc_count.html58
-rw-r--r--bdb/docs/api_java/dbc_del.html71
-rw-r--r--bdb/docs/api_java/dbc_dup.html75
-rw-r--r--bdb/docs/api_java/dbc_get.html168
-rw-r--r--bdb/docs/api_java/dbc_put.html157
-rw-r--r--bdb/docs/api_java/dbenv_class.html65
-rw-r--r--bdb/docs/api_java/dbt_class.html227
-rw-r--r--bdb/docs/api_java/deadlock_class.html47
-rw-r--r--bdb/docs/api_java/env_close.html82
-rw-r--r--bdb/docs/api_java/env_open.html212
-rw-r--r--bdb/docs/api_java/env_remove.html129
-rw-r--r--bdb/docs/api_java/env_set_cachesize.html86
-rw-r--r--bdb/docs/api_java/env_set_data_dir.html77
-rw-r--r--bdb/docs/api_java/env_set_errcall.html78
-rw-r--r--bdb/docs/api_java/env_set_error_stream.html69
-rw-r--r--bdb/docs/api_java/env_set_errpfx.html52
-rw-r--r--bdb/docs/api_java/env_set_feedback.html76
-rw-r--r--bdb/docs/api_java/env_set_flags.html84
-rw-r--r--bdb/docs/api_java/env_set_lg_bsize.html71
-rw-r--r--bdb/docs/api_java/env_set_lg_dir.html73
-rw-r--r--bdb/docs/api_java/env_set_lg_max.html71
-rw-r--r--bdb/docs/api_java/env_set_lk_conflicts.html68
-rw-r--r--bdb/docs/api_java/env_set_lk_detect.html74
-rw-r--r--bdb/docs/api_java/env_set_lk_max.html74
-rw-r--r--bdb/docs/api_java/env_set_lk_max_lockers.html70
-rw-r--r--bdb/docs/api_java/env_set_lk_max_locks.html69
-rw-r--r--bdb/docs/api_java/env_set_lk_max_objects.html70
-rw-r--r--bdb/docs/api_java/env_set_mp_mmapsize.html66
-rw-r--r--bdb/docs/api_java/env_set_mutexlocks.html59
-rw-r--r--bdb/docs/api_java/env_set_pageyield.html69
-rw-r--r--bdb/docs/api_java/env_set_panicstate.html65
-rw-r--r--bdb/docs/api_java/env_set_rec_init.html78
-rw-r--r--bdb/docs/api_java/env_set_region_init.html78
-rw-r--r--bdb/docs/api_java/env_set_server.html77
-rw-r--r--bdb/docs/api_java/env_set_shm_key.html87
-rw-r--r--bdb/docs/api_java/env_set_tas_spins.html71
-rw-r--r--bdb/docs/api_java/env_set_tmp_dir.html89
-rw-r--r--bdb/docs/api_java/env_set_tx_max.html69
-rw-r--r--bdb/docs/api_java/env_set_tx_recover.html84
-rw-r--r--bdb/docs/api_java/env_set_tx_timestamp.html64
-rw-r--r--bdb/docs/api_java/env_set_verbose.html77
-rw-r--r--bdb/docs/api_java/env_strerror.html58
-rw-r--r--bdb/docs/api_java/env_version.html58
-rw-r--r--bdb/docs/api_java/except_class.html52
-rw-r--r--bdb/docs/api_java/get_errno.html46
-rw-r--r--bdb/docs/api_java/java_index.html131
-rw-r--r--bdb/docs/api_java/java_pindex.html478
-rw-r--r--bdb/docs/api_java/lock_class.html54
-rw-r--r--bdb/docs/api_java/lock_detect.html69
-rw-r--r--bdb/docs/api_java/lock_get.html92
-rw-r--r--bdb/docs/api_java/lock_id.html59
-rw-r--r--bdb/docs/api_java/lock_put.html61
-rw-r--r--bdb/docs/api_java/lock_stat.html94
-rw-r--r--bdb/docs/api_java/lock_vec.html33
-rw-r--r--bdb/docs/api_java/log_archive.html92
-rw-r--r--bdb/docs/api_java/log_compare.html52
-rw-r--r--bdb/docs/api_java/log_file.html77
-rw-r--r--bdb/docs/api_java/log_flush.html65
-rw-r--r--bdb/docs/api_java/log_get.html117
-rw-r--r--bdb/docs/api_java/log_put.html83
-rw-r--r--bdb/docs/api_java/log_register.html67
-rw-r--r--bdb/docs/api_java/log_stat.html93
-rw-r--r--bdb/docs/api_java/log_unregister.html62
-rw-r--r--bdb/docs/api_java/lsn_class.html37
-rw-r--r--bdb/docs/api_java/mem_class.html48
-rw-r--r--bdb/docs/api_java/memp_fclose.html33
-rw-r--r--bdb/docs/api_java/memp_fget.html33
-rw-r--r--bdb/docs/api_java/memp_fopen.html33
-rw-r--r--bdb/docs/api_java/memp_fput.html33
-rw-r--r--bdb/docs/api_java/memp_fset.html33
-rw-r--r--bdb/docs/api_java/memp_fsync.html33
-rw-r--r--bdb/docs/api_java/memp_register.html33
-rw-r--r--bdb/docs/api_java/memp_stat.html102
-rw-r--r--bdb/docs/api_java/memp_sync.html33
-rw-r--r--bdb/docs/api_java/memp_trickle.html60
-rw-r--r--bdb/docs/api_java/pindex.src249
-rw-r--r--bdb/docs/api_java/runrec_class.html50
-rw-r--r--bdb/docs/api_java/txn_abort.html65
-rw-r--r--bdb/docs/api_java/txn_begin.html93
-rw-r--r--bdb/docs/api_java/txn_checkpoint.html74
-rw-r--r--bdb/docs/api_java/txn_class.html58
-rw-r--r--bdb/docs/api_java/txn_commit.html85
-rw-r--r--bdb/docs/api_java/txn_id.html51
-rw-r--r--bdb/docs/api_java/txn_prepare.html65
-rw-r--r--bdb/docs/api_java/txn_stat.html95
-rw-r--r--bdb/docs/api_tcl/db_close.html59
-rw-r--r--bdb/docs/api_tcl/db_count.html38
-rw-r--r--bdb/docs/api_tcl/db_cursor.html42
-rw-r--r--bdb/docs/api_tcl/db_del.html47
-rw-r--r--bdb/docs/api_tcl/db_get.html98
-rw-r--r--bdb/docs/api_tcl/db_get_join.html45
-rw-r--r--bdb/docs/api_tcl/db_get_type.html34
-rw-r--r--bdb/docs/api_tcl/db_is_byteswapped.html37
-rw-r--r--bdb/docs/api_tcl/db_join.html48
-rw-r--r--bdb/docs/api_tcl/db_open.html300
-rw-r--r--bdb/docs/api_tcl/db_put.html74
-rw-r--r--bdb/docs/api_tcl/db_remove.html49
-rw-r--r--bdb/docs/api_tcl/db_rename.html50
-rw-r--r--bdb/docs/api_tcl/db_stat.html41
-rw-r--r--bdb/docs/api_tcl/db_sync.html36
-rw-r--r--bdb/docs/api_tcl/dbc_close.html36
-rw-r--r--bdb/docs/api_tcl/dbc_del.html38
-rw-r--r--bdb/docs/api_tcl/dbc_dup.html46
-rw-r--r--bdb/docs/api_tcl/dbc_get.html168
-rw-r--r--bdb/docs/api_tcl/dbc_put.html133
-rw-r--r--bdb/docs/api_tcl/env_close.html42
-rw-r--r--bdb/docs/api_tcl/env_open.html168
-rw-r--r--bdb/docs/api_tcl/env_remove.html70
-rw-r--r--bdb/docs/api_tcl/pindex.src27
-rw-r--r--bdb/docs/api_tcl/tcl_index.html49
-rw-r--r--bdb/docs/api_tcl/tcl_pindex.html258
-rw-r--r--bdb/docs/api_tcl/txn.html62
-rw-r--r--bdb/docs/api_tcl/txn_abort.html45
-rw-r--r--bdb/docs/api_tcl/txn_commit.html68
-rw-r--r--bdb/docs/api_tcl/version.html39
-rw-r--r--bdb/docs/images/api.gif  bin  0 -> 121 bytes
-rw-r--r--bdb/docs/images/next.gif  bin  0 -> 225 bytes
-rw-r--r--bdb/docs/images/prev.gif  bin  0 -> 234 bytes
-rw-r--r--bdb/docs/images/ps.gif  bin  0 -> 244 bytes
-rw-r--r--bdb/docs/images/ref.gif  bin  0 -> 119 bytes
-rw-r--r--bdb/docs/images/sleepycat.gif  bin  0 -> 6211 bytes
-rw-r--r--bdb/docs/index.html75
-rw-r--r--bdb/docs/ref/am/close.html43
-rw-r--r--bdb/docs/ref/am/count.html28
-rw-r--r--bdb/docs/ref/am/curclose.html28
-rw-r--r--bdb/docs/ref/am/curdel.html26
-rw-r--r--bdb/docs/ref/am/curdup.html34
-rw-r--r--bdb/docs/ref/am/curget.html74
-rw-r--r--bdb/docs/ref/am/curput.html40
-rw-r--r--bdb/docs/ref/am/cursor.html41
-rw-r--r--bdb/docs/ref/am/delete.html28
-rw-r--r--bdb/docs/ref/am/error.html61
-rw-r--r--bdb/docs/ref/am/get.html39
-rw-r--r--bdb/docs/ref/am/join.html184
-rw-r--r--bdb/docs/ref/am/open.html47
-rw-r--r--bdb/docs/ref/am/opensub.html64
-rw-r--r--bdb/docs/ref/am/ops.html36
-rw-r--r--bdb/docs/ref/am/partial.html134
-rw-r--r--bdb/docs/ref/am/put.html36
-rw-r--r--bdb/docs/ref/am/stability.html49
-rw-r--r--bdb/docs/ref/am/stat.html36
-rw-r--r--bdb/docs/ref/am/sync.html38
-rw-r--r--bdb/docs/ref/am/upgrade.html50
-rw-r--r--bdb/docs/ref/am/verify.html50
-rw-r--r--bdb/docs/ref/am_conf/bt_compare.html85
-rw-r--r--bdb/docs/ref/am_conf/bt_minkey.html53
-rw-r--r--bdb/docs/ref/am_conf/bt_prefix.html66
-rw-r--r--bdb/docs/ref/am_conf/bt_recnum.html34
-rw-r--r--bdb/docs/ref/am_conf/byteorder.html38
-rw-r--r--bdb/docs/ref/am_conf/cachesize.html86
-rw-r--r--bdb/docs/ref/am_conf/dup.html71
-rw-r--r--bdb/docs/ref/am_conf/extentsize.html38
-rw-r--r--bdb/docs/ref/am_conf/h_ffactor.html31
-rw-r--r--bdb/docs/ref/am_conf/h_hash.html39
-rw-r--r--bdb/docs/ref/am_conf/h_nelem.html32
-rw-r--r--bdb/docs/ref/am_conf/intro.html45
-rw-r--r--bdb/docs/ref/am_conf/logrec.html45
-rw-r--r--bdb/docs/ref/am_conf/malloc.html31
-rw-r--r--bdb/docs/ref/am_conf/pagesize.html66
-rw-r--r--bdb/docs/ref/am_conf/re_source.html62
-rw-r--r--bdb/docs/ref/am_conf/recno.html69
-rw-r--r--bdb/docs/ref/am_conf/renumber.html80
-rw-r--r--bdb/docs/ref/am_conf/select.html116
-rw-r--r--bdb/docs/ref/arch/apis.html74
-rw-r--r--bdb/docs/ref/arch/bigpic.gif  bin  0 -> 2589 bytes
-rw-r--r--bdb/docs/ref/arch/bigpic.html114
-rw-r--r--bdb/docs/ref/arch/progmodel.html41
-rw-r--r--bdb/docs/ref/arch/script.html29
-rw-r--r--bdb/docs/ref/arch/smallpic.gif  bin  0 -> 1613 bytes
-rw-r--r--bdb/docs/ref/arch/utilities.html62
-rw-r--r--bdb/docs/ref/build_unix/aix.html60
-rw-r--r--bdb/docs/ref/build_unix/conf.html143
-rw-r--r--bdb/docs/ref/build_unix/flags.html60
-rw-r--r--bdb/docs/ref/build_unix/freebsd.html57
-rw-r--r--bdb/docs/ref/build_unix/hpux.html89
-rw-r--r--bdb/docs/ref/build_unix/install.html60
-rw-r--r--bdb/docs/ref/build_unix/intro.html60
-rw-r--r--bdb/docs/ref/build_unix/irix.html30
-rw-r--r--bdb/docs/ref/build_unix/linux.html30
-rw-r--r--bdb/docs/ref/build_unix/notes.html138
-rw-r--r--bdb/docs/ref/build_unix/osf1.html30
-rw-r--r--bdb/docs/ref/build_unix/qnx.html58
-rw-r--r--bdb/docs/ref/build_unix/sco.html29
-rw-r--r--bdb/docs/ref/build_unix/shlib.html94
-rw-r--r--bdb/docs/ref/build_unix/solaris.html90
-rw-r--r--bdb/docs/ref/build_unix/sunos.html30
-rw-r--r--bdb/docs/ref/build_unix/test.html49
-rw-r--r--bdb/docs/ref/build_unix/ultrix.html27
-rw-r--r--bdb/docs/ref/build_vxworks/faq.html85
-rw-r--r--bdb/docs/ref/build_vxworks/intro.html86
-rw-r--r--bdb/docs/ref/build_vxworks/notes.html56
-rw-r--r--bdb/docs/ref/build_win/faq.html49
-rw-r--r--bdb/docs/ref/build_win/intro.html143
-rw-r--r--bdb/docs/ref/build_win/notes.html56
-rw-r--r--bdb/docs/ref/build_win/test.html77
-rw-r--r--bdb/docs/ref/cam/intro.html72
-rw-r--r--bdb/docs/ref/debug/common.html109
-rw-r--r--bdb/docs/ref/debug/compile.html43
-rw-r--r--bdb/docs/ref/debug/intro.html58
-rw-r--r--bdb/docs/ref/debug/printlog.html160
-rw-r--r--bdb/docs/ref/debug/runtime.html47
-rw-r--r--bdb/docs/ref/distrib/layout.html74
-rw-r--r--bdb/docs/ref/dumpload/format.html69
-rw-r--r--bdb/docs/ref/dumpload/text.html32
-rw-r--r--bdb/docs/ref/dumpload/utility.html45
-rw-r--r--bdb/docs/ref/env/create.html73
-rw-r--r--bdb/docs/ref/env/error.html57
-rw-r--r--bdb/docs/ref/env/intro.html56
-rw-r--r--bdb/docs/ref/env/naming.html145
-rw-r--r--bdb/docs/ref/env/open.html30
-rw-r--r--bdb/docs/ref/env/region.html66
-rw-r--r--bdb/docs/ref/env/remote.html48
-rw-r--r--bdb/docs/ref/env/security.html54
-rw-r--r--bdb/docs/ref/install/file.html37
-rw-r--r--bdb/docs/ref/install/magic.s5.be.txt87
-rw-r--r--bdb/docs/ref/install/magic.s5.le.txt87
-rw-r--r--bdb/docs/ref/install/magic.txt56
-rw-r--r--bdb/docs/ref/intro/data.html54
-rw-r--r--bdb/docs/ref/intro/dbis.html159
-rw-r--r--bdb/docs/ref/intro/dbisnot.html146
-rw-r--r--bdb/docs/ref/intro/distrib.html28
-rw-r--r--bdb/docs/ref/intro/need.html60
-rw-r--r--bdb/docs/ref/intro/products.html69
-rw-r--r--bdb/docs/ref/intro/terrain.html248
-rw-r--r--bdb/docs/ref/intro/what.html53
-rw-r--r--bdb/docs/ref/intro/where.html39
-rw-r--r--bdb/docs/ref/java/compat.html34
-rw-r--r--bdb/docs/ref/java/conf.html82
-rw-r--r--bdb/docs/ref/java/faq.html31
-rw-r--r--bdb/docs/ref/java/program.html72
-rw-r--r--bdb/docs/ref/lock/am_conv.html129
-rw-r--r--bdb/docs/ref/lock/cam_conv.html53
-rw-r--r--bdb/docs/ref/lock/config.html46
-rw-r--r--bdb/docs/ref/lock/dead.html93
-rw-r--r--bdb/docs/ref/lock/intro.html89
-rw-r--r--bdb/docs/ref/lock/max.html88
-rw-r--r--bdb/docs/ref/lock/nondb.html50
-rw-r--r--bdb/docs/ref/lock/notxn.html46
-rw-r--r--bdb/docs/ref/lock/page.html62
-rw-r--r--bdb/docs/ref/lock/stdmode.html61
-rw-r--r--bdb/docs/ref/lock/twopl.html50
-rw-r--r--bdb/docs/ref/log/config.html40
-rw-r--r--bdb/docs/ref/log/intro.html58
-rw-r--r--bdb/docs/ref/log/limits.html47
-rw-r--r--bdb/docs/ref/mp/config.html55
-rw-r--r--bdb/docs/ref/mp/intro.html59
-rw-r--r--bdb/docs/ref/perl/intro.html42
-rw-r--r--bdb/docs/ref/pindex.src212
-rw-r--r--bdb/docs/ref/program/appsignals.html35
-rw-r--r--bdb/docs/ref/program/byteorder.html31
-rw-r--r--bdb/docs/ref/program/compatible.html32
-rw-r--r--bdb/docs/ref/program/copy.html63
-rw-r--r--bdb/docs/ref/program/dbsizes.html45
-rw-r--r--bdb/docs/ref/program/diskspace.html145
-rw-r--r--bdb/docs/ref/program/environ.html33
-rw-r--r--bdb/docs/ref/program/errorret.html108
-rw-r--r--bdb/docs/ref/program/extending.html242
-rw-r--r--bdb/docs/ref/program/mt.html95
-rw-r--r--bdb/docs/ref/program/namespace.html44
-rw-r--r--bdb/docs/ref/program/recimp.html49
-rw-r--r--bdb/docs/ref/program/runtime.html57
-rw-r--r--bdb/docs/ref/program/scope.html71
-rw-r--r--bdb/docs/ref/program/solaris.txt213
-rw-r--r--bdb/docs/ref/program/version.html45
-rw-r--r--bdb/docs/ref/refs/bdb_usenix.html1120
-rw-r--r--bdb/docs/ref/refs/bdb_usenix.ps1441
-rw-r--r--bdb/docs/ref/refs/embedded.html672
-rw-r--r--bdb/docs/ref/refs/hash_usenix.ps12209
-rw-r--r--bdb/docs/ref/refs/libtp_usenix.ps12340
-rw-r--r--bdb/docs/ref/refs/refs.html75
-rw-r--r--bdb/docs/ref/refs/witold.html16
-rw-r--r--bdb/docs/ref/rpc/client.html75
-rw-r--r--bdb/docs/ref/rpc/intro.html62
-rw-r--r--bdb/docs/ref/rpc/server.html54
-rw-r--r--bdb/docs/ref/sendmail/intro.html51
-rw-r--r--bdb/docs/ref/simple_tut/close.html102
-rw-r--r--bdb/docs/ref/simple_tut/del.html93
-rw-r--r--bdb/docs/ref/simple_tut/errors.html46
-rw-r--r--bdb/docs/ref/simple_tut/example.txt73
-rw-r--r--bdb/docs/ref/simple_tut/get.html97
-rw-r--r--bdb/docs/ref/simple_tut/handles.html29
-rw-r--r--bdb/docs/ref/simple_tut/intro.html40
-rw-r--r--bdb/docs/ref/simple_tut/keydata.html48
-rw-r--r--bdb/docs/ref/simple_tut/open.html90
-rw-r--r--bdb/docs/ref/simple_tut/put.html127
-rw-r--r--bdb/docs/ref/tcl/error.html69
-rw-r--r--bdb/docs/ref/tcl/faq.html60
-rw-r--r--bdb/docs/ref/tcl/intro.html70
-rw-r--r--bdb/docs/ref/tcl/program.html33
-rw-r--r--bdb/docs/ref/tcl/using.html53
-rw-r--r--bdb/docs/ref/test/faq.html32
-rw-r--r--bdb/docs/ref/test/run.html78
-rw-r--r--bdb/docs/ref/toc.html310
-rw-r--r--bdb/docs/ref/transapp/admin.html47
-rw-r--r--bdb/docs/ref/transapp/app.html117
-rw-r--r--bdb/docs/ref/transapp/archival.html149
-rw-r--r--bdb/docs/ref/transapp/checkpoint.html127
-rw-r--r--bdb/docs/ref/transapp/cursor.html169
-rw-r--r--bdb/docs/ref/transapp/data_open.html119
-rw-r--r--bdb/docs/ref/transapp/deadlock.html92
-rw-r--r--bdb/docs/ref/transapp/env_open.html174
-rw-r--r--bdb/docs/ref/transapp/filesys.html62
-rw-r--r--bdb/docs/ref/transapp/inc.html201
-rw-r--r--bdb/docs/ref/transapp/intro.html42
-rw-r--r--bdb/docs/ref/transapp/logfile.html104
-rw-r--r--bdb/docs/ref/transapp/put.html151
-rw-r--r--bdb/docs/ref/transapp/read.html40
-rw-r--r--bdb/docs/ref/transapp/reclimit.html106
-rw-r--r--bdb/docs/ref/transapp/recovery.html91
-rw-r--r--bdb/docs/ref/transapp/term.html60
-rw-r--r--bdb/docs/ref/transapp/throughput.html117
-rw-r--r--bdb/docs/ref/transapp/transapp.txt492
-rw-r--r--bdb/docs/ref/transapp/why.html49
-rw-r--r--bdb/docs/ref/transapp/writetest.txt100
-rw-r--r--bdb/docs/ref/txn/config.html37
-rw-r--r--bdb/docs/ref/txn/intro.html86
-rw-r--r--bdb/docs/ref/txn/limits.html66
-rw-r--r--bdb/docs/ref/txn/nested.html66
-rw-r--r--bdb/docs/ref/txn/other.html67
-rw-r--r--bdb/docs/ref/upgrade.2.0/convert.html74
-rw-r--r--bdb/docs/ref/upgrade.2.0/disk.html27
-rw-r--r--bdb/docs/ref/upgrade.2.0/intro.html32
-rw-r--r--bdb/docs/ref/upgrade.2.0/system.html84
-rw-r--r--bdb/docs/ref/upgrade.2.0/toc.html20
-rw-r--r--bdb/docs/ref/upgrade.3.0/close.html34
-rw-r--r--bdb/docs/ref/upgrade.3.0/cxx.html31
-rw-r--r--bdb/docs/ref/upgrade.3.0/db.html48
-rw-r--r--bdb/docs/ref/upgrade.3.0/db_cxx.html47
-rw-r--r--bdb/docs/ref/upgrade.3.0/dbenv.html68
-rw-r--r--bdb/docs/ref/upgrade.3.0/dbenv_cxx.html72
-rw-r--r--bdb/docs/ref/upgrade.3.0/dbinfo.html72
-rw-r--r--bdb/docs/ref/upgrade.3.0/disk.html30
-rw-r--r--bdb/docs/ref/upgrade.3.0/eacces.html28
-rw-r--r--bdb/docs/ref/upgrade.3.0/eagain.html34
-rw-r--r--bdb/docs/ref/upgrade.3.0/envopen.html156
-rw-r--r--bdb/docs/ref/upgrade.3.0/func.html69
-rw-r--r--bdb/docs/ref/upgrade.3.0/intro.html26
-rw-r--r--bdb/docs/ref/upgrade.3.0/java.html34
-rw-r--r--bdb/docs/ref/upgrade.3.0/join.html28
-rw-r--r--bdb/docs/ref/upgrade.3.0/jump_set.html48
-rw-r--r--bdb/docs/ref/upgrade.3.0/lock_detect.html24
-rw-r--r--bdb/docs/ref/upgrade.3.0/lock_notheld.html27
-rw-r--r--bdb/docs/ref/upgrade.3.0/lock_put.html25
-rw-r--r--bdb/docs/ref/upgrade.3.0/lock_stat.html24
-rw-r--r--bdb/docs/ref/upgrade.3.0/log_register.html25
-rw-r--r--bdb/docs/ref/upgrade.3.0/log_stat.html23
-rw-r--r--bdb/docs/ref/upgrade.3.0/memp_stat.html26
-rw-r--r--bdb/docs/ref/upgrade.3.0/open.html65
-rw-r--r--bdb/docs/ref/upgrade.3.0/rmw.html31
-rw-r--r--bdb/docs/ref/upgrade.3.0/stat.html24
-rw-r--r--bdb/docs/ref/upgrade.3.0/toc.html47
-rw-r--r--bdb/docs/ref/upgrade.3.0/txn_begin.html25
-rw-r--r--bdb/docs/ref/upgrade.3.0/txn_commit.html25
-rw-r--r--bdb/docs/ref/upgrade.3.0/txn_stat.html23
-rw-r--r--bdb/docs/ref/upgrade.3.0/value_set.html41
-rw-r--r--bdb/docs/ref/upgrade.3.0/xa.html33
-rw-r--r--bdb/docs/ref/upgrade.3.1/btstat.html50
-rw-r--r--bdb/docs/ref/upgrade.3.1/config.html35
-rw-r--r--bdb/docs/ref/upgrade.3.1/disk.html34
-rw-r--r--bdb/docs/ref/upgrade.3.1/dup.html31
-rw-r--r--bdb/docs/ref/upgrade.3.1/env.html53
-rw-r--r--bdb/docs/ref/upgrade.3.1/intro.html26
-rw-r--r--bdb/docs/ref/upgrade.3.1/log_register.html28
-rw-r--r--bdb/docs/ref/upgrade.3.1/logalloc.html27
-rw-r--r--bdb/docs/ref/upgrade.3.1/memp_register.html30
-rw-r--r--bdb/docs/ref/upgrade.3.1/put.html63
-rw-r--r--bdb/docs/ref/upgrade.3.1/set_feedback.html27
-rw-r--r--bdb/docs/ref/upgrade.3.1/set_paniccall.html27
-rw-r--r--bdb/docs/ref/upgrade.3.1/set_tx_recover.html36
-rw-r--r--bdb/docs/ref/upgrade.3.1/sysmem.html25
-rw-r--r--bdb/docs/ref/upgrade.3.1/tcl.html30
-rw-r--r--bdb/docs/ref/upgrade.3.1/tmp.html34
-rw-r--r--bdb/docs/ref/upgrade.3.1/toc.html33
-rw-r--r--bdb/docs/ref/upgrade.3.1/txn_check.html26
-rw-r--r--bdb/docs/ref/upgrade.3.2/callback.html39
-rw-r--r--bdb/docs/ref/upgrade.3.2/db_dump.html29
-rw-r--r--bdb/docs/ref/upgrade.3.2/disk.html28
-rw-r--r--bdb/docs/ref/upgrade.3.2/handle.html27
-rw-r--r--bdb/docs/ref/upgrade.3.2/incomplete.html39
-rw-r--r--bdb/docs/ref/upgrade.3.2/intro.html26
-rw-r--r--bdb/docs/ref/upgrade.3.2/mutexlock.html28
-rw-r--r--bdb/docs/ref/upgrade.3.2/notfound.html25
-rw-r--r--bdb/docs/ref/upgrade.3.2/renumber.html39
-rw-r--r--bdb/docs/ref/upgrade.3.2/set_flags.html35
-rw-r--r--bdb/docs/ref/upgrade.3.2/toc.html27
-rw-r--r--bdb/docs/ref/upgrade.3.2/tx_recover.html32
-rw-r--r--bdb/docs/ref/upgrade/process.html108
-rw-r--r--bdb/docs/ref/xa/config.html79
-rw-r--r--bdb/docs/ref/xa/faq.html55
-rw-r--r--bdb/docs/ref/xa/intro.html61
-rw-r--r--bdb/docs/sleepycat/contact.html107
-rw-r--r--bdb/docs/sleepycat/legal.html56
-rw-r--r--bdb/docs/sleepycat/license.html109
-rw-r--r--bdb/docs/utility/berkeley_db_svc.html88
-rw-r--r--bdb/docs/utility/db_archive.html85
-rw-r--r--bdb/docs/utility/db_checkpoint.html82
-rw-r--r--bdb/docs/utility/db_deadlock.html85
-rw-r--r--bdb/docs/utility/db_dump.html128
-rw-r--r--bdb/docs/utility/db_load.html151
-rw-r--r--bdb/docs/utility/db_printlog.html69
-rw-r--r--bdb/docs/utility/db_recover.html97
-rw-r--r--bdb/docs/utility/db_stat.html104
-rw-r--r--bdb/docs/utility/db_upgrade.html93
-rw-r--r--bdb/docs/utility/db_verify.html73
-rw-r--r--bdb/docs/utility/index.html28
-rw-r--r--bdb/env/db_salloc.c360
-rw-r--r--bdb/env/db_shash.c124
-rw-r--r--bdb/env/env_method.c461
-rw-r--r--bdb/env/env_open.c1064
-rw-r--r--bdb/env/env_recover.c449
-rw-r--r--bdb/env/env_region.c1205
-rw-r--r--bdb/examples_c/README23
-rw-r--r--bdb/examples_c/ex_access.c171
-rw-r--r--bdb/examples_c/ex_btrec.c241
-rw-r--r--bdb/examples_c/ex_dbclient.c248
-rw-r--r--bdb/examples_c/ex_env.c170
-rw-r--r--bdb/examples_c/ex_lock.c235
-rw-r--r--bdb/examples_c/ex_mpool.c280
-rw-r--r--bdb/examples_c/ex_thread.c604
-rw-r--r--bdb/examples_c/ex_tpcb.c811
-rw-r--r--bdb/examples_c/ex_tpcb.h39
-rw-r--r--bdb/examples_cxx/AccessExample.cpp151
-rw-r--r--bdb/examples_cxx/BtRecExample.cpp247
-rw-r--r--bdb/examples_cxx/EnvExample.cpp122
-rw-r--r--bdb/examples_cxx/LockExample.cpp236
-rw-r--r--bdb/examples_cxx/MpoolExample.cpp210
-rw-r--r--bdb/examples_cxx/TpcbExample.cpp666
-rw-r--r--bdb/hash/hash.c2096
-rw-r--r--bdb/hash/hash.src361
-rw-r--r--bdb/hash/hash_auto.c2023
-rw-r--r--bdb/hash/hash_conv.c112
-rw-r--r--bdb/hash/hash_dup.c805
-rw-r--r--bdb/hash/hash_func.c242
-rw-r--r--bdb/hash/hash_meta.c121
-rw-r--r--bdb/hash/hash_method.c126
-rw-r--r--bdb/hash/hash_page.c1655
-rw-r--r--bdb/hash/hash_rec.c1078
-rw-r--r--bdb/hash/hash_reclaim.c68
-rw-r--r--bdb/hash/hash_stat.c329
-rw-r--r--bdb/hash/hash_upgrade.c271
-rw-r--r--bdb/hash/hash_verify.c1051
-rw-r--r--bdb/hsearch/hsearch.c148
-rw-r--r--bdb/include/btree.h317
-rw-r--r--bdb/include/btree_auto.h267
-rw-r--r--bdb/include/btree_ext.h122
-rw-r--r--bdb/include/clib_ext.h38
-rw-r--r--bdb/include/common_ext.h44
-rw-r--r--bdb/include/crdel_auto.h88
-rw-r--r--bdb/include/cxx_int.h96
-rw-r--r--bdb/include/db.src1375
-rw-r--r--bdb/include/db_185.h175
-rw-r--r--bdb/include/db_am.h131
-rw-r--r--bdb/include/db_auto.h140
-rw-r--r--bdb/include/db_cxx.h652
-rw-r--r--bdb/include/db_dispatch.h95
-rw-r--r--bdb/include/db_ext.h208
-rw-r--r--bdb/include/db_int.src397
-rw-r--r--bdb/include/db_join.h30
-rw-r--r--bdb/include/db_page.h576
-rw-r--r--bdb/include/db_server.h762
-rw-r--r--bdb/include/db_server_int.h85
-rw-r--r--bdb/include/db_shash.h77
-rw-r--r--bdb/include/db_swap.h115
-rw-r--r--bdb/include/db_upgrade.h174
-rw-r--r--bdb/include/db_verify.h191
-rw-r--r--bdb/include/debug.h104
-rw-r--r--bdb/include/env_ext.h35
-rw-r--r--bdb/include/gen_client_ext.h121
-rw-r--r--bdb/include/gen_server_ext.h106
-rw-r--r--bdb/include/hash.h140
-rw-r--r--bdb/include/hash_auto.h248
-rw-r--r--bdb/include/hash_ext.h106
-rw-r--r--bdb/include/lock.h190
-rw-r--r--bdb/include/lock_ext.h39
-rw-r--r--bdb/include/log.h208
-rw-r--r--bdb/include/log_auto.h39
-rw-r--r--bdb/include/log_ext.h33
-rw-r--r--bdb/include/mp.h244
-rw-r--r--bdb/include/mp_ext.h33
-rw-r--r--bdb/include/mutex.h744
-rw-r--r--bdb/include/mutex_ext.h31
-rw-r--r--bdb/include/os.h46
-rw-r--r--bdb/include/os_ext.h62
-rw-r--r--bdb/include/os_jump.h34
-rw-r--r--bdb/include/qam.h150
-rw-r--r--bdb/include/qam_auto.h129
-rw-r--r--bdb/include/qam_ext.h56
-rw-r--r--bdb/include/queue.h319
-rw-r--r--bdb/include/region.h292
-rw-r--r--bdb/include/rpc_client_ext.h19
-rw-r--r--bdb/include/rpc_server_ext.h21
-rw-r--r--bdb/include/shqueue.h337
-rw-r--r--bdb/include/tcl_db.h219
-rw-r--r--bdb/include/tcl_ext.h89
-rw-r--r--bdb/include/txn.h150
-rw-r--r--bdb/include/txn_auto.h114
-rw-r--r--bdb/include/txn_ext.h24
-rw-r--r--bdb/include/xa.h179
-rw-r--r--bdb/include/xa_ext.h17
-rw-r--r--bdb/java/src/com/sleepycat/db/Db.java710
-rw-r--r--bdb/java/src/com/sleepycat/db/DbAppendRecno.java22
-rw-r--r--bdb/java/src/com/sleepycat/db/DbBtreeCompare.java21
-rw-r--r--bdb/java/src/com/sleepycat/db/DbBtreePrefix.java21
-rw-r--r--bdb/java/src/com/sleepycat/db/DbBtreeStat.java40
-rw-r--r--bdb/java/src/com/sleepycat/db/DbConstants.java217
-rw-r--r--bdb/java/src/com/sleepycat/db/DbDeadlockException.java28
-rw-r--r--bdb/java/src/com/sleepycat/db/DbDupCompare.java21
-rw-r--r--bdb/java/src/com/sleepycat/db/DbEnv.java392
-rw-r--r--bdb/java/src/com/sleepycat/db/DbEnvFeedback.java19
-rw-r--r--bdb/java/src/com/sleepycat/db/DbErrcall.java23
-rw-r--r--bdb/java/src/com/sleepycat/db/DbException.java56
-rw-r--r--bdb/java/src/com/sleepycat/db/DbFeedback.java23
-rw-r--r--bdb/java/src/com/sleepycat/db/DbHash.java21
-rw-r--r--bdb/java/src/com/sleepycat/db/DbHashStat.java37
-rw-r--r--bdb/java/src/com/sleepycat/db/DbKeyRange.java23
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLock.java38
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLockStat.java30
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLogStat.java35
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLsn.java42
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMemoryException.java28
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMpoolFStat.java28
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMpoolStat.java42
-rw-r--r--bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java58
-rw-r--r--bdb/java/src/com/sleepycat/db/DbQueueStat.java32
-rw-r--r--bdb/java/src/com/sleepycat/db/DbRecoveryInit.java23
-rw-r--r--bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java32
-rw-r--r--bdb/java/src/com/sleepycat/db/DbTxn.java47
-rw-r--r--bdb/java/src/com/sleepycat/db/DbTxnRecover.java22
-rw-r--r--bdb/java/src/com/sleepycat/db/DbTxnStat.java40
-rw-r--r--bdb/java/src/com/sleepycat/db/Dbc.java56
-rw-r--r--bdb/java/src/com/sleepycat/db/Dbt.java110
-rw-r--r--bdb/java/src/com/sleepycat/examples/AccessExample.java186
-rw-r--r--bdb/java/src/com/sleepycat/examples/BtRecExample.java348
-rw-r--r--bdb/java/src/com/sleepycat/examples/EnvExample.java128
-rw-r--r--bdb/java/src/com/sleepycat/examples/LockExample.java235
-rw-r--r--bdb/java/src/com/sleepycat/examples/TpcbExample.java831
-rw-r--r--bdb/libdb_java/checkapi.prl132
-rw-r--r--bdb/libdb_java/com_sleepycat_db_Db.h349
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbEnv.h509
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbLock.h29
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbLsn.h29
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbTxn.h53
-rw-r--r--bdb/libdb_java/com_sleepycat_db_Dbc.h69
-rw-r--r--bdb/libdb_java/com_sleepycat_db_Dbt.h157
-rw-r--r--bdb/libdb_java/java_Db.c964
-rw-r--r--bdb/libdb_java/java_DbEnv.c1300
-rw-r--r--bdb/libdb_java/java_DbLock.c55
-rw-r--r--bdb/libdb_java/java_DbLsn.c43
-rw-r--r--bdb/libdb_java/java_DbTxn.c82
-rw-r--r--bdb/libdb_java/java_Dbc.c196
-rw-r--r--bdb/libdb_java/java_Dbt.c176
-rw-r--r--bdb/libdb_java/java_info.c1001
-rw-r--r--bdb/libdb_java/java_info.h200
-rw-r--r--bdb/libdb_java/java_locked.c294
-rw-r--r--bdb/libdb_java/java_locked.h98
-rw-r--r--bdb/libdb_java/java_util.c556
-rw-r--r--bdb/libdb_java/java_util.h359
-rw-r--r--bdb/lock/Design293
-rw-r--r--bdb/lock/lock.c1439
-rw-r--r--bdb/lock/lock_conflict.c34
-rw-r--r--bdb/lock/lock_deadlock.c637
-rw-r--r--bdb/lock/lock_method.c148
-rw-r--r--bdb/lock/lock_region.c430
-rw-r--r--bdb/lock/lock_stat.c308
-rw-r--r--bdb/lock/lock_util.c138
-rw-r--r--bdb/log/log.c653
-rw-r--r--bdb/log/log.src46
-rw-r--r--bdb/log/log_archive.c447
-rw-r--r--bdb/log/log_auto.c326
-rw-r--r--bdb/log/log_compare.c34
-rw-r--r--bdb/log/log_findckp.c135
-rw-r--r--bdb/log/log_get.c465
-rw-r--r--bdb/log/log_method.c121
-rw-r--r--bdb/log/log_put.c701
-rw-r--r--bdb/log/log_rec.c621
-rw-r--r--bdb/log/log_register.c433
-rw-r--r--bdb/mp/Design52
-rw-r--r--bdb/mp/mp_alloc.c152
-rw-r--r--bdb/mp/mp_bh.c662
-rw-r--r--bdb/mp/mp_fget.c417
-rw-r--r--bdb/mp/mp_fopen.c756
-rw-r--r--bdb/mp/mp_fput.c186
-rw-r--r--bdb/mp/mp_fset.c98
-rw-r--r--bdb/mp/mp_method.c115
-rw-r--r--bdb/mp/mp_region.c357
-rw-r--r--bdb/mp/mp_register.c85
-rw-r--r--bdb/mp/mp_stat.c388
-rw-r--r--bdb/mp/mp_sync.c658
-rw-r--r--bdb/mp/mp_trickle.c149
-rw-r--r--bdb/mutex/README108
-rw-r--r--bdb/mutex/mut_fcntl.c174
-rw-r--r--bdb/mutex/mut_pthread.c328
-rw-r--r--bdb/mutex/mut_tas.c200
-rw-r--r--bdb/mutex/mutex.c253
-rw-r--r--bdb/mutex/uts4_cc.s21
-rw-r--r--bdb/os/os_abs.c31
-rw-r--r--bdb/os/os_alloc.c342
-rw-r--r--bdb/os/os_dir.c108
-rw-r--r--bdb/os/os_errno.c44
-rw-r--r--bdb/os/os_fid.c140
-rw-r--r--bdb/os/os_finit.c111
-rw-r--r--bdb/os/os_fsync.c90
-rw-r--r--bdb/os/os_handle.c165
-rw-r--r--bdb/os/os_map.c436
-rw-r--r--bdb/os/os_method.c206
-rw-r--r--bdb/os/os_oflags.c106
-rw-r--r--bdb/os/os_open.c226
-rw-r--r--bdb/os/os_region.c116
-rw-r--r--bdb/os/os_rename.c46
-rw-r--r--bdb/os/os_root.c36
-rw-r--r--bdb/os/os_rpath.c69
-rw-r--r--bdb/os/os_rw.c147
-rw-r--r--bdb/os/os_seek.c76
-rw-r--r--bdb/os/os_sleep.c77
-rw-r--r--bdb/os/os_spin.c109
-rw-r--r--bdb/os/os_stat.c108
-rw-r--r--bdb/os/os_tmpdir.c119
-rw-r--r--bdb/os/os_unlink.c106
-rw-r--r--bdb/os_vxworks/os_abs.c45
-rw-r--r--bdb/os_vxworks/os_finit.c57
-rw-r--r--bdb/os_vxworks/os_map.c440
-rw-r--r--bdb/os_win32/os_abs.c33
-rw-r--r--bdb/os_win32/os_dir.c82
-rw-r--r--bdb/os_win32/os_errno.c146
-rw-r--r--bdb/os_win32/os_fid.c145
-rw-r--r--bdb/os_win32/os_finit.c60
-rw-r--r--bdb/os_win32/os_map.c310
-rw-r--r--bdb/os_win32/os_open.c201
-rw-r--r--bdb/os_win32/os_rename.c57
-rw-r--r--bdb/os_win32/os_seek.c65
-rw-r--r--bdb/os_win32/os_sleep.c41
-rw-r--r--bdb/os_win32/os_spin.c59
-rw-r--r--bdb/os_win32/os_type.c35
-rw-r--r--bdb/perl.BerkeleyDB/BerkeleyDB.pm1227
-rw-r--r--bdb/perl.BerkeleyDB/BerkeleyDB.pod1751
-rw-r--r--bdb/perl.BerkeleyDB/BerkeleyDB.pod.P1518
-rw-r--r--bdb/perl.BerkeleyDB/BerkeleyDB.xs3927
-rw-r--r--bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm8
-rw-r--r--bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm8
-rw-r--r--bdb/perl.BerkeleyDB/Changes112
-rw-r--r--bdb/perl.BerkeleyDB/MANIFEST49
-rw-r--r--bdb/perl.BerkeleyDB/Makefile.PL112
-rw-r--r--bdb/perl.BerkeleyDB/README464
-rw-r--r--bdb/perl.BerkeleyDB/Todo57
-rw-r--r--bdb/perl.BerkeleyDB/config.in51
-rwxr-xr-xbdb/perl.BerkeleyDB/dbinfo109
-rw-r--r--bdb/perl.BerkeleyDB/hints/irix_6_5.pl1
-rw-r--r--bdb/perl.BerkeleyDB/hints/solaris.pl1
-rw-r--r--bdb/perl.BerkeleyDB/mkconsts211
-rwxr-xr-xbdb/perl.BerkeleyDB/mkpod146
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.00444
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.004_01217
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.004_02217
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.004_03223
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.004_04209
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.004_05209
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.005209
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.005_01209
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.005_02264
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.005_03250
-rw-r--r--bdb/perl.BerkeleyDB/patches/5.6.0294
-rw-r--r--bdb/perl.BerkeleyDB/t/btree.t976
-rw-r--r--bdb/perl.BerkeleyDB/t/db-3.0.t128
-rw-r--r--bdb/perl.BerkeleyDB/t/db-3.1.t172
-rw-r--r--bdb/perl.BerkeleyDB/t/db-3.2.t90
-rw-r--r--bdb/perl.BerkeleyDB/t/destroy.t141
-rw-r--r--bdb/perl.BerkeleyDB/t/env.t279
-rw-r--r--bdb/perl.BerkeleyDB/t/examples.t482
-rw-r--r--bdb/perl.BerkeleyDB/t/examples.t.T496
-rw-r--r--bdb/perl.BerkeleyDB/t/examples3.t213
-rw-r--r--bdb/perl.BerkeleyDB/t/examples3.t.T217
-rw-r--r--bdb/perl.BerkeleyDB/t/filter.t244
-rw-r--r--bdb/perl.BerkeleyDB/t/hash.t777
-rw-r--r--bdb/perl.BerkeleyDB/t/join.t270
-rw-r--r--bdb/perl.BerkeleyDB/t/mldbm.t166
-rw-r--r--bdb/perl.BerkeleyDB/t/queue.t837
-rw-r--r--bdb/perl.BerkeleyDB/t/recno.t967
-rw-r--r--bdb/perl.BerkeleyDB/t/strict.t220
-rw-r--r--bdb/perl.BerkeleyDB/t/subdb.t296
-rw-r--r--bdb/perl.BerkeleyDB/t/txn.t354
-rw-r--r--bdb/perl.BerkeleyDB/t/unknown.t212
-rw-r--r--bdb/perl.BerkeleyDB/typemap275
-rw-r--r--bdb/perl.DB_File/Changes343
-rw-r--r--bdb/perl.DB_File/DB_File.pm2072
-rw-r--r--bdb/perl.DB_File/DB_File.xs2072
-rw-r--r--bdb/perl.DB_File/DB_File_BS6
-rw-r--r--bdb/perl.DB_File/MANIFEST27
-rw-r--r--bdb/perl.DB_File/Makefile.PL187
-rw-r--r--bdb/perl.DB_File/README396
-rw-r--r--bdb/perl.DB_File/config.in99
-rw-r--r--bdb/perl.DB_File/dbinfo109
-rw-r--r--bdb/perl.DB_File/hints/dynixptx.pl3
-rw-r--r--bdb/perl.DB_File/hints/sco.pl2
-rw-r--r--bdb/perl.DB_File/patches/5.00444
-rw-r--r--bdb/perl.DB_File/patches/5.004_01217
-rw-r--r--bdb/perl.DB_File/patches/5.004_02217
-rw-r--r--bdb/perl.DB_File/patches/5.004_03223
-rw-r--r--bdb/perl.DB_File/patches/5.004_04209
-rw-r--r--bdb/perl.DB_File/patches/5.004_05209
-rw-r--r--bdb/perl.DB_File/patches/5.005209
-rw-r--r--bdb/perl.DB_File/patches/5.005_01209
-rw-r--r--bdb/perl.DB_File/patches/5.005_02264
-rw-r--r--bdb/perl.DB_File/patches/5.005_03250
-rw-r--r--bdb/perl.DB_File/patches/5.6.0294
-rw-r--r--bdb/perl.DB_File/t/db-btree.t1306
-rw-r--r--bdb/perl.DB_File/t/db-hash.t753
-rw-r--r--bdb/perl.DB_File/t/db-recno.t899
-rw-r--r--bdb/perl.DB_File/typemap44
-rw-r--r--bdb/perl.DB_File/version.c81
-rw-r--r--bdb/qam/qam.c1357
-rw-r--r--bdb/qam/qam.src124
-rw-r--r--bdb/qam/qam_auto.c1282
-rw-r--r--bdb/qam/qam_conv.c83
-rw-r--r--bdb/qam/qam_files.c503
-rw-r--r--bdb/qam/qam_method.c472
-rw-r--r--bdb/qam/qam_open.c268
-rw-r--r--bdb/qam/qam_rec.c732
-rw-r--r--bdb/qam/qam_stat.c201
-rw-r--r--bdb/qam/qam_upgrade.c111
-rw-r--r--bdb/qam/qam_verify.c194
-rw-r--r--bdb/rpc_client/client.c371
-rw-r--r--bdb/rpc_client/db_server_clnt.c692
-rw-r--r--bdb/rpc_client/gen_client.c2494
-rw-r--r--bdb/rpc_client/gen_client_ret.c542
-rw-r--r--bdb/rpc_server/clsrv.html453
-rw-r--r--bdb/rpc_server/db_server.sed5
-rw-r--r--bdb/rpc_server/db_server.x492
-rw-r--r--bdb/rpc_server/db_server_proc.c1546
-rw-r--r--bdb/rpc_server/db_server_proc.sed418
-rw-r--r--bdb/rpc_server/db_server_svc.c359
-rw-r--r--bdb/rpc_server/db_server_svc.sed5
-rw-r--r--bdb/rpc_server/db_server_util.c612
-rw-r--r--bdb/rpc_server/db_server_xdr.c1149
-rw-r--r--bdb/rpc_server/gen_db_server.c703
-rw-r--r--bdb/rpc_server/rpc.src599
-rw-r--r--bdb/tcl/docs/db.html266
-rw-r--r--bdb/tcl/docs/env.html303
-rw-r--r--bdb/tcl/docs/historic.html168
-rw-r--r--bdb/tcl/docs/index.html47
-rw-r--r--bdb/tcl/docs/library.html26
-rw-r--r--bdb/tcl/docs/lock.html187
-rw-r--r--bdb/tcl/docs/log.html142
-rw-r--r--bdb/tcl/docs/mpool.html189
-rw-r--r--bdb/tcl/docs/test.html149
-rw-r--r--bdb/tcl/docs/txn.html56
-rw-r--r--bdb/tcl/tcl_compat.c1055
-rw-r--r--bdb/tcl/tcl_db.c1771
-rw-r--r--bdb/tcl/tcl_db_pkg.c2246
-rw-r--r--bdb/tcl/tcl_dbcursor.c744
-rw-r--r--bdb/tcl/tcl_env.c678
-rw-r--r--bdb/tcl/tcl_internal.c440
-rw-r--r--bdb/tcl/tcl_lock.c655
-rw-r--r--bdb/tcl/tcl_log.c581
-rw-r--r--bdb/tcl/tcl_mp.c822
-rw-r--r--bdb/tcl/tcl_txn.c473
-rw-r--r--bdb/test/TESTS448
-rw-r--r--bdb/test/archive.tcl232
-rw-r--r--bdb/test/byteorder.tcl23
-rw-r--r--bdb/test/conscript.tcl123
-rw-r--r--bdb/test/dbm.tcl128
-rw-r--r--bdb/test/dbscript.tcl357
-rw-r--r--bdb/test/ddscript.tcl43
-rw-r--r--bdb/test/dead001.tcl76
-rw-r--r--bdb/test/dead002.tcl68
-rw-r--r--bdb/test/dead003.tcl92
-rw-r--r--bdb/test/env001.tcl147
-rw-r--r--bdb/test/env002.tcl156
-rw-r--r--bdb/test/env003.tcl177
-rw-r--r--bdb/test/env004.tcl103
-rw-r--r--bdb/test/env005.tcl53
-rw-r--r--bdb/test/env006.tcl42
-rw-r--r--bdb/test/env007.tcl100
-rw-r--r--bdb/test/env008.tcl73
-rw-r--r--bdb/test/hsearch.tcl51
-rw-r--r--bdb/test/include.tcl19
-rw-r--r--bdb/test/join.tcl451
-rw-r--r--bdb/test/lock001.tcl170
-rw-r--r--bdb/test/lock002.tcl151
-rw-r--r--bdb/test/lock003.tcl48
-rw-r--r--bdb/test/lockscript.tcl88
-rw-r--r--bdb/test/log.tcl337
-rw-r--r--bdb/test/logtrack.list68
-rw-r--r--bdb/test/logtrack.tcl130
-rw-r--r--bdb/test/mdbscript.tcl381
-rw-r--r--bdb/test/mpool.tcl420
-rw-r--r--bdb/test/mpoolscript.tcl170
-rw-r--r--bdb/test/mutex.tcl225
-rw-r--r--bdb/test/mutexscript.tcl91
-rw-r--r--bdb/test/ndbm.tcl141
-rw-r--r--bdb/test/recd001.tcl180
-rw-r--r--bdb/test/recd002.tcl96
-rw-r--r--bdb/test/recd003.tcl111
-rw-r--r--bdb/test/recd004.tcl90
-rw-r--r--bdb/test/recd005.tcl231
-rw-r--r--bdb/test/recd006.tcl262
-rw-r--r--bdb/test/recd007.tcl723
-rw-r--r--bdb/test/recd008.tcl227
-rw-r--r--bdb/test/recd009.tcl181
-rw-r--r--bdb/test/recd010.tcl235
-rw-r--r--bdb/test/recd011.tcl115
-rw-r--r--bdb/test/recd012.tcl423
-rw-r--r--bdb/test/recd013.tcl244
-rw-r--r--bdb/test/recd014.tcl467
-rw-r--r--bdb/test/rpc001.tcl444
-rw-r--r--bdb/test/rpc002.tcl144
-rw-r--r--bdb/test/rsrc001.tcl223
-rw-r--r--bdb/test/rsrc002.tcl65
-rw-r--r--bdb/test/rsrc003.tcl174
-rw-r--r--bdb/test/sdb001.tcl123
-rw-r--r--bdb/test/sdb002.tcl167
-rw-r--r--bdb/test/sdb003.tcl137
-rw-r--r--bdb/test/sdb004.tcl179
-rw-r--r--bdb/test/sdb005.tcl109
-rw-r--r--bdb/test/sdb006.tcl130
-rw-r--r--bdb/test/sdb007.tcl123
-rw-r--r--bdb/test/sdb008.tcl151
-rw-r--r--bdb/test/sdb009.tcl77
-rw-r--r--bdb/test/sdb010.tcl46
-rw-r--r--bdb/test/sdbscript.tcl47
-rw-r--r--bdb/test/sdbtest001.tcl133
-rw-r--r--bdb/test/sdbtest002.tcl163
-rw-r--r--bdb/test/sdbutils.tcl171
-rw-r--r--bdb/test/sysscript.tcl283
-rw-r--r--bdb/test/test.tcl1297
-rw-r--r--bdb/test/test001.tcl157
-rw-r--r--bdb/test/test002.tcl128
-rw-r--r--bdb/test/test003.tcl177
-rw-r--r--bdb/test/test004.tcl134
-rw-r--r--bdb/test/test005.tcl14
-rw-r--r--bdb/test/test006.tcl118
-rw-r--r--bdb/test/test007.tcl13
-rw-r--r--bdb/test/test008.tcl138
-rw-r--r--bdb/test/test009.tcl15
-rw-r--r--bdb/test/test010.tcl126
-rw-r--r--bdb/test/test011.tcl349
-rw-r--r--bdb/test/test012.tcl113
-rw-r--r--bdb/test/test013.tcl193
-rw-r--r--bdb/test/test014.tcl204
-rw-r--r--bdb/test/test015.tcl235
-rw-r--r--bdb/test/test016.tcl170
-rw-r--r--bdb/test/test017.tcl237
-rw-r--r--bdb/test/test018.tcl13
-rw-r--r--bdb/test/test019.tcl107
-rw-r--r--bdb/test/test020.tcl108
-rw-r--r--bdb/test/test021.tcl130
-rw-r--r--bdb/test/test022.tcl55
-rw-r--r--bdb/test/test023.tcl204
-rw-r--r--bdb/test/test024.tcl206
-rw-r--r--bdb/test/test025.tcl105
-rw-r--r--bdb/test/test026.tcl112
-rw-r--r--bdb/test/test027.tcl13
-rw-r--r--bdb/test/test028.tcl208
-rw-r--r--bdb/test/test029.tcl192
-rw-r--r--bdb/test/test030.tcl191
-rw-r--r--bdb/test/test031.tcl196
-rw-r--r--bdb/test/test032.tcl195
-rw-r--r--bdb/test/test033.tcl103
-rw-r--r--bdb/test/test034.tcl16
-rw-r--r--bdb/test/test035.tcl16
-rw-r--r--bdb/test/test036.tcl135
-rw-r--r--bdb/test/test037.tcl191
-rw-r--r--bdb/test/test038.tcl174
-rw-r--r--bdb/test/test039.tcl177
-rw-r--r--bdb/test/test040.tcl16
-rw-r--r--bdb/test/test041.tcl16
-rw-r--r--bdb/test/test042.tcl149
-rw-r--r--bdb/test/test043.tcl162
-rw-r--r--bdb/test/test044.tcl243
-rw-r--r--bdb/test/test045.tcl117
-rw-r--r--bdb/test/test046.tcl717
-rw-r--r--bdb/test/test047.tcl192
-rw-r--r--bdb/test/test048.tcl139
-rw-r--r--bdb/test/test049.tcl160
-rw-r--r--bdb/test/test050.tcl191
-rw-r--r--bdb/test/test051.tcl191
-rw-r--r--bdb/test/test052.tcl254
-rw-r--r--bdb/test/test053.tcl194
-rw-r--r--bdb/test/test054.tcl369
-rw-r--r--bdb/test/test055.tcl118
-rw-r--r--bdb/test/test056.tcl145
-rw-r--r--bdb/test/test057.tcl225
-rw-r--r--bdb/test/test058.tcl99
-rw-r--r--bdb/test/test059.tcl128
-rw-r--r--bdb/test/test060.tcl53
-rw-r--r--bdb/test/test061.tcl215
-rw-r--r--bdb/test/test062.tcl125
-rw-r--r--bdb/test/test063.tcl141
-rw-r--r--bdb/test/test064.tcl62
-rw-r--r--bdb/test/test065.tcl146
-rw-r--r--bdb/test/test066.tcl73
-rw-r--r--bdb/test/test067.tcl114
-rw-r--r--bdb/test/test068.tcl181
-rw-r--r--bdb/test/test069.tcl14
-rw-r--r--bdb/test/test070.tcl120
-rw-r--r--bdb/test/test071.tcl15
-rw-r--r--bdb/test/test072.tcl225
-rw-r--r--bdb/test/test073.tcl265
-rw-r--r--bdb/test/test074.tcl221
-rw-r--r--bdb/test/test075.tcl195
-rw-r--r--bdb/test/test076.tcl59
-rw-r--r--bdb/test/test077.tcl68
-rw-r--r--bdb/test/test078.tcl90
-rw-r--r--bdb/test/test079.tcl18
-rw-r--r--bdb/test/test080.tcl41
-rw-r--r--bdb/test/test081.tcl16
-rw-r--r--bdb/test/test082.tcl15
-rw-r--r--bdb/test/test083.tcl136
-rw-r--r--bdb/test/test084.tcl48
-rw-r--r--bdb/test/test085.tcl274
-rw-r--r--bdb/test/test086.tcl162
-rw-r--r--bdb/test/test087.tcl278
-rw-r--r--bdb/test/test088.tcl142
-rw-r--r--bdb/test/test090.tcl20
-rw-r--r--bdb/test/test091.tcl21
-rw-r--r--bdb/test/testparams.tcl115
-rw-r--r--bdb/test/testutils.tcl2380
-rw-r--r--bdb/test/txn.tcl181
-rw-r--r--bdb/test/update.tcl92
-rw-r--r--bdb/test/upgrade.tcl279
-rw-r--r--bdb/test/upgrade/README85
-rw-r--r--bdb/test/upgrade/generate-2.X/pack-2.6.6.pl114
-rw-r--r--bdb/test/upgrade/generate-2.X/test-2.6.patch379
-rw-r--r--bdb/test/wordlist10001
-rw-r--r--bdb/test/wrap.tcl58
-rw-r--r--bdb/txn/txn.c869
-rw-r--r--bdb/txn/txn.src114
-rw-r--r--bdb/txn/txn_auto.c893
-rw-r--r--bdb/txn/txn_rec.c339
-rw-r--r--bdb/txn/txn_region.c393
-rw-r--r--bdb/xa/xa.c661
-rw-r--r--bdb/xa/xa_db.c182
-rw-r--r--bdb/xa/xa_map.c189
1442 files changed, 324740 insertions, 0 deletions
diff --git a/bdb/LICENSE b/bdb/LICENSE
new file mode 100644
index 00000000000..32cc483d68a
--- /dev/null
+++ b/bdb/LICENSE
@@ -0,0 +1,102 @@
+/*-
+ * $Id: LICENSE,v 11.7 2000/11/01 20:35:49 bostic Exp $
+ */
+
+The following is the license that applies to this copy of the Berkeley DB
+software. For a license to use the Berkeley DB software under conditions
+other than those described here, or to purchase support for this software,
+please contact Sleepycat Software by email at db@sleepycat.com, or on the
+Web at http://www.sleepycat.com.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+/*
+ * Copyright (c) 1990-2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Redistributions in any form must be accompanied by information on
+ * how to obtain complete source code for the DB software and any
+ * accompanying software that uses the DB software. The source code
+ * must either be included in the distribution or be available for no
+ * more than the cost of distribution plus a nominal fee, and must be
+ * freely redistributable under reasonable conditions. For an
+ * executable file, complete source code means the source code for all
+ * modules it contains. It does not include source code for modules or
+ * files that typically accompany the major components of the operating
+ * system on which the executable file runs.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ * NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
diff --git a/bdb/btree/bt_compare.c b/bdb/btree/bt_compare.c
new file mode 100644
index 00000000000..91481c31366
--- /dev/null
+++ b/bdb/btree/bt_compare.c
@@ -0,0 +1,211 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_compare.c,v 11.12 2000/10/26 19:00:28 krinsky Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+
+/*
+ * __bam_cmp --
+ * Compare a key to a given record.
+ *
+ * PUBLIC: int __bam_cmp __P((DB *, const DBT *, PAGE *,
+ * PUBLIC: u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
+ */
+int
+__bam_cmp(dbp, dbt, h, indx, func, cmpp)
+ DB *dbp;
+ const DBT *dbt;
+ PAGE *h;
+ u_int32_t indx;
+ int (*func)__P((DB *, const DBT *, const DBT *));
+ int *cmpp;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ DBT pg_dbt;
+
+ /*
+ * Returns:
+ * < 0 if dbt is < page record
+ * = 0 if dbt is = page record
+ * > 0 if dbt is > page record
+ *
+ * !!!
+ * We do not clear the pg_dbt DBT even though it's likely to contain
+ * random bits. That should be okay, because the app's comparison
+ * routine had better not be looking at fields other than data/size.
+ * We don't clear it because we go through this path a lot and it's
+ * expensive.
+ */
+ switch (TYPE(h)) {
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = GET_BKEYDATA(h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW)
+ bo = (BOVERFLOW *)bk;
+ else {
+ pg_dbt.data = bk->data;
+ pg_dbt.size = bk->len;
+ *cmpp = func(dbp, dbt, &pg_dbt);
+ return (0);
+ }
+ break;
+ case P_IBTREE:
+ /*
+ * The following code guarantees that the left-most key on an
+ * internal page at any place in the tree sorts less than any
+ * user-specified key. The reason is that if we have reached
+ * this internal page, we know the user key must sort greater
+ * than the key we're storing for this page in any internal
+ * pages at levels above us in the tree. It then follows that
+ * any user-specified key cannot sort less than the first page
+ * which we reference, and so there's no reason to call the
+ * comparison routine. While this may save us a comparison
+		 * routine call or two, the real reason is that
+ * we don't maintain a copy of the smallest key in the tree,
+ * so that we don't have to update all the levels of the tree
+ * should the application store a new smallest key. And, so,
+ * we may not have a key to compare, which makes doing the
+ * comparison difficult and error prone.
+ */
+ if (indx == 0) {
+ *cmpp = 1;
+ return (0);
+ }
+
+ bi = GET_BINTERNAL(h, indx);
+ if (B_TYPE(bi->type) == B_OVERFLOW)
+ bo = (BOVERFLOW *)(bi->data);
+ else {
+ pg_dbt.data = bi->data;
+ pg_dbt.size = bi->len;
+ *cmpp = func(dbp, dbt, &pg_dbt);
+ return (0);
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp, PGNO(h)));
+ }
+
+ /*
+ * Overflow.
+ */
+ return (__db_moff(dbp, dbt,
+ bo->pgno, bo->tlen, func == __bam_defcmp ? NULL : func, cmpp));
+}
+
+/*
+ * __bam_defcmp --
+ * Default comparison routine.
+ *
+ * PUBLIC: int __bam_defcmp __P((DB *, const DBT *, const DBT *));
+ */
+int
+__bam_defcmp(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ COMPQUIET(dbp, NULL);
+
+ /*
+ * Returns:
+ * < 0 if a is < b
+ * = 0 if a is = b
+ * > 0 if a is > b
+ *
+ * XXX
+ * If a size_t doesn't fit into a long, or if the difference between
+ * any two characters doesn't fit into an int, this routine can lose.
+ * What we need is a signed integral type that's guaranteed to be at
+ * least as large as a size_t, and there is no such thing.
+ */
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+/*
+ * __bam_defpfx --
+ * Default prefix routine.
+ *
+ * PUBLIC: size_t __bam_defpfx __P((DB *, const DBT *, const DBT *));
+ */
+size_t
+__bam_defpfx(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ size_t cnt, len;
+ u_int8_t *p1, *p2;
+
+ COMPQUIET(dbp, NULL);
+
+ cnt = 1;
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2, ++cnt)
+ if (*p1 != *p2)
+ return (cnt);
+
+ /*
+ * We know that a->size must be <= b->size, or they wouldn't be
+ * in this order.
+ */
+ return (a->size < b->size ? a->size + 1 : a->size);
+}
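
The func argument threaded through __bam_cmp is the application's key-comparison callback; __bam_defcmp and __bam_defpfx above are only the defaults. As a hedged illustration (not part of the imported source; uint32_key_cmp is an invented name), a caller-supplied comparator has the same int (*)(DB *, const DBT *, const DBT *) shape and would typically be registered through DB->set_bt_compare before the database is opened:

    #include <string.h>
    #include <db.h>

    /*
     * Illustrative comparator: order keys as native-endian u_int32_t
     * values, falling back to size ordering for short keys.
     */
    static int
    uint32_key_cmp(DB *dbp, const DBT *a, const DBT *b)
    {
        u_int32_t ai, bi;

        (void)dbp;    /* Unused, as in __bam_defcmp. */

        if (a->size < sizeof(u_int32_t) || b->size < sizeof(u_int32_t))
            return (a->size < b->size ? -1 : (a->size > b->size ? 1 : 0));
        memcpy(&ai, a->data, sizeof(u_int32_t));
        memcpy(&bi, b->data, sizeof(u_int32_t));
        return (ai < bi ? -1 : (ai > bi ? 1 : 0));
    }

    /* Registration sketch, assuming dbp came from db_create():
     *     dbp->set_bt_compare(dbp, uint32_key_cmp);
     */

With such a comparator installed, __bam_cmp passes it through as func for every leaf and internal key comparison, and hands it to __db_moff for overflow keys.
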
diff --git a/bdb/btree/bt_conv.c b/bdb/btree/bt_conv.c
new file mode 100644
index 00000000000..fd30f375f7c
--- /dev/null
+++ b/bdb/btree/bt_conv.c
@@ -0,0 +1,98 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_conv.c,v 11.6 2000/03/31 00:30:26 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "btree.h"
+
+/*
+ * __bam_pgin --
+ * Convert host-specific page layout from the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __bam_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__bam_pgin(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!pginfo->needswap)
+ return (0);
+
+ h = pp;
+ return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
+ __db_byteswap(dbenv, pg, pp, pginfo->db_pagesize, 1));
+}
+
+/*
+ * __bam_pgout --
+ * Convert host-specific page layout to the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __bam_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__bam_pgout(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!pginfo->needswap)
+ return (0);
+
+ h = pp;
+ return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
+ __db_byteswap(dbenv, pg, pp, pginfo->db_pagesize, 0));
+}
+
+/*
+ * __bam_mswap --
+ * Swap the bytes on the btree metadata page.
+ *
+ * PUBLIC: int __bam_mswap __P((PAGE *));
+ */
+int
+__bam_mswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *p;
+
+ __db_metaswap(pg);
+
+ p = (u_int8_t *)pg + sizeof(DBMETA);
+
+ SWAP32(p); /* maxkey */
+ SWAP32(p); /* minkey */
+ SWAP32(p); /* re_len */
+ SWAP32(p); /* re_pad */
+ SWAP32(p); /* root */
+
+ return (0);
+}
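
__bam_mswap depends on the SWAP32 macro from db_swap.h to byte-reverse each 32-bit metadata field in place and step the pointer past it. The following stand-in (a sketch only, not the db_swap.h definition; swap32_step is an invented name) shows what one such step amounts to when a field is stored as four consecutive bytes:

    #include <sys/types.h>

    /* Reverse the four bytes at *pp, then advance *pp past the field. */
    static void
    swap32_step(u_int8_t **pp)
    {
        u_int8_t *p, tmp;

        p = *pp;
        tmp = p[0]; p[0] = p[3]; p[3] = tmp;
        tmp = p[1]; p[1] = p[2]; p[2] = tmp;
        *pp = p + 4;
    }
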
diff --git a/bdb/btree/bt_curadj.c b/bdb/btree/bt_curadj.c
new file mode 100644
index 00000000000..011acd2f4a1
--- /dev/null
+++ b/bdb/btree/bt_curadj.c
@@ -0,0 +1,573 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_curadj.c,v 11.20 2001/01/17 16:15:49 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "txn.h"
+
+static int __bam_opd_cursor __P((DB *, DBC *, db_pgno_t, u_int32_t, u_int32_t));
+
+#ifdef DEBUG
+/*
+ * __bam_cprint --
+ * Display the current internal cursor.
+ *
+ * PUBLIC: void __bam_cprint __P((DBC *));
+ */
+void
+__bam_cprint(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ fprintf(stderr, "\tinternal: ovflsize: %lu", (u_long)cp->ovflsize);
+ if (dbc->dbtype == DB_RECNO)
+ fprintf(stderr, " recno: %lu", (u_long)cp->recno);
+ if (F_ISSET(cp, C_DELETED))
+ fprintf(stderr, " (deleted)");
+ fprintf(stderr, "\n");
+}
+#endif
+
+/*
+ * Cursor adjustments are logged if they are for subtransactions. This is
+ * because it's possible for a subtransaction to adjust cursors which will
+ * still be active after the subtransaction aborts, and so which must be
+ * restored to their previous locations. Cursors that can be both affected
+ * by our cursor adjustments and active after our transaction aborts can
+ * only be found in our parent transaction -- cursors in other transactions,
+ * including other child transactions of our parent, must have conflicting
+ * locker IDs, and so cannot be affected by adjustments in this transaction.
+ */
+
+/*
+ * __bam_ca_delete --
+ * Update the cursors when items are deleted and when already deleted
+ * items are overwritten. Return the number of relevant cursors found.
+ *
+ * PUBLIC: int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_delete(dbp, pgno, indx, delete)
+ DB *dbp;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ int delete;
+{
+ BTREE_CURSOR *cp;
+ DB *ldbp;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ int count; /* !!!: Has to contain max number of cursors. */
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Adjust the cursors. We have the page write locked, so the
+ * only other cursors that can be pointing at a page are
+ * those in the same thread of control. Unfortunately, we don't
+ * know that they're using the same DB handle, so traverse
+ * all matching DB handles in the same DB_ENV, then all cursors
+ * on each matching DB handle.
+ *
+ * Each cursor is single-threaded, so we only need to lock the
+ * list of DBs and then the list of cursors in each DB.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (count = 0, ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if (cp->pgno == pgno && cp->indx == indx) {
+ if (delete)
+ F_SET(cp, C_DELETED);
+ else
+ F_CLR(cp, C_DELETED);
+ ++count;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (count);
+}
+
+/*
+ * __ram_ca_delete --
+ * Return the number of relevant cursors.
+ *
+ * PUBLIC: int __ram_ca_delete __P((DB *, db_pgno_t));
+ */
+int
+__ram_ca_delete(dbp, root_pgno)
+ DB *dbp;
+ db_pgno_t root_pgno;
+{
+ DB *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ int found;
+
+ found = 0;
+ dbenv = dbp->dbenv;
+
+ /*
+ * Review the cursors. See the comment in __bam_ca_delete().
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ found == 0 && ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ found == 0 && dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if (dbc->internal->root == root_pgno)
+ found = 1;
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+ return (found);
+}
+
+/*
+ * __bam_ca_di --
+ * Adjust the cursors during a delete or insert.
+ *
+ * PUBLIC: int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_di(my_dbc, pgno, indx, adjust)
+ DBC *my_dbc;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ int adjust;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *dbc;
+ DBC_INTERNAL *cp;
+ int found, ret;
+
+ dbp = my_dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ found = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ if (dbc->dbtype == DB_RECNO)
+ continue;
+ cp = dbc->internal;
+ if (cp->pgno == pgno && cp->indx >= indx) {
+ /* Cursor indices should never be negative. */
+ DB_ASSERT(cp->indx != 0 || adjust > 0);
+
+ cp->indx += adjust;
+ if (my_txn != NULL && dbc->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DB_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbenv,
+ my_dbc->txn, &lsn, 0, dbp->log_fileid,
+ DB_CA_DI, pgno, 0, 0, adjust, indx, 0)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_opd_cursor -- create a new opd cursor.
+ */
+static int
+__bam_opd_cursor(dbp, dbc, first, tpgno, ti)
+ DB *dbp;
+ DBC *dbc;
+ db_pgno_t tpgno;
+ u_int32_t first, ti;
+{
+ BTREE_CURSOR *cp, *orig_cp;
+ DBC *dbc_nopd;
+ int ret;
+
+ orig_cp = (BTREE_CURSOR *)dbc->internal;
+ dbc_nopd = NULL;
+
+ /*
+ * Allocate a new cursor and create the stack. If duplicates
+ * are sorted, we've just created an off-page duplicate Btree.
+ * If duplicates aren't sorted, we've just created a Recno tree.
+ */
+ if ((ret = __db_c_newopd(dbc, tpgno, &dbc_nopd)) != 0)
+ return (ret);
+
+ cp = (BTREE_CURSOR *)dbc_nopd->internal;
+ cp->pgno = tpgno;
+ cp->indx = ti;
+
+ if (dbp->dup_compare == NULL) {
+ /*
+ * Converting to off-page Recno trees is tricky. The
+ * record number for the cursor is the index + 1 (to
+ * convert to 1-based record numbers).
+ */
+ cp->recno = ti + 1;
+ }
+
+ /*
+ * Transfer the deleted flag from the top-level cursor to the
+ * created one.
+ */
+ if (F_ISSET(orig_cp, C_DELETED)) {
+ F_SET(cp, C_DELETED);
+ F_CLR(orig_cp, C_DELETED);
+ }
+
+ /* Stack the cursors and reset the initial cursor's index. */
+ orig_cp->opd = dbc_nopd;
+ orig_cp->indx = first;
+ return (0);
+}
+
+/*
+ * __bam_ca_dup --
+ * Adjust the cursors when moving items from a leaf page to a duplicates
+ * page.
+ *
+ * PUBLIC: int __bam_ca_dup __P((DBC *,
+ * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+ */
+int
+__bam_ca_dup(my_dbc, first, fpgno, fi, tpgno, ti)
+ DBC *my_dbc;
+ db_pgno_t fpgno, tpgno;
+ u_int32_t first, fi, ti;
+{
+ BTREE_CURSOR *orig_cp;
+ DB *dbp, *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ int found, ret;
+
+ dbp = my_dbc->dbp;
+ dbenv = dbp->dbenv;
+ my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ found = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ /* Find cursors pointing to this record. */
+ orig_cp = (BTREE_CURSOR *)dbc->internal;
+ if (orig_cp->pgno != fpgno || orig_cp->indx != fi)
+ continue;
+
+ /*
+			 * Since we rescan the list, see if this is already
+ * converted.
+ */
+ if (orig_cp->opd != NULL)
+ continue;
+
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ if ((ret = __bam_opd_cursor(dbp,
+ dbc, first, tpgno, ti)) !=0)
+ return (ret);
+ if (my_txn != NULL && dbc->txn != my_txn)
+ found = 1;
+ /* We released the MUTEX to get a cursor, start over. */
+ goto loop;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DB_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbenv,
+ my_dbc->txn, &lsn, 0, dbp->log_fileid,
+ DB_CA_DUP, fpgno, tpgno, 0, first, fi, ti)) != 0)
+ return (ret);
+ }
+ return (0);
+}
+
+/*
+ * __bam_ca_undodup --
+ * Adjust the cursors when returning items to a leaf page
+ * from a duplicate page.
+ * Called only during undo processing.
+ *
+ * PUBLIC: int __bam_ca_undodup __P((DB *,
+ * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, u_int32_t));
+ */
+int
+__bam_ca_undodup(dbp, first, fpgno, fi, ti)
+ DB *dbp;
+ db_pgno_t fpgno;
+ u_int32_t first, fi, ti;
+{
+ BTREE_CURSOR *orig_cp;
+ DB *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ orig_cp = (BTREE_CURSOR *)dbc->internal;
+
+ if (orig_cp->pgno != fpgno ||
+ orig_cp->indx != first ||
+ ((BTREE_CURSOR *)orig_cp->opd->internal)->indx
+ != ti)
+ continue;
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ if ((ret = orig_cp->opd->c_close(orig_cp->opd)) != 0)
+ return (ret);
+ orig_cp->opd = NULL;
+ orig_cp->indx = fi;
+ /*
+ * We released the MUTEX to free a cursor,
+ * start over.
+ */
+ goto loop;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (0);
+}
+
+/*
+ * __bam_ca_rsplit --
+ * Adjust the cursors when doing reverse splits.
+ *
+ * PUBLIC: int __bam_ca_rsplit __P((DBC *, db_pgno_t, db_pgno_t));
+ */
+int
+__bam_ca_rsplit(my_dbc, fpgno, tpgno)
+ DBC* my_dbc;
+ db_pgno_t fpgno, tpgno;
+{
+ DB *dbp, *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ int found, ret;
+
+ dbp = my_dbc->dbp;
+ dbenv = dbp->dbenv;
+ my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ found = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ if (dbc->dbtype == DB_RECNO)
+ continue;
+ if (dbc->internal->pgno == fpgno) {
+ dbc->internal->pgno = tpgno;
+ if (my_txn != NULL && dbc->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DB_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbenv,
+ my_dbc->txn, &lsn, 0, dbp->log_fileid,
+ DB_CA_RSPLIT, fpgno, tpgno, 0, 0, 0, 0)) != 0)
+ return (ret);
+ }
+ return (0);
+}
+
+/*
+ * __bam_ca_split --
+ * Adjust the cursors when splitting a page.
+ *
+ * PUBLIC: int __bam_ca_split __P((DBC *,
+ * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft)
+ DBC *my_dbc;
+ db_pgno_t ppgno, lpgno, rpgno;
+ u_int32_t split_indx;
+ int cleft;
+{
+ DB *dbp, *ldbp;
+ DBC *dbc;
+ DBC_INTERNAL *cp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ int found, ret;
+
+ dbp = my_dbc->dbp;
+ dbenv = dbp->dbenv;
+ my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ *
+ * If splitting the page that a cursor was on, the cursor has to be
+ * adjusted to point to the same record as before the split. Most
+ * of the time we don't adjust pointers to the left page, because
+ * we're going to copy its contents back over the original page. If
+ * the cursor is on the right page, it is decremented by the number of
+ * records split to the left page.
+ */
+ found = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ if (dbc->dbtype == DB_RECNO)
+ continue;
+ cp = dbc->internal;
+ if (cp->pgno == ppgno) {
+ if (my_txn != NULL && dbc->txn != my_txn)
+ found = 1;
+ if (cp->indx < split_indx) {
+ if (cleft)
+ cp->pgno = lpgno;
+ } else {
+ cp->pgno = rpgno;
+ cp->indx -= split_indx;
+ }
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DB_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbenv, my_dbc->txn,
+ &lsn, 0, dbp->log_fileid, DB_CA_SPLIT, ppgno, rpgno,
+ cleft ? lpgno : PGNO_INVALID, 0, split_indx, 0)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_ca_undosplit --
+ * Adjust the cursors when undoing a split of a page.
+ * If we grew a level we will execute this for both the
+ * left and the right pages.
+ * Called only during undo processing.
+ *
+ * PUBLIC: void __bam_ca_undosplit __P((DB *,
+ * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t));
+ */
+void
+__bam_ca_undosplit(dbp, frompgno, topgno, lpgno, split_indx)
+ DB *dbp;
+ db_pgno_t frompgno, topgno, lpgno;
+ u_int32_t split_indx;
+{
+ DB *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DBC_INTERNAL *cp;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ *
+ * When backing out a split, we move the cursor back
+ * to the original offset and bump it by the split_indx.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ if (dbc->dbtype == DB_RECNO)
+ continue;
+ cp = dbc->internal;
+ if (cp->pgno == topgno) {
+ cp->pgno = frompgno;
+ cp->indx += split_indx;
+ } else if (cp->pgno == lpgno)
+ cp->pgno = frompgno;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+}
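
The effect of these adjustments is visible through the public cursor interface: when one cursor logically deletes an item, __bam_ca_delete marks every other cursor positioned on that item, and a later DB_CURRENT get through such a cursor is expected to return DB_KEYEMPTY. A minimal sketch against the DB 3.x cursor API (deleted_item_example is an invented name; error handling omitted; dbp is assumed to be an open btree handle holding at least one record):

    #include <string.h>
    #include <db.h>

    static void
    deleted_item_example(DB *dbp)
    {
        DBC *c1, *c2;
        DBT key, data;
        int ret;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));

        (void)dbp->cursor(dbp, NULL, &c1, 0);
        (void)c1->c_get(c1, &key, &data, DB_FIRST);
        (void)c1->c_dup(c1, &c2, DB_POSITION);  /* c2 sits on the same item. */

        (void)c1->c_del(c1, 0);                 /* Logical delete through c1. */

        ret = c2->c_get(c2, &key, &data, DB_CURRENT);
        /* ret should be DB_KEYEMPTY: c2 was adjusted by __bam_ca_delete. */

        (void)c2->c_close(c2);
        (void)c1->c_close(c1);
    }
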
diff --git a/bdb/btree/bt_cursor.c b/bdb/btree/bt_cursor.c
new file mode 100644
index 00000000000..84ab7c80744
--- /dev/null
+++ b/bdb/btree/bt_cursor.c
@@ -0,0 +1,2131 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_cursor.c,v 11.88 2001/01/11 18:19:49 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "lock.h"
+#include "qam.h"
+#include "common_ext.h"
+
+static int __bam_c_close __P((DBC *, db_pgno_t, int *));
+static int __bam_c_del __P((DBC *));
+static int __bam_c_destroy __P((DBC *));
+static int __bam_c_first __P((DBC *));
+static int __bam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __bam_c_getstack __P((DBC *));
+static int __bam_c_last __P((DBC *));
+static int __bam_c_next __P((DBC *, int));
+static int __bam_c_physdel __P((DBC *));
+static int __bam_c_prev __P((DBC *));
+static int __bam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static void __bam_c_reset __P((BTREE_CURSOR *));
+static int __bam_c_search __P((DBC *, const DBT *, u_int32_t, int *));
+static int __bam_c_writelock __P((DBC *));
+static int __bam_getboth_finddatum __P((DBC *, DBT *));
+static int __bam_getbothc __P((DBC *, DBT *));
+static int __bam_isopd __P((DBC *, db_pgno_t *));
+
+/*
+ * Acquire a new page/lock. If we hold a page/lock, discard the page, and
+ * lock-couple the lock.
+ *
+ * !!!
+ * We have to handle both where we have a lock to lock-couple and where we
+ * don't -- we don't duplicate locks when we duplicate cursors if we are
+ * running in a transaction environment as there's no point if locks are
+ * never discarded. This means that the cursor may or may not hold a lock.
+ */
+#undef ACQUIRE
+#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pagep, ret) {\
+ if ((pagep) != NULL) { \
+ ret = memp_fput((dbc)->dbp->mpf, pagep, 0); \
+ pagep = NULL; \
+ } else \
+ ret = 0; \
+ if ((ret) == 0 && STD_LOCKING(dbc)) \
+ ret = __db_lget(dbc, \
+ (lock).off == LOCK_INVALID ? 0 : LCK_COUPLE, \
+ lpgno, mode, 0, &lock); \
+ else \
+ (lock).off = LOCK_INVALID; \
+ if ((ret) == 0) \
+ ret = memp_fget((dbc)->dbp->mpf, &(fpgno), 0, &(pagep));\
+}
+
+/* Acquire a new page/lock for a cursor. */
+#undef ACQUIRE_CUR
+#define ACQUIRE_CUR(dbc, mode, ret) { \
+ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+ ACQUIRE(dbc, mode, \
+ __cp->pgno, __cp->lock, __cp->pgno, __cp->page, ret); \
+ if ((ret) == 0) \
+ __cp->lock_mode = (mode); \
+}
+
+/*
+ * Acquire a new page/lock for a cursor, and move the cursor on success.
+ * The reason that this is a separate macro is because we don't want to
+ * set the pgno/indx fields in the cursor until we actually have the lock,
+ * otherwise the cursor adjust routines will adjust the cursor even though
+ * we're not really on the page.
+ */
+#undef ACQUIRE_CUR_SET
+#define ACQUIRE_CUR_SET(dbc, mode, p, ret) { \
+ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+ ACQUIRE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \
+ if ((ret) == 0) { \
+ __cp->pgno = p; \
+ __cp->indx = 0; \
+ __cp->lock_mode = (mode); \
+ } \
+}
+
+/*
+ * Acquire a write lock if we don't already have one.
+ *
+ * !!!
+ * See ACQUIRE macro on why we handle cursors that don't have locks.
+ */
+#undef ACQUIRE_WRITE_LOCK
+#define ACQUIRE_WRITE_LOCK(dbc, ret) { \
+ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+ ret = 0; \
+ if (STD_LOCKING(dbc) && \
+ __cp->lock_mode != DB_LOCK_WRITE && \
+ ((ret) = __db_lget(dbc, \
+ __cp->lock.off == LOCK_INVALID ? 0 : LCK_COUPLE, \
+ __cp->pgno, DB_LOCK_WRITE, 0, &__cp->lock)) == 0) \
+ __cp->lock_mode = DB_LOCK_WRITE; \
+}
+
+/* Discard the current page/lock. */
+#undef DISCARD
+#define DISCARD(dbc, ldiscard, lock, pagep, ret) { \
+ int __t_ret; \
+ if ((pagep) != NULL) { \
+ ret = memp_fput((dbc)->dbp->mpf, pagep, 0); \
+ pagep = NULL; \
+ } else \
+ ret = 0; \
+ if ((lock).off != LOCK_INVALID) { \
+ __t_ret = ldiscard ? \
+ __LPUT((dbc), lock): __TLPUT((dbc), lock); \
+ if (__t_ret != 0 && (ret) == 0) \
+ ret = __t_ret; \
+ (lock).off = LOCK_INVALID; \
+ } \
+}
+
+/* Discard the current page/lock for a cursor. */
+#undef DISCARD_CUR
+#define DISCARD_CUR(dbc, ret) { \
+ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+ DISCARD(dbc, 0, __cp->lock, __cp->page, ret); \
+ if ((ret) == 0) \
+ __cp->lock_mode = DB_LOCK_NG; \
+}
+
+/* If on-page item is a deleted record. */
+#undef IS_DELETED
+#define IS_DELETED(page, indx) \
+ B_DISSET(GET_BKEYDATA(page, \
+ (indx) + (TYPE(page) == P_LBTREE ? O_INDX : 0))->type)
+#undef IS_CUR_DELETED
+#define IS_CUR_DELETED(dbc) \
+ IS_DELETED((dbc)->internal->page, (dbc)->internal->indx)
+
+/*
+ * Test to see if two cursors could point to duplicates of the same key.
+ * In the case of off-page duplicates they are the same, as the cursors
+ * will be in the same off-page duplicate tree. In the case of on-page
+ * duplicates, the key index offsets must be the same. For the last test,
+ * as the original cursor may not have a valid page pointer, we use the
+ * current cursor's.
+ */
+#undef IS_DUPLICATE
+#define IS_DUPLICATE(dbc, i1, i2) \
+ (((PAGE *)(dbc)->internal->page)->inp[i1] == \
+ ((PAGE *)(dbc)->internal->page)->inp[i2])
+#undef IS_CUR_DUPLICATE
+#define IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx) \
+ (F_ISSET(dbc, DBC_OPD) || \
+ (orig_pgno == (dbc)->internal->pgno && \
+ IS_DUPLICATE(dbc, (dbc)->internal->indx, orig_indx)))
+
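
Taken together, the ACQUIRE*/DISCARD* macros implement the lock-coupling walk described above: keep the old lock until the replacement is granted, letting __db_lget release it via LCK_COUPLE, and only then update the cursor's position. Purely as a hypothetical caller pattern (step_to_next_page is not a function in this file; it assumes the surrounding bt_cursor.c context and a pinned cp->page):

    static int
    step_to_next_page(DBC *dbc)
    {
        BTREE_CURSOR *cp;
        db_pgno_t next;
        int ret;

        cp = (BTREE_CURSOR *)dbc->internal;
        next = NEXT_PGNO(cp->page);    /* Right sibling of the current page. */
        if (next == PGNO_INVALID)
            return (DB_NOTFOUND);

        /* Drop the old page, lock-couple to the sibling, move the cursor. */
        ACQUIRE_CUR_SET(dbc, DB_LOCK_READ, next, ret);
        return (ret);
    }
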
+/*
+ * __bam_c_reset --
+ * Initialize internal cursor structure.
+ */
+static void
+__bam_c_reset(cp)
+ BTREE_CURSOR *cp;
+{
+ cp->csp = cp->sp;
+ cp->lock.off = LOCK_INVALID;
+ cp->lock_mode = DB_LOCK_NG;
+ cp->recno = RECNO_OOB;
+ cp->order = INVALID_ORDER;
+ cp->flags = 0;
+}
+
+/*
+ * __bam_c_init --
+ * Initialize the access private portion of a cursor
+ *
+ * PUBLIC: int __bam_c_init __P((DBC *, DBTYPE));
+ */
+int
+__bam_c_init(dbc, dbtype)
+ DBC *dbc;
+ DBTYPE dbtype;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ int ret;
+ u_int32_t minkey;
+
+ dbp = dbc->dbp;
+
+ /* Allocate/initialize the internal structure. */
+ if (dbc->internal == NULL) {
+ if ((ret = __os_malloc(dbp->dbenv,
+ sizeof(BTREE_CURSOR), NULL, &cp)) != 0)
+ return (ret);
+ dbc->internal = (DBC_INTERNAL *)cp;
+
+ cp->sp = cp->csp = cp->stack;
+ cp->esp = cp->stack + sizeof(cp->stack) / sizeof(cp->stack[0]);
+ } else
+ cp = (BTREE_CURSOR *)dbc->internal;
+ __bam_c_reset(cp);
+
+ /* Initialize methods. */
+ dbc->c_close = __db_c_close;
+ dbc->c_count = __db_c_count;
+ dbc->c_del = __db_c_del;
+ dbc->c_dup = __db_c_dup;
+ dbc->c_get = __db_c_get;
+ dbc->c_put = __db_c_put;
+ if (dbtype == DB_BTREE) {
+ dbc->c_am_close = __bam_c_close;
+ dbc->c_am_del = __bam_c_del;
+ dbc->c_am_destroy = __bam_c_destroy;
+ dbc->c_am_get = __bam_c_get;
+ dbc->c_am_put = __bam_c_put;
+ dbc->c_am_writelock = __bam_c_writelock;
+ } else {
+ dbc->c_am_close = __bam_c_close;
+ dbc->c_am_del = __ram_c_del;
+ dbc->c_am_destroy = __bam_c_destroy;
+ dbc->c_am_get = __ram_c_get;
+ dbc->c_am_put = __ram_c_put;
+ dbc->c_am_writelock = __bam_c_writelock;
+ }
+
+ /*
+ * The btree leaf page data structures require that two key/data pairs
+ * (or four items) fit on a page, but other than that there's no fixed
+ * requirement. The btree off-page duplicates only require two items,
+ * to be exact, but requiring four for them as well seems reasonable.
+ *
+ * Recno uses the btree bt_ovflsize value -- it's close enough.
+ */
+ t = dbp->bt_internal;
+ minkey = F_ISSET(dbc, DBC_OPD) ? 2 : t->bt_minkey;
+ cp->ovflsize = B_MINKEY_TO_OVFLSIZE(minkey, dbp->pgsize);
+
+ return (0);
+}
+
+/*
+ * __bam_c_refresh
+ * Set things up properly for cursor re-use.
+ *
+ * PUBLIC: int __bam_c_refresh __P((DBC *));
+ */
+int
+__bam_c_refresh(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ __bam_c_reset(cp);
+
+ /*
+ * If our caller set the root page number, it's because the root was
+ * known. This is always the case for off page dup cursors. Else,
+ * pull it out of our internal information.
+ */
+ if (cp->root == PGNO_INVALID)
+ cp->root = ((BTREE *)dbp->bt_internal)->bt_root;
+
+ /* Initialize for record numbers. */
+ if (F_ISSET(dbc, DBC_OPD) ||
+ dbc->dbtype == DB_RECNO || F_ISSET(dbp, DB_BT_RECNUM)) {
+ F_SET(cp, C_RECNUM);
+
+ /*
+ * All btrees that support record numbers, optionally standard
+ * recno trees, and all off-page duplicate recno trees have
+ * mutable record numbers.
+ */
+ if ((F_ISSET(dbc, DBC_OPD) && dbc->dbtype == DB_RECNO) ||
+ F_ISSET(dbp, DB_BT_RECNUM | DB_RE_RENUMBER))
+ F_SET(cp, C_RENUMBER);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_c_close --
+ * Close down the cursor.
+ */
+static int
+__bam_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ BTREE_CURSOR *cp, *cp_opd, *cp_c;
+ DB *dbp;
+ DBC *dbc_opd, *dbc_c;
+ PAGE *h;
+ u_int32_t num;
+ int cdb_lock, ret, t_ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ cp_opd = (dbc_opd = cp->opd) == NULL ?
+ NULL : (BTREE_CURSOR *)dbc_opd->internal;
+ cdb_lock = ret = 0;
+
+ /*
+ * There are 3 ways this function is called:
+ *
+ * 1. Closing a primary cursor: we get called with a pointer to a
+ * primary cursor that has a NULL opd field. This happens when
+ * closing a btree/recno database cursor without an associated
+ * off-page duplicate tree.
+ *
+ * 2. Closing a primary and an off-page duplicate cursor stack: we
+ * get called with a pointer to the primary cursor which has a
+ * non-NULL opd field. This happens when closing a btree cursor
+ * into database with an associated off-page btree/recno duplicate
+	 * into a database with an associated off-page btree/recno duplicate
+ * don't support duplicates.)
+ *
+ * 3. Closing an off-page duplicate cursor stack: we get called with
+ * a pointer to the off-page duplicate cursor. This happens when
+ * closing a non-btree database that has an associated off-page
+ * btree/recno duplicate tree or for a btree database when the
+ * opd tree is not empty (root_pgno == PGNO_INVALID).
+ *
+ * If either the primary or off-page duplicate cursor deleted a btree
+ * key/data pair, check to see if the item is still referenced by a
+ * different cursor. If it is, confirm that cursor's delete flag is
+ * set and leave it to that cursor to do the delete.
+ *
+ * NB: The test for == 0 below is correct. Our caller already removed
+ * our cursor argument from the active queue, we won't find it when we
+ * search the queue in __bam_ca_delete().
+ * NB: It can't be true that both the primary and off-page duplicate
+ * cursors have deleted a btree key/data pair. Either the primary
+ * cursor may have deleted an item and there's no off-page duplicate
+ * cursor, or there's an off-page duplicate cursor and it may have
+ * deleted an item.
+ *
+ * Primary recno databases aren't an issue here. Recno keys are either
+ * deleted immediately or never deleted, and do not have to be handled
+ * here.
+ *
+ * Off-page duplicate recno databases are an issue here, cases #2 and
+ * #3 above can both be off-page recno databases. The problem is the
+ * same as the final problem for off-page duplicate btree databases.
+ * If we no longer need the off-page duplicate tree, we want to remove
+ * it. For off-page duplicate btrees, we are done with the tree when
+ * we delete the last item it contains, i.e., there can be no further
+ * references to it when it's empty. For off-page duplicate recnos,
+ * we remove items from the tree as the application calls the remove
+ * function, so we are done with the tree when we close the last cursor
+ * that references it.
+ *
+ * We optionally take the root page number from our caller. If the
+ * primary database is a btree, we can get it ourselves because dbc
+ * is the primary cursor. If the primary database is not a btree,
+ * the problem is that we may be dealing with a stack of pages. The
+ * cursor we're using to do the delete points at the bottom of that
+ * stack and we need the top of the stack.
+ */
+ if (F_ISSET(cp, C_DELETED)) {
+ dbc_c = dbc;
+ switch (dbc->dbtype) {
+ case DB_BTREE: /* Case #1, #3. */
+ if (__bam_ca_delete(dbp, cp->pgno, cp->indx, 1) == 0)
+ goto lock;
+ goto done;
+ case DB_RECNO:
+ if (!F_ISSET(dbc, DBC_OPD)) /* Case #1. */
+ goto done;
+ /* Case #3. */
+ if (__ram_ca_delete(dbp, cp->root) == 0)
+ goto lock;
+ goto done;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__bam_c_close", dbc->dbtype));
+ }
+ }
+
+ if (dbc_opd == NULL)
+ goto done;
+
+ if (F_ISSET(cp_opd, C_DELETED)) { /* Case #2. */
+ /*
+ * We will not have been provided a root page number. Acquire
+ * one from the primary database.
+ */
+ if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &h)) != 0)
+ goto err;
+ root_pgno = GET_BOVERFLOW(h, cp->indx + O_INDX)->pgno;
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ goto err;
+
+ dbc_c = dbc_opd;
+ switch (dbc_opd->dbtype) {
+ case DB_BTREE:
+ if (__bam_ca_delete(
+ dbp, cp_opd->pgno, cp_opd->indx, 1) == 0)
+ goto lock;
+ goto done;
+ case DB_RECNO:
+ if (__ram_ca_delete(dbp, cp_opd->root) == 0)
+ goto lock;
+ goto done;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__bam_c_close", dbc->dbtype));
+ }
+ }
+ goto done;
+
+lock: cp_c = (BTREE_CURSOR *)dbc_c->internal;
+
+ /*
+ * If this is CDB, upgrade the lock if necessary. While we acquired
+ * the write lock to logically delete the record, we released it when
+ * we returned from that call, and so may not be holding a write lock
+ * at the moment. NB: to get here in CDB we must either be holding a
+ * write lock or be the only cursor that is permitted to acquire write
+ * locks. The reason is that there can never be more than a single CDB
+ * write cursor (that cursor cannot be dup'd), and so that cursor must
+ * be closed and the item therefore deleted before any other cursor
+ * could acquire a reference to this item.
+ *
+ * Note that dbc may be an off-page dup cursor; this is the sole
+ * instance in which an OPD cursor does any locking, but it's necessary
+ * because we may be closed by ourselves without a parent cursor
+ * handy, and we have to do a lock upgrade on behalf of somebody.
+ * If this is the case, the OPD has been given the parent's locking
+ * info in __db_c_get--the OPD is also a WRITEDUP.
+ */
+ if (CDB_LOCKING(dbp->dbenv)) {
+ DB_ASSERT(!F_ISSET(dbc, DBC_OPD) || F_ISSET(dbc, DBC_WRITEDUP));
+ if (!F_ISSET(dbc, DBC_WRITER)) {
+ if ((ret =
+ lock_get(dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &dbc->mylock)) != 0)
+ goto err;
+ cdb_lock = 1;
+ }
+
+ cp_c->lock.off = LOCK_INVALID;
+ if ((ret =
+ memp_fget(dbp->mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
+ goto err;
+
+ goto delete;
+ }
+
+ /*
+ * The variable dbc_c has been initialized to reference the cursor in
+ * which we're going to do the delete. Initialize the cursor's page
+ * and lock structures as necessary.
+ *
+ * First, we may not need to acquire any locks. If we're in case #3,
+ * that is, the primary database isn't a btree database, our caller
+ * is responsible for acquiring any necessary locks before calling us.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ cp_c->lock.off = LOCK_INVALID;
+ if ((ret =
+ memp_fget(dbp->mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
+ goto err;
+ goto delete;
+ }
+
+ /*
+ * Otherwise, acquire a write lock. If the cursor that did the initial
+ * logical deletion (and which had a write lock) is not the same as the
+ * cursor doing the physical deletion (which may have only ever had a
+ * read lock on the item), we need to upgrade. The confusion comes as
+ * follows:
+ *
+ * C1 created, acquires item read lock
+ * C2 dup C1, create C2, also has item read lock.
+ * C1 acquire write lock, delete item
+ * C1 close
+ * C2 close, needs a write lock to physically delete item.
+ *
+ * If we're in a TXN, we know that C2 will be able to acquire the write
+ * lock, because no locker other than the one shared by C1 and C2 can
+	 * acquire a write lock -- the original write lock C1 acquired was never
+ * discarded.
+ *
+ * If we're not in a TXN, it's nastier. Other cursors might acquire
+ * read locks on the item after C1 closed, discarding its write lock,
+	 * and such locks would prevent C2 from acquiring a write lock. That's
+	 * OK, though, we'll simply wait until we can acquire a write lock, or
+ * we'll deadlock. (Which better not happen, since we're not in a TXN.)
+ *
+ * Lock the primary database page, regardless of whether we're deleting
+ * an item on a primary database page or an off-page duplicates page.
+ */
+ ACQUIRE(dbc, DB_LOCK_WRITE,
+ cp->pgno, cp_c->lock, cp_c->pgno, cp_c->page, ret);
+ if (ret != 0)
+ goto err;
+
+delete: /*
+ * If the delete occurred in a btree, delete the on-page physical item
+ * referenced by the cursor.
+ */
+ if (dbc_c->dbtype == DB_BTREE && (ret = __bam_c_physdel(dbc_c)) != 0)
+ goto err;
+
+ /*
+ * If we're not working in an off-page duplicate tree, then we're
+ * done.
+ */
+ if (!F_ISSET(dbc_c, DBC_OPD) || root_pgno == PGNO_INVALID)
+ goto done;
+
+ /*
+ * We may have just deleted the last element in the off-page duplicate
+ * tree, and closed the last cursor in the tree. For an off-page btree
+ * there are no other cursors in the tree by definition, if the tree is
+ * empty. For an off-page recno we know we have closed the last cursor
+ * in the tree because the __ram_ca_delete call above returned 0 only
+ * in that case. So, if the off-page duplicate tree is empty at this
+ * point, we want to remove it.
+ */
+ if ((ret = memp_fget(dbp->mpf, &root_pgno, 0, &h)) != 0)
+ goto err;
+ if ((num = NUM_ENT(h)) == 0) {
+ if ((ret = __db_free(dbc, h)) != 0)
+ goto err;
+ } else {
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ goto err;
+ goto done;
+ }
+
+ /*
+ * When removing the tree, we have to do one of two things. If this is
+ * case #2, that is, the primary tree is a btree, delete the key that's
+ * associated with the tree from the btree leaf page. We know we are
+ * the only reference to it and we already have the correct lock. We
+ * detect this case because the cursor that was passed to us references
+ * an off-page duplicate cursor.
+ *
+ * If this is case #3, that is, the primary tree isn't a btree, pass
+ * the information back to our caller, it's their job to do cleanup on
+ * the primary page.
+ */
+ if (dbc_opd != NULL) {
+ cp->lock.off = LOCK_INVALID;
+ if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ goto err;
+ if ((ret = __bam_c_physdel(dbc)) != 0)
+ goto err;
+ } else
+ *rmroot = 1;
+err:
+done: /*
+ * Discard the page references and locks, and confirm that the stack
+ * has been emptied.
+ */
+ if (dbc_opd != NULL) {
+ DISCARD_CUR(dbc_opd, t_ret);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+ DISCARD_CUR(dbc, t_ret);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Downgrade any CDB lock we acquired. */
+ if (cdb_lock)
+ (void)__lock_downgrade(
+ dbp->dbenv, &dbc->mylock, DB_LOCK_IWRITE, 0);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_destroy --
+ * Close a single cursor -- internal version.
+ */
+static int
+__bam_c_destroy(dbc)
+ DBC *dbc;
+{
+ /* Discard the structures. */
+ __os_free(dbc->internal, sizeof(BTREE_CURSOR));
+
+ return (0);
+}
+
+/*
+ * __bam_c_count --
+ * Return a count of on and off-page duplicates.
+ *
+ * PUBLIC: int __bam_c_count __P((DBC *, db_recno_t *));
+ */
+int
+__bam_c_count(dbc, recnop)
+ DBC *dbc;
+ db_recno_t *recnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_indx_t indx, top;
+ db_recno_t recno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Called with the top-level cursor that may reference an off-page
+ * duplicates page. If it's a set of on-page duplicates, get the
+ * page and count. Otherwise, get the root page of the off-page
+ * duplicate tree, and use the count. We don't have to acquire any
+ * new locks, we have to have a read lock to even get here.
+ */
+ if (cp->opd == NULL) {
+ if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ return (ret);
+
+ /*
+ * Move back to the beginning of the set of duplicates and
+ * then count forward.
+ */
+ for (indx = cp->indx;; indx -= P_INDX)
+ if (indx == 0 ||
+ !IS_DUPLICATE(dbc, indx, indx - P_INDX))
+ break;
+ for (recno = 1, top = NUM_ENT(cp->page) - P_INDX;
+ indx < top; ++recno, indx += P_INDX)
+ if (!IS_DUPLICATE(dbc, indx, indx + P_INDX))
+ break;
+ *recnop = recno;
+ } else {
+ if ((ret = memp_fget(dbp->mpf,
+ &cp->opd->internal->root, 0, &cp->page)) != 0)
+ return (ret);
+
+ *recnop = RE_NREC(cp->page);
+ }
+
+ ret = memp_fput(dbp->mpf, cp->page, 0);
+ cp->page = NULL;
+
+ return (ret);
+}
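
__bam_c_count backs the public DBC->c_count method. A hedged usage sketch (print_dup_count is an invented name; the flags argument is passed as 0 and the cursor is assumed to be positioned on a key):

    #include <stdio.h>
    #include <db.h>

    static int
    print_dup_count(DBC *dbc)
    {
        db_recno_t cnt;
        int ret;

        if ((ret = dbc->c_count(dbc, &cnt, 0)) != 0)
            return (ret);
        printf("current key has %lu data item(s)\n", (unsigned long)cnt);
        return (0);
    }
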
+
+/*
+ * __bam_c_del --
+ * Delete using a cursor.
+ */
+static int
+__bam_c_del(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /* If the item was already deleted, return failure. */
+ if (F_ISSET(cp, C_DELETED))
+ return (DB_KEYEMPTY);
+
+ /*
+ * This code is always called with a page lock but no page.
+ */
+ DB_ASSERT(cp->page == NULL);
+
+ /*
+ * We don't physically delete the record until the cursor moves, so
+	 * we have to have a long-lived write lock on the page instead of
+ * a long-lived read lock. Note, we have to have a read lock to even
+ * get here.
+ *
+ * If we're maintaining record numbers, we lock the entire tree, else
+ * we lock the single page.
+ */
+ if (F_ISSET(cp, C_RECNUM)) {
+ if ((ret = __bam_c_getstack(dbc)) != 0)
+ goto err;
+ cp->page = cp->csp->page;
+ } else {
+ ACQUIRE_CUR(dbc, DB_LOCK_WRITE, ret);
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Log the change. */
+ if (DB_LOGGING(dbc) &&
+ (ret = __bam_cdel_log(dbp->dbenv, dbc->txn, &LSN(cp->page), 0,
+ dbp->log_fileid, PGNO(cp->page), &LSN(cp->page), cp->indx)) != 0)
+ goto err;
+
+ /* Set the intent-to-delete flag on the page. */
+ if (TYPE(cp->page) == P_LBTREE)
+ B_DSET(GET_BKEYDATA(cp->page, cp->indx + O_INDX)->type);
+ else
+ B_DSET(GET_BKEYDATA(cp->page, cp->indx)->type);
+
+ /* Mark the page dirty. */
+ ret = memp_fset(dbp->mpf, cp->page, DB_MPOOL_DIRTY);
+
+err: /*
+ * If we've been successful so far and the tree has record numbers,
+ * adjust the record counts. Either way, release acquired page(s).
+ */
+ if (F_ISSET(cp, C_RECNUM)) {
+ if (ret == 0)
+ ret = __bam_adjust(dbc, -1);
+ (void)__bam_stkrel(dbc, 0);
+ } else
+ if (cp->page != NULL &&
+ (t_ret = memp_fput(dbp->mpf, cp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ cp->page = NULL;
+
+ /* Update the cursors last, after all chance of failure is past. */
+ if (ret == 0)
+ (void)__bam_ca_delete(dbp, cp->pgno, cp->indx, 1);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_dup --
+ * Duplicate a btree cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __bam_c_dup __P((DBC *, DBC *));
+ */
+int
+__bam_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ BTREE_CURSOR *orig, *new;
+ int ret;
+
+ orig = (BTREE_CURSOR *)orig_dbc->internal;
+ new = (BTREE_CURSOR *)new_dbc->internal;
+
+ /*
+ * If we're holding a lock we need to acquire a copy of it, unless
+ * we're in a transaction. We don't need to copy any lock we're
+ * holding inside a transaction because all the locks are retained
+ * until the transaction commits or aborts.
+ */
+ if (orig->lock.off != LOCK_INVALID && orig_dbc->txn == NULL) {
+ if ((ret = __db_lget(new_dbc,
+ 0, new->pgno, new->lock_mode, 0, &new->lock)) != 0)
+ return (ret);
+ }
+ new->ovflsize = orig->ovflsize;
+ new->recno = orig->recno;
+ new->flags = orig->flags;
+
+ return (0);
+}
+
+/*
+ * __bam_c_get --
+ * Get using a cursor (btree).
+ */
+static int
+__bam_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_pgno_t orig_pgno;
+ db_indx_t orig_indx;
+ int exact, newopd, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ orig_pgno = cp->pgno;
+ orig_indx = cp->indx;
+
+ newopd = 0;
+ switch (flags) {
+ case DB_CURRENT:
+ /* It's not possible to return a deleted record. */
+ if (F_ISSET(cp, C_DELETED)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ /*
+ * Acquire the current page. We have at least a read-lock
+ * already. The caller may have set DB_RMW asking for a
+ * write lock, but upgrading to a write lock has no better
+ * chance of succeeding now instead of later, so don't try.
+ */
+ if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ goto err;
+ break;
+ case DB_FIRST:
+ newopd = 1;
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ break;
+ case DB_GET_BOTH:
+ /*
+ * There are two ways to get here based on DBcursor->c_get
+ * with the DB_GET_BOTH flag set:
+ *
+ * 1. Searching a sorted off-page duplicate tree: do a tree
+ * search.
+ *
+ * 2. Searching a btree: do a tree search. If it returns a
+ * reference to an off-page duplicate tree, return immediately
+ * and let our caller deal with it. If the search doesn't
+ * return a reference to an off-page duplicate tree, start an
+ * on-page search.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if ((ret = __bam_c_search(
+ dbc, data, DB_GET_BOTH, &exact)) != 0)
+ goto err;
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ } else {
+ if ((ret = __bam_c_search(
+ dbc, key, DB_GET_BOTH, &exact)) != 0)
+ return (ret);
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ if (pgnop != NULL && __bam_isopd(dbc, pgnop)) {
+ newopd = 1;
+ break;
+ }
+ if ((ret = __bam_getboth_finddatum(dbc, data)) != 0)
+ goto err;
+ }
+ break;
+ case DB_GET_BOTHC:
+ if ((ret = __bam_getbothc(dbc, data)) != 0)
+ goto err;
+ break;
+ case DB_LAST:
+ newopd = 1;
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ break;
+ case DB_NEXT:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ } else
+ if ((ret = __bam_c_next(dbc, 1)) != 0)
+ goto err;
+ break;
+ case DB_NEXT_DUP:
+ if ((ret = __bam_c_next(dbc, 1)) != 0)
+ goto err;
+ if (!IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
+ case DB_NEXT_NODUP:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ } else
+ do {
+ if ((ret = __bam_c_next(dbc, 1)) != 0)
+ goto err;
+ } while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx));
+ break;
+ case DB_PREV:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ } else
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ goto err;
+ break;
+ case DB_PREV_NODUP:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ } else
+ do {
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ goto err;
+ } while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx));
+ break;
+ case DB_SET:
+ case DB_SET_RECNO:
+ newopd = 1;
+ if ((ret = __bam_c_search(dbc, key, flags, &exact)) != 0)
+ goto err;
+ break;
+ case DB_SET_RANGE:
+ newopd = 1;
+ if ((ret = __bam_c_search(dbc, key, flags, &exact)) != 0)
+ goto err;
+
+ /*
+ * As we didn't require an exact match, the search function
+ * may have returned an entry past the end of the page. Or,
+ * we may be referencing a deleted record. If so, move to
+ * the next entry.
+ */
+ if (cp->indx == NUM_ENT(cp->page) || IS_CUR_DELETED(dbc))
+ if ((ret = __bam_c_next(dbc, 0)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__bam_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * We may have moved to an off-page duplicate tree. Return that
+ * information to our caller.
+ */
+ if (newopd && pgnop != NULL)
+ (void)__bam_isopd(dbc, pgnop);
+
+ /* Don't return the key; it was passed to us. */
+ if (flags == DB_SET)
+ F_SET(key, DB_DBT_ISSET);
+
+err: /*
+ * Regardless of whether we were successful, if the cursor moved at
+ * all, clear the delete flag: DBcursor->c_get never references a
+ * deleted key.
+ */
+ if (F_ISSET(cp, C_DELETED)
+ && (cp->pgno != orig_pgno || cp->indx != orig_indx))
+ F_CLR(cp, C_DELETED);
+
+ return (ret);
+}
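+
+/*
+ * Illustrative sketch (not part of the original source): a range scan that
+ * ends up in __bam_c_get with DB_SET_RANGE and then DB_NEXT, using the
+ * public DB 3.x cursor API. The callback type, the name ex_range_scan and
+ * the EXAMPLE_USAGE_SKETCH guard are hypothetical; the guard is never
+ * defined.
+ */
+#ifdef EXAMPLE_USAGE_SKETCH
+static int
+ex_range_scan(dbp, txn, start, cb)
+	DB *dbp;
+	DB_TXN *txn;
+	DBT *start;
+	int (*cb) __P((DBT *, DBT *));
+{
+	DBC *dbc;
+	DBT key, data;
+	int ret, t_ret;
+
+	if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+		return (ret);
+
+	/* Start at the smallest key >= *start, then walk forward. */
+	key = *start;
+	memset(&data, 0, sizeof(data));
+	for (ret = dbc->c_get(dbc, &key, &data, DB_SET_RANGE);
+	    ret == 0; ret = dbc->c_get(dbc, &key, &data, DB_NEXT))
+		if ((ret = cb(&key, &data)) != 0)
+			break;
+	if (ret == DB_NOTFOUND)		/* Normal end of the scan. */
+		ret = 0;
+
+	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+#endif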
+
+/*
+ * __bam_getbothc --
+ * Search for a matching data item on a join.
+ */
+static int
+__bam_getbothc(dbc, data)
+ DBC *dbc;
+ DBT *data;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ int cmp, exact, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Acquire the current page. We have at least a read-lock
+ * already. The caller may have set DB_RMW asking for a
+ * write lock, but upgrading to a write lock has no better
+ * chance of succeeding now instead of later, so don't try.
+ */
+ if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ return (ret);
+
+ /*
+ * An off-page duplicate cursor. Search the remaining duplicates
+ * for one which matches (do a normal btree search, then verify
+ * that the retrieved record is greater than the original one).
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ /*
+ * Check to make sure the desired item comes strictly after
+ * the current position; if it doesn't, return DB_NOTFOUND.
+ */
+ if ((ret = __bam_cmp(dbp, data, cp->page, cp->indx,
+ dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare,
+ &cmp)) != 0)
+ return (ret);
+
+ if (cmp <= 0)
+ return (DB_NOTFOUND);
+
+ /* Discard the current page, we're going to do a full search. */
+ if ((ret = memp_fput(dbp->mpf, cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+
+ return (__bam_c_search(dbc, data, DB_GET_BOTH, &exact));
+ }
+
+ /*
+ * We're doing a DBC->c_get(DB_GET_BOTHC) and we're already searching
+ * a set of on-page duplicates (either sorted or unsorted). Continue
+ * a linear search from after the current position.
+ *
+ * (Note that we could have just finished a "set" of one duplicate,
+ * i.e. not a duplicate at all, but the following check will always
+ * return DB_NOTFOUND in this case, which is the desired behavior.)
+ */
+ if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+ !IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX))
+ return (DB_NOTFOUND);
+ cp->indx += P_INDX;
+
+ return (__bam_getboth_finddatum(dbc, data));
+}
+
+/*
+ * __bam_getboth_finddatum --
+ * Find a matching on-page data item.
+ */
+static int
+__bam_getboth_finddatum(dbc, data)
+ DBC *dbc;
+ DBT *data;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_indx_t base, lim, top;
+ int cmp, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Called (sometimes indirectly) from DBC->get to search on-page data
+ * item(s) for a matching value. If the original flag was DB_GET_BOTH,
+ * the cursor argument is set to the first data item for the key. If
+ * the original flag was DB_GET_BOTHC, the cursor argument is set to
+ * the first data item that we can potentially return. In both cases,
+ * there may or may not be additional duplicate data items to search.
+ *
+ * If the duplicates are not sorted, do a linear search.
+ *
+ * If the duplicates are sorted, do a binary search. The reason for
+ * this is that large pages and small key/data pairs result in large
+ * numbers of on-page duplicates before they get pushed off-page.
+ */
+ if (dbp->dup_compare == NULL) {
+ for (;; cp->indx += P_INDX) {
+ if (!IS_CUR_DELETED(dbc) &&
+ (ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, __bam_defcmp, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0)
+ return (0);
+
+ if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+ !IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX))
+ break;
+ }
+ } else {
+ /*
+ * Find the top and bottom of the duplicate set. Binary search
+ * requires at least two items; don't loop if there's only one.
+ */
+ for (base = top = cp->indx;
+ top < NUM_ENT(cp->page); top += P_INDX)
+ if (!IS_DUPLICATE(dbc, cp->indx, top))
+ break;
+ if (base == (top - P_INDX)) {
+ if ((ret = __bam_cmp(dbp, data,
+ cp->page, cp->indx + O_INDX,
+ dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ return (cmp == 0 ? 0 : DB_NOTFOUND);
+ }
+
+ for (lim =
+ (top - base) / (db_indx_t)P_INDX; lim != 0; lim >>= 1) {
+ cp->indx = base + ((lim >> 1) * P_INDX);
+ if ((ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0) {
+ if (!IS_CUR_DELETED(dbc))
+ return (0);
+ break;
+ }
+ if (cmp > 0) {
+ base = cp->indx + P_INDX;
+ --lim;
+ }
+ }
+ }
+ return (DB_NOTFOUND);
+}
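+
+/*
+ * Illustrative sketch (not part of the original source): a DB_GET_BOTH
+ * lookup, which lands in __bam_getboth_finddatum when the duplicates are
+ * kept on-page, via the public DB 3.x cursor API. The name
+ * ex_find_key_data_pair and the EXAMPLE_USAGE_SKETCH guard are
+ * hypothetical; the guard is never defined.
+ */
+#ifdef EXAMPLE_USAGE_SKETCH
+static int
+ex_find_key_data_pair(dbp, txn, key, data)
+	DB *dbp;
+	DB_TXN *txn;
+	DBT *key, *data;
+{
+	DBC *dbc;
+	int ret, t_ret;
+
+	if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+		return (ret);
+
+	/*
+	 * Succeeds only if this exact key/data pair exists; returns
+	 * DB_NOTFOUND otherwise.
+	 */
+	ret = dbc->c_get(dbc, key, data, DB_GET_BOTH);
+
+	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+#endif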
+
+/*
+ * __bam_c_put --
+ * Put using a cursor.
+ */
+static int
+__bam_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT dbt;
+ u_int32_t iiop;
+ int cmp, exact, needkey, ret, stack;
+ void *arg;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+split: needkey = ret = stack = 0;
+ switch (flags) {
+ case DB_AFTER:
+ case DB_BEFORE:
+ case DB_CURRENT:
+ needkey = 1;
+ iiop = flags;
+
+ /*
+ * If the Btree has record numbers (and we're not replacing an
+ * existing record), we need a complete stack so that we can
+ * adjust the record counts. The check for flags == DB_CURRENT
+ * is superfluous but left in for clarity. (If C_RECNUM is set
+ * we know that flags must be DB_CURRENT, as DB_AFTER/DB_BEFORE
+ * are illegal in a Btree unless it's configured for duplicates
+ * and you cannot configure a Btree for both record renumbering
+ * and duplicates.)
+ */
+ if (flags == DB_CURRENT &&
+ F_ISSET(cp, C_RECNUM) && F_ISSET(cp, C_DELETED)) {
+ if ((ret = __bam_c_getstack(dbc)) != 0)
+ goto err;
+ /*
+ * Initialize the cursor from the stack. Don't take
+ * the page number or page index, they should already
+ * be set.
+ */
+ cp->page = cp->csp->page;
+ cp->lock = cp->csp->lock;
+ cp->lock_mode = cp->csp->lock_mode;
+
+ stack = 1;
+ break;
+ }
+
+ /* Acquire the current page with a write lock. */
+ ACQUIRE_WRITE_LOCK(dbc, ret);
+ if (ret != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ goto err;
+ break;
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ case DB_NODUPDATA:
+ /*
+ * Searching off-page, sorted duplicate tree: do a tree search
+ * for the correct item; __bam_c_search returns the smallest
+ * slot greater than the key, use it.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if ((ret =
+ __bam_c_search(dbc, data, flags, &exact)) != 0)
+ goto err;
+ stack = 1;
+
+ /* Disallow "sorted" duplicate duplicates. */
+ if (exact) {
+ ret = __db_duperr(dbp, flags);
+ goto err;
+ }
+ iiop = DB_BEFORE;
+ break;
+ }
+
+ /* Searching a btree. */
+ if ((ret = __bam_c_search(dbc, key,
+ flags == DB_KEYFIRST || dbp->dup_compare != NULL ?
+ DB_KEYFIRST : DB_KEYLAST, &exact)) != 0)
+ goto err;
+ stack = 1;
+
+ /*
+ * If we don't have an exact match, __bam_c_search returned
+ * the smallest slot greater than the key, use it.
+ */
+ if (!exact) {
+ iiop = DB_KEYFIRST;
+ break;
+ }
+
+ /*
+ * If duplicates aren't supported, replace the current item.
+ * (If implementing the DB->put function, our caller already
+ * checked the DB_NOOVERWRITE flag.)
+ */
+ if (!F_ISSET(dbp, DB_AM_DUP)) {
+ iiop = DB_CURRENT;
+ break;
+ }
+
+ /*
+ * If we find a matching entry, it may be an off-page duplicate
+ * tree. Return the page number to our caller, we need a new
+ * cursor.
+ */
+ if (pgnop != NULL && __bam_isopd(dbc, pgnop))
+ goto done;
+
+ /* If the duplicates aren't sorted, move to the right slot. */
+ if (dbp->dup_compare == NULL) {
+ if (flags == DB_KEYFIRST)
+ iiop = DB_BEFORE;
+ else
+ for (;; cp->indx += P_INDX)
+ if (cp->indx + P_INDX >=
+ NUM_ENT(cp->page) ||
+ !IS_DUPLICATE(dbc, cp->indx,
+ cp->indx + P_INDX)) {
+ iiop = DB_AFTER;
+ break;
+ }
+ break;
+ }
+
+ /*
+ * We know that we're looking at the first of a set of sorted
+ * on-page duplicates. Walk the list to find the right slot.
+ */
+ for (;; cp->indx += P_INDX) {
+ if ((ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ if (cmp < 0) {
+ iiop = DB_BEFORE;
+ break;
+ }
+
+ /* Disallow "sorted" duplicate duplicates. */
+ if (cmp == 0) {
+ if (IS_DELETED(cp->page, cp->indx)) {
+ iiop = DB_CURRENT;
+ break;
+ }
+ ret = __db_duperr(dbp, flags);
+ goto err;
+ }
+
+ if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+ ((PAGE *)cp->page)->inp[cp->indx] !=
+ ((PAGE *)cp->page)->inp[cp->indx + P_INDX]) {
+ iiop = DB_AFTER;
+ break;
+ }
+ }
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__bam_c_put", flags);
+ goto err;
+ }
+
+ switch (ret = __bam_iitem(dbc, key, data, iiop, 0)) {
+ case 0:
+ break;
+ case DB_NEEDSPLIT:
+ /*
+ * To split, we need a key for the page. Either use the key
+ * argument or get a copy of the key from the page.
+ */
+ if (flags == DB_AFTER ||
+ flags == DB_BEFORE || flags == DB_CURRENT) {
+ memset(&dbt, 0, sizeof(DBT));
+ if ((ret = __db_ret(dbp, cp->page, 0, &dbt,
+ &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ goto err;
+ arg = &dbt;
+ } else
+ arg = F_ISSET(dbc, DBC_OPD) ? data : key;
+
+ /*
+ * Discard any locks and pinned pages (the locks are discarded
+ * even if we're running with transactions, as they lock pages
+ * that we're sorry we ever acquired). If stack is set and the
+ * cursor entries are valid, they point to the same entries as
+ * the stack, don't free them twice.
+ */
+ if (stack)
+ ret = __bam_stkrel(dbc, STK_CLRDBC | STK_NOLOCK);
+ else
+ DISCARD_CUR(dbc, ret);
+ if (ret != 0)
+ goto err;
+
+ /* Split the tree. */
+ if ((ret = __bam_split(dbc, arg)) != 0)
+ return (ret);
+
+ goto split;
+ default:
+ goto err;
+ }
+
+err:
+done: /*
+ * Discard any pages pinned in the tree and their locks, except for
+ * the leaf page. Note, the leaf page participated in any stack we
+ * acquired, and so we have to adjust the stack as necessary. If
+ * there was only a single page on the stack, we don't have to free
+ * further stack pages.
+ */
+ if (stack && BT_STK_POP(cp) != NULL)
+ (void)__bam_stkrel(dbc, 0);
+
+ /*
+ * Regardless of whether we were successful or not, clear the delete
+ * flag. If we're successful, we either moved the cursor or the item
+ * is no longer deleted. If we're not successful, then we're just a
+ * copy, no need to have the flag set.
+ */
+ F_CLR(cp, C_DELETED);
+
+ return (ret);
+}
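+
+/*
+ * Illustrative sketch (not part of the original source): an insert that
+ * reaches __bam_c_put with DB_KEYFIRST via the public DB 3.x cursor API.
+ * The name ex_cursor_put and the EXAMPLE_USAGE_SKETCH guard are
+ * hypothetical; the guard is never defined.
+ */
+#ifdef EXAMPLE_USAGE_SKETCH
+static int
+ex_cursor_put(dbp, txn, key, data)
+	DB *dbp;
+	DB_TXN *txn;
+	DBT *key, *data;
+{
+	DBC *dbc;
+	int ret, t_ret;
+
+	if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+		return (ret);
+
+	/*
+	 * DB_KEYFIRST stores the pair at its sorted location; if the item
+	 * doesn't fit, __bam_c_put splits the tree and retries.
+	 */
+	ret = dbc->c_put(dbc, key, data, DB_KEYFIRST);
+
+	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+#endif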
+
+/*
+ * __bam_c_rget --
+ * Return the record number for a cursor.
+ *
+ * PUBLIC: int __bam_c_rget __P((DBC *, DBT *, u_int32_t));
+ */
+int
+__bam_c_rget(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT dbt;
+ db_recno_t recno;
+ int exact, ret;
+
+ COMPQUIET(flags, 0);
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Get the page with the current item on it.
+ * Get a copy of the key.
+ * Release the page, making sure we don't release it twice.
+ */
+ if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ return (ret);
+ memset(&dbt, 0, sizeof(DBT));
+ if ((ret = __db_ret(dbp, cp->page,
+ cp->indx, &dbt, &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ goto err;
+ ret = memp_fput(dbp->mpf, cp->page, 0);
+ cp->page = NULL;
+ if (ret != 0)
+ return (ret);
+
+ if ((ret = __bam_search(dbc, &dbt,
+ F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND,
+ 1, &recno, &exact)) != 0)
+ goto err;
+
+ ret = __db_retcopy(dbp, data,
+ &recno, sizeof(recno), &dbc->rdata.data, &dbc->rdata.ulen);
+
+ /* Release the stack. */
+err: __bam_stkrel(dbc, 0);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_writelock --
+ * Upgrade the cursor to a write lock.
+ */
+static int
+__bam_c_writelock(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ int ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ if (cp->lock_mode == DB_LOCK_WRITE)
+ return (0);
+
+ /*
+ * When writing to an off-page duplicate tree, we need to have the
+ * appropriate page in the primary tree locked. The general DBC
+ * code calls us first with the primary cursor so we can acquire the
+ * appropriate lock.
+ */
+ ACQUIRE_WRITE_LOCK(dbc, ret);
+ return (ret);
+}
+
+/*
+ * __bam_c_first --
+ * Return the first record.
+ */
+static int
+__bam_c_first(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_pgno_t pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /* Walk down the left-hand side of the tree. */
+ for (pgno = cp->root;;) {
+ ACQUIRE_CUR_SET(dbc, DB_LOCK_READ, pgno, ret);
+ if (ret != 0)
+ return (ret);
+
+ /* If we find a leaf page, we're done. */
+ if (ISLEAF(cp->page))
+ break;
+
+ pgno = GET_BINTERNAL(cp->page, 0)->pgno;
+ }
+
+ /* If we want a write lock instead of a read lock, get it now. */
+ if (F_ISSET(dbc, DBC_RMW)) {
+ ACQUIRE_WRITE_LOCK(dbc, ret);
+ if (ret != 0)
+ return (ret);
+ }
+
+ /* If on an empty page or a deleted record, move to the next one. */
+ if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))
+ if ((ret = __bam_c_next(dbc, 0)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __bam_c_last --
+ * Return the last record.
+ */
+static int
+__bam_c_last(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_pgno_t pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /* Walk down the right-hand side of the tree. */
+ for (pgno = cp->root;;) {
+ ACQUIRE_CUR_SET(dbc, DB_LOCK_READ, pgno, ret);
+ if (ret != 0)
+ return (ret);
+
+ /* If we find a leaf page, we're done. */
+ if (ISLEAF(cp->page))
+ break;
+
+ pgno =
+ GET_BINTERNAL(cp->page, NUM_ENT(cp->page) - O_INDX)->pgno;
+ }
+
+ /* If we want a write lock instead of a read lock, get it now. */
+ if (F_ISSET(dbc, DBC_RMW)) {
+ ACQUIRE_WRITE_LOCK(dbc, ret);
+ if (ret != 0)
+ return (ret);
+ }
+
+ cp->indx = NUM_ENT(cp->page) == 0 ? 0 :
+ NUM_ENT(cp->page) -
+ (TYPE(cp->page) == P_LBTREE ? P_INDX : O_INDX);
+
+ /* If on an empty page or a deleted record, move to the previous one. */
+ if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __bam_c_next --
+ * Move to the next record.
+ */
+static int
+__bam_c_next(dbc, initial_move)
+ DBC *dbc;
+ int initial_move;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_indx_t adjust;
+ db_lockmode_t lock_mode;
+ db_pgno_t pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * We're either moving through a page of duplicates or a btree leaf
+ * page.
+ *
+ * !!!
+ * This code handles empty pages and pages with only deleted entries.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ adjust = O_INDX;
+ lock_mode = DB_LOCK_NG;
+ } else {
+ adjust = dbc->dbtype == DB_BTREE ? P_INDX : O_INDX;
+ lock_mode =
+ F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ }
+ if (cp->page == NULL) {
+ ACQUIRE_CUR(dbc, lock_mode, ret);
+ if (ret != 0)
+ return (ret);
+ }
+
+ if (initial_move)
+ cp->indx += adjust;
+
+ for (;;) {
+ /*
+ * If at the end of the page, move to a subsequent page.
+ *
+ * !!!
+ * Check for >= NUM_ENT. If the original search landed us on
+ * NUM_ENT, we may have incremented indx before the test.
+ */
+ if (cp->indx >= NUM_ENT(cp->page)) {
+ if ((pgno = NEXT_PGNO(cp->page)) == PGNO_INVALID)
+ return (DB_NOTFOUND);
+
+ ACQUIRE_CUR_SET(dbc, lock_mode, pgno, ret);
+ if (ret != 0)
+ return (ret);
+ continue;
+ }
+ if (IS_CUR_DELETED(dbc)) {
+ cp->indx += adjust;
+ continue;
+ }
+ break;
+ }
+ return (0);
+}
+
+/*
+ * __bam_c_prev --
+ * Move to the previous record.
+ */
+static int
+__bam_c_prev(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_indx_t adjust;
+ db_lockmode_t lock_mode;
+ db_pgno_t pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * We're either moving through a page of duplicates or a btree leaf
+ * page.
+ *
+ * !!!
+ * This code handles empty pages and pages with only deleted entries.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ adjust = O_INDX;
+ lock_mode = DB_LOCK_NG;
+ } else {
+ adjust = dbc->dbtype == DB_BTREE ? P_INDX : O_INDX;
+ lock_mode =
+ F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ }
+ if (cp->page == NULL) {
+ ACQUIRE_CUR(dbc, lock_mode, ret);
+ if (ret != 0)
+ return (ret);
+ }
+
+ for (;;) {
+ /* If at the beginning of the page, move to a previous one. */
+ if (cp->indx == 0) {
+ if ((pgno = PREV_PGNO(cp->page)) == PGNO_INVALID)
+ return (DB_NOTFOUND);
+
+ ACQUIRE_CUR_SET(dbc, lock_mode, pgno, ret);
+ if (ret != 0)
+ return (ret);
+
+ if ((cp->indx = NUM_ENT(cp->page)) == 0)
+ continue;
+ }
+
+ /* Ignore deleted records. */
+ cp->indx -= adjust;
+ if (IS_CUR_DELETED(dbc))
+ continue;
+
+ break;
+ }
+ return (0);
+}
+
+/*
+ * __bam_c_search --
+ * Move to a specified record.
+ */
+static int
+__bam_c_search(dbc, key, flags, exactp)
+ DBC *dbc;
+ const DBT *key;
+ u_int32_t flags;
+ int *exactp;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ PAGE *h;
+ db_indx_t indx;
+ db_pgno_t bt_lpgno;
+ db_recno_t recno;
+ u_int32_t sflags;
+ int cmp, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ ret = 0;
+
+ /*
+ * Find an entry in the database. Discard any lock we currently hold,
+ * we're going to search the tree.
+ */
+ DISCARD_CUR(dbc, ret);
+ if (ret != 0)
+ return (ret);
+
+ switch (flags) {
+ case DB_SET_RECNO:
+ if ((ret = __ram_getno(dbc, key, &recno, 0)) != 0)
+ return (ret);
+ sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT;
+ if ((ret = __bam_rsearch(dbc, &recno, sflags, 1, exactp)) != 0)
+ return (ret);
+ break;
+ case DB_SET:
+ case DB_GET_BOTH:
+ sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT;
+ goto search;
+ case DB_SET_RANGE:
+ sflags =
+ (F_ISSET(dbc, DBC_RMW) ? S_WRITE : S_READ) | S_DUPFIRST;
+ goto search;
+ case DB_KEYFIRST:
+ sflags = S_KEYFIRST;
+ goto fast_search;
+ case DB_KEYLAST:
+ case DB_NODUPDATA:
+ sflags = S_KEYLAST;
+fast_search: /*
+ * If the application has a history of inserting into the first
+ * or last pages of the database, we check those pages first to
+ * avoid doing a full search.
+ *
+ * If the tree has record numbers, we need a complete stack so
+ * that we can adjust the record counts, so fast_search isn't
+ * possible.
+ */
+ if (F_ISSET(cp, C_RECNUM))
+ goto search;
+
+ /*
+ * !!!
+ * We do not mutex protect the t->bt_lpgno field, which means
+ * that it can only be used in an advisory manner. If we find
+ * a page we can use, great. If we don't, we don't care; we do
+ * it the slow way instead. Regardless, copy it into a local
+ * variable, otherwise we might acquire a lock for a page and
+ * then read a different page because it changed underfoot.
+ */
+ bt_lpgno = t->bt_lpgno;
+
+ /*
+ * If the tree has no history of insertion, do it the slow way.
+ */
+ if (bt_lpgno == PGNO_INVALID)
+ goto search;
+
+ /* Lock and retrieve the page on which we last inserted. */
+ h = NULL;
+ ACQUIRE(dbc,
+ DB_LOCK_WRITE, bt_lpgno, cp->lock, bt_lpgno, h, ret);
+ if (ret != 0)
+ goto fast_miss;
+
+ /*
+ * It's okay if the page type isn't right or it's empty; it
+ * just means that the world changed.
+ */
+ if (TYPE(h) != P_LBTREE || NUM_ENT(h) == 0)
+ goto fast_miss;
+
+ /*
+ * What we do here is test to see if we're at the beginning or
+ * end of the tree and if the new item sorts before/after the
+ * first/last page entry. We don't try and catch inserts into
+ * the middle of the tree (although we could, as long as there
+ * were two keys on the page and we saved both the index and
+ * the page number of the last insert).
+ */
+ if (h->next_pgno == PGNO_INVALID) {
+ indx = NUM_ENT(h) - P_INDX;
+ if ((ret = __bam_cmp(dbp,
+ key, h, indx, t->bt_compare, &cmp)) != 0)
+ return (ret);
+
+ if (cmp < 0)
+ goto try_begin;
+ if (cmp > 0) {
+ indx += P_INDX;
+ goto fast_hit;
+ }
+
+ /*
+ * Found a duplicate. If doing DB_KEYLAST, we're at
+ * the correct position, otherwise, move to the first
+ * of the duplicates. If we're looking at off-page
+ * duplicates, duplicate duplicates aren't permitted,
+ * so we're done.
+ */
+ if (flags == DB_KEYLAST)
+ goto fast_hit;
+ for (;
+ indx > 0 && h->inp[indx - P_INDX] == h->inp[indx];
+ indx -= P_INDX)
+ ;
+ goto fast_hit;
+ }
+try_begin: if (h->prev_pgno == PGNO_INVALID) {
+ indx = 0;
+ if ((ret = __bam_cmp(dbp,
+ key, h, indx, t->bt_compare, &cmp)) != 0)
+ return (ret);
+
+ if (cmp > 0)
+ goto fast_miss;
+ if (cmp < 0)
+ goto fast_hit;
+
+ /*
+ * Found a duplicate. If doing DB_KEYFIRST, we're at
+ * the correct position, otherwise, move to the last
+ * of the duplicates. If we're looking at off-page
+ * duplicates, duplicate duplicates aren't permitted,
+ * so we're done.
+ */
+ if (flags == DB_KEYFIRST)
+ goto fast_hit;
+ for (;
+ indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
+ h->inp[indx] == h->inp[indx + P_INDX];
+ indx += P_INDX)
+ ;
+ goto fast_hit;
+ }
+ goto fast_miss;
+
+fast_hit: /* Set the exact match flag, we may have found a duplicate. */
+ *exactp = cmp == 0;
+
+ /*
+ * Insert the entry in the stack. (Our caller is likely to
+ * call __bam_stkrel() after our return.)
+ */
+ BT_STK_CLR(cp);
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, cp->lock, cp->lock_mode, ret);
+ if (ret != 0)
+ return (ret);
+ break;
+
+fast_miss: /*
+ * This was not the right page, so we do not need to retain
+ * the lock even in the presence of transactions.
+ */
+ DISCARD(dbc, 1, cp->lock, h, ret);
+ if (ret != 0)
+ return (ret);
+
+search: if ((ret =
+ __bam_search(dbc, key, sflags, 1, NULL, exactp)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_unknown_flag(dbp->dbenv, "__bam_c_search", flags));
+ }
+
+ /* Initialize the cursor from the stack. */
+ cp->page = cp->csp->page;
+ cp->pgno = cp->csp->page->pgno;
+ cp->indx = cp->csp->indx;
+ cp->lock = cp->csp->lock;
+ cp->lock_mode = cp->csp->lock_mode;
+
+ /*
+ * If we inserted a key into the first or last slot of the tree,
+ * remember where it was so we can do it more quickly next time.
+ */
+ if (TYPE(cp->page) == P_LBTREE &&
+ (flags == DB_KEYFIRST || flags == DB_KEYLAST))
+ t->bt_lpgno =
+ (NEXT_PGNO(cp->page) == PGNO_INVALID &&
+ cp->indx >= NUM_ENT(cp->page)) ||
+ (PREV_PGNO(cp->page) == PGNO_INVALID &&
+ cp->indx == 0) ? cp->pgno : PGNO_INVALID;
+ return (0);
+}
+
+/*
+ * __bam_c_physdel --
+ * Physically remove an item from the page.
+ */
+static int
+__bam_c_physdel(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT key;
+ DB_LOCK lock;
+ PAGE *h;
+ db_pgno_t pgno;
+ int delete_page, empty_page, exact, level, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ delete_page = empty_page = ret = 0;
+
+ /* If the page is going to be emptied, consider deleting it. */
+ delete_page = empty_page =
+ NUM_ENT(cp->page) == (TYPE(cp->page) == P_LBTREE ? 2 : 1);
+
+ /*
+ * Check if the application turned off reverse splits. Applications
+ * can't turn off reverse splits in off-page duplicate trees, that
+ * space will never be reused unless the exact same key is specified.
+ */
+ if (delete_page &&
+ !F_ISSET(dbc, DBC_OPD) && F_ISSET(dbp, DB_BT_REVSPLIT))
+ delete_page = 0;
+
+ /*
+ * We never delete the last leaf page. (Not really true -- we delete
+ * the last leaf page of off-page duplicate trees, but that's handled
+ * by our caller, not down here.)
+ */
+ if (delete_page && cp->pgno == cp->root)
+ delete_page = 0;
+
+ /*
+ * To delete a leaf page other than an empty root page, we need a
+ * copy of a key from the page. Use the 0th page index since it's
+ * the last key the page held.
+ */
+ if (delete_page) {
+ memset(&key, 0, sizeof(DBT));
+ if ((ret = __db_ret(dbp, cp->page,
+ 0, &key, &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ return (ret);
+ }
+
+ /*
+ * Delete the items. If page isn't empty, we adjust the cursors.
+ *
+ * !!!
+ * The following operations to delete a page may deadlock. The easy
+ * scenario is if we're deleting an item because we're closing cursors
+ * because we've already deadlocked and want to call txn_abort(). If
+ * we fail due to deadlock, we'll leave a locked, possibly empty page
+ * in the tree, which won't be empty long because we'll undo the delete
+ * when we undo the transaction's modifications.
+ *
+ * !!!
+ * Delete the key item first, otherwise the on-page duplicate checks
+ * in __bam_ditem() won't work!
+ */
+ if (TYPE(cp->page) == P_LBTREE) {
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ return (ret);
+ if (!empty_page)
+ if ((ret = __bam_ca_di(dbc,
+ PGNO(cp->page), cp->indx, -1)) != 0)
+ return (ret);
+ }
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ return (ret);
+ if (!empty_page)
+ if ((ret = __bam_ca_di(dbc, PGNO(cp->page), cp->indx, -1)) != 0)
+ return (ret);
+
+ /* If we're not going to try and delete the page, we're done. */
+ if (!delete_page)
+ return (0);
+
+ /*
+ * Call __bam_search to reacquire the empty leaf page, but this time
+ * get both the leaf page and its parent, locked. Jump back up the
+ * tree until we have the top pair of pages that we want to delete.
+ * Once we have the top page that we want to delete locked, lock the
+ * underlying pages and check to make sure they're still empty. If
+ * they are, delete them.
+ */
+ for (level = LEAFLEVEL;; ++level) {
+ /* Acquire a page and its parent, locked. */
+ if ((ret = __bam_search(
+ dbc, &key, S_WRPAIR, level, NULL, &exact)) != 0)
+ return (ret);
+
+ /*
+ * If we reach the root or the parent page isn't going to be
+ * empty when we delete one record, stop.
+ */
+ h = cp->csp[-1].page;
+ if (h->pgno == cp->root || NUM_ENT(h) != 1)
+ break;
+
+ /* Discard the stack, retaining no locks. */
+ (void)__bam_stkrel(dbc, STK_NOLOCK);
+ }
+
+ /*
+ * Move the stack pointer one after the last entry, we may be about
+ * to push more items onto the page stack.
+ */
+ ++cp->csp;
+
+ /*
+ * cp->csp[-2].page is now the parent page, which we may or may not be
+ * going to delete, and cp->csp[-1].page is the first page we know we
+ * are going to delete. Walk down the chain of pages, acquiring pages
+ * until we've acquired a leaf page. Generally, this shouldn't happen;
+ * we should only see a single internal page with one item and a single
+ * leaf page with no items. The scenario where we could see something
+ * else is if reverse splits were turned off for a while and then turned
+ * back on. That could result in all sorts of strangeness, e.g., empty
+ * pages in the tree, trees that looked like linked lists, and so on.
+ *
+ * !!!
+ * Sheer paranoia: if we find any pages that aren't going to be emptied
+ * by the delete, someone else added an item while we were walking the
+ * tree, and we discontinue the delete. Shouldn't be possible, but we
+ * check regardless.
+ */
+ for (h = cp->csp[-1].page;;) {
+ if (ISLEAF(h)) {
+ if (NUM_ENT(h) != 0)
+ break;
+ break;
+ } else
+ if (NUM_ENT(h) != 1)
+ break;
+
+ /*
+ * Get the next page, write lock it and push it onto the stack.
+ * We know it's index 0, because it can only have one element.
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ pgno = GET_BINTERNAL(h, 0)->pgno;
+ break;
+ case P_IRECNO:
+ pgno = GET_RINTERNAL(h, 0)->pgno;
+ break;
+ default:
+ return (__db_pgfmt(dbp, PGNO(h)));
+ }
+
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &lock)) != 0)
+ break;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ break;
+ BT_STK_PUSH(dbp->dbenv, cp, h, 0, lock, DB_LOCK_WRITE, ret);
+ if (ret != 0)
+ break;
+ }
+
+ /* Adjust the cursor stack to reference the last page on the stack. */
+ BT_STK_POP(cp);
+
+ /*
+ * If everything worked, delete the stack, otherwise, release the
+ * stack and page locks without further damage.
+ */
+ if (ret == 0)
+ ret = __bam_dpages(dbc, cp->sp);
+ else
+ (void)__bam_stkrel(dbc, 0);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_getstack --
+ * Acquire a full stack for a cursor.
+ */
+static int
+__bam_c_getstack(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT dbt;
+ PAGE *h;
+ int exact, ret, t_ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Get the page with the current item on it. The caller of this
+ * routine has to already hold a read lock on the page, so there
+ * is no additional lock to acquire.
+ */
+ if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &h)) != 0)
+ return (ret);
+
+ /* Get a copy of a key from the page. */
+ memset(&dbt, 0, sizeof(DBT));
+ if ((ret = __db_ret(dbp,
+ h, 0, &dbt, &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ goto err;
+
+ /* Get a write-locked stack for the page. */
+ exact = 0;
+ ret = __bam_search(dbc, &dbt, S_KEYFIRST, 1, NULL, &exact);
+
+err: /* Discard the key and the page. */
+ if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __bam_isopd --
+ * Return whether the cursor references an off-page duplicate tree,
+ * returning the tree's page number through pgnop if so.
+ */
+static int
+__bam_isopd(dbc, pgnop)
+ DBC *dbc;
+ db_pgno_t *pgnop;
+{
+ BOVERFLOW *bo;
+
+ if (TYPE(dbc->internal->page) != P_LBTREE)
+ return (0);
+
+ bo = GET_BOVERFLOW(dbc->internal->page, dbc->internal->indx + O_INDX);
+ if (B_TYPE(bo->type) == B_DUPLICATE) {
+ *pgnop = bo->pgno;
+ return (1);
+ }
+ return (0);
+}
diff --git a/bdb/btree/bt_delete.c b/bdb/btree/bt_delete.c
new file mode 100644
index 00000000000..9725887882a
--- /dev/null
+++ b/bdb/btree/bt_delete.c
@@ -0,0 +1,530 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_delete.c,v 11.31 2001/01/17 18:48:46 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "lock.h"
+
+/*
+ * __bam_delete --
+ * Delete the items referenced by a key.
+ *
+ * PUBLIC: int __bam_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ */
+int
+__bam_delete(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DBT lkey;
+ DBT data;
+ u_int32_t f_init, f_next;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del");
+ DB_CHECK_TXN(dbp, txn);
+
+ /* Check for invalid flags. */
+ if ((ret =
+ __db_delchk(dbp, key, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
+ return (ret);
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, txn, "bam_delete", key, NULL, flags);
+
+ /*
+ * Walk a cursor through the key/data pairs, deleting as we go. Set
+ * the DB_DBT_USERMEM flag, as this might be a threaded application
+ * and the flags checking will catch us. We don't actually want the
+ * keys or data, so request a partial of length 0.
+ */
+ memset(&lkey, 0, sizeof(lkey));
+ F_SET(&lkey, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ /*
+ * If locking (and we haven't already acquired CDB locks), set the
+ * read-modify-write flag.
+ */
+ f_init = DB_SET;
+ f_next = DB_NEXT_DUP;
+ if (STD_LOCKING(dbc)) {
+ f_init |= DB_RMW;
+ f_next |= DB_RMW;
+ }
+
+ /* Walk through the set of key/data pairs, deleting as we go. */
+ if ((ret = dbc->c_get(dbc, key, &data, f_init)) != 0)
+ goto err;
+ for (;;) {
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+ if ((ret = dbc->c_get(dbc, &lkey, &data, f_next)) != 0) {
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ break;
+ }
+ goto err;
+ }
+ }
+
+err: /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
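+
+/*
+ * Illustrative sketch (not part of the original source): the application
+ * view of the routine above, which is reached through DB->del in the
+ * public DB 3.x API. The name ex_delete_key and the EXAMPLE_USAGE_SKETCH
+ * guard are hypothetical; the guard is never defined.
+ */
+#ifdef EXAMPLE_USAGE_SKETCH
+static int
+ex_delete_key(dbp, txn, keystr)
+	DB *dbp;
+	DB_TXN *txn;
+	char *keystr;
+{
+	DBT key;
+
+	/* Delete every item (including duplicates) stored under keystr. */
+	memset(&key, 0, sizeof(key));
+	key.data = keystr;
+	key.size = (u_int32_t)strlen(keystr);
+	return (dbp->del(dbp, txn, &key, 0));
+}
+#endif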
+
+/*
+ * __bam_ditem --
+ * Delete one or more entries from a page.
+ *
+ * PUBLIC: int __bam_ditem __P((DBC *, PAGE *, u_int32_t));
+ */
+int
+__bam_ditem(dbc, h, indx)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ DB *dbp;
+ u_int32_t nbytes;
+ int ret;
+
+ dbp = dbc->dbp;
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ bi = GET_BINTERNAL(h, indx);
+ switch (B_TYPE(bi->type)) {
+ case B_DUPLICATE:
+ case B_KEYDATA:
+ nbytes = BINTERNAL_SIZE(bi->len);
+ break;
+ case B_OVERFLOW:
+ nbytes = BINTERNAL_SIZE(bi->len);
+ if ((ret =
+ __db_doff(dbc, ((BOVERFLOW *)bi->data)->pgno)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp, PGNO(h)));
+ }
+ break;
+ case P_IRECNO:
+ nbytes = RINTERNAL_SIZE;
+ break;
+ case P_LBTREE:
+ /*
+ * If it's a duplicate key, discard the index and don't touch
+ * the actual page item.
+ *
+ * !!!
+ * This works because no data item can have an index matching
+ * any other index so even if the data item is in a key "slot",
+ * it won't match any other index.
+ */
+ if ((indx % 2) == 0) {
+ /*
+ * Check for a duplicate after us on the page. NOTE:
+ * we have to delete the key item before deleting the
+ * data item, otherwise the "indx + P_INDX" calculation
+ * won't work!
+ */
+ if (indx + P_INDX < (u_int32_t)NUM_ENT(h) &&
+ h->inp[indx] == h->inp[indx + P_INDX])
+ return (__bam_adjindx(dbc,
+ h, indx, indx + O_INDX, 0));
+ /*
+ * Check for a duplicate before us on the page. It
+ * doesn't matter if we delete the key item before or
+ * after the data item for the purposes of this one.
+ */
+ if (indx > 0 && h->inp[indx] == h->inp[indx - P_INDX])
+ return (__bam_adjindx(dbc,
+ h, indx, indx - P_INDX, 0));
+ }
+ /* FALLTHROUGH */
+ case P_LDUP:
+ case P_LRECNO:
+ bk = GET_BKEYDATA(h, indx);
+ switch (B_TYPE(bk->type)) {
+ case B_DUPLICATE:
+ nbytes = BOVERFLOW_SIZE;
+ break;
+ case B_OVERFLOW:
+ nbytes = BOVERFLOW_SIZE;
+ if ((ret = __db_doff(
+ dbc, (GET_BOVERFLOW(h, indx))->pgno)) != 0)
+ return (ret);
+ break;
+ case B_KEYDATA:
+ nbytes = BKEYDATA_SIZE(bk->len);
+ break;
+ default:
+ return (__db_pgfmt(dbp, PGNO(h)));
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp, PGNO(h)));
+ }
+
+ /* Delete the item and mark the page dirty. */
+ if ((ret = __db_ditem(dbc, h, indx, nbytes)) != 0)
+ return (ret);
+ if ((ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __bam_adjindx --
+ * Adjust an index on the page.
+ *
+ * PUBLIC: int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int));
+ */
+int
+__bam_adjindx(dbc, h, indx, indx_copy, is_insert)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx, indx_copy;
+ int is_insert;
+{
+ DB *dbp;
+ db_indx_t copy;
+ int ret;
+
+ dbp = dbc->dbp;
+
+ /* Log the change. */
+ if (DB_LOGGING(dbc) &&
+ (ret = __bam_adj_log(dbp->dbenv, dbc->txn, &LSN(h),
+ 0, dbp->log_fileid, PGNO(h), &LSN(h), indx, indx_copy,
+ (u_int32_t)is_insert)) != 0)
+ return (ret);
+
+ /* Shuffle the indices and mark the page dirty. */
+ if (is_insert) {
+ copy = h->inp[indx_copy];
+ if (indx != NUM_ENT(h))
+ memmove(&h->inp[indx + O_INDX], &h->inp[indx],
+ sizeof(db_indx_t) * (NUM_ENT(h) - indx));
+ h->inp[indx] = copy;
+ ++NUM_ENT(h);
+ } else {
+ --NUM_ENT(h);
+ if (indx != NUM_ENT(h))
+ memmove(&h->inp[indx], &h->inp[indx + O_INDX],
+ sizeof(db_indx_t) * (NUM_ENT(h) - indx));
+ }
+ if ((ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __bam_dpages --
+ * Delete a set of locked pages.
+ *
+ * PUBLIC: int __bam_dpages __P((DBC *, EPG *));
+ */
+int
+__bam_dpages(dbc, stack_epg)
+ DBC *dbc;
+ EPG *stack_epg;
+{
+ BTREE_CURSOR *cp;
+ BINTERNAL *bi;
+ DB *dbp;
+ DBT a, b;
+ DB_LOCK c_lock, p_lock;
+ EPG *epg;
+ PAGE *child, *parent;
+ db_indx_t nitems;
+ db_pgno_t pgno, root_pgno;
+ db_recno_t rcnt;
+ int done, ret, t_ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * We have the entire stack of deletable pages locked.
+ *
+ * Btree calls us with a pointer to the beginning of a stack, where
+ * the first page in the stack is to have a single item deleted, and
+ * the rest of the pages are to be removed.
+ *
+ * Recno calls us with a pointer into the middle of the stack, where
+ * the referenced page is to have a single item deleted, and pages
+ * after the stack reference are to be removed.
+ *
+ * First, discard any pages that we don't care about.
+ */
+ ret = 0;
+ for (epg = cp->sp; epg < stack_epg; ++epg) {
+ if ((t_ret =
+ memp_fput(dbp->mpf, epg->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, epg->lock);
+ }
+ if (ret != 0)
+ goto err;
+
+ /*
+ * !!!
+ * There is an interesting deadlock situation here. We have to relink
+ * the leaf page chain around the leaf page being deleted. Consider
+ * a cursor walking through the leaf pages that has the previous page
+ * read-locked and is waiting on a lock for the page we're deleting.
+ * It will deadlock here. Before we unlink the subtree, we relink the
+ * leaf page chain.
+ */
+ if ((ret = __db_relink(dbc, DB_REM_PAGE, cp->csp->page, NULL, 1)) != 0)
+ goto err;
+
+ /*
+ * Delete the last item that references the underlying pages that are
+ * to be deleted, and adjust cursors that reference that page. Then,
+ * save that page's page number and item count and release it. If
+ * the application isn't retaining locks because it's running without
+ * transactions, this lets the rest of the tree get back to business
+ * immediately.
+ */
+ if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
+ goto err;
+ if ((ret = __bam_ca_di(dbc, PGNO(epg->page), epg->indx, -1)) != 0)
+ goto err;
+
+ pgno = PGNO(epg->page);
+ nitems = NUM_ENT(epg->page);
+
+ if ((ret = memp_fput(dbp->mpf, epg->page, 0)) != 0)
+ goto err_inc;
+ (void)__TLPUT(dbc, epg->lock);
+
+ /* Free the rest of the pages in the stack. */
+ while (++epg <= cp->csp) {
+ /*
+ * Delete page entries so they will be restored as part of
+ * recovery. We don't need to do cursor adjustment here as
+ * the pages are being emptied by definition and so cannot
+ * be referenced by a cursor.
+ */
+ if (NUM_ENT(epg->page) != 0) {
+ DB_ASSERT(NUM_ENT(epg->page) == 1);
+
+ if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
+ goto err;
+ }
+
+ if ((ret = __db_free(dbc, epg->page)) != 0) {
+ epg->page = NULL;
+ goto err_inc;
+ }
+ (void)__TLPUT(dbc, epg->lock);
+ }
+
+ if (0) {
+err_inc: ++epg;
+err: for (; epg <= cp->csp; ++epg) {
+ if (epg->page != NULL)
+ (void)memp_fput(dbp->mpf, epg->page, 0);
+ (void)__TLPUT(dbc, epg->lock);
+ }
+ BT_STK_CLR(cp);
+ return (ret);
+ }
+ BT_STK_CLR(cp);
+
+ /*
+ * If we just deleted the next-to-last item from the root page, the
+ * tree can collapse one or more levels. While there remains only a
+ * single item on the root page, write lock the last page referenced
+ * by the root page and copy it over the root page.
+ */
+ root_pgno = cp->root;
+ if (pgno != root_pgno || nitems != 1)
+ return (0);
+
+ for (done = 0; !done;) {
+ /* Initialize. */
+ parent = child = NULL;
+ p_lock.off = c_lock.off = LOCK_INVALID;
+
+ /* Lock the root. */
+ pgno = root_pgno;
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &p_lock)) != 0)
+ goto stop;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &parent)) != 0)
+ goto stop;
+
+ if (NUM_ENT(parent) != 1)
+ goto stop;
+
+ switch (TYPE(parent)) {
+ case P_IBTREE:
+ /*
+ * If this is overflow, then try to delete it.
+ * The child may or may not still point at it.
+ */
+ bi = GET_BINTERNAL(parent, 0);
+ if (B_TYPE(bi->type) == B_OVERFLOW)
+ if ((ret = __db_doff(dbc,
+ ((BOVERFLOW *)bi->data)->pgno)) != 0)
+ goto stop;
+ pgno = bi->pgno;
+ break;
+ case P_IRECNO:
+ pgno = GET_RINTERNAL(parent, 0)->pgno;
+ break;
+ default:
+ goto stop;
+ }
+
+ /* Lock the child page. */
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &c_lock)) != 0)
+ goto stop;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &child)) != 0)
+ goto stop;
+
+ /* Log the change. */
+ if (DB_LOGGING(dbc)) {
+ memset(&a, 0, sizeof(a));
+ a.data = child;
+ a.size = dbp->pgsize;
+ memset(&b, 0, sizeof(b));
+ b.data = P_ENTRY(parent, 0);
+ b.size = TYPE(parent) == P_IRECNO ? RINTERNAL_SIZE :
+ BINTERNAL_SIZE(((BINTERNAL *)b.data)->len);
+ if ((ret =
+ __bam_rsplit_log(dbp->dbenv, dbc->txn, &child->lsn,
+ 0, dbp->log_fileid, PGNO(child), &a, PGNO(parent),
+ RE_NREC(parent), &b, &parent->lsn)) != 0)
+ goto stop;
+ }
+
+ /*
+ * Make the switch.
+ *
+ * One fixup -- internal pages below the top level do not store
+ * a record count, so we have to preserve it if we're not
+ * converting to a leaf page. Note also that we are about to
+ * overwrite the parent page, including its LSN. This is OK
+ * because the log message we wrote describing this update
+ * stores its LSN on the child page. When the child is copied
+ * onto the parent, the correct LSN is copied into place.
+ */
+ COMPQUIET(rcnt, 0);
+ if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL)
+ rcnt = RE_NREC(parent);
+ memcpy(parent, child, dbp->pgsize);
+ PGNO(parent) = root_pgno;
+ if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL)
+ RE_NREC_SET(parent, rcnt);
+
+ /* Mark the pages dirty. */
+ if ((ret = memp_fset(dbp->mpf, parent, DB_MPOOL_DIRTY)) != 0)
+ goto stop;
+ if ((ret = memp_fset(dbp->mpf, child, DB_MPOOL_DIRTY)) != 0)
+ goto stop;
+
+ /* Adjust the cursors. */
+ if ((ret = __bam_ca_rsplit(dbc, PGNO(child), root_pgno)) != 0)
+ goto stop;
+
+ /*
+ * Free the page copied onto the root page and discard its
+ * lock. (The call to __db_free() discards our reference
+ * to the page.)
+ */
+ if ((ret = __db_free(dbc, child)) != 0) {
+ child = NULL;
+ goto stop;
+ }
+ child = NULL;
+
+ if (0) {
+stop: done = 1;
+ }
+ if (p_lock.off != LOCK_INVALID)
+ (void)__TLPUT(dbc, p_lock);
+ if (parent != NULL &&
+ (t_ret = memp_fput(dbp->mpf, parent, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (c_lock.off != LOCK_INVALID)
+ (void)__TLPUT(dbc, c_lock);
+ if (child != NULL &&
+ (t_ret = memp_fput(dbp->mpf, child, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
diff --git a/bdb/btree/bt_method.c b/bdb/btree/bt_method.c
new file mode 100644
index 00000000000..5e3af27d033
--- /dev/null
+++ b/bdb/btree/bt_method.c
@@ -0,0 +1,387 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_method.c,v 11.20 2000/11/30 00:58:28 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "qam.h"
+
+static int __bam_set_bt_compare
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+static int __bam_set_bt_maxkey __P((DB *, u_int32_t));
+static int __bam_set_bt_minkey __P((DB *, u_int32_t));
+static int __bam_set_bt_prefix
+ __P((DB *, size_t(*)(DB *, const DBT *, const DBT *)));
+static int __ram_set_re_delim __P((DB *, int));
+static int __ram_set_re_len __P((DB *, u_int32_t));
+static int __ram_set_re_pad __P((DB *, int));
+static int __ram_set_re_source __P((DB *, const char *));
+
+/*
+ * __bam_db_create --
+ * Btree specific initialization of the DB structure.
+ *
+ * PUBLIC: int __bam_db_create __P((DB *));
+ */
+int
+__bam_db_create(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ int ret;
+
+ /* Allocate and initialize the private btree structure. */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(BTREE), &t)) != 0)
+ return (ret);
+ dbp->bt_internal = t;
+
+ t->bt_minkey = DEFMINKEYPAGE; /* Btree */
+ t->bt_compare = __bam_defcmp;
+ t->bt_prefix = __bam_defpfx;
+
+ dbp->set_bt_compare = __bam_set_bt_compare;
+ dbp->set_bt_maxkey = __bam_set_bt_maxkey;
+ dbp->set_bt_minkey = __bam_set_bt_minkey;
+ dbp->set_bt_prefix = __bam_set_bt_prefix;
+
+ t->re_pad = ' '; /* Recno */
+ t->re_delim = '\n';
+ t->re_eof = 1;
+
+ dbp->set_re_delim = __ram_set_re_delim;
+ dbp->set_re_len = __ram_set_re_len;
+ dbp->set_re_pad = __ram_set_re_pad;
+ dbp->set_re_source = __ram_set_re_source;
+
+ return (0);
+}
+
+/*
+ * __bam_db_close --
+ * Btree specific discard of the DB structure.
+ *
+ * PUBLIC: int __bam_db_close __P((DB *));
+ */
+int
+__bam_db_close(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+
+ t = dbp->bt_internal;
+ /* Recno */
+ /* Close any backing source file descriptor. */
+ if (t->re_fp != NULL)
+ (void)fclose(t->re_fp);
+
+ /* Free any backing source file name. */
+ if (t->re_source != NULL)
+ __os_freestr(t->re_source);
+
+ __os_free(t, sizeof(BTREE));
+ dbp->bt_internal = NULL;
+
+ return (0);
+}
+
+/*
+ * __bam_set_flags --
+ * Set Btree specific flags.
+ *
+ * PUBLIC: int __bam_set_flags __P((DB *, u_int32_t *flagsp));
+ */
+int
+__bam_set_flags(dbp, flagsp)
+ DB *dbp;
+ u_int32_t *flagsp;
+{
+ u_int32_t flags;
+
+ flags = *flagsp;
+ if (LF_ISSET(DB_DUP | DB_DUPSORT | DB_RECNUM | DB_REVSPLITOFF)) {
+ DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags");
+
+ /*
+ * The DB_DUP and DB_DUPSORT flags are shared by the Hash
+ * and Btree access methods.
+ */
+ if (LF_ISSET(DB_DUP | DB_DUPSORT))
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH);
+
+ if (LF_ISSET(DB_RECNUM | DB_REVSPLITOFF))
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ if (LF_ISSET(DB_DUP | DB_DUPSORT)) {
+ /* DB_DUP/DB_DUPSORT is incompatible with DB_RECNUM. */
+ if (F_ISSET(dbp, DB_BT_RECNUM))
+ goto incompat;
+
+ if (LF_ISSET(DB_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ F_SET(dbp, DB_AM_DUPSORT);
+ }
+
+ F_SET(dbp, DB_AM_DUP);
+ LF_CLR(DB_DUP | DB_DUPSORT);
+ }
+
+ if (LF_ISSET(DB_RECNUM)) {
+ /* DB_RECNUM is incompatible with DB_DUP/DB_DUPSORT. */
+ if (F_ISSET(dbp, DB_AM_DUP))
+ goto incompat;
+
+ F_SET(dbp, DB_BT_RECNUM);
+ LF_CLR(DB_RECNUM);
+ }
+
+ if (LF_ISSET(DB_REVSPLITOFF)) {
+ F_SET(dbp, DB_BT_REVSPLIT);
+ LF_CLR(DB_REVSPLITOFF);
+ }
+
+ *flagsp = flags;
+ }
+ return (0);
+
+incompat:
+ return (__db_ferr(dbp->dbenv, "DB->set_flags", 1));
+}
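+
+/*
+ * Illustrative sketch (not part of the original source): how an
+ * application typically uses the configuration methods installed by
+ * __bam_db_create, assuming the public DB 3.x API (db_create, DB->open).
+ * The name ex_open_dupsort_btree and the EXAMPLE_USAGE_SKETCH guard are
+ * hypothetical; the guard is never defined, so none of this is compiled.
+ */
+#ifdef EXAMPLE_USAGE_SKETCH
+static int
+ex_open_dupsort_btree(dbenv, name, dbpp)
+	DB_ENV *dbenv;
+	const char *name;
+	DB **dbpp;
+{
+	DB *dbp;
+	int ret;
+
+	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+		return (ret);
+
+	/* Flags and btree parameters must be set before DB->open. */
+	if ((ret = dbp->set_flags(dbp, DB_DUPSORT)) != 0 ||
+	    (ret = dbp->set_bt_minkey(dbp, 4)) != 0 ||
+	    (ret = dbp->open(dbp,
+	    name, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+		(void)dbp->close(dbp, 0);
+		return (ret);
+	}
+
+	*dbpp = dbp;
+	return (0);
+}
+#endif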
+
+/*
+ * __bam_set_bt_compare --
+ * Set the comparison function.
+ */
+static int
+__bam_set_bt_compare(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_compare");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ /*
+ * Can't default the prefix routine if the user supplies a comparison
+ * routine; shortening the keys can break their comparison algorithm.
+ */
+ t->bt_compare = func;
+ if (t->bt_prefix == __bam_defpfx)
+ t->bt_prefix = NULL;
+
+ return (0);
+}
+
+/*
+ * __bam_set_bt_maxkey --
+ * Set the maximum keys per page.
+ */
+static int
+__bam_set_bt_maxkey(dbp, bt_maxkey)
+ DB *dbp;
+ u_int32_t bt_maxkey;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_maxkey");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ if (bt_maxkey < 1) {
+ __db_err(dbp->dbenv, "minimum bt_maxkey value is 1");
+ return (EINVAL);
+ }
+
+ t->bt_maxkey = bt_maxkey;
+ return (0);
+}
+
+/*
+ * __bam_set_bt_minkey --
+ * Set the minimum keys per page.
+ */
+static int
+__bam_set_bt_minkey(dbp, bt_minkey)
+ DB *dbp;
+ u_int32_t bt_minkey;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_minkey");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ if (bt_minkey < 2) {
+ __db_err(dbp->dbenv, "minimum bt_minkey value is 2");
+ return (EINVAL);
+ }
+
+ t->bt_minkey = bt_minkey;
+ return (0);
+}
+
+/*
+ * __bam_set_bt_prefix --
+ * Set the prefix function.
+ */
+static int
+__bam_set_bt_prefix(dbp, func)
+ DB *dbp;
+ size_t (*func) __P((DB *, const DBT *, const DBT *));
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_prefix");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ t->bt_prefix = func;
+ return (0);
+}
+
+/*
+ * __ram_set_flags --
+ * Set Recno specific flags.
+ *
+ * PUBLIC: int __ram_set_flags __P((DB *, u_int32_t *flagsp));
+ */
+int
+__ram_set_flags(dbp, flagsp)
+ DB *dbp;
+ u_int32_t *flagsp;
+{
+ u_int32_t flags;
+
+ flags = *flagsp;
+ if (LF_ISSET(DB_RENUMBER | DB_SNAPSHOT)) {
+ DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags");
+
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ if (LF_ISSET(DB_RENUMBER)) {
+ F_SET(dbp, DB_RE_RENUMBER);
+ LF_CLR(DB_RENUMBER);
+ }
+
+ if (LF_ISSET(DB_SNAPSHOT)) {
+ F_SET(dbp, DB_RE_SNAPSHOT);
+ LF_CLR(DB_SNAPSHOT);
+ }
+
+ *flagsp = flags;
+ }
+ return (0);
+}
+
+/*
+ * __ram_set_re_delim --
+ * Set the variable-length input record delimiter.
+ */
+static int
+__ram_set_re_delim(dbp, re_delim)
+ DB *dbp;
+ int re_delim;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_delim");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+
+ t->re_delim = re_delim;
+ F_SET(dbp, DB_RE_DELIMITER);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_len --
+ * Set the fixed-length input record length.
+ */
+static int
+__ram_set_re_len(dbp, re_len)
+ DB *dbp;
+ u_int32_t re_len;
+{
+ BTREE *t;
+ QUEUE *q;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_len");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+ t->re_len = re_len;
+
+ q = dbp->q_internal;
+ q->re_len = re_len;
+
+ F_SET(dbp, DB_RE_FIXEDLEN);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_pad --
+ * Set the fixed-length record pad character.
+ */
+static int
+__ram_set_re_pad(dbp, re_pad)
+ DB *dbp;
+ int re_pad;
+{
+ BTREE *t;
+ QUEUE *q;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_pad");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+ t->re_pad = re_pad;
+
+ q = dbp->q_internal;
+ q->re_pad = re_pad;
+
+ F_SET(dbp, DB_RE_PAD);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_source --
+ * Set the backing source file name.
+ */
+static int
+__ram_set_re_source(dbp, re_source)
+ DB *dbp;
+ const char *re_source;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_source");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+
+ return (__os_strdup(dbp->dbenv, re_source, &t->re_source));
+}
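+
+/*
+ * Illustrative sketch (not part of the original source): configuring a
+ * fixed-length Recno database with a backing source file, using the
+ * methods above through the public DB 3.x API. The name
+ * ex_open_fixed_recno and the EXAMPLE_USAGE_SKETCH guard are hypothetical;
+ * the guard is never defined.
+ */
+#ifdef EXAMPLE_USAGE_SKETCH
+static int
+ex_open_fixed_recno(dbenv, name, source, dbpp)
+	DB_ENV *dbenv;
+	const char *name, *source;
+	DB **dbpp;
+{
+	DB *dbp;
+	int ret;
+
+	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+		return (ret);
+
+	/* Fixed-length, blank-padded 80-byte records, loaded from source. */
+	if ((ret = dbp->set_re_len(dbp, 80)) != 0 ||
+	    (ret = dbp->set_re_pad(dbp, ' ')) != 0 ||
+	    (ret = dbp->set_re_source(dbp, source)) != 0 ||
+	    (ret = dbp->open(dbp,
+	    name, NULL, DB_RECNO, DB_CREATE, 0664)) != 0) {
+		(void)dbp->close(dbp, 0);
+		return (ret);
+	}
+
+	*dbpp = dbp;
+	return (0);
+}
+#endif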
diff --git a/bdb/btree/bt_open.c b/bdb/btree/bt_open.c
new file mode 100644
index 00000000000..405c1880f5e
--- /dev/null
+++ b/bdb/btree/bt_open.c
@@ -0,0 +1,468 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_open.c,v 11.42 2000/11/30 00:58:28 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "btree.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "log.h"
+#include "mp.h"
+
+/*
+ * __bam_open --
+ * Open a btree.
+ *
+ * PUBLIC: int __bam_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+ */
+int
+__bam_open(dbp, name, base_pgno, flags)
+ DB *dbp;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTREE *t;
+
+ t = dbp->bt_internal;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->del = __bam_delete;
+ dbp->key_range = __bam_key_range;
+ dbp->stat = __bam_stat;
+
+ /*
+ * We don't permit the user to specify a prefix routine if they didn't
+ * also specify a comparison routine; they can't know enough about our
+ * default comparison routine to get a prefix routine right.
+ */
+ if (t->bt_compare == __bam_defcmp && t->bt_prefix != __bam_defpfx) {
+ __db_err(dbp->dbenv,
+"prefix comparison may not be specified for default comparison routine");
+ return (EINVAL);
+ }
+
+ /*
+ * Verify that the bt_minkey value specified won't cause the
+ * calculation of ovflsize to underflow [#2406] for this pagesize.
+ */
+ if (B_MINKEY_TO_OVFLSIZE(t->bt_minkey, dbp->pgsize) >
+ B_MINKEY_TO_OVFLSIZE(DEFMINKEYPAGE, dbp->pgsize)) {
+ __db_err(dbp->dbenv,
+ "bt_minkey value of %lu too high for page size of %lu",
+ (u_long)t->bt_minkey, (u_long)dbp->pgsize);
+ return (EINVAL);
+ }
+
+ /* Start up the tree. */
+ return (__bam_read_root(dbp, name, base_pgno, flags));
+}
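
The underflow guard above can be seen in isolation with a short, self-contained sketch. The overhead constants and the formula below are placeholders chosen for illustration, not Berkeley DB's real B_MINKEY_TO_OVFLSIZE macro; the point is only that the unsigned arithmetic wraps for a bt_minkey too large for the page, which makes the result compare larger than the value computed for the default minkey.

#include <stdio.h>
#include <stdint.h>

#define PAGE_OVERHEAD	26u	/* assumed page-header overhead, illustrative */
#define ITEM_OVERHEAD	38u	/* assumed per-item overhead, illustrative */
#define DEFMINKEY	2u	/* default minimum keys per page */

/* Bytes an item may occupy before it must be pushed to an overflow page. */
static uint32_t
minkey_to_ovflsize(uint32_t minkey, uint32_t pgsize)
{
	/* Unsigned on purpose: this is the wrap-around being guarded. */
	return ((pgsize - PAGE_OVERHEAD) / (minkey * 2)) - ITEM_OVERHEAD;
}

int
main(void)
{
	uint32_t minkey, pgsize = 512;

	for (minkey = 2; minkey <= 8; ++minkey)
		printf("minkey %2lu -> ovflsize %10lu%s\n",
		    (unsigned long)minkey,
		    (unsigned long)minkey_to_ovflsize(minkey, pgsize),
		    minkey_to_ovflsize(minkey, pgsize) >
		    minkey_to_ovflsize(DEFMINKEY, pgsize) ?
		    "  (rejected: underflow)" : "");
	return (0);
}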
+
+/*
+ * __bam_metachk --
+ *
+ * PUBLIC: int __bam_metachk __P((DB *, const char *, BTMETA *));
+ */
+int
+__bam_metachk(dbp, name, btm)
+ DB *dbp;
+ const char *name;
+ BTMETA *btm;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * At this point, all we know is that the magic number is for a Btree.
+ * Check the version, the database may be out of date.
+ */
+ vers = btm->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 6:
+ case 7:
+ __db_err(dbenv,
+ "%s: btree version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 8:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported btree version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __bam_mswap((PAGE *)btm)) != 0)
+ return (ret);
+
+ /*
+ * Check application info against metadata info, and set info, flags,
+ * and type based on metadata info.
+ */
+ if ((ret =
+ __db_fchk(dbenv, "DB->open", btm->dbmeta.flags, BTM_MASK)) != 0)
+ return (ret);
+
+ if (F_ISSET(&btm->dbmeta, BTM_RECNO)) {
+ if (dbp->type == DB_BTREE)
+ goto wrong_type;
+ dbp->type = DB_RECNO;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+ } else {
+ if (dbp->type == DB_RECNO)
+ goto wrong_type;
+ dbp->type = DB_BTREE;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ else
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "%s: DB_DUP specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_RECNUM)) {
+ if (dbp->type != DB_BTREE)
+ goto wrong_type;
+ F_SET(dbp, DB_BT_RECNUM);
+
+ if ((ret = __db_fcchk(dbenv,
+ "DB->open", dbp->flags, DB_AM_DUP, DB_BT_RECNUM)) != 0)
+ return (ret);
+ } else
+ if (F_ISSET(dbp, DB_BT_RECNUM)) {
+ __db_err(dbenv,
+ "%s: DB_RECNUM specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_FIXEDLEN)) {
+ if (dbp->type != DB_RECNO)
+ goto wrong_type;
+ F_SET(dbp, DB_RE_FIXEDLEN);
+ } else
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
+ __db_err(dbenv,
+ "%s: DB_FIXEDLEN specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_RENUMBER)) {
+ if (dbp->type != DB_RECNO)
+ goto wrong_type;
+ F_SET(dbp, DB_RE_RENUMBER);
+ } else
+ if (F_ISSET(dbp, DB_RE_RENUMBER)) {
+ __db_err(dbenv,
+ "%s: DB_RENUMBER specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+ else
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "%s: multiple databases specified but not supported by file",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ F_SET(dbp, DB_AM_DUPSORT);
+ } else
+ if (dbp->dup_compare != NULL) {
+ __db_err(dbenv,
+ "%s: duplicate sort specified but not supported in database",
+ name);
+ return (EINVAL);
+ }
+
+ /* Set the page size. */
+ dbp->pgsize = btm->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, btm->dbmeta.uid, DB_FILE_ID_LEN);
+
+ return (0);
+
+wrong_type:
+ if (dbp->type == DB_BTREE)
+ __db_err(dbenv,
+ "open method type is Btree, database type is Recno");
+ else
+ __db_err(dbenv,
+ "open method type is Recno, database type is Btree");
+ return (EINVAL);
+}
+
+/*
+ * __bam_read_root --
+ * Check (and optionally create) a tree.
+ *
+ * PUBLIC: int __bam_read_root __P((DB *, const char *, db_pgno_t, u_int32_t));
+ */
+int
+__bam_read_root(dbp, name, base_pgno, flags)
+ DB *dbp;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTMETA *meta;
+ BTREE *t;
+ DBC *dbc;
+ DB_LSN orig_lsn;
+ DB_LOCK metalock;
+ PAGE *root;
+ int locked, ret, t_ret;
+
+ ret = 0;
+ t = dbp->bt_internal;
+ meta = NULL;
+ root = NULL;
+ locked = 0;
+
+ /*
+ * Get a cursor. If DB_CREATE is specified, we may be creating
+ * the root page, and to do that safely in CDB we need a write
+ * cursor. In STD_LOCKING mode, we'll synchronize using the
+ * meta page lock instead.
+ */
+ if ((ret = dbp->cursor(dbp, dbp->open_txn,
+ &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbp->dbenv) ?
+ DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ /* Get, and optionally create the metadata page. */
+ if ((ret =
+ __db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = memp_fget(
+ dbp->mpf, &base_pgno, DB_MPOOL_CREATE, (PAGE **)&meta)) != 0)
+ goto err;
+
+ /*
+ * If the magic number is correct, we're not creating the tree.
+ * Correct any fields that may not be right. Note, all of the
+ * local flags were set by DB->open.
+ */
+again: if (meta->dbmeta.magic != 0) {
+ t->bt_maxkey = meta->maxkey;
+ t->bt_minkey = meta->minkey;
+ t->re_pad = meta->re_pad;
+ t->re_len = meta->re_len;
+
+ t->bt_meta = base_pgno;
+ t->bt_root = meta->root;
+
+ (void)memp_fput(dbp->mpf, meta, 0);
+ meta = NULL;
+ goto done;
+ }
+
+ /* In recovery, if it's not there, it will be created elsewhere. */
+ if (IS_RECOVERING(dbp->dbenv))
+ goto done;
+
+ /* If we're doing CDB, we now have to get the write lock. */
+ if (CDB_LOCKING(dbp->dbenv)) {
+ /*
+ * We'd better have DB_CREATE set if we're actually doing
+ * the create.
+ */
+ DB_ASSERT(LF_ISSET(DB_CREATE));
+ if ((ret = lock_get(dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &dbc->mylock)) != 0)
+ goto err;
+ }
+
+ /*
+ * If we are doing locking, release the read lock and get a write lock.
+ * We want to avoid deadlock.
+ */
+ if (locked == 0 && STD_LOCKING(dbc)) {
+ if ((ret = __LPUT(dbc, metalock)) != 0)
+ goto err;
+ if ((ret = __db_lget(dbc,
+ 0, base_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+ goto again;
+ }
+
+ /* Initialize the tree structure metadata information. */
+ orig_lsn = meta->dbmeta.lsn;
+ memset(meta, 0, sizeof(BTMETA));
+ meta->dbmeta.lsn = orig_lsn;
+ meta->dbmeta.pgno = base_pgno;
+ meta->dbmeta.magic = DB_BTREEMAGIC;
+ meta->dbmeta.version = DB_BTREEVERSION;
+ meta->dbmeta.pagesize = dbp->pgsize;
+ meta->dbmeta.type = P_BTREEMETA;
+ meta->dbmeta.free = PGNO_INVALID;
+ if (F_ISSET(dbp, DB_AM_DUP))
+ F_SET(&meta->dbmeta, BTM_DUP);
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN))
+ F_SET(&meta->dbmeta, BTM_FIXEDLEN);
+ if (F_ISSET(dbp, DB_BT_RECNUM))
+ F_SET(&meta->dbmeta, BTM_RECNUM);
+ if (F_ISSET(dbp, DB_RE_RENUMBER))
+ F_SET(&meta->dbmeta, BTM_RENUMBER);
+ if (F_ISSET(dbp, DB_AM_SUBDB))
+ F_SET(&meta->dbmeta, BTM_SUBDB);
+ if (dbp->dup_compare != NULL)
+ F_SET(&meta->dbmeta, BTM_DUPSORT);
+ if (dbp->type == DB_RECNO)
+ F_SET(&meta->dbmeta, BTM_RECNO);
+ memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ meta->maxkey = t->bt_maxkey;
+ meta->minkey = t->bt_minkey;
+ meta->re_len = t->re_len;
+ meta->re_pad = t->re_pad;
+
+ /* If necessary, log the meta-data and root page creates. */
+ if ((ret = __db_log_page(dbp,
+ name, &orig_lsn, base_pgno, (PAGE *)meta)) != 0)
+ goto err;
+
+ /* Create and initialize a root page. */
+ if ((ret = __db_new(dbc,
+ dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE, &root)) != 0)
+ goto err;
+ root->level = LEAFLEVEL;
+
+ if (dbp->open_txn != NULL && (ret = __bam_root_log(dbp->dbenv,
+ dbp->open_txn, &meta->dbmeta.lsn, 0, dbp->log_fileid,
+ meta->dbmeta.pgno, root->pgno, &meta->dbmeta.lsn)) != 0)
+ goto err;
+
+ meta->root = root->pgno;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, name);
+ if ((ret = __db_log_page(dbp,
+ name, &root->lsn, root->pgno, root)) != 0)
+ goto err;
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, name);
+
+ t->bt_meta = base_pgno;
+ t->bt_root = root->pgno;
+
+ /* Release the metadata and root pages. */
+ if ((ret = memp_fput(dbp->mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+ if ((ret = memp_fput(dbp->mpf, root, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ root = NULL;
+
+ /*
+ * Flush the metadata and root pages to disk.
+ *
+ * !!!
+ * It's not useful to return not-yet-flushed here -- convert it to
+ * an error.
+ */
+ if ((ret = memp_fsync(dbp->mpf)) == DB_INCOMPLETE) {
+ __db_err(dbp->dbenv, "Metapage flush failed");
+ ret = EINVAL;
+ }
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
+
+done: /*
+ * !!!
+ * We already did an insert and so the last-page-inserted has been
+ * set. I'm not sure where the *right* place to clear this value
+ * is, it's not intuitively obvious that it belongs here.
+ */
+ t->bt_lpgno = PGNO_INVALID;
+
+err:
+DB_TEST_RECOVERY_LABEL
+ /* Put any remaining pages back. */
+ if (meta != NULL)
+ if ((t_ret = memp_fput(dbp->mpf, meta, 0)) != 0 &&
+ ret == 0)
+ ret = t_ret;
+ if (root != NULL)
+ if ((t_ret = memp_fput(dbp->mpf, root, 0)) != 0 &&
+ ret == 0)
+ ret = t_ret;
+
+ /* We can release the metapage lock when we are done. */
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
diff --git a/bdb/btree/bt_put.c b/bdb/btree/bt_put.c
new file mode 100644
index 00000000000..19a04526d1b
--- /dev/null
+++ b/bdb/btree/bt_put.c
@@ -0,0 +1,859 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_put.c,v 11.46 2001/01/17 18:48:46 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+
+static int __bam_dup_convert __P((DBC *, PAGE *, u_int32_t));
+static int __bam_ovput
+ __P((DBC *, u_int32_t, db_pgno_t, PAGE *, u_int32_t, DBT *));
+
+/*
+ * __bam_iitem --
+ * Insert an item into the tree.
+ *
+ * PUBLIC: int __bam_iitem __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+ */
+int
+__bam_iitem(dbc, key, data, op, flags)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t op, flags;
+{
+ BKEYDATA *bk, bk_tmp;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT bk_hdr, tdbt;
+ PAGE *h;
+ db_indx_t indx;
+ u_int32_t data_size, have_bytes, need_bytes, needed;
+ int cmp, bigkey, bigdata, dupadjust, padrec, replace, ret, was_deleted;
+
+ COMPQUIET(bk, NULL);
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ h = cp->page;
+ indx = cp->indx;
+ dupadjust = replace = was_deleted = 0;
+
+ /*
+ * Fixed-length records with partial puts: it's an error to specify
+ * anything other than a simple overwrite.
+ */
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN) &&
+ F_ISSET(data, DB_DBT_PARTIAL) && data->dlen != data->size) {
+ data_size = data->size;
+ goto len_err;
+ }
+
+ /*
+ * Figure out how much space the data will take, including if it's a
+ * partial record.
+ *
+ * Fixed-length records: it's an error to specify a record that's
+ * longer than the fixed-length, and we never require less than
+ * the fixed-length record size.
+ */
+ data_size = F_ISSET(data, DB_DBT_PARTIAL) ?
+ __bam_partsize(op, data, h, indx) : data->size;
+ padrec = 0;
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
+ if (data_size > t->re_len) {
+len_err: __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)data_size);
+ return (EINVAL);
+ }
+ if (data_size < t->re_len) {
+ padrec = 1;
+ data_size = t->re_len;
+ }
+ }
+
+ /*
+ * Handle partial puts or short fixed-length records: build the
+ * real record.
+ */
+ if (padrec || F_ISSET(data, DB_DBT_PARTIAL)) {
+ tdbt = *data;
+ if ((ret =
+ __bam_build(dbc, op, &tdbt, h, indx, data_size)) != 0)
+ return (ret);
+ data = &tdbt;
+ }
+
+ /*
+ * If the user has specified a duplicate comparison function, return
+ * an error if DB_CURRENT was specified and the replacement data
+ * doesn't compare equal to the current data. This stops apps from
+ * screwing up the duplicate sort order. We have to do this after
+ * we build the real record so that we're comparing the real items.
+ */
+ if (op == DB_CURRENT && dbp->dup_compare != NULL) {
+ if ((ret = __bam_cmp(dbp, data, h,
+ indx + (TYPE(h) == P_LBTREE ? O_INDX : 0),
+ dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ if (cmp != 0) {
+ __db_err(dbp->dbenv,
+ "Current data differs from put data");
+ return (EINVAL);
+ }
+ }
+
+ /*
+ * If the key or data item won't fit on a page, we'll have to store
+ * them on overflow pages.
+ */
+ needed = 0;
+ bigdata = data_size > cp->ovflsize;
+ switch (op) {
+ case DB_KEYFIRST:
+ /* We're adding a new key and data pair. */
+ bigkey = key->size > cp->ovflsize;
+ if (bigkey)
+ needed += BOVERFLOW_PSIZE;
+ else
+ needed += BKEYDATA_PSIZE(key->size);
+ if (bigdata)
+ needed += BOVERFLOW_PSIZE;
+ else
+ needed += BKEYDATA_PSIZE(data_size);
+ break;
+ case DB_AFTER:
+ case DB_BEFORE:
+ case DB_CURRENT:
+ /*
+ * We're either overwriting the data item of a key/data pair
+ * or we're creating a new on-page duplicate and only adding
+ * a data item.
+ *
+ * !!!
+ * We're not currently correcting for space reclaimed from
+ * already deleted items, but I don't think it's worth the
+ * complexity.
+ */
+ bigkey = 0;
+ if (op == DB_CURRENT) {
+ bk = GET_BKEYDATA(h,
+ indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ if (B_TYPE(bk->type) == B_KEYDATA)
+ have_bytes = BKEYDATA_PSIZE(bk->len);
+ else
+ have_bytes = BOVERFLOW_PSIZE;
+ need_bytes = 0;
+ } else {
+ have_bytes = 0;
+ need_bytes = sizeof(db_indx_t);
+ }
+ if (bigdata)
+ need_bytes += BOVERFLOW_PSIZE;
+ else
+ need_bytes += BKEYDATA_PSIZE(data_size);
+
+ if (have_bytes < need_bytes)
+ needed += need_bytes - have_bytes;
+ break;
+ default:
+ return (__db_unknown_flag(dbp->dbenv, "__bam_iitem", op));
+ }
+
+ /*
+ * If there's not enough room, or the user has put a ceiling on the
+ * number of keys permitted in the page, split the page.
+ *
+ * XXX
+ * The t->bt_maxkey test here may be insufficient -- do we have to
+ * check in the btree split code, so we don't undo it there!?!?
+ */
+ if (P_FREESPACE(h) < needed ||
+ (t->bt_maxkey != 0 && NUM_ENT(h) > t->bt_maxkey))
+ return (DB_NEEDSPLIT);
+
+ /*
+ * The code breaks it up into five cases:
+ *
+ * 1. Insert a new key/data pair.
+ * 2. Append a new data item (a new duplicate).
+ * 3. Insert a new data item (a new duplicate).
+ * 4. Delete and re-add the data item (overflow item).
+ * 5. Overwrite the data item.
+ */
+ switch (op) {
+ case DB_KEYFIRST: /* 1. Insert a new key/data pair. */
+ if (bigkey) {
+ if ((ret = __bam_ovput(dbc,
+ B_OVERFLOW, PGNO_INVALID, h, indx, key)) != 0)
+ return (ret);
+ } else
+ if ((ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(key->size), NULL, key)) != 0)
+ return (ret);
+
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+ ++indx;
+ break;
+ case DB_AFTER: /* 2. Append a new data item. */
+ if (TYPE(h) == P_LBTREE) {
+ /* Copy the key for the duplicate and adjust cursors. */
+ if ((ret =
+ __bam_adjindx(dbc, h, indx + P_INDX, indx, 1)) != 0)
+ return (ret);
+ if ((ret =
+ __bam_ca_di(dbc, PGNO(h), indx + P_INDX, 1)) != 0)
+ return (ret);
+
+ indx += 3;
+ dupadjust = 1;
+
+ cp->indx += 2;
+ } else {
+ ++indx;
+ cp->indx += 1;
+ }
+ break;
+ case DB_BEFORE: /* 3. Insert a new data item. */
+ if (TYPE(h) == P_LBTREE) {
+ /* Copy the key for the duplicate and adjust cursors. */
+ if ((ret = __bam_adjindx(dbc, h, indx, indx, 1)) != 0)
+ return (ret);
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+
+ ++indx;
+ dupadjust = 1;
+ }
+ break;
+ case DB_CURRENT:
+ /*
+ * Clear the cursor's deleted flag. The problem is that if
+ * we deadlock or fail while deleting the overflow item or
+ * replacing the non-overflow item, a subsequent cursor close
+ * will try to remove the item because the cursor's delete
+ * flag is set.
+ */
+ (void)__bam_ca_delete(dbp, PGNO(h), indx, 0);
+
+ if (TYPE(h) == P_LBTREE) {
+ ++indx;
+ dupadjust = 1;
+
+ /*
+ * In a Btree deleted records aren't counted (deleted
+ * records are counted in a Recno because all accesses
+ * are based on record number). If it's a Btree and
+ * it's a DB_CURRENT operation overwriting a previously
+ * deleted record, increment the record count.
+ */
+ was_deleted = B_DISSET(bk->type);
+ }
+
+ /*
+ * 4. Delete and re-add the data item.
+ *
+ * If we're changing the type of the on-page structure, or we
+ * are referencing offpage items, we have to delete and then
+ * re-add the item. We do not do any cursor adjustments here
+ * because we're going to immediately re-add the item into the
+ * same slot.
+ */
+ if (bigdata || B_TYPE(bk->type) != B_KEYDATA) {
+ if ((ret = __bam_ditem(dbc, h, indx)) != 0)
+ return (ret);
+ break;
+ }
+
+ /* 5. Overwrite the data item. */
+ replace = 1;
+ break;
+ default:
+ return (__db_unknown_flag(dbp->dbenv, "__bam_iitem", op));
+ }
+
+ /* Add the data. */
+ if (bigdata) {
+ if ((ret = __bam_ovput(dbc,
+ B_OVERFLOW, PGNO_INVALID, h, indx, data)) != 0)
+ return (ret);
+ } else {
+ if (LF_ISSET(BI_DELETED)) {
+ B_TSET(bk_tmp.type, B_KEYDATA, 1);
+ bk_tmp.len = data->size;
+ bk_hdr.data = &bk_tmp;
+ bk_hdr.size = SSZA(BKEYDATA, data);
+ ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(data->size), &bk_hdr, data);
+ } else if (replace)
+ ret = __bam_ritem(dbc, h, indx, data);
+ else
+ ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(data->size), NULL, data);
+ if (ret != 0)
+ return (ret);
+ }
+ if ((ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ /*
+ * Re-position the cursors if necessary and reset the current cursor
+ * to point to the new item.
+ */
+ if (op != DB_CURRENT) {
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+ cp->indx = TYPE(h) == P_LBTREE ? indx - O_INDX : indx;
+ }
+
+ /*
+ * If we've changed the record count, update the tree. There's no
+ * need to adjust the count if the operation was not performed on the
+ * current record or if the current record was previously deleted.
+ */
+ if (F_ISSET(cp, C_RECNUM) && (op != DB_CURRENT || was_deleted))
+ if ((ret = __bam_adjust(dbc, 1)) != 0)
+ return (ret);
+
+ /*
+ * If a Btree leaf page is at least 50% full and we may have added or
+ * modified a duplicate data item, see if the set of duplicates takes
+ * up at least 25% of the space on the page. If it does, move it onto
+ * its own page.
+ */
+ if (dupadjust && P_FREESPACE(h) <= dbp->pgsize / 2) {
+ if ((ret = __bam_dup_convert(dbc, h, indx - O_INDX)) != 0)
+ return (ret);
+ }
+
+ /* If we've modified a recno file, set the flag. */
+ if (dbc->dbtype == DB_RECNO)
+ t->re_modified = 1;
+
+ return (ret);
+}
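
The space accounting above boils down to this: an item that exceeds the overflow threshold costs only a fixed reference on the leaf page, anything else costs its size plus per-item overhead, and the insert is refused (so the caller splits) when the page cannot hold the total. The sketch below shows just the DB_KEYFIRST case with placeholder overhead constants; the real BKEYDATA_PSIZE/BOVERFLOW_PSIZE macros differ.

#include <stdio.h>
#include <stdint.h>

#define ITEM_OVERHEAD	14u	/* assumed per-item header + index slot */
#define OVREF_SIZE	26u	/* assumed size of an overflow reference */

/* On-page cost of one item, depending on whether it goes overflow. */
static uint32_t
item_cost(uint32_t size, uint32_t ovflsize)
{
	return (size > ovflsize ? OVREF_SIZE : size + ITEM_OVERHEAD);
}

/* 1 = the new key/data pair fits on the page, 0 = caller must split first. */
static int
keyfirst_fits(uint32_t key_size, uint32_t data_size,
    uint32_t ovflsize, uint32_t free_bytes)
{
	uint32_t needed;

	needed = item_cost(key_size, ovflsize) + item_cost(data_size, ovflsize);
	return (free_bytes >= needed);
}

int
main(void)
{
	/* 1K of data overflows, so only its reference is charged here. */
	printf("%d\n", keyfirst_fits(20, 1024, 512, 100));	/* 1 */
	/* 400 bytes stay on-page and no longer fit in 100 free bytes. */
	printf("%d\n", keyfirst_fits(20, 400, 512, 100));	/* 0 */
	return (0);
}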
+
+/*
+ * __bam_partsize --
+ * Figure out how much space a partial data item is in total.
+ *
+ * PUBLIC: u_int32_t __bam_partsize __P((u_int32_t, DBT *, PAGE *, u_int32_t));
+ */
+u_int32_t
+__bam_partsize(op, data, h, indx)
+ u_int32_t op, indx;
+ DBT *data;
+ PAGE *h;
+{
+ BKEYDATA *bk;
+ u_int32_t nbytes;
+
+ /*
+ * If the record doesn't already exist, it's simply the data we're
+ * provided.
+ */
+ if (op != DB_CURRENT)
+ return (data->doff + data->size);
+
+ /*
+ * Otherwise, it's the data provided plus any already existing data
+ * that we're not replacing.
+ */
+ bk = GET_BKEYDATA(h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ nbytes =
+ B_TYPE(bk->type) == B_OVERFLOW ? ((BOVERFLOW *)bk)->tlen : bk->len;
+
+ /*
+ * There are really two cases here:
+ *
+ * Case 1: We are replacing some bytes that do not exist (i.e., they
+ * are past the end of the record). In this case the number of bytes
+ * we are replacing is irrelevant and all we care about is how many
+ * bytes we are going to add from offset. So, the new record length
+ * is going to be the size of the new bytes (size) plus wherever those
+ * new bytes begin (doff).
+ *
+ * Case 2: All the bytes we are replacing exist. Therefore, the new
+ * size is the oldsize (nbytes) minus the bytes we are replacing (dlen)
+ * plus the bytes we are adding (size).
+ */
+ if (nbytes < data->doff + data->dlen) /* Case 1 */
+ return (data->doff + data->size);
+
+ return (nbytes + data->size - data->dlen); /* Case 2 */
+}
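
For reference, the two cases above reduce to a few lines of arithmetic. The following standalone sketch (names invented for illustration, not part of Berkeley DB) computes the resulting record length from the existing length and a partial-put descriptor.

#include <stdio.h>
#include <stdint.h>

/* New record length after replacing dlen bytes at doff with size bytes. */
static uint32_t
partial_put_size(uint32_t oldlen, uint32_t doff, uint32_t dlen, uint32_t size)
{
	/* Case 1: the replaced range extends past the end of the record. */
	if (oldlen < doff + dlen)
		return (doff + size);

	/* Case 2: every replaced byte already exists in the record. */
	return (oldlen - dlen + size);
}

int
main(void)
{
	/* Replace 4 bytes at offset 10 of a 20-byte record with 6 bytes. */
	printf("%lu\n", (unsigned long)partial_put_size(20, 10, 4, 6));	/* 22 */
	/* Write 6 bytes at offset 30 of a 20-byte record (pads the gap). */
	printf("%lu\n", (unsigned long)partial_put_size(20, 30, 0, 6));	/* 36 */
	return (0);
}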
+
+/*
+ * __bam_build --
+ * Build the real record for a partial put, or short fixed-length record.
+ *
+ * PUBLIC: int __bam_build __P((DBC *, u_int32_t,
+ * PUBLIC: DBT *, PAGE *, u_int32_t, u_int32_t));
+ */
+int
+__bam_build(dbc, op, dbt, h, indx, nbytes)
+ DBC *dbc;
+ u_int32_t op, indx, nbytes;
+ DBT *dbt;
+ PAGE *h;
+{
+ BKEYDATA *bk, tbk;
+ BOVERFLOW *bo;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT copy;
+ u_int32_t len, tlen;
+ u_int8_t *p;
+ int ret;
+
+ COMPQUIET(bo, NULL);
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *) dbc->internal;
+ t = dbp->bt_internal;
+
+ /* We use the record data return memory, it's only a short-term use. */
+ if (dbc->rdata.ulen < nbytes) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ nbytes, NULL, &dbc->rdata.data)) != 0) {
+ dbc->rdata.ulen = 0;
+ dbc->rdata.data = NULL;
+ return (ret);
+ }
+ dbc->rdata.ulen = nbytes;
+ }
+
+ /*
+ * We use nul or pad bytes for any part of the record that isn't
+ * specified; get it over with.
+ */
+ memset(dbc->rdata.data,
+ F_ISSET(dbp, DB_RE_FIXEDLEN) ? t->re_pad : 0, nbytes);
+
+ /*
+ * In the next clauses, we need to do three things: a) set p to point
+ * to the place at which to copy the user's data, b) set tlen to the
+ * total length of the record, not including the bytes contributed by
+ * the user, and c) copy any valid data from an existing record. If
+ * it's not a partial put (this code is called for both partial puts
+ * and fixed-length record padding) or it's a new key, we can cut to
+ * the chase.
+ */
+ if (!F_ISSET(dbt, DB_DBT_PARTIAL) || op != DB_CURRENT) {
+ p = (u_int8_t *)dbc->rdata.data + dbt->doff;
+ tlen = dbt->doff;
+ goto user_copy;
+ }
+
+ /* Find the current record. */
+ if (indx < NUM_ENT(h)) {
+ bk = GET_BKEYDATA(h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ bo = (BOVERFLOW *)bk;
+ } else {
+ bk = &tbk;
+ B_TSET(bk->type, B_KEYDATA, 0);
+ bk->len = 0;
+ }
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ /*
+ * In the case of an overflow record, we shift things around
+ * in the current record rather than allocate a separate copy.
+ */
+ memset(&copy, 0, sizeof(copy));
+ if ((ret = __db_goff(dbp, &copy, bo->tlen,
+ bo->pgno, &dbc->rdata.data, &dbc->rdata.ulen)) != 0)
+ return (ret);
+
+ /* Skip any leading data from the original record. */
+ tlen = dbt->doff;
+ p = (u_int8_t *)dbc->rdata.data + dbt->doff;
+
+ /*
+ * Copy in any trailing data from the original record.
+ *
+ * If the original record was larger than the original offset
+ * plus the bytes being deleted, there is trailing data in the
+ * original record we need to preserve. If we aren't deleting
+ * the same number of bytes as we're inserting, copy it up or
+ * down, into place.
+ *
+ * Use memmove(), the regions may overlap.
+ */
+ if (bo->tlen > dbt->doff + dbt->dlen) {
+ len = bo->tlen - (dbt->doff + dbt->dlen);
+ if (dbt->dlen != dbt->size)
+ memmove(p + dbt->size, p + dbt->dlen, len);
+ tlen += len;
+ }
+ } else {
+ /* Copy in any leading data from the original record. */
+ memcpy(dbc->rdata.data,
+ bk->data, dbt->doff > bk->len ? bk->len : dbt->doff);
+ tlen = dbt->doff;
+ p = (u_int8_t *)dbc->rdata.data + dbt->doff;
+
+ /* Copy in any trailing data from the original record. */
+ len = dbt->doff + dbt->dlen;
+ if (bk->len > len) {
+ memcpy(p + dbt->size, bk->data + len, bk->len - len);
+ tlen += bk->len - len;
+ }
+ }
+
+user_copy:
+ /*
+ * Copy in the application provided data -- p and tlen must have been
+ * initialized above.
+ */
+ memcpy(p, dbt->data, dbt->size);
+ tlen += dbt->size;
+
+ /* Set the DBT to reference our new record. */
+ dbc->rdata.size = F_ISSET(dbp, DB_RE_FIXEDLEN) ? t->re_len : tlen;
+ dbc->rdata.dlen = 0;
+ dbc->rdata.doff = 0;
+ dbc->rdata.flags = 0;
+ *dbt = dbc->rdata;
+ return (0);
+}
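
A toy, self-contained version of the same record-building idea may make the copy layout easier to follow: pad the whole buffer, lay down the leading original bytes, shift any surviving trailing bytes to follow the new data, then overlay the caller's bytes. The helper below is an illustration only and does not use the real DBT/BKEYDATA structures.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Combine an existing record with a partial put; returns malloc'd buffer. */
static unsigned char *
build_record(const unsigned char *old, size_t oldlen,
    size_t doff, size_t dlen, const unsigned char *newdata, size_t newlen,
    int pad, size_t *outlen)
{
	size_t tail, total;
	unsigned char *p;

	/* Total size follows the same two cases as the size computation. */
	total = oldlen < doff + dlen ? doff + newlen : oldlen - dlen + newlen;
	if ((p = malloc(total)) == NULL)
		return (NULL);

	/* Pad everything first, then overlay the pieces we actually have. */
	memset(p, pad, total);

	/* Leading bytes from the original record. */
	memcpy(p, old, doff < oldlen ? doff : oldlen);

	/* Trailing original bytes, shifted to follow the new bytes. */
	if (oldlen > doff + dlen) {
		tail = oldlen - (doff + dlen);
		memcpy(p + doff + newlen, old + doff + dlen, tail);
	}

	/* Finally, the caller-supplied bytes at the requested offset. */
	memcpy(p + doff, newdata, newlen);

	*outlen = total;
	return (p);
}

int
main(void)
{
	size_t len;
	unsigned char *r = build_record((const unsigned char *)"abcdefgh", 8,
	    2, 3, (const unsigned char *)"XY", 2, '.', &len);

	if (r != NULL) {
		fwrite(r, 1, len, stdout);	/* prints "abXYfgh" */
		putchar('\n');
		free(r);
	}
	return (0);
}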
+
+/*
+ * __bam_ritem --
+ * Replace an item on a page.
+ *
+ * PUBLIC: int __bam_ritem __P((DBC *, PAGE *, u_int32_t, DBT *));
+ */
+int
+__bam_ritem(dbc, h, indx, data)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx;
+ DBT *data;
+{
+ BKEYDATA *bk;
+ DB *dbp;
+ DBT orig, repl;
+ db_indx_t cnt, lo, ln, min, off, prefix, suffix;
+ int32_t nbytes;
+ int ret;
+ u_int8_t *p, *t;
+
+ dbp = dbc->dbp;
+
+ /*
+ * Replace a single item on a page. The logic figuring out where
+ * to insert and whether it fits is handled in the caller. All we do
+ * here is manage the page shuffling.
+ */
+ bk = GET_BKEYDATA(h, indx);
+
+ /* Log the change. */
+ if (DB_LOGGING(dbc)) {
+ /*
+ * We might as well check to see if the two data items share
+ * a common prefix and suffix -- it can save us a lot of log
+ * space if they're large.
+ */
+ min = data->size < bk->len ? data->size : bk->len;
+ for (prefix = 0,
+ p = bk->data, t = data->data;
+ prefix < min && *p == *t; ++prefix, ++p, ++t)
+ ;
+
+ min -= prefix;
+ for (suffix = 0,
+ p = (u_int8_t *)bk->data + bk->len - 1,
+ t = (u_int8_t *)data->data + data->size - 1;
+ suffix < min && *p == *t; ++suffix, --p, --t)
+ ;
+
+ /* We only log the parts of the keys that have changed. */
+ orig.data = (u_int8_t *)bk->data + prefix;
+ orig.size = bk->len - (prefix + suffix);
+ repl.data = (u_int8_t *)data->data + prefix;
+ repl.size = data->size - (prefix + suffix);
+ if ((ret = __bam_repl_log(dbp->dbenv, dbc->txn,
+ &LSN(h), 0, dbp->log_fileid, PGNO(h), &LSN(h),
+ (u_int32_t)indx, (u_int32_t)B_DISSET(bk->type),
+ &orig, &repl, (u_int32_t)prefix, (u_int32_t)suffix)) != 0)
+ return (ret);
+ }
+
+ /*
+ * Set references to the first in-use byte on the page and the
+ * first byte of the item being replaced.
+ */
+ p = (u_int8_t *)h + HOFFSET(h);
+ t = (u_int8_t *)bk;
+
+ /*
+ * If the entry is growing in size, shift the beginning of the data
+ * part of the page down. If the entry is shrinking in size, shift
+ * the beginning of the data part of the page up. Use memmove(3),
+ * the regions overlap.
+ */
+ lo = BKEYDATA_SIZE(bk->len);
+ ln = BKEYDATA_SIZE(data->size);
+ if (lo != ln) {
+ nbytes = lo - ln; /* Signed difference. */
+ if (p == t) /* First index is fast. */
+ h->inp[indx] += nbytes;
+ else { /* Else, shift the page. */
+ memmove(p + nbytes, p, t - p);
+
+ /* Adjust the indices' offsets. */
+ off = h->inp[indx];
+ for (cnt = 0; cnt < NUM_ENT(h); ++cnt)
+ if (h->inp[cnt] <= off)
+ h->inp[cnt] += nbytes;
+ }
+
+ /* Clean up the page and adjust the item's reference. */
+ HOFFSET(h) += nbytes;
+ t += nbytes;
+ }
+
+ /* Copy the new item onto the page. */
+ bk = (BKEYDATA *)t;
+ B_TSET(bk->type, B_KEYDATA, 0);
+ bk->len = data->size;
+ memcpy(bk->data, data->data, data->size);
+
+ return (0);
+}
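
The prefix/suffix computation used above to shrink the log record is easy to isolate. The sketch below (invented names, not the real logging path) reports how many leading and trailing bytes two items share, so only the differing middle range would need to be logged.

#include <stdio.h>
#include <string.h>

/* Find how many leading and trailing bytes a and b have in common. */
static void
common_affixes(const unsigned char *a, size_t alen,
    const unsigned char *b, size_t blen, size_t *prefix, size_t *suffix)
{
	size_t min, p, s;

	min = alen < blen ? alen : blen;

	for (p = 0; p < min && a[p] == b[p]; ++p)
		;
	/* Never let the suffix overlap the prefix. */
	for (s = 0; s < min - p && a[alen - 1 - s] == b[blen - 1 - s]; ++s)
		;

	*prefix = p;
	*suffix = s;
}

int
main(void)
{
	const unsigned char a[] = "January records", b[] = "Janitor records";
	size_t p, s;

	common_affixes(a, sizeof(a) - 1, b, sizeof(b) - 1, &p, &s);
	printf("prefix %zu, suffix %zu, log only %zu of %zu bytes\n",
	    p, s, sizeof(b) - 1 - p - s, sizeof(b) - 1);
	return (0);
}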
+
+/*
+ * __bam_dup_convert --
+ * Check to see if the duplicate set at indx should have its own page.
+ * If it should, create it.
+ */
+static int
+__bam_dup_convert(dbc, h, indx)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx;
+{
+ BTREE_CURSOR *cp;
+ BKEYDATA *bk;
+ DB *dbp;
+ DBT hdr;
+ PAGE *dp;
+ db_indx_t cnt, cpindx, dindx, first, sz;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Count the duplicate records and calculate how much room they're
+ * using on the page.
+ */
+ while (indx > 0 && h->inp[indx] == h->inp[indx - P_INDX])
+ indx -= P_INDX;
+ for (cnt = 0, sz = 0, first = indx;; ++cnt, indx += P_INDX) {
+ if (indx >= NUM_ENT(h) || h->inp[first] != h->inp[indx])
+ break;
+ bk = GET_BKEYDATA(h, indx);
+ sz += B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
+ bk = GET_BKEYDATA(h, indx + O_INDX);
+ sz += B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
+ }
+
+ /*
+ * We have to do these checks when the user is replacing the cursor's
+ * data item -- if the application replaces a duplicate item with a
+ * larger data item, it can increase the amount of space used by the
+ * duplicates, requiring this check. But that means we may have done
+ * this check when it wasn't a duplicate item after all.
+ */
+ if (cnt == 1)
+ return (0);
+
+ /*
+ * If this set of duplicates is using more than 25% of the page, move
+ * them off. The choice of 25% is a WAG, but the value must be small
+ * enough that we can always split a page without putting duplicates
+ * on two different pages.
+ */
+ if (sz < dbp->pgsize / 4)
+ return (0);
+
+ /* Get a new page. */
+ if ((ret = __db_new(dbc,
+ dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0)
+ return (ret);
+ P_INIT(dp, dbp->pgsize, dp->pgno,
+ PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, TYPE(dp));
+
+ /*
+ * Move this set of duplicates off the page. First points to the first
+ * key of the first duplicate key/data pair, cnt is the number of pairs
+ * we're dealing with.
+ */
+ memset(&hdr, 0, sizeof(hdr));
+ dindx = first;
+ indx = first;
+ cpindx = 0;
+ do {
+ /* Move cursors referencing the old entry to the new entry. */
+ if ((ret = __bam_ca_dup(dbc, first,
+ PGNO(h), indx, PGNO(dp), cpindx)) != 0)
+ goto err;
+
+ /*
+ * Copy the entry to the new page. If the off-page duplicate
+ * page is a Btree page (i.e., dup_compare will be non-NULL;
+ * we use Btree pages for sorted dups, and Recno pages for
+ * unsorted dups), move all entries
+ * normally, even deleted ones. If it's a Recno page,
+ * deleted entries are discarded (if the deleted entry is
+ * overflow, then free up those pages).
+ */
+ bk = GET_BKEYDATA(h, dindx + 1);
+ hdr.data = bk;
+ hdr.size = B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_SIZE(bk->len) : BOVERFLOW_SIZE;
+ if (dbp->dup_compare == NULL && B_DISSET(bk->type)) {
+ /*
+ * Unsorted dups, i.e. recno page, and we have
+ * a deleted entry, don't move it, but if it was
+ * an overflow entry, we need to free those pages.
+ */
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_doff(dbc,
+ (GET_BOVERFLOW(h, dindx + 1))->pgno)) != 0)
+ goto err;
+ } else {
+ if ((ret = __db_pitem(
+ dbc, dp, cpindx, hdr.size, &hdr, NULL)) != 0)
+ goto err;
+ ++cpindx;
+ }
+ /* Delete all but the last reference to the key. */
+ if (cnt != 1) {
+ if ((ret = __bam_adjindx(dbc,
+ h, dindx, first + 1, 0)) != 0)
+ goto err;
+ } else
+ dindx++;
+
+ /* Delete the data item. */
+ if ((ret = __db_ditem(dbc, h, dindx, hdr.size)) != 0)
+ goto err;
+ indx += P_INDX;
+ } while (--cnt);
+
+ /* Put in a new data item that points to the duplicates page. */
+ if ((ret = __bam_ovput(dbc,
+ B_DUPLICATE, dp->pgno, h, first + 1, NULL)) != 0)
+ goto err;
+
+ /* Adjust cursors for all the above movements. */
+ if ((ret = __bam_ca_di(dbc,
+ PGNO(h), first + P_INDX, first + P_INDX - indx)) != 0)
+ goto err;
+
+ return (memp_fput(dbp->mpf, dp, DB_MPOOL_DIRTY));
+
+err: (void)__db_free(dbc, dp);
+ return (ret);
+}
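
The decision itself (find the duplicate run around the cursor, total its on-page cost, and move it off-page only once it consumes a quarter of the page) can be modelled with a toy entry array. The structure below is a stand-in for the real page index and item headers, chosen purely for illustration.

#include <stdio.h>
#include <stddef.h>

struct toy_entry {
	int key_id;		/* stands in for the shared key offset */
	size_t key_bytes;	/* on-page size of the key item */
	size_t data_bytes;	/* on-page size of the data item */
};

/* Return 1 if the duplicate run containing indx should move off-page. */
static int
should_move_dups(const struct toy_entry *e, size_t nent, size_t indx,
    size_t pgsize)
{
	size_t cnt, first, i, sz;

	/* Back up to the first pair of the duplicate run. */
	for (first = indx;
	    first > 0 && e[first - 1].key_id == e[indx].key_id; --first)
		;

	/* Count the pairs in the run and the bytes they occupy. */
	for (cnt = 0, sz = 0, i = first;
	    i < nent && e[i].key_id == e[first].key_id; ++i, ++cnt)
		sz += e[i].key_bytes + e[i].data_bytes;

	/* A single pair isn't a duplicate set at all. */
	if (cnt == 1)
		return (0);

	/* Move only if the run uses at least 25% of the page. */
	return (sz >= pgsize / 4);
}

int
main(void)
{
	struct toy_entry page[] = {
		{ 1, 16, 40 }, { 2, 16, 300 }, { 2, 16, 300 }, { 2, 16, 300 },
	};

	printf("%d\n", should_move_dups(page, 4, 2, 4096));	/* 0 */
	printf("%d\n", should_move_dups(page, 4, 2, 2048));	/* 1 */
	return (0);
}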
+
+/*
+ * __bam_ovput --
+ * Build an item for an off-page duplicates page or overflow page and
+ * insert it on the page.
+ */
+static int
+__bam_ovput(dbc, type, pgno, h, indx, item)
+ DBC *dbc;
+ u_int32_t type, indx;
+ db_pgno_t pgno;
+ PAGE *h;
+ DBT *item;
+{
+ BOVERFLOW bo;
+ DBT hdr;
+ int ret;
+
+ UMRW_SET(bo.unused1);
+ B_TSET(bo.type, type, 0);
+ UMRW_SET(bo.unused2);
+
+ /*
+ * If we're creating an overflow item, do so and acquire the page
+ * number for it. If we're creating an off-page duplicates tree,
+ * we have been given the page number as an argument.
+ */
+ if (type == B_OVERFLOW) {
+ if ((ret = __db_poff(dbc, item, &bo.pgno)) != 0)
+ return (ret);
+ bo.tlen = item->size;
+ } else {
+ bo.pgno = pgno;
+ bo.tlen = 0;
+ }
+
+ /* Store the new record on the page. */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bo;
+ hdr.size = BOVERFLOW_SIZE;
+ return (__db_pitem(dbc, h, indx, BOVERFLOW_SIZE, &hdr, NULL));
+}
diff --git a/bdb/btree/bt_rec.c b/bdb/btree/bt_rec.c
new file mode 100644
index 00000000000..24dc9bc6a6e
--- /dev/null
+++ b/bdb/btree/bt_rec.c
@@ -0,0 +1,1219 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_rec.c,v 11.35 2001/01/10 16:24:47 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "hash.h"
+#include "btree.h"
+#include "log.h"
+
+#define IS_BTREE_PAGE(pagep) \
+ (TYPE(pagep) == P_IBTREE || \
+ TYPE(pagep) == P_LBTREE || TYPE(pagep) == P_LDUP)
+
+/*
+ * __bam_pg_alloc_recover --
+ * Recovery function for pg_alloc.
+ *
+ * PUBLIC: int __bam_pg_alloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_pg_alloc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, level, modified, ret;
+
+ REC_PRINT(__bam_pg_alloc_print);
+ REC_INTRO(__bam_pg_alloc_read, 0);
+
+ /*
+ * Fix up the allocated page. If we're redoing the operation, we have
+ * to get the page (creating it if it doesn't exist), and update its
+ * LSN. If we're undoing the operation, we have to reset the page's
+ * LSN and put it on the free list.
+ *
+ * Fix up the metadata page. If we're redoing the operation, we have
+ * to get the metadata page and update its LSN and its free pointer.
+ * If we're undoing the operation and the page was ever created, we put
+ * it on the freelist.
+ */
+ pgno = PGNO_BASE_MD;
+ meta = NULL;
+ if ((ret = memp_fget(mpf, &pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist on redo. */
+ if (DB_REDO(op)) {
+ (void)__db_pgerr(file_dbp, pgno);
+ goto out;
+ } else
+ goto done;
+ }
+ if ((ret = memp_fget(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
+ /*
+ * We specify creation and check for it later, because this
+ * operation was supposed to create the page, and even in
+ * the undo case it's going to get linked onto the freelist
+ * which we're also fixing up.
+ */
+ (void)__db_pgerr(file_dbp, argp->pgno);
+ goto err;
+ }
+
+ /* Fix up the allocated page. */
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->page_lsn);
+
+ /*
+ * If an initial allocation is aborted and then reallocated
+ * during an archival restore, the log record will have
+ * an LSN for the page but the page will be empty.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)))
+ cmp_p = 0;
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->page_lsn);
+ /*
+ * If we rolled back this allocation previously during an
+ * archive restore, the page may have the LSN of the meta page
+ * at the point of the roll back. This will be no more
+ * than the LSN of the metadata page at the time of this allocation.
+ */
+ if (DB_REDO(op) &&
+ (cmp_p == 0 ||
+ (IS_ZERO_LSN(argp->page_lsn) &&
+ log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
+ /* Need to redo update described. */
+ switch (argp->ptype) {
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_LDUP:
+ level = LEAFLEVEL;
+ break;
+ default:
+ level = 0;
+ break;
+ }
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, PGNO_INVALID, level, argp->ptype);
+
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /*
+ * Undo the allocation, reinitialize the page and
+ * link its next pointer to the free list.
+ */
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
+
+ pagep->lsn = argp->page_lsn;
+ modified = 1;
+ }
+
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) {
+ goto err;
+ }
+
+ /*
+ * If the page was newly created, put it on the limbo list.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)) &&
+ IS_ZERO_LSN(argp->page_lsn) && DB_UNDO(op)) {
+ /* Put the page in limbo.*/
+ if ((ret = __db_add_limbo(dbenv,
+ info, argp->fileid, argp->pgno, 1)) != 0)
+ goto err;
+ }
+
+ /* Fix up the metadata page. */
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ LSN(meta) = *lsnp;
+ meta->free = argp->next;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ LSN(meta) = argp->meta_lsn;
+
+ /*
+ * If the page has a zero LSN then it's newly created
+ * and will go into limbo rather than directly on the
+ * free list.
+ */
+ if (!IS_ZERO_LSN(argp->page_lsn))
+ meta->free = argp->pgno;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ /*
+ * This could be the metapage from a subdb which is read from disk
+ * to recover its creation.
+ */
+ if (F_ISSET(file_dbp, DB_AM_SUBDB))
+ switch (argp->type) {
+ case P_BTREEMETA:
+ case P_HASHMETA:
+ case P_QAMMETA:
+ file_dbp->sync(file_dbp, 0);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err:
+ if (meta != NULL)
+ (void)memp_fput(mpf, meta, 0);
+ }
+out: REC_CLOSE;
+}
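
Every recovery routine in this file repeats the same LSN test: redo only if the page still carries the pre-operation LSN recorded in the log record, undo only if the page carries this record's own LSN. A stripped-down sketch of that decision follows, with simplified stand-ins for DB_LSN and log_compare(); the names are invented for illustration.

#include <stdio.h>
#include <stdint.h>

struct toy_lsn {
	uint32_t file, offset;
};

enum toy_op { TOY_REDO, TOY_UNDO };

static int
toy_lsn_cmp(const struct toy_lsn *a, const struct toy_lsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}

/* Return 1 if the page should be modified (and marked dirty). */
static int
needs_work(enum toy_op op, const struct toy_lsn *page_lsn,
    const struct toy_lsn *record_lsn, const struct toy_lsn *prev_lsn)
{
	if (op == TOY_REDO)
		/* The page still shows the state before this record. */
		return (toy_lsn_cmp(page_lsn, prev_lsn) == 0);
	/* Undo applies only if this record's update is on the page. */
	return (toy_lsn_cmp(page_lsn, record_lsn) == 0);
}

int
main(void)
{
	struct toy_lsn before = { 1, 100 }, rec = { 1, 200 }, page = { 1, 100 };

	printf("redo? %d\n", needs_work(TOY_REDO, &page, &rec, &before)); /* 1 */
	page = rec;
	printf("undo? %d\n", needs_work(TOY_UNDO, &page, &rec, &before)); /* 1 */
	return (0);
}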
+
+/*
+ * __bam_pg_free_recover --
+ * Recovery function for pg_free.
+ *
+ * PUBLIC: int __bam_pg_free_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_pg_free_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_pg_free_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_LSN copy_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_pg_free_print);
+ REC_INTRO(__bam_pg_free_read, 1);
+
+ /*
+ * Fix up the freed page. If we're redoing the operation we get the
+ * page and explicitly discard its contents, then update its LSN. If
+ * we're undoing the operation, we get the page and restore its header.
+ * Create the page if necessary, we may be freeing an aborted
+ * create.
+ */
+ if ((ret = memp_fget(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ modified = 0;
+ __ua_memcpy(&copy_lsn, &LSN(argp->header.data), sizeof(DB_LSN));
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &copy_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
+ if (DB_REDO(op) &&
+ (cmp_p == 0 ||
+ (IS_ZERO_LSN(copy_lsn) &&
+ log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
+ /* Need to redo update described. */
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
+ pagep->lsn = *lsnp;
+
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->header.data, argp->header.size);
+
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ /*
+ * Fix up the metadata page. If we're redoing or undoing the operation
+ * we get the page and update its LSN and free pointer.
+ */
+ pgno = PGNO_BASE_MD;
+ if ((ret = memp_fget(mpf, &pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist. */
+ (void)__db_pgerr(file_dbp, pgno);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo the deallocation. */
+ meta->free = argp->pgno;
+ LSN(meta) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo the deallocation. */
+ meta->free = argp->next;
+ LSN(meta) = argp->meta_lsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_split_recover --
+ * Recovery function for split.
+ *
+ * PUBLIC: int __bam_split_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_split_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_split_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *_lp, *lp, *np, *pp, *_rp, *rp, *sp;
+ db_pgno_t pgno, root_pgno;
+ u_int32_t ptype;
+ int cmp, l_update, p_update, r_update, rc, ret, rootsplit, t_ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_split_print);
+
+ mpf = NULL;
+ _lp = lp = np = pp = _rp = rp = NULL;
+ sp = NULL;
+
+ REC_INTRO(__bam_split_read, 1);
+
+ /*
+ * There are two kinds of splits that we have to recover from. The
+ * first is a root-page split, where the root page is split from a
+ * leaf page into an internal page and two new leaf pages are created.
+ * The second is where a page is split into two pages, and a new key
+ * is inserted into the parent page.
+ *
+ * DBTs are not aligned in log records, so we need to copy the page
+ * so that we can access fields within it throughout this routine.
+ * Although we could hardcode the unaligned copies in this routine,
+ * we will be calling into regular btree functions with this page,
+ * so it's got to be aligned. Copying it into allocated memory is
+ * the only way to guarantee this.
+ */
+ if ((ret = __os_malloc(dbenv, argp->pg.size, NULL, &sp)) != 0)
+ goto out;
+ memcpy(sp, argp->pg.data, argp->pg.size);
+
+ pgno = PGNO(sp);
+ root_pgno = argp->root_pgno;
+ rootsplit = pgno == root_pgno;
+ if (memp_fget(mpf, &argp->left, 0, &lp) != 0)
+ lp = NULL;
+ if (memp_fget(mpf, &argp->right, 0, &rp) != 0)
+ rp = NULL;
+
+ if (DB_REDO(op)) {
+ l_update = r_update = p_update = 0;
+ /*
+ * Decide if we need to resplit the page.
+ *
+ * If this is a root split, then the root has to exist, it's
+ * the page we're splitting and it gets modified. If this is
+ * not a root split, then the left page has to exist, for the
+ * same reason.
+ */
+ if (rootsplit) {
+ if ((ret = memp_fget(mpf, &pgno, 0, &pp)) != 0) {
+ (void)__db_pgerr(file_dbp, pgno);
+ pp = NULL;
+ goto out;
+ }
+ cmp = log_compare(&LSN(pp), &LSN(argp->pg.data));
+ CHECK_LSN(op, cmp, &LSN(pp), &LSN(argp->pg.data));
+ p_update = cmp == 0;
+ } else if (lp == NULL) {
+ (void)__db_pgerr(file_dbp, argp->left);
+ goto out;
+ }
+
+ if (lp != NULL) {
+ cmp = log_compare(&LSN(lp), &argp->llsn);
+ CHECK_LSN(op, cmp, &LSN(lp), &argp->llsn);
+ if (cmp == 0)
+ l_update = 1;
+ } else
+ l_update = 1;
+
+ if (rp != NULL) {
+ cmp = log_compare(&LSN(rp), &argp->rlsn);
+ CHECK_LSN(op, cmp, &LSN(rp), &argp->rlsn);
+ if (cmp == 0)
+ r_update = 1;
+ } else
+ r_update = 1;
+ if (!p_update && !l_update && !r_update)
+ goto check_next;
+
+ /* Allocate and initialize new left/right child pages. */
+ if ((ret =
+ __os_malloc(dbenv, file_dbp->pgsize, NULL, &_lp)) != 0
+ || (ret =
+ __os_malloc(dbenv, file_dbp->pgsize, NULL, &_rp)) != 0)
+ goto out;
+ if (rootsplit) {
+ P_INIT(_lp, file_dbp->pgsize, argp->left,
+ PGNO_INVALID,
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->right,
+ LEVEL(sp), TYPE(sp));
+ P_INIT(_rp, file_dbp->pgsize, argp->right,
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->left,
+ PGNO_INVALID, LEVEL(sp), TYPE(sp));
+ } else {
+ P_INIT(_lp, file_dbp->pgsize, PGNO(sp),
+ ISINTERNAL(sp) ? PGNO_INVALID : PREV_PGNO(sp),
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->right,
+ LEVEL(sp), TYPE(sp));
+ P_INIT(_rp, file_dbp->pgsize, argp->right,
+ ISINTERNAL(sp) ? PGNO_INVALID : sp->pgno,
+ ISINTERNAL(sp) ? PGNO_INVALID : NEXT_PGNO(sp),
+ LEVEL(sp), TYPE(sp));
+ }
+
+ /* Split the page. */
+ if ((ret = __bam_copy(file_dbp, sp, _lp, 0, argp->indx)) != 0 ||
+ (ret = __bam_copy(file_dbp, sp, _rp, argp->indx,
+ NUM_ENT(sp))) != 0)
+ goto out;
+
+ /* If the left child is wrong, update it. */
+ if (lp == NULL && (ret =
+ memp_fget(mpf, &argp->left, DB_MPOOL_CREATE, &lp)) != 0) {
+ (void)__db_pgerr(file_dbp, argp->left);
+ lp = NULL;
+ goto out;
+ }
+ if (l_update) {
+ memcpy(lp, _lp, file_dbp->pgsize);
+ lp->lsn = *lsnp;
+ if ((ret = memp_fput(mpf, lp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ lp = NULL;
+ }
+
+ /* If the right child is wrong, update it. */
+ if (rp == NULL && (ret = memp_fget(mpf,
+ &argp->right, DB_MPOOL_CREATE, &rp)) != 0) {
+ (void)__db_pgerr(file_dbp, argp->right);
+ rp = NULL;
+ goto out;
+ }
+ if (r_update) {
+ memcpy(rp, _rp, file_dbp->pgsize);
+ rp->lsn = *lsnp;
+ if ((ret = memp_fput(mpf, rp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ rp = NULL;
+ }
+
+ /*
+ * If the parent page is wrong, update it. This is of interest
+ * only if it was a root split, since root splits create parent
+ * pages. All other splits modify a parent page, but those are
+ * separately logged and recovered.
+ */
+ if (rootsplit && p_update) {
+ if (IS_BTREE_PAGE(sp)) {
+ ptype = P_IBTREE;
+ rc = argp->opflags & SPL_NRECS ? 1 : 0;
+ } else {
+ ptype = P_IRECNO;
+ rc = 1;
+ }
+
+ P_INIT(pp, file_dbp->pgsize, root_pgno,
+ PGNO_INVALID, PGNO_INVALID, _lp->level + 1, ptype);
+ RE_NREC_SET(pp,
+ rc ? __bam_total(_lp) + __bam_total(_rp) : 0);
+
+ pp->lsn = *lsnp;
+ if ((ret = memp_fput(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ pp = NULL;
+ }
+
+check_next: /*
+ * Finally, redo the next-page link if necessary. This is of
+ * interest only if it wasn't a root split -- inserting a new
+ * page in the tree requires that any following page have its
+ * previous-page pointer updated to our new page. The next
+ * page must exist because we're redoing the operation.
+ */
+ if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) {
+ if ((ret = memp_fget(mpf, &argp->npgno, 0, &np)) != 0) {
+ (void)__db_pgerr(file_dbp, argp->npgno);
+ np = NULL;
+ goto out;
+ }
+ cmp = log_compare(&LSN(np), &argp->nlsn);
+ CHECK_LSN(op, cmp, &LSN(np), &argp->nlsn);
+ if (cmp == 0) {
+ PREV_PGNO(np) = argp->right;
+ np->lsn = *lsnp;
+ if ((ret =
+ memp_fput(mpf, np, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ np = NULL;
+ }
+ }
+ } else {
+ /*
+ * If the split page is wrong, replace its contents with the
+ * logged page contents. If the page doesn't exist, it means
+ * that the create of the page never happened, nor did any of
+ * the adds onto the page that caused the split, and there's
+ * really no undo-ing to be done.
+ */
+ if ((ret = memp_fget(mpf, &pgno, 0, &pp)) != 0) {
+ pp = NULL;
+ goto lrundo;
+ }
+ if (log_compare(lsnp, &LSN(pp)) == 0) {
+ memcpy(pp, argp->pg.data, argp->pg.size);
+ if ((ret = memp_fput(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ pp = NULL;
+ }
+
+ /*
+ * If it's a root split and the left child ever existed, update
+ * its LSN. (If it's not a root split, we've updated the left
+ * page already -- it's the same as the split page.) If the
+ * right child ever existed, root split or not, update its LSN.
+ * The undo of the page allocation(s) will restore them to the
+ * free list.
+ */
+lrundo: if ((rootsplit && lp != NULL) || rp != NULL) {
+ if (rootsplit && lp != NULL &&
+ log_compare(lsnp, &LSN(lp)) == 0) {
+ lp->lsn = argp->llsn;
+ if ((ret =
+ memp_fput(mpf, lp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ lp = NULL;
+ }
+ if (rp != NULL &&
+ log_compare(lsnp, &LSN(rp)) == 0) {
+ rp->lsn = argp->rlsn;
+ if ((ret =
+ memp_fput(mpf, rp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ rp = NULL;
+ }
+ }
+
+ /*
+ * Finally, undo the next-page link if necessary. This is of
+ * interest only if it wasn't a root split -- inserting a new
+ * page in the tree requires that any following page have its
+ * previous-page pointer updated to our new page. Since it's
+ * possible that the next-page never existed, we ignore it as
+ * if there's nothing to undo.
+ */
+ if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) {
+ if ((ret = memp_fget(mpf, &argp->npgno, 0, &np)) != 0) {
+ np = NULL;
+ goto done;
+ }
+ if (log_compare(lsnp, &LSN(np)) == 0) {
+ PREV_PGNO(np) = argp->left;
+ np->lsn = argp->nlsn;
+ if (memp_fput(mpf, np, DB_MPOOL_DIRTY))
+ goto out;
+ np = NULL;
+ }
+ }
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: /* Free any pages that weren't dirtied. */
+ if (pp != NULL && (t_ret = memp_fput(mpf, pp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (lp != NULL && (t_ret = memp_fput(mpf, lp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (np != NULL && (t_ret = memp_fput(mpf, np, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (rp != NULL && (t_ret = memp_fput(mpf, rp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Free any allocated space. */
+ if (_lp != NULL)
+ __os_free(_lp, file_dbp->pgsize);
+ if (_rp != NULL)
+ __os_free(_rp, file_dbp->pgsize);
+ if (sp != NULL)
+ __os_free(sp, argp->pg.size);
+
+ REC_CLOSE;
+}
+
+/*
+ * __bam_rsplit_recover --
+ * Recovery function for a reverse split.
+ *
+ * PUBLIC: int __bam_rsplit_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rsplit_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LSN copy_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno, root_pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_rsplit_print);
+ REC_INTRO(__bam_rsplit_read, 1);
+
+ /* Fix the root page. */
+ pgno = root_pgno = argp->root_pgno;
+ if ((ret = memp_fget(mpf, &pgno, 0, &pagep)) != 0) {
+ /* The root page must always exist if we are going forward. */
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, pgno);
+ goto out;
+ }
+ /* This must be the root of an OPD tree. */
+ DB_ASSERT(root_pgno !=
+ ((BTREE *)file_dbp->bt_internal)->bt_root);
+ ret = 0;
+ goto done;
+ }
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->rootlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->rootlsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size);
+ pagep->pgno = root_pgno;
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ P_INIT(pagep, file_dbp->pgsize, root_pgno,
+ argp->nrec, PGNO_INVALID, pagep->level + 1,
+ IS_BTREE_PAGE(pagep) ? P_IBTREE : P_IRECNO);
+ if ((ret = __db_pitem(dbc, pagep, 0,
+ argp->rootent.size, &argp->rootent, NULL)) != 0)
+ goto out;
+ pagep->lsn = argp->rootlsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ /*
+ * Fix the page copied over the root page. It's possible that the
+ * page never made it to disk, so if we're undo-ing and the page
+ * doesn't exist, it's okay and there's nothing further to do.
+ */
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ (void)__db_pgerr(file_dbp, argp->pgno);
+ goto out;
+ }
+ modified = 0;
+ __ua_memcpy(&copy_lsn, &LSN(argp->pgdbt.data), sizeof(DB_LSN));
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &copy_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size);
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_adj_recover --
+ * Recovery function for adj.
+ *
+ * PUBLIC: int __bam_adj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_adj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_adj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_adj_print);
+ REC_INTRO(__bam_adj_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ (void)__db_pgerr(file_dbp, argp->pgno);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ if ((ret = __bam_adjindx(dbc,
+ pagep, argp->indx, argp->indx_copy, argp->is_insert)) != 0)
+ goto err;
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ if ((ret = __bam_adjindx(dbc,
+ pagep, argp->indx, argp->indx_copy, !argp->is_insert)) != 0)
+ goto err;
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)memp_fput(mpf, pagep, 0);
+ }
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_cadjust_recover --
+ * Recovery function for the adjust of a count change in an internal
+ * page.
+ *
+ * PUBLIC: int __bam_cadjust_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cadjust_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_cadjust_print);
+ REC_INTRO(__bam_cadjust_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ (void)__db_pgerr(file_dbp, argp->pgno);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ if (IS_BTREE_PAGE(pagep)) {
+ GET_BINTERNAL(pagep, argp->indx)->nrecs += argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, argp->adjust);
+ } else {
+ GET_RINTERNAL(pagep, argp->indx)->nrecs += argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, argp->adjust);
+ }
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ if (IS_BTREE_PAGE(pagep)) {
+ GET_BINTERNAL(pagep, argp->indx)->nrecs -= argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, -(argp->adjust));
+ } else {
+ GET_RINTERNAL(pagep, argp->indx)->nrecs -= argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, -(argp->adjust));
+ }
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_cdel_recover --
+ * Recovery function for the intent-to-delete of a cursor record.
+ *
+ * PUBLIC: int __bam_cdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t indx;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_cdel_print);
+ REC_INTRO(__bam_cdel_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ (void)__db_pgerr(file_dbp, argp->pgno);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0);
+ B_DSET(GET_BKEYDATA(pagep, indx)->type);
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0);
+ B_DCLR(GET_BKEYDATA(pagep, indx)->type);
+
+ (void)__bam_ca_delete(file_dbp, argp->pgno, argp->indx, 0);
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_repl_recover --
+ * Recovery function for page item replacement.
+ *
+ * PUBLIC: int __bam_repl_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_repl_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_repl_args *argp;
+ BKEYDATA *bk;
+ DB *file_dbp;
+ DBC *dbc;
+ DBT dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+ u_int8_t *p;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_repl_print);
+ REC_INTRO(__bam_repl_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ (void)__db_pgerr(file_dbp, argp->pgno);
+ goto out;
+ }
+ bk = GET_BKEYDATA(pagep, argp->indx);
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /*
+ * Need to redo update described.
+ *
+ * Re-build the replacement item.
+ */
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.size = argp->prefix + argp->suffix + argp->repl.size;
+ if ((ret = __os_malloc(dbenv, dbt.size, NULL, &dbt.data)) != 0)
+ goto err;
+ p = dbt.data;
+ memcpy(p, bk->data, argp->prefix);
+ p += argp->prefix;
+ memcpy(p, argp->repl.data, argp->repl.size);
+ p += argp->repl.size;
+ memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix);
+
+ ret = __bam_ritem(dbc, pagep, argp->indx, &dbt);
+ __os_free(dbt.data, dbt.size);
+ if (ret != 0)
+ goto err;
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /*
+ * Need to undo update described.
+ *
+ * Re-build the original item.
+ */
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.size = argp->prefix + argp->suffix + argp->orig.size;
+ if ((ret = __os_malloc(dbenv, dbt.size, NULL, &dbt.data)) != 0)
+ goto err;
+ p = dbt.data;
+ memcpy(p, bk->data, argp->prefix);
+ p += argp->prefix;
+ memcpy(p, argp->orig.data, argp->orig.size);
+ p += argp->orig.size;
+ memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix);
+
+ ret = __bam_ritem(dbc, pagep, argp->indx, &dbt);
+ __os_free(dbt.data, dbt.size);
+ if (ret != 0)
+ goto err;
+
+ /* Reset the deleted flag, if necessary. */
+ if (argp->isdeleted)
+ B_DSET(GET_BKEYDATA(pagep, argp->indx)->type);
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)memp_fput(mpf, pagep, 0);
+ }
+out: REC_CLOSE;
+}
+
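__bam_repl_recover above rebuilds the item it needs from three pieces: the unchanged prefix of the old item, the logged replacement (or original) bytes, and the unchanged suffix. A small standalone illustration of that reconstruction, with invented names and no Berkeley DB types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *
rebuild_item(const unsigned char *old, size_t old_len,
    const unsigned char *repl, size_t repl_len,
    size_t prefix, size_t suffix, size_t *new_lenp)
{
	unsigned char *p, *start;

	*new_lenp = prefix + repl_len + suffix;
	if ((start = p = malloc(*new_lenp)) == NULL)
		return (NULL);

	memcpy(p, old, prefix);				/* unchanged prefix */
	p += prefix;
	memcpy(p, repl, repl_len);			/* replacement bytes */
	p += repl_len;
	memcpy(p, old + (old_len - suffix), suffix);	/* unchanged suffix */
	return (start);
}

int
main(void)
{
	const unsigned char old[] = "abcXYZdef";	/* original item */
	const unsigned char repl[] = "123";		/* logged replacement */
	size_t new_len;
	unsigned char *item;

	/* Prefix "abc" and suffix "def" are untouched; "XYZ" becomes "123". */
	item = rebuild_item(old, sizeof(old) - 1, repl, sizeof(repl) - 1,
	    3, 3, &new_len);
	if (item != NULL) {
		printf("%.*s\n", (int)new_len, (char *)item);	/* abc123def */
		free(item);
	}
	return (0);
}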
+/*
+ * __bam_root_recover --
+ * Recovery function for setting the root page on the meta-data page.
+ *
+ * PUBLIC: int __bam_root_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_root_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_root_args *argp;
+ BTMETA *meta;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_root_print);
+ REC_INTRO(__bam_root_read, 0);
+
+ if ((ret = memp_fget(mpf, &argp->meta_pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist on redo. */
+ if (DB_REDO(op)) {
+ (void)__db_pgerr(file_dbp, argp->meta_pgno);
+ goto out;
+ } else
+ goto done;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ meta->root = argp->root_pgno;
+ meta->dbmeta.lsn = *lsnp;
+ ((BTREE *)file_dbp->bt_internal)->bt_root = meta->root;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Nothing to undo except lsn. */
+ meta->dbmeta.lsn = argp->meta_lsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_curadj_recover --
+ * Transaction abort function to undo cursor adjustments.
+ * This should only be triggered by subtransaction aborts.
+ *
+ * PUBLIC: int __bam_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_curadj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ int ret;
+
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__bam_curadj_print);
+ REC_INTRO(__bam_curadj_read, 0);
+
+ ret = 0;
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ switch(argp->mode) {
+ case DB_CA_DI:
+ if ((ret = __bam_ca_di(dbc, argp->from_pgno,
+ argp->from_indx, -(int)argp->first_indx)) != 0)
+ goto out;
+ break;
+ case DB_CA_DUP:
+ if ((ret = __bam_ca_undodup(file_dbp, argp->first_indx,
+ argp->from_pgno, argp->from_indx, argp->to_indx)) != 0)
+ goto out;
+ break;
+
+ case DB_CA_RSPLIT:
+ if ((ret =
+ __bam_ca_rsplit(dbc, argp->to_pgno, argp->from_pgno)) != 0)
+ goto out;
+ break;
+
+ case DB_CA_SPLIT:
+ __bam_ca_undosplit(file_dbp, argp->from_pgno,
+ argp->to_pgno, argp->left_pgno, argp->from_indx);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_rcuradj_recover --
+ * Transaction abort function to undo cursor adjustments in rrecno.
+ * This should only be triggered by subtransaction aborts.
+ *
+ * PUBLIC: int __bam_rcuradj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rcuradj_args *argp;
+ BTREE_CURSOR *cp;
+ DB *file_dbp;
+ DBC *dbc, *rdbc;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ COMPQUIET(info, NULL);
+ rdbc = NULL;
+
+ REC_PRINT(__bam_rcuradj_print);
+ REC_INTRO(__bam_rcuradj_read, 0);
+
+ ret = t_ret = 0;
+
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ /*
+ * We don't know whether we're in an offpage dup set, and
+	 * thus don't know whether the dbc that REC_INTRO has handed us is
+ * of a reasonable type. It's certainly unset, so if this is
+ * an offpage dup set, we don't have an OPD cursor. The
+ * simplest solution is just to allocate a whole new cursor
+	 * for our use; we're only really using it to pass some
+ * state into __ram_ca, and this way we don't need to make
+ * this function know anything about how offpage dups work.
+ */
+ if ((ret =
+ __db_icursor(file_dbp, NULL, DB_RECNO, argp->root, 0, &rdbc)) != 0)
+ goto out;
+
+ cp = (BTREE_CURSOR *)rdbc->internal;
+ F_SET(cp, C_RENUMBER);
+ cp->recno = argp->recno;
+
+ switch(argp->mode) {
+ case CA_DELETE:
+ /*
+ * The way to undo a delete is with an insert. Since
+ * we're undoing it, the delete flag must be set.
+ */
+ F_SET(cp, C_DELETED);
+ F_SET(cp, C_RENUMBER); /* Just in case. */
+ cp->order = argp->order;
+ __ram_ca(rdbc, CA_ICURRENT);
+ break;
+ case CA_IAFTER:
+ case CA_IBEFORE:
+ case CA_ICURRENT:
+ /*
+ * The way to undo an insert is with a delete. The delete
+ * flag is unset to start with.
+ */
+ F_CLR(cp, C_DELETED);
+ cp->order = INVALID_ORDER;
+ __ram_ca(rdbc, CA_DELETE);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+out: if (rdbc != NULL && (t_ret = rdbc->c_close(rdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ REC_CLOSE;
+}
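The abort-only cursor-adjustment recovery above relies on each adjustment having a simple inverse: a logged delete is undone by re-inserting at the current position, and any of the logged insert modes is undone by a delete. A toy model of that inversion (the MY_CA_* names are invented and are not the real ca_recno_arg values):

#include <stdio.h>

enum ca_mode { MY_CA_DELETE, MY_CA_IAFTER, MY_CA_IBEFORE, MY_CA_ICURRENT };

static enum ca_mode
invert_adjustment(enum ca_mode logged)
{
	switch (logged) {
	case MY_CA_DELETE:
		return (MY_CA_ICURRENT);	/* undo a delete with an insert */
	case MY_CA_IAFTER:
	case MY_CA_IBEFORE:
	case MY_CA_ICURRENT:
	default:
		return (MY_CA_DELETE);		/* undo any insert with a delete */
	}
}

int
main(void)
{
	printf("undo of CA_DELETE -> %d\n", invert_adjustment(MY_CA_DELETE));
	printf("undo of CA_IAFTER -> %d\n", invert_adjustment(MY_CA_IAFTER));
	return (0);
}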
diff --git a/bdb/btree/bt_reclaim.c b/bdb/btree/bt_reclaim.c
new file mode 100644
index 00000000000..538d837c2d2
--- /dev/null
+++ b/bdb/btree/bt_reclaim.c
@@ -0,0 +1,53 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_reclaim.c,v 11.5 2000/03/22 04:21:01 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "btree.h"
+
+/*
+ * __bam_reclaim --
+ * Free a database.
+ *
+ * PUBLIC: int __bam_reclaim __P((DB *, DB_TXN *));
+ */
+int
+__bam_reclaim(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ /* Walk the tree, freeing pages. */
+ ret = __bam_traverse(dbc,
+ DB_LOCK_WRITE, dbc->internal->root, __db_reclaim_callback, dbc);
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/bdb/btree/bt_recno.c b/bdb/btree/bt_recno.c
new file mode 100644
index 00000000000..6ac0cac350d
--- /dev/null
+++ b/bdb/btree/bt_recno.c
@@ -0,0 +1,1369 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_recno.c,v 11.65 2001/01/18 14:33:22 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "db_ext.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "lock_ext.h"
+#include "qam.h"
+#include "txn.h"
+
+static int __ram_add __P((DBC *, db_recno_t *, DBT *, u_int32_t, u_int32_t));
+static int __ram_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+static int __ram_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+static int __ram_source __P((DB *));
+static int __ram_sread __P((DBC *, db_recno_t));
+static int __ram_update __P((DBC *, db_recno_t, int));
+
+/*
+ * In recno, there are two meanings to the on-page "deleted" flag. If we're
+ * re-numbering records, it means the record was implicitly created. We skip
+ * over implicitly created records if doing a cursor "next" or "prev", and
+ * return DB_KEYEMPTY if they're explicitly requested. If not re-numbering
+ * records, it means that the record was implicitly created, or was deleted.
+ * We skip over implicitly created or deleted records if doing a cursor "next"
+ * or "prev", and return DB_KEYEMPTY if they're explicitly requested.
+ *
+ * If we're re-numbering records, then we have to detect in the cursor that
+ * a record was deleted, and adjust the cursor as necessary on the next get.
+ * If we're not re-numbering records, then we can detect that a record has
+ * been deleted by looking at the actual on-page record, so we completely
+ * ignore the cursor's delete flag. This is different from the B+tree
+ * code, which tracks in the cursor whether it references a deleted
+ * record and doesn't always check the on-page value.
+ */
+#define CD_SET(cp) { \
+ if (F_ISSET(cp, C_RENUMBER)) \
+ F_SET(cp, C_DELETED); \
+}
+#define CD_CLR(cp) { \
+ if (F_ISSET(cp, C_RENUMBER)) { \
+ F_CLR(cp, C_DELETED); \
+ cp->order = INVALID_ORDER; \
+ } \
+}
+#define CD_ISSET(cp) \
+ (F_ISSET(cp, C_RENUMBER) && F_ISSET(cp, C_DELETED))
+
+/*
+ * Macros for comparing the ordering of two cursors.
+ * cp1 comes before cp2 iff one of the following holds:
+ * cp1's recno is less than cp2's recno
+ * recnos are equal, both deleted, and cp1's order is less than cp2's
+ * recnos are equal, cp1 deleted, and cp2 not deleted
+ */
+#define C_LESSTHAN(cp1, cp2) \
+ (((cp1)->recno < (cp2)->recno) || \
+ (((cp1)->recno == (cp2)->recno) && \
+ ((CD_ISSET((cp1)) && CD_ISSET((cp2)) && (cp1)->order < (cp2)->order) || \
+ (CD_ISSET((cp1)) && !CD_ISSET((cp2))))))
+
+/*
+ * cp1 is equal to cp2 iff their recnos and delete flags are identical,
+ * and if the delete flag is set their orders are also identical.
+ */
+#define C_EQUAL(cp1, cp2) \
+ ((cp1)->recno == (cp2)->recno && CD_ISSET((cp1)) == CD_ISSET((cp2)) && \
+ (!CD_ISSET((cp1)) || (cp1)->order == (cp2)->order))
+
+/*
+ * Do we need to log the current cursor adjustment?
+ */
+#define CURADJ_LOG(dbc) \
+ (DB_LOGGING((dbc)) && (dbc)->txn != NULL && (dbc)->txn->parent != NULL)
+
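The ordering macros above are easier to see with concrete values. The following standalone sketch (toy_cursor and toy_lessthan are invented names; the struct is not the real BTREE_CURSOR) mirrors C_LESSTHAN: cursors sort by record number, deleted cursors on the same record sort before live ones, and deleted cursors on the same record sort among themselves by their order field.

#include <stdio.h>

struct toy_cursor {
	unsigned int recno;	/* record number the cursor refers to */
	int	     deleted;	/* CD_ISSET() equivalent */
	unsigned int order;	/* relative order among deleted cursors */
};

static int
toy_lessthan(const struct toy_cursor *c1, const struct toy_cursor *c2)
{
	if (c1->recno != c2->recno)
		return (c1->recno < c2->recno);
	if (c1->deleted && c2->deleted)
		return (c1->order < c2->order);
	return (c1->deleted && !c2->deleted);
}

int
main(void)
{
	struct toy_cursor live = { 5, 0, 0 };
	struct toy_cursor del1 = { 5, 1, 1 }, del2 = { 5, 1, 2 };

	/* Deleted cursors precede the live cursor on the same record... */
	printf("%d %d\n", toy_lessthan(&del1, &live), toy_lessthan(&live, &del1));
	/* ...and among deleted cursors, lower order comes first. */
	printf("%d\n", toy_lessthan(&del1, &del2));
	return (0);
}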
+/*
+ * __ram_open --
+ * Recno open function.
+ *
+ * PUBLIC: int __ram_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+ */
+int
+__ram_open(dbp, name, base_pgno, flags)
+ DB *dbp;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTREE *t;
+ DBC *dbc;
+ int ret, t_ret;
+
+ t = dbp->bt_internal;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->del = __ram_delete;
+ dbp->put = __ram_put;
+ dbp->stat = __bam_stat;
+
+ /* Start up the tree. */
+ if ((ret = __bam_read_root(dbp, name, base_pgno, flags)) != 0)
+ return (ret);
+
+ /*
+ * If the user specified a source tree, open it and map it in.
+ *
+ * !!!
+ * We don't complain if the user specified transactions or threads.
+ * It's possible to make it work, but you'd better know what you're
+ * doing!
+ */
+ if (t->re_source != NULL && (ret = __ram_source(dbp)) != 0)
+ return (ret);
+
+ /* If we're snapshotting an underlying source file, do it now. */
+ if (F_ISSET(dbp, DB_RE_SNAPSHOT)) {
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ /* Do the snapshot. */
+ if ((ret = __ram_update(dbc,
+ DB_MAX_RECORDS, 0)) != 0 && ret == DB_NOTFOUND)
+ ret = 0;
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+	return (ret);
+}
+
+/*
+ * __ram_delete --
+ * Recno db->del function.
+ */
+static int
+__ram_delete(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DBC *dbc;
+ db_recno_t recno;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_delchk(dbp,
+ key, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
+ return (ret);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, txn, "ram_delete", key, NULL, flags);
+
+ /* Check the user's record number and fill in as necessary. */
+ if ((ret = __ram_getno(dbc, key, &recno, 0)) != 0)
+ goto err;
+
+ /* Do the delete. */
+ cp = (BTREE_CURSOR *)dbc->internal;
+ cp->recno = recno;
+
+ ret = __ram_c_del(dbc);
+
+ /* Release the cursor. */
+err: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __ram_put --
+ * Recno db->put function.
+ */
+static int
+__ram_put(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ db_recno_t recno;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_putchk(dbp,
+ key, data, flags, F_ISSET(dbp, DB_AM_RDONLY), 0)) != 0)
+ return (ret);
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, txn, "ram_put", key, data, flags);
+
+ /*
+ * If we're appending to the tree, make sure we've read in all of
+ * the backing source file. Otherwise, check the user's record
+ * number and fill in as necessary. If we found the record or it
+ * simply didn't exist, add the user's record.
+ */
+ if (flags == DB_APPEND)
+ ret = __ram_update(dbc, DB_MAX_RECORDS, 0);
+ else
+ ret = __ram_getno(dbc, key, &recno, 1);
+ if (ret == 0 || ret == DB_NOTFOUND)
+ ret = __ram_add(dbc, &recno, data, flags, 0);
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Return the record number if we're appending to the tree. */
+ if (ret == 0 && flags == DB_APPEND)
+ ret = __db_retcopy(dbp, key, &recno, sizeof(recno),
+ &dbc->rkey.data, &dbc->rkey.ulen);
+
+ return (ret);
+}
+
+/*
+ * __ram_c_del --
+ * Recno cursor->c_del function.
+ *
+ * PUBLIC: int __ram_c_del __P((DBC *));
+ */
+int
+__ram_c_del(dbc)
+ DBC *dbc;
+{
+ BKEYDATA bk;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LSN lsn;
+ DBT hdr, data;
+ EPG *epg;
+ int exact, ret, stack;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ stack = 0;
+
+ /*
+ * The semantics of cursors during delete are as follows: in
+ * non-renumbering recnos, records are replaced with a marker
+ * containing a delete flag. If the record referenced by this cursor
+ * has already been deleted, we will detect that as part of the delete
+ * operation, and fail.
+ *
+ * In renumbering recnos, cursors which represent deleted items
+ * are flagged with the C_DELETED flag, and it is an error to
+ * call c_del a second time without an intervening cursor motion.
+ */
+ if (CD_ISSET(cp))
+ return (DB_KEYEMPTY);
+
+ /* Search the tree for the key; delete only deletes exact matches. */
+ if ((ret = __bam_rsearch(dbc, &cp->recno, S_DELETE, 1, &exact)) != 0)
+ goto err;
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ stack = 1;
+ cp->page = cp->csp->page;
+ cp->pgno = cp->csp->page->pgno;
+ cp->indx = cp->csp->indx;
+
+ /*
+ * If re-numbering records, the on-page deleted flag can only mean
+ * that this record was implicitly created. Applications aren't
+	 * permitted to delete records they never created, so return an error.
+ *
+ * If not re-numbering records, the on-page deleted flag means that
+ * this record was implicitly created, or, was deleted at some time.
+ * The former is an error because applications aren't permitted to
+	 * delete records they never created; the latter is an error because
+ * if the record was "deleted", we could never have found it.
+ */
+ if (B_DISSET(GET_BKEYDATA(cp->page, cp->indx)->type)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ if (F_ISSET(cp, C_RENUMBER)) {
+ /* Delete the item, adjust the counts, adjust the cursors. */
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ goto err;
+ __bam_adjust(dbc, -1);
+ if (__ram_ca(dbc, CA_DELETE) > 0 &&
+ CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(dbp->dbenv,
+ dbc->txn, &lsn, 0, dbp->log_fileid, CA_DELETE,
+ cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+
+ /*
+ * If the page is empty, delete it.
+ *
+ * We never delete a root page. First, root pages of primary
+ * databases never go away, recno or otherwise. However, if
+ * it's the root page of an off-page duplicates database, then
+ * it can be deleted. We don't delete it here because we have
+ * no way of telling the primary database page holder (e.g.,
+		 * the hash access method) that its page element should be
+		 * cleaned up because the underlying tree is gone. So, we keep
+		 * the page around until the last cursor referencing the empty
+		 * tree is closed, and then clean it up.
+ */
+ if (NUM_ENT(cp->page) == 0 && PGNO(cp->page) != cp->root) {
+ /*
+ * We already have a locked stack of pages. However,
+ * there are likely entries in the stack that aren't
+ * going to be emptied by removing the single reference
+ * to the emptied page (or one of its parents).
+ */
+ for (epg = cp->sp; epg <= cp->csp; ++epg)
+ if (NUM_ENT(epg->page) <= 1)
+ break;
+
+ /*
+ * We want to delete a single item out of the last page
+ * that we're not deleting, back up to that page.
+ */
+ ret = __bam_dpages(dbc, --epg);
+
+ /*
+ * Regardless of the return from __bam_dpages, it will
+ * discard our stack and pinned page.
+ */
+ stack = 0;
+ cp->page = NULL;
+ }
+ } else {
+ /* Use a delete/put pair to replace the record with a marker. */
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ goto err;
+
+ B_TSET(bk.type, B_KEYDATA, 1);
+ bk.len = 0;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bk;
+ hdr.size = SSZA(BKEYDATA, data);
+ memset(&data, 0, sizeof(data));
+ data.data = (void *)"";
+ data.size = 0;
+ if ((ret = __db_pitem(dbc,
+ cp->page, cp->indx, BKEYDATA_SIZE(0), &hdr, &data)) != 0)
+ goto err;
+ }
+
+ t->re_modified = 1;
+
+err: if (stack)
+ __bam_stkrel(dbc, STK_CLRDBC);
+
+ return (ret);
+}
+
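The two delete behaviours described in __ram_c_del can be modelled without any page machinery: a renumbering recno physically removes the slot so later record numbers shift down, while a non-renumbering recno overwrites the slot with a zero-length item whose deleted flag is set. A toy model (not the real page layout):

#include <stdio.h>
#include <string.h>

#define NRECS	4

struct toy_rec {
	char data[8];
	int  deleted;
};

static void
delete_renumber(struct toy_rec *recs, int *nrecs, int idx)
{
	memmove(&recs[idx], &recs[idx + 1],
	    (size_t)(*nrecs - idx - 1) * sizeof(recs[0]));
	--*nrecs;
}

static void
delete_marker(struct toy_rec *recs, int idx)
{
	recs[idx].data[0] = '\0';	/* zero-length data */
	recs[idx].deleted = 1;		/* B_DSET() equivalent */
}

int
main(void)
{
	struct toy_rec a[NRECS] = {{"one",0},{"two",0},{"three",0},{"four",0}};
	struct toy_rec b[NRECS];
	int i, n = NRECS;

	memcpy(b, a, sizeof(a));

	delete_renumber(a, &n, 1);		/* record 2 disappears */
	delete_marker(b, 1);			/* record 2 becomes a hole */

	for (i = 0; i < n; ++i)
		printf("renumbering rec %d: %s\n", i + 1, a[i].data);
	for (i = 0; i < NRECS; ++i)
		printf("marker rec %d: %s\n", i + 1,
		    b[i].deleted ? "(deleted)" : b[i].data);
	return (0);
}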
+/*
+ * __ram_c_get --
+ * Recno cursor->c_get function.
+ *
+ * PUBLIC: int __ram_c_get
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ram_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ int cmp, exact, ret;
+
+ COMPQUIET(pgnop, NULL);
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+retry: switch (flags) {
+ case DB_CURRENT:
+ /*
+ * If we're using mutable records and the deleted flag is
+ * set, the cursor is pointing at a nonexistent record;
+ * return an error.
+ */
+ if (CD_ISSET(cp))
+ return (DB_KEYEMPTY);
+ break;
+ case DB_NEXT_DUP:
+ /*
+ * If we're not in an off-page dup set, we know there's no
+ * next duplicate since recnos don't have them. If we
+ * are in an off-page dup set, the next item assuredly is
+ * a dup, so we set flags to DB_NEXT and keep going.
+ */
+ if (!F_ISSET(dbc, DBC_OPD))
+ return (DB_NOTFOUND);
+ /* FALLTHROUGH */
+ case DB_NEXT_NODUP:
+ /*
+ * Recno databases don't have duplicates, set flags to DB_NEXT
+ * and keep going.
+ */
+ /* FALLTHROUGH */
+ case DB_NEXT:
+ flags = DB_NEXT;
+ /*
+ * If record numbers are mutable: if we just deleted a record,
+ * we have to avoid incrementing the record number so that we
+ * return the right record by virtue of renumbering the tree.
+ */
+ if (CD_ISSET(cp))
+ break;
+
+ if (cp->recno != RECNO_OOB) {
+ ++cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ flags = DB_NEXT;
+ cp->recno = 1;
+ break;
+ case DB_PREV_NODUP:
+ /*
+ * Recno databases don't have duplicates, set flags to DB_PREV
+ * and keep going.
+ */
+ /* FALLTHROUGH */
+ case DB_PREV:
+ flags = DB_PREV;
+ if (cp->recno != RECNO_OOB) {
+ if (cp->recno == 1) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ --cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ flags = DB_PREV;
+ if (((ret = __ram_update(dbc,
+ DB_MAX_RECORDS, 0)) != 0) && ret != DB_NOTFOUND)
+ goto err;
+ if ((ret = __bam_nrecs(dbc, &cp->recno)) != 0)
+ goto err;
+ if (cp->recno == 0) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
+ case DB_GET_BOTHC:
+ /*
+ * If we're doing a join and these are offpage dups,
+ * we want to keep searching forward from after the
+ * current cursor position. Increment the recno by 1,
+ * then proceed as for a DB_SET.
+ *
+		 * Otherwise, we know there is no additional matching
+		 * data, as recnos don't have dups; return DB_NOTFOUND.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ cp->recno++;
+ break;
+ }
+ ret = DB_NOTFOUND;
+ goto err;
+ /* NOTREACHED */
+ case DB_GET_BOTH:
+ /*
+ * If we're searching a set of off-page dups, we start
+ * a new linear search from the first record. Otherwise,
+ * we compare the single data item associated with the
+ * requested record for a match.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ cp->recno = 1;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_SET:
+ case DB_SET_RANGE:
+ if ((ret = __ram_getno(dbc, key, &cp->recno, 0)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__ram_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * For DB_PREV, DB_LAST, DB_SET and DB_SET_RANGE, we have already
+ * called __ram_update() to make sure sufficient records have been
+ * read from the backing source file. Do it now for DB_CURRENT (if
+ * the current record was deleted we may need more records from the
+ * backing file for a DB_CURRENT operation), DB_FIRST and DB_NEXT.
+ */
+ if ((flags == DB_NEXT || flags == DB_CURRENT) && ((ret =
+ __ram_update(dbc, cp->recno, 0)) != 0) && ret != DB_NOTFOUND)
+ goto err;
+
+ for (;; ++cp->recno) {
+ /* Search the tree for the record. */
+ if ((ret = __bam_rsearch(dbc, &cp->recno,
+ F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND,
+ 1, &exact)) != 0)
+ goto err;
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ /*
+ * Copy the page into the cursor, discarding any lock we
+ * are currently holding.
+ */
+ cp->page = cp->csp->page;
+ cp->pgno = cp->csp->page->pgno;
+ cp->indx = cp->csp->indx;
+ (void)__TLPUT(dbc, cp->lock);
+ cp->lock = cp->csp->lock;
+ cp->lock_mode = cp->csp->lock_mode;
+
+ /*
+ * If re-numbering records, the on-page deleted flag means this
+ * record was implicitly created. If not re-numbering records,
+ * the on-page deleted flag means this record was implicitly
+ * created, or, it was deleted at some time. Regardless, we
+ * skip such records if doing cursor next/prev operations or
+ * walking through off-page duplicates, and fail if they were
+ * requested explicitly by the application.
+ */
+ if (B_DISSET(GET_BKEYDATA(cp->page, cp->indx)->type))
+ switch (flags) {
+ case DB_NEXT:
+ case DB_PREV:
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ goto retry;
+ case DB_GET_BOTH:
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ continue;
+ default:
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ if (flags == DB_GET_BOTH || flags == DB_GET_BOTHC) {
+ if ((ret = __bam_cmp(dbp, data,
+ cp->page, cp->indx, __bam_defcmp, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0)
+ break;
+ if (!F_ISSET(dbc, DBC_OPD)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ } else
+ break;
+ }
+
+ /* Return the key if the user didn't give us one. */
+ if (!F_ISSET(dbc, DBC_OPD)) {
+ if (flags != DB_SET && flags != DB_SET_RANGE)
+ ret = __db_retcopy(dbp,
+ key, &cp->recno, sizeof(cp->recno),
+ &dbc->rkey.data, &dbc->rkey.ulen);
+ F_SET(key, DB_DBT_ISSET);
+ }
+
+ /* The cursor was reset, no further delete adjustment is necessary. */
+err: CD_CLR(cp);
+
+ return (ret);
+}
+
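Most of the positioning flags in __ram_c_get reduce to DB_NEXT or DB_PREV plus an adjustment of the cursor's record number. The simplified sketch below (invented T_* names; it ignores the deleted-cursor and fall-through-to-DB_LAST cases handled above) shows just that normalization:

#include <stdio.h>

enum toy_flag { T_FIRST, T_NEXT, T_PREV, T_LAST };

/* Returns 0 on success, -1 for "not found"; *recnop is 1-based, 0 = unset. */
static int
step_recno(enum toy_flag flag, unsigned int *recnop, unsigned int nrecs)
{
	switch (flag) {
	case T_FIRST:
		*recnop = 1;
		break;
	case T_NEXT:
		*recnop = *recnop == 0 ? 1 : *recnop + 1;
		break;
	case T_PREV:
		if (*recnop <= 1)
			return (-1);
		--*recnop;
		break;
	case T_LAST:
		if (nrecs == 0)
			return (-1);
		*recnop = nrecs;
		break;
	}
	return (*recnop > nrecs ? -1 : 0);
}

int
main(void)
{
	unsigned int recno = 0;

	printf("FIRST: %d recno=%u\n", step_recno(T_FIRST, &recno, 3), recno);
	printf("NEXT:  %d recno=%u\n", step_recno(T_NEXT, &recno, 3), recno);
	printf("PREV:  %d recno=%u\n", step_recno(T_PREV, &recno, 3), recno);
	return (0);
}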
+/*
+ * __ram_c_put --
+ * Recno cursor->c_put function.
+ *
+ * PUBLIC: int __ram_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ram_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LSN lsn;
+ int exact, nc, ret, t_ret;
+ u_int32_t iiflags;
+ void *arg;
+
+ COMPQUIET(pgnop, NULL);
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * DB_KEYFIRST and DB_KEYLAST will only be set if we're dealing with
+ * an off-page duplicate tree, they can't be specified at user level.
+ * Translate them into something else.
+ */
+ switch (flags) {
+ case DB_KEYFIRST:
+ cp->recno = 1;
+ flags = DB_BEFORE;
+ break;
+ case DB_KEYLAST:
+ if ((ret = __ram_add(dbc, &cp->recno, data, DB_APPEND, 0)) != 0)
+ return (ret);
+ if (CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(dbp->dbenv,
+ dbc->txn, &lsn, 0, dbp->log_fileid, CA_ICURRENT,
+ cp->root, cp->recno, cp->order)))
+ return (ret);
+ return (0);
+ }
+
+ /*
+ * If we're putting with a cursor that's marked C_DELETED, we need to
+ * take special care; the cursor doesn't "really" reference the item
+ * corresponding to its current recno, but instead is "between" that
+ * record and the current one. Translate the actual insert into
+ * DB_BEFORE, and let the __ram_ca work out the gory details of what
+ * should wind up pointing where.
+ */
+ if (CD_ISSET(cp))
+ iiflags = DB_BEFORE;
+ else
+ iiflags = flags;
+
+split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
+ goto err;
+ /*
+ * An inexact match is okay; it just means we're one record past the
+ * end, which is reasonable if we're marked deleted.
+ */
+ DB_ASSERT(exact || CD_ISSET(cp));
+
+ cp->page = cp->csp->page;
+ cp->pgno = cp->csp->page->pgno;
+ cp->indx = cp->csp->indx;
+
+ ret = __bam_iitem(dbc, key, data, iiflags, 0);
+ t_ret = __bam_stkrel(dbc, STK_CLRDBC);
+
+ if (t_ret != 0 && (ret == 0 || ret == DB_NEEDSPLIT))
+ ret = t_ret;
+ else if (ret == DB_NEEDSPLIT) {
+ arg = &cp->recno;
+ if ((ret = __bam_split(dbc, arg)) != 0)
+ goto err;
+ goto split;
+ }
+ if (ret != 0)
+ goto err;
+
+ switch (flags) { /* Adjust the cursors. */
+ case DB_AFTER:
+ nc = __ram_ca(dbc, CA_IAFTER);
+
+ /*
+ * We only need to adjust this cursor forward if we truly added
+ * the item after the current recno, rather than remapping it
+ * to DB_BEFORE.
+ */
+ if (iiflags == DB_AFTER)
+ ++cp->recno;
+
+ /* Only log if __ram_ca found any relevant cursors. */
+ if (nc > 0 && CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp->dbenv,
+ dbc->txn, &lsn, 0, dbp->log_fileid, CA_IAFTER,
+ cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ case DB_BEFORE:
+ nc = __ram_ca(dbc, CA_IBEFORE);
+ --cp->recno;
+
+ /* Only log if __ram_ca found any relevant cursors. */
+ if (nc > 0 && CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp->dbenv,
+ dbc->txn, &lsn, 0, dbp->log_fileid, CA_IBEFORE,
+ cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ case DB_CURRENT:
+ /*
+ * We only need to do an adjustment if we actually
+ * added an item, which we only would have done if the
+ * cursor was marked deleted.
+ *
+ * Only log if __ram_ca found any relevant cursors.
+ */
+ if (CD_ISSET(cp) && __ram_ca(dbc, CA_ICURRENT) > 0 &&
+ CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(
+ dbp->dbenv, dbc->txn, &lsn, 0, dbp->log_fileid,
+ CA_ICURRENT, cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ }
+
+ /* Return the key if we've created a new record. */
+ if (!F_ISSET(dbc, DBC_OPD) && (flags == DB_AFTER || flags == DB_BEFORE))
+ ret = __db_retcopy(dbp, key, &cp->recno,
+ sizeof(cp->recno), &dbc->rkey.data, &dbc->rkey.ulen);
+
+ /* The cursor was reset, no further delete adjustment is necessary. */
+err: CD_CLR(cp);
+
+ return (ret);
+}
+
+/*
+ * __ram_ca --
+ * Adjust cursors. Returns the number of relevant cursors.
+ *
+ * PUBLIC: int __ram_ca __P((DBC *, ca_recno_arg));
+ */
+int
+__ram_ca(dbc_arg, op)
+ DBC *dbc_arg;
+ ca_recno_arg op;
+{
+ BTREE_CURSOR *cp, *cp_arg;
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ db_recno_t recno;
+ int adjusted, found;
+ u_int32_t order;
+
+ dbp = dbc_arg->dbp;
+ dbenv = dbp->dbenv;
+ cp_arg = (BTREE_CURSOR *)dbc_arg->internal;
+ recno = cp_arg->recno;
+
+ found = 0;
+
+ /*
+ * It only makes sense to adjust cursors if we're a renumbering
+ * recno; we should only be called if this is one.
+ */
+ DB_ASSERT(F_ISSET(cp_arg, C_RENUMBER));
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ /*
+ * If we're doing a delete, we need to find the highest
+ * order of any cursor currently pointing at this item,
+ * so we can assign a higher order to the newly deleted
+ * cursor. Unfortunately, this requires a second pass through
+ * the cursor list.
+ */
+ if (op == CA_DELETE) {
+ order = 1;
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if (cp_arg->root == cp->root &&
+ recno == cp->recno && CD_ISSET(cp) &&
+ order <= cp->order)
+ order = cp->order + 1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ } else
+ order = INVALID_ORDER;
+
+ /* Now go through and do the actual adjustments. */
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if (cp_arg->root != cp->root)
+ continue;
+ ++found;
+ adjusted = 0;
+ switch (op) {
+ case CA_DELETE:
+ if (recno < cp->recno) {
+ --cp->recno;
+ /*
+ * If the adjustment made them equal,
+ * we have to merge the orders.
+ */
+ if (recno == cp->recno && CD_ISSET(cp))
+ cp->order += order;
+ } else if (recno == cp->recno &&
+ !CD_ISSET(cp)) {
+ CD_SET(cp);
+ cp->order = order;
+ }
+ break;
+ case CA_IBEFORE:
+ /*
+ * IBEFORE is just like IAFTER, except that we
+ * adjust cursors on the current record too.
+ */
+ if (C_EQUAL(cp_arg, cp)) {
+ ++cp->recno;
+ adjusted = 1;
+ }
+ goto iafter;
+ case CA_ICURRENT:
+
+ /*
+ * If the original cursor wasn't deleted, we
+ * just did a replacement and so there's no
+ * need to adjust anything--we shouldn't have
+ * gotten this far. Otherwise, we behave
+ * much like an IAFTER, except that all
+ * cursors pointing to the current item get
+ * marked undeleted and point to the new
+ * item.
+ */
+ DB_ASSERT(CD_ISSET(cp_arg));
+ if (C_EQUAL(cp_arg, cp)) {
+ CD_CLR(cp);
+ break;
+ }
+ /* FALLTHROUGH */
+ case CA_IAFTER:
+iafter: if (!adjusted && C_LESSTHAN(cp_arg, cp)) {
+ ++cp->recno;
+ adjusted = 1;
+ }
+ if (recno == cp->recno && adjusted)
+ /*
+ * If we've moved this cursor's recno,
+ * split its order number--i.e.,
+ * decrement it by enough so that
+ * the lowest cursor moved has order 1.
+ * cp_arg->order is the split point,
+ * so decrement by one less than that.
+ */
+ cp->order -= (cp_arg->order - 1);
+ break;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (found);
+}
+
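The order bookkeeping in __ram_ca() on a delete is the subtle part: cursors past the deleted record slide down one, and cursors left sitting on it are marked deleted with an order one higher than any cursor already deleted there. The standalone model below (invented names; it omits the order-merge case where a sliding cursor lands on the deleted record) demonstrates the two passes:

#include <stdio.h>

#define NCURS	3

struct toy_cursor {
	unsigned int recno;
	int	     deleted;
	unsigned int order;
};

static void
adjust_for_delete(struct toy_cursor *curs, int ncurs, unsigned int recno)
{
	unsigned int order;
	int i;

	/* First pass: find the highest order already used on this record. */
	order = 1;
	for (i = 0; i < ncurs; ++i)
		if (curs[i].recno == recno && curs[i].deleted &&
		    curs[i].order >= order)
			order = curs[i].order + 1;

	/* Second pass: shift later cursors down, mark cursors on the record. */
	for (i = 0; i < ncurs; ++i)
		if (recno < curs[i].recno)
			--curs[i].recno;
		else if (recno == curs[i].recno && !curs[i].deleted) {
			curs[i].deleted = 1;
			curs[i].order = order;
		}
}

int
main(void)
{
	struct toy_cursor curs[NCURS] = { {2, 0, 0}, {2, 1, 1}, {5, 0, 0} };
	int i;

	adjust_for_delete(curs, NCURS, 2);
	for (i = 0; i < NCURS; ++i)
		printf("cursor %d: recno=%u deleted=%d order=%u\n",
		    i, curs[i].recno, curs[i].deleted, curs[i].order);
	return (0);
}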
+/*
+ * __ram_getno --
+ * Check the user's record number, and make sure we've seen it.
+ *
+ * PUBLIC: int __ram_getno __P((DBC *, const DBT *, db_recno_t *, int));
+ */
+int
+__ram_getno(dbc, key, rep, can_create)
+ DBC *dbc;
+ const DBT *key;
+ db_recno_t *rep;
+ int can_create;
+{
+ DB *dbp;
+ db_recno_t recno;
+
+ dbp = dbc->dbp;
+
+ /* Check the user's record number. */
+ if ((recno = *(db_recno_t *)key->data) == 0) {
+ __db_err(dbp->dbenv, "illegal record number of 0");
+ return (EINVAL);
+ }
+ if (rep != NULL)
+ *rep = recno;
+
+ /*
+ * Btree can neither create records nor read them in. Recno can
+ * do both, see if we can find the record.
+ */
+ return (dbc->dbtype == DB_RECNO ?
+ __ram_update(dbc, recno, can_create) : 0);
+}
+
+/*
+ * __ram_update --
+ * Ensure the tree has records up to and including the specified one.
+ */
+static int
+__ram_update(dbc, recno, can_create)
+ DBC *dbc;
+ db_recno_t recno;
+ int can_create;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_recno_t nrecs;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+
+ /*
+ * If we can't create records and we've read the entire backing input
+ * file, we're done.
+ */
+ if (!can_create && t->re_eof)
+ return (0);
+
+ /*
+ * If we haven't seen this record yet, try to get it from the original
+ * file.
+ */
+ if ((ret = __bam_nrecs(dbc, &nrecs)) != 0)
+ return (ret);
+ if (!t->re_eof && recno > nrecs) {
+ if ((ret = __ram_sread(dbc, recno)) != 0 && ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = __bam_nrecs(dbc, &nrecs)) != 0)
+ return (ret);
+ }
+
+ /*
+ * If we can create records, create empty ones up to the requested
+ * record.
+ */
+ if (!can_create || recno <= nrecs + 1)
+ return (0);
+
+ dbc->rdata.dlen = 0;
+ dbc->rdata.doff = 0;
+ dbc->rdata.flags = 0;
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
+ if (dbc->rdata.ulen < t->re_len) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ t->re_len, NULL, &dbc->rdata.data)) != 0) {
+ dbc->rdata.ulen = 0;
+ dbc->rdata.data = NULL;
+ return (ret);
+ }
+ dbc->rdata.ulen = t->re_len;
+ }
+ dbc->rdata.size = t->re_len;
+ memset(dbc->rdata.data, t->re_pad, t->re_len);
+ } else
+ dbc->rdata.size = 0;
+
+ while (recno > ++nrecs)
+ if ((ret = __ram_add(dbc,
+ &nrecs, &dbc->rdata, 0, BI_DELETED)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * __ram_source --
+ * Load information about the backing file.
+ */
+static int
+__ram_source(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ char *source;
+ int ret;
+
+ t = dbp->bt_internal;
+
+ /* Find the real name, and swap out the one we had before. */
+ if ((ret = __db_appname(dbp->dbenv,
+ DB_APP_DATA, NULL, t->re_source, 0, NULL, &source)) != 0)
+ return (ret);
+ __os_freestr(t->re_source);
+ t->re_source = source;
+
+ /*
+ * !!!
+ * It's possible that the backing source file is read-only. We don't
+ * much care other than we'll complain if there are any modifications
+ * when it comes time to write the database back to the source.
+ */
+ if ((t->re_fp = fopen(t->re_source, "r")) == NULL) {
+ ret = errno;
+ __db_err(dbp->dbenv, "%s: %s", t->re_source, db_strerror(ret));
+ return (ret);
+ }
+
+ t->re_eof = 0;
+ return (0);
+}
+
+/*
+ * __ram_writeback --
+ * Rewrite the backing file.
+ *
+ * PUBLIC: int __ram_writeback __P((DB *));
+ */
+int
+__ram_writeback(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ DBT key, data;
+ FILE *fp;
+ db_recno_t keyno;
+ int ret, t_ret;
+ u_int8_t delim, *pad;
+
+ t = dbp->bt_internal;
+ dbenv = dbp->dbenv;
+ fp = NULL;
+
+ /* If the file wasn't modified, we're done. */
+ if (!t->re_modified)
+ return (0);
+
+ /* If there's no backing source file, we're done. */
+ if (t->re_source == NULL) {
+ t->re_modified = 0;
+ return (0);
+ }
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ /*
+ * Read any remaining records into the tree.
+ *
+ * !!!
+ * This is why we can't support transactions when applications specify
+ * backing (re_source) files. At this point we have to read in the
+ * rest of the records from the file so that we can write all of the
+ * records back out again, which could modify a page for which we'd
+ * have to log changes and which we don't have locked. This could be
+ * partially fixed by taking a snapshot of the entire file during the
+ * DB->open as DB->open is transaction protected. But, if a checkpoint
+ * occurs then, the part of the log holding the copy of the file could
+ * be discarded, and that would make it impossible to recover in the
+ * face of disaster. This could all probably be fixed, but it would
+ * require transaction protecting the backing source file.
+ *
+ * XXX
+ * This could be made to work now that we have transactions protecting
+ * file operations. Margo has specifically asked for the privilege of
+ * doing this work.
+ */
+ if ((ret =
+ __ram_update(dbc, DB_MAX_RECORDS, 0)) != 0 && ret != DB_NOTFOUND)
+ return (ret);
+
+ /*
+ * Close any existing file handle and re-open the file, truncating it.
+ */
+ if (t->re_fp != NULL) {
+ if (fclose(t->re_fp) != 0) {
+ ret = errno;
+ goto err;
+ }
+ t->re_fp = NULL;
+ }
+ if ((fp = fopen(t->re_source, "w")) == NULL) {
+ ret = errno;
+ __db_err(dbenv, "%s: %s", t->re_source, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * We step through the records, writing each one out. Use the record
+ * number and the dbp->get() function, instead of a cursor, so we find
+ * and write out "deleted" or non-existent records.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.size = sizeof(db_recno_t);
+ key.data = &keyno;
+
+ /*
+ * We'll need the delimiter if we're doing variable-length records,
+ * and the pad character if we're doing fixed-length records.
+ */
+ delim = t->re_delim;
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
+ if ((ret = __os_malloc(dbenv, t->re_len, NULL, &pad)) != 0)
+ goto err;
+ memset(pad, t->re_pad, t->re_len);
+ } else
+ COMPQUIET(pad, NULL);
+ for (keyno = 1;; ++keyno) {
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ if (fwrite(data.data, 1, data.size, fp) != data.size)
+ goto write_err;
+ break;
+ case DB_KEYEMPTY:
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN) &&
+ fwrite(pad, 1, t->re_len, fp) != t->re_len)
+ goto write_err;
+ break;
+ case DB_NOTFOUND:
+ ret = 0;
+ goto done;
+ }
+ if (!F_ISSET(dbp, DB_RE_FIXEDLEN) &&
+ fwrite(&delim, 1, 1, fp) != 1) {
+write_err: ret = errno;
+ __db_err(dbp->dbenv,
+ "%s: write failed to backing file: %s",
+ t->re_source, strerror(ret));
+ goto err;
+ }
+ }
+
+err:
+done: /* Close the file descriptor. */
+ if (fp != NULL && fclose(fp) != 0) {
+ if (ret == 0)
+ ret = errno;
+ __db_err(dbenv, "%s: %s", t->re_source, db_strerror(errno));
+ }
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret == 0)
+ t->re_modified = 0;
+
+ return (ret);
+}
+
+/*
+ * __ram_sread --
+ * Read records from a source file.
+ */
+static int
+__ram_sread(dbc, top)
+ DBC *dbc;
+ db_recno_t top;
+{
+ BTREE *t;
+ DB *dbp;
+ DBT data;
+ db_recno_t recno;
+ size_t len;
+ int ch, ret, was_modified;
+
+ t = dbc->dbp->bt_internal;
+ dbp = dbc->dbp;
+ was_modified = t->re_modified;
+
+ if ((ret = __bam_nrecs(dbc, &recno)) != 0)
+ return (ret);
+
+ /* Use the record data return memory, it's only a short-term use. */
+ len = F_ISSET(dbp, DB_RE_FIXEDLEN) ? t->re_len : 256;
+ if (dbc->rdata.ulen < len) {
+ if ((ret = __os_realloc(
+ dbp->dbenv, len, NULL, &dbc->rdata.data)) != 0) {
+ dbc->rdata.ulen = 0;
+ dbc->rdata.data = NULL;
+ return (ret);
+ }
+ dbc->rdata.ulen = len;
+ }
+
+ memset(&data, 0, sizeof(data));
+ while (recno < top) {
+ data.data = dbc->rdata.data;
+ data.size = 0;
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN))
+ for (len = t->re_len; len > 0; --len) {
+ if ((ch = getc(t->re_fp)) == EOF)
+ goto eof;
+ ((u_int8_t *)data.data)[data.size++] = ch;
+ }
+ else
+ for (;;) {
+ if ((ch = getc(t->re_fp)) == EOF)
+ goto eof;
+ if (ch == t->re_delim)
+ break;
+
+ ((u_int8_t *)data.data)[data.size++] = ch;
+ if (data.size == dbc->rdata.ulen) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ dbc->rdata.ulen *= 2,
+ NULL, &dbc->rdata.data)) != 0) {
+ dbc->rdata.ulen = 0;
+ dbc->rdata.data = NULL;
+ return (ret);
+ } else
+ data.data = dbc->rdata.data;
+ }
+ }
+
+ /*
+ * Another process may have read this record from the input
+ * file and stored it into the database already, in which
+ * case we don't need to repeat that operation. We detect
+		 * this by checking if the last record we've read is greater
+		 * than or equal to the number of records in the database.
+ */
+ if (t->re_last >= recno) {
+ ++recno;
+ if ((ret = __ram_add(dbc, &recno, &data, 0, 0)) != 0)
+ goto err;
+ }
+ ++t->re_last;
+ }
+
+ if (0) {
+eof: t->re_eof = 1;
+ ret = DB_NOTFOUND;
+ }
+err: if (!was_modified)
+ t->re_modified = 0;
+
+ return (ret);
+}
+
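The variable-length branch of __ram_sread accumulates bytes until the delimiter and doubles its buffer whenever it fills. A simplified standalone version of that loop, using plain malloc/realloc and invented names rather than the __os_* wrappers and the cursor's scratch buffer:

#include <stdio.h>
#include <stdlib.h>

/* Read one delimiter-terminated record; returns its length, or -1 at EOF. */
static long
read_record(FILE *fp, int delim, unsigned char **bufp, size_t *ulenp)
{
	unsigned char *tmp;
	size_t size;
	int ch;

	for (size = 0;;) {
		if ((ch = getc(fp)) == EOF)
			return (-1);
		if (ch == delim)
			return ((long)size);
		if (size == *ulenp) {		/* buffer full: double it */
			if ((tmp = realloc(*bufp, *ulenp * 2)) == NULL)
				return (-1);
			*bufp = tmp;
			*ulenp *= 2;
		}
		(*bufp)[size++] = (unsigned char)ch;
	}
}

int
main(void)
{
	unsigned char *buf;
	size_t ulen = 4;			/* deliberately tiny */
	long len;
	FILE *fp = tmpfile();

	if (fp == NULL || (buf = malloc(ulen)) == NULL)
		return (1);
	fputs("first\nsecond record\n", fp);
	rewind(fp);

	while ((len = read_record(fp, '\n', &buf, &ulen)) != -1)
		printf("record of %ld bytes: %.*s\n", len, (int)len, buf);

	free(buf);
	fclose(fp);
	return (0);
}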
+/*
+ * __ram_add --
+ * Add records into the tree.
+ */
+static int
+__ram_add(dbc, recnop, data, flags, bi_flags)
+ DBC *dbc;
+ db_recno_t *recnop;
+ DBT *data;
+ u_int32_t flags, bi_flags;
+{
+ BKEYDATA *bk;
+ BTREE_CURSOR *cp;
+ int exact, ret, stack;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+retry: /* Find the slot for insertion. */
+ if ((ret = __bam_rsearch(dbc, recnop,
+ S_INSERT | (flags == DB_APPEND ? S_APPEND : 0), 1, &exact)) != 0)
+ return (ret);
+ stack = 1;
+ cp->page = cp->csp->page;
+ cp->pgno = cp->csp->page->pgno;
+ cp->indx = cp->csp->indx;
+
+ /*
+ * The application may modify the data based on the selected record
+ * number.
+ */
+ if (flags == DB_APPEND && dbc->dbp->db_append_recno != NULL &&
+ (ret = dbc->dbp->db_append_recno(dbc->dbp, data, *recnop)) != 0)
+ goto err;
+
+ /*
+ * If re-numbering records, the on-page deleted flag means this record
+ * was implicitly created. If not re-numbering records, the on-page
+ * deleted flag means this record was implicitly created, or, it was
+ * deleted at some time.
+ *
+ * If DB_NOOVERWRITE is set and the item already exists in the tree,
+ * return an error unless the item was either marked for deletion or
+ * only implicitly created.
+ */
+ if (exact) {
+ bk = GET_BKEYDATA(cp->page, cp->indx);
+ if (!B_DISSET(bk->type) && flags == DB_NOOVERWRITE) {
+ ret = DB_KEYEXIST;
+ goto err;
+ }
+ }
+
+ /*
+ * Select the arguments for __bam_iitem() and do the insert. If the
+ * key is an exact match, or we're replacing the data item with a
+ * new data item, replace the current item. If the key isn't an exact
+ * match, we're inserting a new key/data pair, before the search
+ * location.
+ */
+ switch (ret = __bam_iitem(dbc,
+ NULL, data, exact ? DB_CURRENT : DB_BEFORE, bi_flags)) {
+ case 0:
+ /*
+ * Don't adjust anything.
+ *
+ * If we inserted a record, no cursors need adjusting because
+ * the only new record it's possible to insert is at the very
+ * end of the tree. The necessary adjustments to the internal
+ * page counts were made by __bam_iitem().
+ *
+ * If we overwrote a record, no cursors need adjusting because
+ * future DBcursor->get calls will simply return the underlying
+ * record (there's no adjustment made for the DB_CURRENT flag
+ * when a cursor get operation immediately follows a cursor
+ * delete operation, and the normal adjustment for the DB_NEXT
+ * flag is still correct).
+ */
+ break;
+ case DB_NEEDSPLIT:
+ /* Discard the stack of pages and split the page. */
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ stack = 0;
+
+ if ((ret = __bam_split(dbc, recnop)) != 0)
+ goto err;
+
+ goto retry;
+ /* NOTREACHED */
+ default:
+ goto err;
+ }
+
+err: if (stack)
+ __bam_stkrel(dbc, STK_CLRDBC);
+
+ return (ret);
+}
diff --git a/bdb/btree/bt_rsearch.c b/bdb/btree/bt_rsearch.c
new file mode 100644
index 00000000000..7102cd715aa
--- /dev/null
+++ b/bdb/btree/bt_rsearch.c
@@ -0,0 +1,429 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_rsearch.c,v 11.21 2000/03/28 21:50:04 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "db_shash.h"
+#include "lock.h"
+
+/*
+ * __bam_rsearch --
+ * Search a btree for a record number.
+ *
+ * PUBLIC: int __bam_rsearch __P((DBC *, db_recno_t *, u_int32_t, int, int *));
+ */
+int
+__bam_rsearch(dbc, recnop, flags, stop, exactp)
+ DBC *dbc;
+ db_recno_t *recnop;
+ u_int32_t flags;
+ int stop, *exactp;
+{
+ BINTERNAL *bi;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LOCK lock;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_indx_t adjust, deloffset, indx, top;
+ db_lockmode_t lock_mode;
+ db_pgno_t pg;
+ db_recno_t recno, t_recno, total;
+ int ret, stack;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ BT_STK_CLR(cp);
+
+ /*
+ * There are several ways we search a btree tree. The flags argument
+ * specifies if we're acquiring read or write locks and if we are
+ * locking pairs of pages. In addition, if we're adding or deleting
+ * an item, we have to lock the entire tree, regardless. See btree.h
+ * for more details.
+ *
+ * If write-locking pages, we need to know whether or not to acquire a
+ * write lock on a page before getting it. This depends on how deep it
+ * is in tree, which we don't know until we acquire the root page. So,
+ * if we need to lock the root page we may have to upgrade it later,
+ * because we won't get the correct lock initially.
+ *
+ * Retrieve the root page.
+ */
+ pg = cp->root;
+ stack = LF_ISSET(S_STACK);
+ lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+
+ /*
+ * Decide if we need to save this page; if we do, write lock it.
+ * We deliberately don't lock-couple on this call. If the tree
+ * is tiny, i.e., one page, and two threads are busily updating
+ * the root page, we're almost guaranteed deadlocks galore, as
+ * each one gets a read lock and then blocks the other's attempt
+ * for a write lock.
+ */
+ if (!stack &&
+ ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ (void)memp_fput(dbp->mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+ stack = 1;
+ }
+
+ /*
+ * If appending to the tree, set the record number now -- we have the
+ * root page locked.
+ *
+ * Delete only deletes exact matches, read only returns exact matches.
+ * Note, this is different from __bam_search(), which returns non-exact
+ * matches for read.
+ *
+ * The record may not exist. We can only return the correct location
+ * for the record immediately after the last record in the tree, so do
+ * a fast check now.
+ */
+ total = RE_NREC(h);
+ if (LF_ISSET(S_APPEND)) {
+ *exactp = 0;
+ *recnop = recno = total + 1;
+ } else {
+ recno = *recnop;
+ if (recno <= total)
+ *exactp = 1;
+ else {
+ *exactp = 0;
+ if (!LF_ISSET(S_PAST_EOF) || recno > total + 1) {
+ /*
+ * Keep the page locked for serializability.
+ *
+ * XXX
+ * This leaves the root page locked, which will
+ * eliminate any concurrency. A possible fix
+ * would be to lock the last leaf page instead.
+ */
+ (void)memp_fput(dbp->mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+ return (DB_NOTFOUND);
+ }
+ }
+ }
+
+ /*
+ * !!!
+ * Record numbers in the tree are 0-based, but the recno is
+ * 1-based. All of the calculations below have to take this
+ * into account.
+ */
+ for (total = 0;;) {
+ switch (TYPE(h)) {
+ case P_LBTREE:
+ case P_LDUP:
+ recno -= total;
+ /*
+ * There may be logically deleted records on the page.
+ * If there are enough, the record may not exist.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ adjust = P_INDX;
+ deloffset = O_INDX;
+ } else {
+ adjust = O_INDX;
+ deloffset = 0;
+ }
+ for (t_recno = 0, indx = 0;; indx += adjust) {
+ if (indx >= NUM_ENT(h)) {
+ *exactp = 0;
+ if (!LF_ISSET(S_PAST_EOF) ||
+ recno > t_recno + 1) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ }
+ if (!B_DISSET(
+ GET_BKEYDATA(h, indx + deloffset)->type) &&
+ ++t_recno == recno)
+ break;
+ }
+
+ /* Correct from 1-based to 0-based for a page offset. */
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ case P_IBTREE:
+ for (indx = 0, top = NUM_ENT(h);;) {
+ bi = GET_BINTERNAL(h, indx);
+ if (++indx == top || total + bi->nrecs >= recno)
+ break;
+ total += bi->nrecs;
+ }
+ pg = bi->pgno;
+ break;
+ case P_LRECNO:
+ recno -= total;
+
+ /* Correct from 1-based to 0-based for a page offset. */
+ --recno;
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, recno, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ case P_IRECNO:
+ for (indx = 0, top = NUM_ENT(h);;) {
+ ri = GET_RINTERNAL(h, indx);
+ if (++indx == top || total + ri->nrecs >= recno)
+ break;
+ total += ri->nrecs;
+ }
+ pg = ri->pgno;
+ break;
+ default:
+ return (__db_pgfmt(dbp, h->pgno));
+ }
+ --indx;
+
+ if (stack) {
+ /* Return if this is the lowest page wanted. */
+ if (LF_ISSET(S_PARENT) && stop == h->level) {
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+ BT_STK_PUSH(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret =
+ __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ goto err;
+ } else {
+ /*
+ * Decide if we want to return a pointer to the next
+ * page in the stack. If we do, write lock it and
+ * never unlock it.
+ */
+ if ((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= (u_int8_t)(h->level - 1)) ||
+ (h->level - 1) == LEAFLEVEL)
+ stack = 1;
+
+ (void)memp_fput(dbp->mpf, h, 0);
+
+ lock_mode = stack &&
+ LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * If we fail, discard the lock we held. This
+ * is OK because this only happens when we are
+ * descending the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ goto err;
+ }
+ }
+
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0)
+ goto err;
+ }
+ /* NOTREACHED */
+
+err: BT_STK_POP(cp);
+ __bam_stkrel(dbc, 0);
+ return (ret);
+}
+
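The internal-page cases of __bam_rsearch find the child to descend into by walking entries while the running total of per-child record counts stays below the requested (1-based) record number, then carry the total down so the child is searched for a smaller number. A worked, self-contained example with toy structures (not the real BINTERNAL/RINTERNAL layout):

#include <stdio.h>

struct toy_entry {
	unsigned int nrecs;	/* records under this child */
	int	     child;	/* index into a child table */
};

int
main(void)
{
	/* One internal page with three children of 10, 25 and 7 records. */
	struct toy_entry page[] = { {10, 0}, {25, 1}, {7, 2} };
	unsigned int recno = 30, total = 0;
	int i, nent = sizeof(page) / sizeof(page[0]);

	for (i = 0; i < nent - 1; ++i) {	/* stop at the last entry regardless */
		if (total + page[i].nrecs >= recno)
			break;
		total += page[i].nrecs;
	}

	/* Record 30 lives under child 1 as its (30 - 10) = 20th record. */
	printf("descend into child %d looking for record %u\n",
	    page[i].child, recno - total);
	return (0);
}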
+/*
+ * __bam_adjust --
+ * Adjust the tree after adding or deleting a record.
+ *
+ * PUBLIC: int __bam_adjust __P((DBC *, int32_t));
+ */
+int
+__bam_adjust(dbc, adjust)
+ DBC *dbc;
+ int32_t adjust;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ EPG *epg;
+ PAGE *h;
+ db_pgno_t root_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ root_pgno = cp->root;
+
+ /* Update the record counts for the tree. */
+ for (epg = cp->sp; epg <= cp->csp; ++epg) {
+ h = epg->page;
+ if (TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO) {
+ if (DB_LOGGING(dbc) &&
+ (ret = __bam_cadjust_log(dbp->dbenv,
+ dbc->txn, &LSN(h), 0, dbp->log_fileid,
+ PGNO(h), &LSN(h), (u_int32_t)epg->indx, adjust,
+ PGNO(h) == root_pgno ? CAD_UPDATEROOT : 0)) != 0)
+ return (ret);
+
+ if (TYPE(h) == P_IBTREE)
+ GET_BINTERNAL(h, epg->indx)->nrecs += adjust;
+ else
+ GET_RINTERNAL(h, epg->indx)->nrecs += adjust;
+
+ if (PGNO(h) == root_pgno)
+ RE_NREC_ADJ(h, adjust);
+
+ if ((ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+ }
+ }
+ return (0);
+}
+
+/*
+ * __bam_nrecs --
+ * Return the number of records in the tree.
+ *
+ * PUBLIC: int __bam_nrecs __P((DBC *, db_recno_t *));
+ */
+int
+__bam_nrecs(dbc, rep)
+ DBC *dbc;
+ db_recno_t *rep;
+{
+ DB *dbp;
+ DB_LOCK lock;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+
+ pgno = dbc->internal->root;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ *rep = RE_NREC(h);
+
+ (void)memp_fput(dbp->mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+
+ return (0);
+}
+
+/*
+ * __bam_total --
+ * Return the number of records below a page.
+ *
+ * PUBLIC: db_recno_t __bam_total __P((PAGE *));
+ */
+db_recno_t
+__bam_total(h)
+ PAGE *h;
+{
+ db_recno_t nrecs;
+ db_indx_t indx, top;
+
+ nrecs = 0;
+ top = NUM_ENT(h);
+
+ switch (TYPE(h)) {
+ case P_LBTREE:
+ /* Check for logically deleted records. */
+ for (indx = 0; indx < top; indx += P_INDX)
+ if (!B_DISSET(GET_BKEYDATA(h, indx + O_INDX)->type))
+ ++nrecs;
+ break;
+ case P_LDUP:
+ /* Check for logically deleted records. */
+ for (indx = 0; indx < top; indx += O_INDX)
+ if (!B_DISSET(GET_BKEYDATA(h, indx)->type))
+ ++nrecs;
+ break;
+ case P_IBTREE:
+ for (indx = 0; indx < top; indx += O_INDX)
+ nrecs += GET_BINTERNAL(h, indx)->nrecs;
+ break;
+ case P_LRECNO:
+ nrecs = NUM_ENT(h);
+ break;
+ case P_IRECNO:
+ for (indx = 0; indx < top; indx += O_INDX)
+ nrecs += GET_RINTERNAL(h, indx)->nrecs;
+ break;
+ }
+
+ return (nrecs);
+}
diff --git a/bdb/btree/bt_search.c b/bdb/btree/bt_search.c
new file mode 100644
index 00000000000..d822198f243
--- /dev/null
+++ b/bdb/btree/bt_search.c
@@ -0,0 +1,471 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_search.c,v 11.32 2001/01/17 20:19:46 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "lock.h"
+
+/*
+ * __bam_search --
+ * Search a btree for a key.
+ *
+ * PUBLIC: int __bam_search __P((DBC *,
+ * PUBLIC: const DBT *, u_int32_t, int, db_recno_t *, int *));
+ */
+int
+__bam_search(dbc, key, flags, stop, recnop, exactp)
+ DBC *dbc;
+ const DBT *key;
+ u_int32_t flags;
+ int stop, *exactp;
+ db_recno_t *recnop;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LOCK lock;
+ PAGE *h;
+ db_indx_t base, i, indx, lim;
+ db_lockmode_t lock_mode;
+ db_pgno_t pg;
+ db_recno_t recno;
+ int adjust, cmp, deloffset, ret, stack;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ recno = 0;
+
+ BT_STK_CLR(cp);
+
+ /*
+	 * There are several ways we search a btree.  The flags argument
+ * specifies if we're acquiring read or write locks, if we position
+ * to the first or last item in a set of duplicates, if we return
+ * deleted items, and if we are locking pairs of pages. In addition,
+ * if we're modifying record numbers, we have to lock the entire tree
+ * regardless. See btree.h for more details.
+ *
+ * If write-locking pages, we need to know whether or not to acquire a
+ * write lock on a page before getting it. This depends on how deep it
+	 * is in the tree, which we don't know until we acquire the root page.  So,
+ * if we need to lock the root page we may have to upgrade it later,
+ * because we won't get the correct lock initially.
+ *
+ * Retrieve the root page.
+ */
+try_again:
+ pg = cp->root;
+ stack = LF_ISSET(S_STACK) && F_ISSET(cp, C_RECNUM);
+ lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+
+ /*
+ * Decide if we need to save this page; if we do, write lock it.
+ * We deliberately don't lock-couple on this call. If the tree
+ * is tiny, i.e., one page, and two threads are busily updating
+ * the root page, we're almost guaranteed deadlocks galore, as
+ * each one gets a read lock and then blocks the other's attempt
+ * for a write lock.
+ */
+ if (!stack &&
+ ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ (void)memp_fput(dbp->mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+ if (!((LF_ISSET(S_PARENT)
+ && (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ /* Someone else split the root, start over. */
+ (void)memp_fput(dbp->mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ goto try_again;
+ }
+ stack = 1;
+ }
+
+ /* Choose a comparison function. */
+ func = F_ISSET(dbc, DBC_OPD) ?
+ (dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare) :
+ t->bt_compare;
+
+ for (;;) {
+ /*
+ * Do a binary search on the current page. If we're searching
+ * a Btree leaf page, we have to walk the indices in groups of
+		 * two.  If we're searching an internal page or an off-page
+		 * duplicate page, there's one index per item.  If we find an exact
+ * match on a leaf page, we're done.
+ */
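+		/*
+		 * For illustration (assuming the usual db_page.h layout,
+		 * where P_INDX == 2 and O_INDX == 1): a Btree leaf page
+		 * stores the key for pair N at inp[2 * N] and its data at
+		 * inp[2 * N + 1], so the probe
+		 *
+		 *	indx = base + ((lim >> 1) * adjust);
+		 *
+		 * lands only on key slots (0, 2, 4, ...) for a leaf page,
+		 * and on every slot for an internal or off-page duplicate
+		 * page.
+		 */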
+ adjust = TYPE(h) == P_LBTREE ? P_INDX : O_INDX;
+ for (base = 0,
+ lim = NUM_ENT(h) / (db_indx_t)adjust; lim != 0; lim >>= 1) {
+ indx = base + ((lim >> 1) * adjust);
+ if ((ret =
+ __bam_cmp(dbp, key, h, indx, func, &cmp)) != 0)
+ goto err;
+ if (cmp == 0) {
+ if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP)
+ goto found;
+ goto next;
+ }
+ if (cmp > 0) {
+ base = indx + adjust;
+ --lim;
+ }
+ }
+
+ /*
+ * No match found. Base is the smallest index greater than
+ * key and may be zero or a last + O_INDX index.
+ *
+ * If it's a leaf page, return base as the "found" value.
+ * Delete only deletes exact matches.
+ */
+ if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP) {
+ *exactp = 0;
+
+ if (LF_ISSET(S_EXACT))
+ goto notfound;
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ BT_STK_NUM(dbp->dbenv, cp, h, base, ret);
+ __LPUT(dbc, lock);
+ (void)memp_fput(dbp->mpf, h, 0);
+ return (ret);
+ }
+
+ /*
+ * !!!
+ * Possibly returning a deleted record -- DB_SET_RANGE,
+ * DB_KEYFIRST and DB_KEYLAST don't require an exact
+ * match, and we don't want to walk multiple pages here
+ * to find an undeleted record. This is handled by the
+ * calling routine.
+ */
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, base, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+
+ /*
+ * If it's not a leaf page, record the internal page (which is
+ * a parent page for the key). Decrement the base by 1 if it's
+ * non-zero so that if a split later occurs, the inserted page
+ * will be to the right of the saved page.
+ */
+ indx = base > 0 ? base - O_INDX : base;
+
+ /*
+ * If we're trying to calculate the record number, sum up
+ * all the record numbers on this page up to the indx point.
+ */
+next: if (recnop != NULL)
+ for (i = 0; i < indx; ++i)
+ recno += GET_BINTERNAL(h, i)->nrecs;
+
+ pg = GET_BINTERNAL(h, indx)->pgno;
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ if (stop == h->level) {
+ BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
+ __LPUT(dbc, lock);
+ (void)memp_fput(dbp->mpf, h, 0);
+ return (ret);
+ }
+ BT_STK_NUMPUSH(dbp->dbenv, cp, h, indx, ret);
+ (void)memp_fput(dbp->mpf, h, 0);
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * Discard our lock and return on failure. This
+ * is OK because it only happens when descending
+ * the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ return (ret);
+ }
+ } else if (stack) {
+ /* Return if this is the lowest page wanted. */
+ if (LF_ISSET(S_PARENT) && stop == h->level) {
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+ BT_STK_PUSH(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret =
+ __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ goto err;
+ } else {
+ /*
+ * Decide if we want to return a reference to the next
+ * page in the return stack. If so, lock it and never
+ * unlock it.
+ */
+ if ((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= (u_int8_t)(h->level - 1)) ||
+ (h->level - 1) == LEAFLEVEL)
+ stack = 1;
+
+ (void)memp_fput(dbp->mpf, h, 0);
+
+ lock_mode = stack &&
+ LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * If we fail, discard the lock we held. This
+ * is OK because this only happens when we are
+ * descending the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ goto err;
+ }
+ }
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0)
+ goto err;
+ }
+ /* NOTREACHED */
+
+found: *exactp = 1;
+
+ /*
+ * If we're trying to calculate the record number, add in the
+ * offset on this page and correct for the fact that records
+ * in the tree are 0-based.
+ */
+ if (recnop != NULL)
+ *recnop = recno + (indx / P_INDX) + 1;
+
+ /*
+ * If we got here, we know that we have a Btree leaf or off-page
+ * duplicates page. If it's a Btree leaf page, we have to handle
+ * on-page duplicates.
+ *
+ * If there are duplicates, go to the first/last one. This is
+	 * safe because we know that we're not going to leave the page;
+ * all duplicate sets that are not on overflow pages exist on a
+ * single leaf page.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ if (LF_ISSET(S_DUPLAST))
+ while (indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
+ h->inp[indx] == h->inp[indx + P_INDX])
+ indx += P_INDX;
+ else
+ while (indx > 0 &&
+ h->inp[indx] == h->inp[indx - P_INDX])
+ indx -= P_INDX;
+ }
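+	/*
+	 * (The tests above work because on-page duplicates share a single
+	 * copy of the key: the key's inp[] offset is simply repeated for
+	 * each duplicate pair, so h->inp[indx] == h->inp[indx + P_INDX]
+	 * exactly when the neighboring pair carries the same key.)
+	 */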
+
+ /*
+ * Now check if we are allowed to return deleted items; if not, then
+ * find the next (or previous) non-deleted duplicate entry. (We do
+ * not move from the original found key on the basis of the S_DELNO
+ * flag.)
+ */
+ if (LF_ISSET(S_DELNO)) {
+ deloffset = TYPE(h) == P_LBTREE ? O_INDX : 0;
+ if (LF_ISSET(S_DUPLAST))
+ while (B_DISSET(GET_BKEYDATA(
+ h, indx + deloffset)->type) && indx > 0 &&
+ h->inp[indx] == h->inp[indx - adjust])
+ indx -= adjust;
+ else
+ while (B_DISSET(GET_BKEYDATA(
+ h, indx + deloffset)->type) &&
+ indx < (db_indx_t)(NUM_ENT(h) - adjust) &&
+ h->inp[indx] == h->inp[indx + adjust])
+ indx += adjust;
+
+ /*
+ * If we weren't able to find a non-deleted duplicate, return
+ * DB_NOTFOUND.
+ */
+ if (B_DISSET(GET_BKEYDATA(h, indx + deloffset)->type))
+ goto notfound;
+ }
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
+ __LPUT(dbc, lock);
+ (void)memp_fput(dbp->mpf, h, 0);
+ } else {
+ BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ }
+ return (0);
+
+notfound:
+ /* Keep the page locked for serializability. */
+ (void)memp_fput(dbp->mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+ ret = DB_NOTFOUND;
+
+err: BT_STK_POP(cp);
+ __bam_stkrel(dbc, 0);
+ return (ret);
+}
+
+/*
+ * __bam_stkrel --
+ * Release all pages currently held in the stack.
+ *
+ * PUBLIC: int __bam_stkrel __P((DBC *, u_int32_t));
+ */
+int
+__bam_stkrel(dbc, flags)
+ DBC *dbc;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ EPG *epg;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Release inner pages first.
+ *
+	 * The caller must be sure that setting STK_NOLOCK will not affect
+ * either serializability or recoverability.
+ */
+ for (ret = 0, epg = cp->sp; epg <= cp->csp; ++epg) {
+ if (epg->page != NULL) {
+ if (LF_ISSET(STK_CLRDBC) && cp->page == epg->page) {
+ cp->page = NULL;
+ cp->lock.off = LOCK_INVALID;
+ }
+ if ((t_ret = memp_fput(
+ dbp->mpf, epg->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * XXX
+ * Temporary fix for #3243 -- under certain deadlock
+ * conditions we call here again and re-free the page.
+ * The correct fix is to never release a stack that
+ * doesn't hold items.
+ */
+ epg->page = NULL;
+ }
+ if (epg->lock.off != LOCK_INVALID) {
+ if (LF_ISSET(STK_NOLOCK))
+ (void)__LPUT(dbc, epg->lock);
+ else
+ (void)__TLPUT(dbc, epg->lock);
+ }
+ }
+
+ /* Clear the stack, all pages have been released. */
+ BT_STK_CLR(cp);
+
+ return (ret);
+}
+
+/*
+ * __bam_stkgrow --
+ * Grow the stack.
+ *
+ * PUBLIC: int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *));
+ */
+int
+__bam_stkgrow(dbenv, cp)
+ DB_ENV *dbenv;
+ BTREE_CURSOR *cp;
+{
+ EPG *p;
+ size_t entries;
+ int ret;
+
+ entries = cp->esp - cp->sp;
+
+ if ((ret = __os_calloc(dbenv, entries * 2, sizeof(EPG), &p)) != 0)
+ return (ret);
+ memcpy(p, cp->sp, entries * sizeof(EPG));
+ if (cp->sp != cp->stack)
+ __os_free(cp->sp, entries * sizeof(EPG));
+ cp->sp = p;
+ cp->csp = p + entries;
+ cp->esp = p + entries * 2;
+ return (0);
+}
diff --git a/bdb/btree/bt_split.c b/bdb/btree/bt_split.c
new file mode 100644
index 00000000000..f76337b1944
--- /dev/null
+++ b/bdb/btree/bt_split.c
@@ -0,0 +1,1126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_split.c,v 11.31 2000/12/22 19:08:27 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "btree.h"
+
+static int __bam_broot __P((DBC *, PAGE *, PAGE *, PAGE *));
+static int __bam_page __P((DBC *, EPG *, EPG *));
+static int __bam_pinsert __P((DBC *, EPG *, PAGE *, PAGE *, int));
+static int __bam_psplit __P((DBC *, EPG *, PAGE *, PAGE *, db_indx_t *));
+static int __bam_root __P((DBC *, EPG *));
+static int __ram_root __P((DBC *, PAGE *, PAGE *, PAGE *));
+
+/*
+ * __bam_split --
+ * Split a page.
+ *
+ * PUBLIC: int __bam_split __P((DBC *, void *));
+ */
+int
+__bam_split(dbc, arg)
+ DBC *dbc;
+ void *arg;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ enum { UP, DOWN } dir;
+ db_pgno_t root_pgno;
+ int exact, level, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ root_pgno = cp->root;
+
+ /*
+	 * The locking protocol we use to avoid deadlock is to acquire locks
+	 * by walking down the tree, but we do it as lazily as possible, locking
+ * the root only as a last resort. We expect all stack pages to have
+ * been discarded before we're called; we discard all short-term locks.
+ *
+ * When __bam_split is first called, we know that a leaf page was too
+ * full for an insert. We don't know what leaf page it was, but we
+ * have the key/recno that caused the problem. We call XX_search to
+ * reacquire the leaf page, but this time get both the leaf page and
+ * its parent, locked. We then split the leaf page and see if the new
+ * internal key will fit into the parent page. If it will, we're done.
+ *
+ * If it won't, we discard our current locks and repeat the process,
+ * only this time acquiring the parent page and its parent, locked.
+ * This process repeats until we succeed in the split, splitting the
+ * root page as the final resort. The entire process then repeats,
+ * as necessary, until we split a leaf page.
+ *
+ * XXX
+ * A traditional method of speeding this up is to maintain a stack of
+ * the pages traversed in the original search. You can detect if the
+ * stack is correct by storing the page's LSN when it was searched and
+ * comparing that LSN with the current one when it's locked during the
+ * split. This would be an easy change for this code, but I have no
+ * numbers that indicate it's worthwhile.
+ */
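+	/*
+	 * Sketch of the walk: we first try the split at the leaf level.
+	 * If the parent page cannot absorb the new key, the attempt
+	 * returns DB_NEEDSPLIT and we come back around with the level
+	 * incremented, splitting the parent first, and so on up toward
+	 * the root.  Once a split succeeds at some upper level, we switch
+	 * direction and walk back down, retrying the lower-level splits,
+	 * until the leaf split finally succeeds.
+	 */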
+ t = dbp->bt_internal;
+ for (dir = UP, level = LEAFLEVEL;; dir == UP ? ++level : --level) {
+ /*
+ * Acquire a page and its parent, locked.
+ */
+ if ((ret = (dbc->dbtype == DB_BTREE ?
+ __bam_search(dbc, arg, S_WRPAIR, level, NULL, &exact) :
+ __bam_rsearch(dbc,
+ (db_recno_t *)arg, S_WRPAIR, level, &exact))) != 0)
+ return (ret);
+
+ /*
+ * Split the page if it still needs it (it's possible another
+ * thread of control has already split the page). If we are
+ * guaranteed that two items will fit on the page, the split
+ * is no longer necessary.
+ */
+ if (2 * B_MAXSIZEONPAGE(cp->ovflsize)
+ <= (db_indx_t)P_FREESPACE(cp->csp[0].page)) {
+ __bam_stkrel(dbc, STK_NOLOCK);
+ return (0);
+ }
+ ret = cp->csp[0].page->pgno == root_pgno ?
+ __bam_root(dbc, &cp->csp[0]) :
+ __bam_page(dbc, &cp->csp[-1], &cp->csp[0]);
+ BT_STK_CLR(cp);
+
+ switch (ret) {
+ case 0:
+ /* Once we've split the leaf page, we're done. */
+ if (level == LEAFLEVEL)
+ return (0);
+
+ /* Switch directions. */
+ if (dir == UP)
+ dir = DOWN;
+ break;
+ case DB_NEEDSPLIT:
+ /*
+ * It's possible to fail to split repeatedly, as other
+ * threads may be modifying the tree, or the page usage
+ * is sufficiently bad that we don't get enough space
+ * the first time.
+ */
+ if (dir == DOWN)
+ dir = UP;
+ break;
+ default:
+ return (ret);
+ }
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __bam_root --
+ * Split the root page of a btree.
+ */
+static int
+__bam_root(dbc, cp)
+ DBC *dbc;
+ EPG *cp;
+{
+ DB *dbp;
+ DBT log_dbt;
+ DB_LSN log_lsn;
+ PAGE *lp, *rp;
+ db_indx_t split;
+ u_int32_t opflags;
+ int ret;
+
+ dbp = dbc->dbp;
+
+ /* Yeah, right. */
+ if (cp->page->level >= MAXBTREELEVEL) {
+ __db_err(dbp->dbenv,
+ "Too many btree levels: %d", cp->page->level);
+ ret = ENOSPC;
+ goto err;
+ }
+
+ /* Create new left and right pages for the split. */
+ lp = rp = NULL;
+ if ((ret = __db_new(dbc, TYPE(cp->page), &lp)) != 0 ||
+ (ret = __db_new(dbc, TYPE(cp->page), &rp)) != 0)
+ goto err;
+ P_INIT(lp, dbp->pgsize, lp->pgno,
+ PGNO_INVALID, ISINTERNAL(cp->page) ? PGNO_INVALID : rp->pgno,
+ cp->page->level, TYPE(cp->page));
+ P_INIT(rp, dbp->pgsize, rp->pgno,
+ ISINTERNAL(cp->page) ? PGNO_INVALID : lp->pgno, PGNO_INVALID,
+ cp->page->level, TYPE(cp->page));
+
+ /* Split the page. */
+ if ((ret = __bam_psplit(dbc, cp, lp, rp, &split)) != 0)
+ goto err;
+
+ /* Log the change. */
+ if (DB_LOGGING(dbc)) {
+ memset(&log_dbt, 0, sizeof(log_dbt));
+ log_dbt.data = cp->page;
+ log_dbt.size = dbp->pgsize;
+ ZERO_LSN(log_lsn);
+ opflags = F_ISSET(
+ (BTREE_CURSOR *)dbc->internal, C_RECNUM) ? SPL_NRECS : 0;
+ if ((ret = __bam_split_log(dbp->dbenv, dbc->txn,
+ &LSN(cp->page), 0, dbp->log_fileid, PGNO(lp), &LSN(lp),
+ PGNO(rp), &LSN(rp), (u_int32_t)NUM_ENT(lp), 0, &log_lsn,
+ dbc->internal->root, &log_dbt, opflags)) != 0)
+ goto err;
+ LSN(lp) = LSN(cp->page);
+ LSN(rp) = LSN(cp->page);
+ }
+
+ /* Clean up the new root page. */
+ if ((ret = (dbc->dbtype == DB_RECNO ?
+ __ram_root(dbc, cp->page, lp, rp) :
+ __bam_broot(dbc, cp->page, lp, rp))) != 0)
+ goto err;
+
+ /* Adjust any cursors. */
+ if ((ret = __bam_ca_split(dbc,
+ cp->page->pgno, lp->pgno, rp->pgno, split, 1)) != 0)
+ goto err;
+
+ /* Success -- write the real pages back to the store. */
+ (void)memp_fput(dbp->mpf, cp->page, DB_MPOOL_DIRTY);
+ (void)__TLPUT(dbc, cp->lock);
+ (void)memp_fput(dbp->mpf, lp, DB_MPOOL_DIRTY);
+ (void)memp_fput(dbp->mpf, rp, DB_MPOOL_DIRTY);
+
+ return (0);
+
+err: if (lp != NULL)
+ (void)__db_free(dbc, lp);
+ if (rp != NULL)
+ (void)__db_free(dbc, rp);
+ (void)memp_fput(dbp->mpf, cp->page, 0);
+ (void)__TLPUT(dbc, cp->lock);
+ return (ret);
+}
+
+/*
+ * __bam_page --
+ * Split the non-root page of a btree.
+ */
+static int
+__bam_page(dbc, pp, cp)
+ DBC *dbc;
+ EPG *pp, *cp;
+{
+ BTREE_CURSOR *bc;
+ DBT log_dbt;
+ DB_LSN log_lsn;
+ DB *dbp;
+ DB_LOCK tplock;
+ DB_LSN save_lsn;
+ PAGE *lp, *rp, *alloc_rp, *tp;
+ db_indx_t split;
+ u_int32_t opflags;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ alloc_rp = lp = rp = tp = NULL;
+ tplock.off = LOCK_INVALID;
+ ret = -1;
+
+ /*
+ * Create a new right page for the split, and fill in everything
+ * except its LSN and page number.
+ *
+ * We malloc space for both the left and right pages, so we don't get
+ * a new page from the underlying buffer pool until we know the split
+ * is going to succeed. The reason is that we can't release locks
+ * acquired during the get-a-new-page process because metadata page
+ * locks can't be discarded on failure since we may have modified the
+ * free list. So, if you assume that we're holding a write lock on the
+ * leaf page which ran out of space and started this split (e.g., we
+ * have already written records to the page, or we retrieved a record
+ * from it with the DB_RMW flag set), failing in a split with both a
+ * leaf page locked and the metadata page locked can potentially lock
+ * up the tree badly, because we've violated the rule of always locking
+ * down the tree, and never up.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &rp)) != 0)
+ goto err;
+ P_INIT(rp, dbp->pgsize, 0,
+ ISINTERNAL(cp->page) ? PGNO_INVALID : PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : NEXT_PGNO(cp->page),
+ cp->page->level, TYPE(cp->page));
+
+ /*
+ * Create new left page for the split, and fill in everything
+ * except its LSN and next-page page number.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &lp)) != 0)
+ goto err;
+ P_INIT(lp, dbp->pgsize, PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : PREV_PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : 0,
+ cp->page->level, TYPE(cp->page));
+
+ /*
+ * Split right.
+ *
+ * Only the indices are sorted on the page, i.e., the key/data pairs
+ * aren't, so it's simpler to copy the data from the split page onto
+ * two new pages instead of copying half the data to a new right page
+ * and compacting the left page in place. Since the left page can't
+ * change, we swap the original and the allocated left page after the
+ * split.
+ */
+ if ((ret = __bam_psplit(dbc, cp, lp, rp, &split)) != 0)
+ goto err;
+
+ /*
+ * Test to see if we are going to be able to insert the new pages into
+ * the parent page. The interesting failure here is that the parent
+ * page can't hold the new keys, and has to be split in turn, in which
+ * case we want to release all the locks we can.
+ */
+ if ((ret = __bam_pinsert(dbc, pp, lp, rp, 1)) != 0)
+ goto err;
+
+ /*
+ * Fix up the previous pointer of any leaf page following the split
+ * page.
+ *
+ * There's interesting deadlock situations here as we try to write-lock
+ * a page that's not in our direct ancestry. Consider a cursor walking
+ * backward through the leaf pages, that has our following page locked,
+ * and is waiting on a lock for the page we're splitting. In that case
+	 * we're going to deadlock here.  It's probably OK; stepping backward
+ * through the tree isn't a common operation.
+ */
+ if (ISLEAF(cp->page) && NEXT_PGNO(cp->page) != PGNO_INVALID) {
+ if ((ret = __db_lget(dbc,
+ 0, NEXT_PGNO(cp->page), DB_LOCK_WRITE, 0, &tplock)) != 0)
+ goto err;
+ if ((ret =
+ memp_fget(dbp->mpf, &NEXT_PGNO(cp->page), 0, &tp)) != 0)
+ goto err;
+ }
+
+ /*
+ * We've got everything locked down we need, and we know the split
+ * is going to succeed. Go and get the additional page we'll need.
+ */
+ if ((ret = __db_new(dbc, TYPE(cp->page), &alloc_rp)) != 0)
+ goto err;
+
+ /*
+ * Fix up the page numbers we didn't have before. We have to do this
+ * before calling __bam_pinsert because it may copy a page number onto
+ * the parent page and it takes the page number from its page argument.
+ */
+ PGNO(rp) = NEXT_PGNO(lp) = PGNO(alloc_rp);
+
+ /* Actually update the parent page. */
+ if ((ret = __bam_pinsert(dbc, pp, lp, rp, 0)) != 0)
+ goto err;
+
+ bc = (BTREE_CURSOR *)dbc->internal;
+ /* Log the change. */
+ if (DB_LOGGING(dbc)) {
+ memset(&log_dbt, 0, sizeof(log_dbt));
+ log_dbt.data = cp->page;
+ log_dbt.size = dbp->pgsize;
+ if (tp == NULL)
+ ZERO_LSN(log_lsn);
+ opflags = F_ISSET(bc, C_RECNUM) ? SPL_NRECS : 0;
+ if ((ret = __bam_split_log(dbp->dbenv, dbc->txn,
+ &LSN(cp->page), 0, dbp->log_fileid, PGNO(cp->page),
+ &LSN(cp->page), PGNO(alloc_rp), &LSN(alloc_rp),
+ (u_int32_t)NUM_ENT(lp),
+ tp == NULL ? 0 : PGNO(tp),
+ tp == NULL ? &log_lsn : &LSN(tp),
+ bc->root, &log_dbt, opflags)) != 0)
+ goto err;
+
+ /* Update the LSNs for all involved pages. */
+ LSN(alloc_rp) = LSN(cp->page);
+ LSN(lp) = LSN(cp->page);
+ LSN(rp) = LSN(cp->page);
+ if (tp != NULL)
+ LSN(tp) = LSN(cp->page);
+ }
+
+ /*
+	 * through here: either we are logging, in which case we set the LSNs
+	 * in the logging code above, or we are not logging, in which case
+	 * lp and rp do not carry valid LSNs.  The correct LSNs to use are the
+ * have valid LSNs on lp or rp. The correct LSNs to use are the
+ * ones on the page we got from __db_new or the one that was
+ * originally on cp->page. In both cases, we save the LSN from the
+ * real database page (not a malloc'd one) and reapply it after we
+ * do the copy.
+ */
+ save_lsn = alloc_rp->lsn;
+ memcpy(alloc_rp, rp, LOFFSET(rp));
+ memcpy((u_int8_t *)alloc_rp + HOFFSET(rp),
+ (u_int8_t *)rp + HOFFSET(rp), dbp->pgsize - HOFFSET(rp));
+ alloc_rp->lsn = save_lsn;
+
+ save_lsn = cp->page->lsn;
+ memcpy(cp->page, lp, LOFFSET(lp));
+ memcpy((u_int8_t *)cp->page + HOFFSET(lp),
+ (u_int8_t *)lp + HOFFSET(lp), dbp->pgsize - HOFFSET(lp));
+ cp->page->lsn = save_lsn;
+
+ /* Fix up the next-page link. */
+ if (tp != NULL)
+ PREV_PGNO(tp) = PGNO(rp);
+
+ /* Adjust any cursors. */
+ if ((ret = __bam_ca_split(dbc,
+ PGNO(cp->page), PGNO(cp->page), PGNO(rp), split, 0)) != 0)
+ goto err;
+
+ __os_free(lp, dbp->pgsize);
+ __os_free(rp, dbp->pgsize);
+
+ /*
+ * Success -- write the real pages back to the store. As we never
+ * acquired any sort of lock on the new page, we release it before
+ * releasing locks on the pages that reference it. We're finished
+ * modifying the page so it's not really necessary, but it's neater.
+ */
+ if ((t_ret =
+ memp_fput(dbp->mpf, alloc_rp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret =
+ memp_fput(dbp->mpf, pp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, pp->lock);
+ if ((t_ret =
+ memp_fput(dbp->mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, cp->lock);
+ if (tp != NULL) {
+ if ((t_ret =
+ memp_fput(dbp->mpf, tp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, tplock);
+ }
+ return (ret);
+
+err: if (lp != NULL)
+ __os_free(lp, dbp->pgsize);
+ if (rp != NULL)
+ __os_free(rp, dbp->pgsize);
+ if (alloc_rp != NULL)
+ (void)__db_free(dbc, alloc_rp);
+
+ if (tp != NULL)
+ (void)memp_fput(dbp->mpf, tp, 0);
+ if (tplock.off != LOCK_INVALID)
+ /* We never updated the next page, we can release it. */
+ (void)__LPUT(dbc, tplock);
+
+ (void)memp_fput(dbp->mpf, pp->page, 0);
+ if (ret == DB_NEEDSPLIT)
+ (void)__LPUT(dbc, pp->lock);
+ else
+ (void)__TLPUT(dbc, pp->lock);
+
+ (void)memp_fput(dbp->mpf, cp->page, 0);
+ if (ret == DB_NEEDSPLIT)
+ (void)__LPUT(dbc, cp->lock);
+ else
+ (void)__TLPUT(dbc, cp->lock);
+
+ return (ret);
+}
+
+/*
+ * __bam_broot --
+ * Fix up the btree root page after it has been split.
+ */
+static int
+__bam_broot(dbc, rootp, lp, rp)
+ DBC *dbc;
+ PAGE *rootp, *lp, *rp;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT hdr, data;
+ db_pgno_t root_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * If the root page was a leaf page, change it into an internal page.
+ * We copy the key we split on (but not the key's data, in the case of
+ * a leaf page) to the new root page.
+ */
+ root_pgno = cp->root;
+ P_INIT(rootp, dbp->pgsize,
+ root_pgno, PGNO_INVALID, PGNO_INVALID, lp->level + 1, P_IBTREE);
+
+ memset(&data, 0, sizeof(data));
+ memset(&hdr, 0, sizeof(hdr));
+
+ /*
+ * The btree comparison code guarantees that the left-most key on any
+ * internal btree page is never used, so it doesn't need to be filled
+ * in. Set the record count if necessary.
+ */
+ memset(&bi, 0, sizeof(bi));
+ bi.len = 0;
+ B_TSET(bi.type, B_KEYDATA, 0);
+ bi.pgno = lp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(lp);
+ RE_NREC_SET(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ if ((ret =
+ __db_pitem(dbc, rootp, 0, BINTERNAL_SIZE(0), &hdr, NULL)) != 0)
+ return (ret);
+
+ switch (TYPE(rp)) {
+ case P_IBTREE:
+ /* Copy the first key of the child page onto the root page. */
+ child_bi = GET_BINTERNAL(rp, 0);
+
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bi->data;
+ data.size = child_bi->len;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(child_bi->len), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)(child_bi->data))->pgno, 1)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ case P_LBTREE:
+ /* Copy the first key of the child page onto the root page. */
+ child_bk = GET_BKEYDATA(rp, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ bi.len = child_bk->len;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bk->data;
+ data.size = child_bk->len;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(child_bk->len), &hdr, &data)) != 0)
+ return (ret);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bk;
+ data.size = BOVERFLOW_SIZE;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(BOVERFLOW_SIZE), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bk->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)child_bk)->pgno, 1)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp, rp->pgno));
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp, rp->pgno));
+ }
+ return (0);
+}
+
+/*
+ * __ram_root --
+ * Fix up the recno root page after it has been split.
+ */
+static int
+__ram_root(dbc, rootp, lp, rp)
+ DBC *dbc;
+ PAGE *rootp, *lp, *rp;
+{
+ DB *dbp;
+ DBT hdr;
+ RINTERNAL ri;
+ db_pgno_t root_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ root_pgno = dbc->internal->root;
+
+ /* Initialize the page. */
+ P_INIT(rootp, dbp->pgsize,
+ root_pgno, PGNO_INVALID, PGNO_INVALID, lp->level + 1, P_IRECNO);
+
+ /* Initialize the header. */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &ri;
+ hdr.size = RINTERNAL_SIZE;
+
+ /* Insert the left and right keys, set the header information. */
+ ri.pgno = lp->pgno;
+ ri.nrecs = __bam_total(lp);
+ if ((ret = __db_pitem(dbc, rootp, 0, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ RE_NREC_SET(rootp, ri.nrecs);
+ ri.pgno = rp->pgno;
+ ri.nrecs = __bam_total(rp);
+ if ((ret = __db_pitem(dbc, rootp, 1, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ RE_NREC_ADJ(rootp, ri.nrecs);
+ return (0);
+}
+
+/*
+ * __bam_pinsert --
+ * Insert a new key into a parent page, completing the split.
+ */
+static int
+__bam_pinsert(dbc, parent, lchild, rchild, space_check)
+ DBC *dbc;
+ EPG *parent;
+ PAGE *lchild, *rchild;
+ int space_check;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk, *tmp_bk;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT a, b, hdr, data;
+ PAGE *ppage;
+ RINTERNAL ri;
+ db_indx_t off;
+ db_recno_t nrecs;
+ size_t (*func) __P((DB *, const DBT *, const DBT *));
+ u_int32_t n, nbytes, nksize;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ ppage = parent->page;
+
+ /* If handling record numbers, count records split to the right page. */
+ nrecs = F_ISSET(cp, C_RECNUM) && !space_check ? __bam_total(rchild) : 0;
+
+ /*
+ * Now we insert the new page's first key into the parent page, which
+ * completes the split. The parent points to a PAGE and a page index
+ * offset, where the new key goes ONE AFTER the index, because we split
+ * to the right.
+ *
+ * XXX
+ * Some btree algorithms replace the key for the old page as well as
+ * the new page. We don't, as there's no reason to believe that the
+ * first key on the old page is any better than the key we have, and,
+ * in the case of a key being placed at index 0 causing the split, the
+ * key is unavailable.
+ */
+ off = parent->indx + O_INDX;
+
+ /*
+ * Calculate the space needed on the parent page.
+ *
+ * Prefix trees: space hack used when inserting into BINTERNAL pages.
+ * Retain only what's needed to distinguish between the new entry and
+ * the LAST entry on the page to its left. If the keys compare equal,
+ * retain the entire key. We ignore overflow keys, and the entire key
+ * must be retained for the next-to-leftmost key on the leftmost page
+ * of each level, or the search will fail. Applicable ONLY to internal
+ * pages that have leaf pages as children. Further reduction of the
+ * key between pairs of internal pages loses too much information.
+ */
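+	/*
+	 * A made-up example of the prefix hack: if the last key on the
+	 * left child is "apple" and the first key on the right child is
+	 * "around", the prefix routine (which, per the DB->set_bt_prefix
+	 * contract, returns how many bytes of the second key are needed
+	 * to show it is greater than the first) returns 2, so only "ar"
+	 * is promoted into the parent instead of the full 6-byte key.
+	 */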
+ switch (TYPE(rchild)) {
+ case P_IBTREE:
+ child_bi = GET_BINTERNAL(rchild, 0);
+ nbytes = BINTERNAL_PSIZE(child_bi->len);
+
+ if (P_FREESPACE(ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ /* Add a new record for the right page. */
+ memset(&bi, 0, sizeof(bi));
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bi->data;
+ data.size = child_bi->len;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(child_bi->len), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)(child_bi->data))->pgno, 1)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ case P_LBTREE:
+ child_bk = GET_BKEYDATA(rchild, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ /*
+ * We set t->bt_prefix to NULL if we have a comparison
+ * callback but no prefix compression callback. But,
+ * if we're splitting in an off-page duplicates tree,
+ * we still have to do some checking. If using the
+ * default off-page duplicates comparison routine we
+ * can use the default prefix compression callback. If
+ * not using the default off-page duplicates comparison
+ * routine, we can't do any kind of prefix compression
+ * as there's no way for an application to specify a
+ * prefix compression callback that corresponds to its
+ * comparison callback.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if (dbp->dup_compare == __bam_defcmp)
+ func = __bam_defpfx;
+ else
+ func = NULL;
+ } else
+ func = t->bt_prefix;
+
+ nbytes = BINTERNAL_PSIZE(child_bk->len);
+ nksize = child_bk->len;
+ if (func == NULL)
+ goto noprefix;
+ if (ppage->prev_pgno == PGNO_INVALID && off <= 1)
+ goto noprefix;
+ tmp_bk = GET_BKEYDATA(lchild, NUM_ENT(lchild) -
+ (TYPE(lchild) == P_LDUP ? O_INDX : P_INDX));
+ if (B_TYPE(tmp_bk->type) != B_KEYDATA)
+ goto noprefix;
+ memset(&a, 0, sizeof(a));
+ a.size = tmp_bk->len;
+ a.data = tmp_bk->data;
+ memset(&b, 0, sizeof(b));
+ b.size = child_bk->len;
+ b.data = child_bk->data;
+ nksize = func(dbp, &a, &b);
+ if ((n = BINTERNAL_PSIZE(nksize)) < nbytes)
+ nbytes = n;
+ else
+noprefix: nksize = child_bk->len;
+
+ if (P_FREESPACE(ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ memset(&bi, 0, sizeof(bi));
+ bi.len = nksize;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bk->data;
+ data.size = nksize;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(nksize), &hdr, &data)) != 0)
+ return (ret);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ nbytes = BINTERNAL_PSIZE(BOVERFLOW_SIZE);
+
+ if (P_FREESPACE(ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ memset(&bi, 0, sizeof(bi));
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bk;
+ data.size = BOVERFLOW_SIZE;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(BOVERFLOW_SIZE), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bk->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)child_bk)->pgno, 1)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp, rchild->pgno));
+ }
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ nbytes = RINTERNAL_PSIZE;
+
+ if (P_FREESPACE(ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ /* Add a new record for the right page. */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &ri;
+ hdr.size = RINTERNAL_SIZE;
+ ri.pgno = rchild->pgno;
+ ri.nrecs = nrecs;
+ if ((ret = __db_pitem(dbc,
+ ppage, off, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp, rchild->pgno));
+ }
+
+ /*
+	 * If this is a Recno tree, a Btree maintaining record numbers, or
+	 * an off-page duplicates tree, adjust the parent page's left-page
+	 * record count.
+ */
+ if (F_ISSET(cp, C_RECNUM)) {
+ /* Log the change. */
+ if (DB_LOGGING(dbc) &&
+ (ret = __bam_cadjust_log(dbp->dbenv, dbc->txn,
+ &LSN(ppage), 0, dbp->log_fileid, PGNO(ppage),
+ &LSN(ppage), parent->indx, -(int32_t)nrecs, 0)) != 0)
+ return (ret);
+
+ /* Update the left page count. */
+ if (dbc->dbtype == DB_RECNO)
+ GET_RINTERNAL(ppage, parent->indx)->nrecs -= nrecs;
+ else
+ GET_BINTERNAL(ppage, parent->indx)->nrecs -= nrecs;
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_psplit --
+ * Do the real work of splitting the page.
+ */
+static int
+__bam_psplit(dbc, cp, lp, rp, splitret)
+ DBC *dbc;
+ EPG *cp;
+ PAGE *lp, *rp;
+ db_indx_t *splitret;
+{
+ DB *dbp;
+ PAGE *pp;
+ db_indx_t half, nbytes, off, splitp, top;
+ int adjust, cnt, iflag, isbigkey, ret;
+
+ dbp = dbc->dbp;
+ pp = cp->page;
+ adjust = TYPE(pp) == P_LBTREE ? P_INDX : O_INDX;
+
+ /*
+ * If we're splitting the first (last) page on a level because we're
+ * inserting (appending) a key to it, it's likely that the data is
+ * sorted. Moving a single item to the new page is less work and can
+	 * push the fill factor higher than normal.  If we're wrong, it's not
+	 * a big deal; we'll just do the split the right way next time.
+ */
+ off = 0;
+ if (NEXT_PGNO(pp) == PGNO_INVALID &&
+ ((ISINTERNAL(pp) && cp->indx == NUM_ENT(cp->page) - 1) ||
+ (!ISINTERNAL(pp) && cp->indx == NUM_ENT(cp->page))))
+ off = NUM_ENT(cp->page) - adjust;
+ else if (PREV_PGNO(pp) == PGNO_INVALID && cp->indx == 0)
+ off = adjust;
+
+ if (off != 0)
+ goto sort;
+
+ /*
+ * Split the data to the left and right pages. Try not to split on
+ * an overflow key. (Overflow keys on internal pages will slow down
+ * searches.) Refuse to split in the middle of a set of duplicates.
+ *
+ * First, find the optimum place to split.
+ *
+ * It's possible to try and split past the last record on the page if
+ * there's a very large record at the end of the page. Make sure this
+ * doesn't happen by bounding the check at the next-to-last entry on
+ * the page.
+ *
+ * Note, we try and split half the data present on the page. This is
+ * because another process may have already split the page and left
+ * it half empty. We don't try and skip the split -- we don't know
+ * how much space we're going to need on the page, and we may need up
+ * to half the page for a big item, so there's no easy test to decide
+ * if we need to split or not. Besides, if two threads are inserting
+ * data into the same place in the database, we're probably going to
+ * need more space soon anyway.
+ */
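+	/*
+	 * Illustrative numbers only: on a 4096-byte page where HOFFSET(pp)
+	 * is 1096, the items consume 4096 - 1096 = 3000 bytes, so "half"
+	 * below is 1500 and the loop accumulates entry sizes from index 0
+	 * until roughly that many bytes would stay on the left page.
+	 */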
+ top = NUM_ENT(pp) - adjust;
+ half = (dbp->pgsize - HOFFSET(pp)) / 2;
+ for (nbytes = 0, off = 0; off < top && nbytes < half; ++off)
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ if (B_TYPE(GET_BINTERNAL(pp, off)->type) == B_KEYDATA)
+ nbytes +=
+ BINTERNAL_SIZE(GET_BINTERNAL(pp, off)->len);
+ else
+ nbytes += BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ break;
+ case P_LBTREE:
+ if (B_TYPE(GET_BKEYDATA(pp, off)->type) == B_KEYDATA)
+ nbytes +=
+ BKEYDATA_SIZE(GET_BKEYDATA(pp, off)->len);
+ else
+ nbytes += BOVERFLOW_SIZE;
+
+ ++off;
+ /* FALLTHROUGH */
+ case P_LDUP:
+ case P_LRECNO:
+ if (B_TYPE(GET_BKEYDATA(pp, off)->type) == B_KEYDATA)
+ nbytes +=
+ BKEYDATA_SIZE(GET_BKEYDATA(pp, off)->len);
+ else
+ nbytes += BOVERFLOW_SIZE;
+ break;
+ case P_IRECNO:
+ nbytes += RINTERNAL_SIZE;
+ break;
+ default:
+ return (__db_pgfmt(dbp, pp->pgno));
+ }
+sort: splitp = off;
+
+ /*
+ * Splitp is either at or just past the optimum split point. If the
+ * tree type is such that we're going to promote a key to an internal
+ * page, and our current choice is an overflow key, look for something
+ * close by that's smaller.
+ */
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ iflag = 1;
+ isbigkey = B_TYPE(GET_BINTERNAL(pp, off)->type) != B_KEYDATA;
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ iflag = 0;
+ isbigkey = B_TYPE(GET_BKEYDATA(pp, off)->type) != B_KEYDATA;
+ break;
+ default:
+ iflag = isbigkey = 0;
+ }
+ if (isbigkey)
+ for (cnt = 1; cnt <= 3; ++cnt) {
+ off = splitp + cnt * adjust;
+ if (off < (db_indx_t)NUM_ENT(pp) &&
+ ((iflag &&
+ B_TYPE(GET_BINTERNAL(pp,off)->type) == B_KEYDATA) ||
+ B_TYPE(GET_BKEYDATA(pp, off)->type) == B_KEYDATA)) {
+ splitp = off;
+ break;
+ }
+ if (splitp <= (db_indx_t)(cnt * adjust))
+ continue;
+ off = splitp - cnt * adjust;
+ if (iflag ?
+ B_TYPE(GET_BINTERNAL(pp, off)->type) == B_KEYDATA :
+ B_TYPE(GET_BKEYDATA(pp, off)->type) == B_KEYDATA) {
+ splitp = off;
+ break;
+ }
+ }
+
+ /*
+	 * We can't split in the middle of a set of duplicates.  We know that
+ * no duplicate set can take up more than about 25% of the page,
+ * because that's the point where we push it off onto a duplicate
+ * page set. So, this loop can't be unbounded.
+ */
+ if (TYPE(pp) == P_LBTREE &&
+ pp->inp[splitp] == pp->inp[splitp - adjust])
+ for (cnt = 1;; ++cnt) {
+ off = splitp + cnt * adjust;
+ if (off < NUM_ENT(pp) &&
+ pp->inp[splitp] != pp->inp[off]) {
+ splitp = off;
+ break;
+ }
+ if (splitp <= (db_indx_t)(cnt * adjust))
+ continue;
+ off = splitp - cnt * adjust;
+ if (pp->inp[splitp] != pp->inp[off]) {
+ splitp = off + adjust;
+ break;
+ }
+ }
+
+ /* We're going to split at splitp. */
+ if ((ret = __bam_copy(dbp, pp, lp, 0, splitp)) != 0)
+ return (ret);
+ if ((ret = __bam_copy(dbp, pp, rp, splitp, NUM_ENT(pp))) != 0)
+ return (ret);
+
+ *splitret = splitp;
+ return (0);
+}
+
+/*
+ * __bam_copy --
+ * Copy a set of records from one page to another.
+ *
+ * PUBLIC: int __bam_copy __P((DB *, PAGE *, PAGE *, u_int32_t, u_int32_t));
+ */
+int
+__bam_copy(dbp, pp, cp, nxt, stop)
+ DB *dbp;
+ PAGE *pp, *cp;
+ u_int32_t nxt, stop;
+{
+ db_indx_t nbytes, off;
+
+ /*
+ * Copy the rest of the data to the right page. Nxt is the next
+ * offset placed on the target page.
+ */
+ for (off = 0; nxt < stop; ++nxt, ++NUM_ENT(cp), ++off) {
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ if (B_TYPE(GET_BINTERNAL(pp, nxt)->type) == B_KEYDATA)
+ nbytes =
+ BINTERNAL_SIZE(GET_BINTERNAL(pp, nxt)->len);
+ else
+ nbytes = BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ break;
+ case P_LBTREE:
+ /*
+ * If we're on a key and it's a duplicate, just copy
+ * the offset.
+ */
+ if (off != 0 && (nxt % P_INDX) == 0 &&
+ pp->inp[nxt] == pp->inp[nxt - P_INDX]) {
+ cp->inp[off] = cp->inp[off - P_INDX];
+ continue;
+ }
+ /* FALLTHROUGH */
+ case P_LDUP:
+ case P_LRECNO:
+ if (B_TYPE(GET_BKEYDATA(pp, nxt)->type) == B_KEYDATA)
+ nbytes =
+ BKEYDATA_SIZE(GET_BKEYDATA(pp, nxt)->len);
+ else
+ nbytes = BOVERFLOW_SIZE;
+ break;
+ case P_IRECNO:
+ nbytes = RINTERNAL_SIZE;
+ break;
+ default:
+ return (__db_pgfmt(dbp, pp->pgno));
+ }
+ cp->inp[off] = HOFFSET(cp) -= nbytes;
+ memcpy(P_ENTRY(cp, off), P_ENTRY(pp, nxt), nbytes);
+ }
+ return (0);
+}
diff --git a/bdb/btree/bt_stat.c b/bdb/btree/bt_stat.c
new file mode 100644
index 00000000000..349bb40cf8b
--- /dev/null
+++ b/bdb/btree/bt_stat.c
@@ -0,0 +1,480 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_stat.c,v 11.29 2000/11/28 21:42:27 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "btree.h"
+
+/*
+ * __bam_stat --
+ * Gather/print the btree statistics
+ *
+ * PUBLIC: int __bam_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+ */
+int
+__bam_stat(dbp, spp, db_malloc, flags)
+ DB *dbp;
+ void *spp;
+ void *(*db_malloc) __P((size_t));
+ u_int32_t flags;
+{
+ BTMETA *meta;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DBC *dbc;
+ DB_BTREE_STAT *sp;
+ DB_LOCK lock, metalock;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ meta = NULL;
+ t = dbp->bt_internal;
+ sp = NULL;
+ metalock.off = lock.off = LOCK_INVALID;
+ h = NULL;
+ ret = 0;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ DEBUG_LWRITE(dbc, NULL, "bam_stat", NULL, NULL, flags);
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_malloc(dbp->dbenv, sizeof(*sp), db_malloc, &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+
+ /* If the app just wants the record count, make it fast. */
+ if (flags == DB_RECORDCOUNT) {
+ if ((ret = __db_lget(dbc, 0,
+ cp->root, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf,
+ &cp->root, 0, (PAGE **)&h)) != 0)
+ goto err;
+
+ sp->bt_nkeys = RE_NREC(h);
+
+ goto done;
+ }
+ if (flags == DB_CACHED_COUNTS) {
+ if ((ret = __db_lget(dbc,
+ 0, t->bt_meta, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret =
+ memp_fget(dbp->mpf, &t->bt_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ sp->bt_nkeys = meta->dbmeta.key_count;
+ sp->bt_ndata = meta->dbmeta.record_count;
+
+ goto done;
+ }
+
+ /* Get the metadata page for the entire database. */
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ /* Walk the metadata free list, counting pages. */
+ for (sp->bt_free = 0, pgno = meta->dbmeta.free; pgno != PGNO_INVALID;) {
+ ++sp->bt_free;
+
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ pgno = h->next_pgno;
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ goto err;
+ h = NULL;
+ }
+
+ /* Get the root page. */
+ pgno = cp->root;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ /* Get the levels from the root page. */
+ sp->bt_levels = h->level;
+
+ /* Discard the root page. */
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ goto err;
+ h = NULL;
+ __LPUT(dbc, lock);
+
+ /* Walk the tree. */
+ if ((ret = __bam_traverse(dbc,
+ DB_LOCK_READ, cp->root, __bam_stat_callback, sp)) != 0)
+ goto err;
+
+ /*
+ * Get the subdatabase metadata page if it's not the same as the
+ * one we already have.
+ */
+ if (t->bt_meta != PGNO_BASE_MD || !F_ISSET(dbp, DB_AM_RDONLY)) {
+ if ((ret = memp_fput(dbp->mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
+ __LPUT(dbc, metalock);
+
+ if ((ret = __db_lget(dbc,
+ 0, t->bt_meta, F_ISSET(dbp, DB_AM_RDONLY) ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret =
+ memp_fget(dbp->mpf, &t->bt_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ }
+
+ /* Get metadata page statistics. */
+ sp->bt_metaflags = meta->dbmeta.flags;
+ sp->bt_maxkey = meta->maxkey;
+ sp->bt_minkey = meta->minkey;
+ sp->bt_re_len = meta->re_len;
+ sp->bt_re_pad = meta->re_pad;
+ sp->bt_pagesize = meta->dbmeta.pagesize;
+ sp->bt_magic = meta->dbmeta.magic;
+ sp->bt_version = meta->dbmeta.version;
+ if (!F_ISSET(dbp, DB_AM_RDONLY)) {
+ meta->dbmeta.key_count = sp->bt_nkeys;
+ meta->dbmeta.record_count = sp->bt_ndata;
+ }
+
+ /* Discard the metadata page. */
+ if ((ret = memp_fput(dbp->mpf,
+ meta, F_ISSET(dbp, DB_AM_RDONLY) ? 0 : DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+ __LPUT(dbc, metalock);
+
+done: *(DB_BTREE_STAT **)spp = sp;
+
+ if (0) {
+err: if (sp != NULL)
+ __os_free(sp, sizeof(*sp));
+ }
+
+ if (h != NULL &&
+ (t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (meta != NULL &&
+ (t_ret = memp_fput(dbp->mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (lock.off != LOCK_INVALID)
+ __LPUT(dbc, lock);
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __bam_traverse --
+ * Walk a Btree database.
+ *
+ * PUBLIC: int __bam_traverse __P((DBC *, db_lockmode_t,
+ * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+ */
+int
+__bam_traverse(dbc, mode, root_pgno, callback, cookie)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t root_pgno;
+ int (*callback)__P((DB *, PAGE *, void *, int *));
+ void *cookie;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ DB *dbp;
+ DB_LOCK lock;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_indx_t indx;
+ int already_put, ret, t_ret;
+
+ dbp = dbc->dbp;
+
+ if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(dbp->mpf, &root_pgno, 0, &h)) != 0)
+ goto err;
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ bi = GET_BINTERNAL(h, indx);
+ if (B_TYPE(bi->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ ((BOVERFLOW *)bi->data)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ if ((ret = __bam_traverse(
+ dbc, mode, bi->pgno, callback, cookie)) != 0)
+ break;
+ }
+ break;
+ case P_IRECNO:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ ri = GET_RINTERNAL(h, indx);
+ if ((ret = __bam_traverse(
+ dbc, mode, ri->pgno, callback, cookie)) != 0)
+ break;
+ }
+ break;
+ case P_LBTREE:
+ for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) {
+ bk = GET_BKEYDATA(h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(h, indx)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ bk = GET_BKEYDATA(h, indx + O_INDX);
+ if (B_TYPE(bk->type) == B_DUPLICATE &&
+ (ret = __bam_traverse(dbc, mode,
+ GET_BOVERFLOW(h, indx + O_INDX)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(h, indx + O_INDX)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ case P_LDUP:
+ case P_LRECNO:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ bk = GET_BKEYDATA(h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(h, indx)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ }
+
+ already_put = 0;
+ if ((ret = callback(dbp, h, cookie, &already_put)) != 0)
+ goto err;
+
+err: if (!already_put &&
+	    (t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ __LPUT(dbc, lock);
+
+ return (ret);
+}
+
+/*
+ * __bam_stat_callback --
+ * Statistics callback.
+ *
+ * PUBLIC: int __bam_stat_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__bam_stat_callback(dbp, h, cookie, putp)
+ DB *dbp;
+ PAGE *h;
+ void *cookie;
+ int *putp;
+{
+ DB_BTREE_STAT *sp;
+ db_indx_t indx, top;
+ u_int8_t type;
+
+ sp = cookie;
+ *putp = 0;
+ top = NUM_ENT(h);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ ++sp->bt_int_pg;
+ sp->bt_int_pgfree += P_FREESPACE(h);
+ break;
+ case P_LBTREE:
+ /* Correct for on-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ if (indx + P_INDX >= top ||
+ h->inp[indx] != h->inp[indx + P_INDX])
+ ++sp->bt_nkeys;
+
+ type = GET_BKEYDATA(h, indx + O_INDX)->type;
+ if (!B_DISSET(type) && B_TYPE(type) != B_DUPLICATE)
+ ++sp->bt_ndata;
+ }
+
+ ++sp->bt_leaf_pg;
+ sp->bt_leaf_pgfree += P_FREESPACE(h);
+ break;
+ case P_LRECNO:
+ /*
+ * If walking a recno tree, then each of these items is a key.
+ * Otherwise, we're walking an off-page duplicate set.
+ */
+ if (dbp->type == DB_RECNO) {
+ sp->bt_nkeys += top;
+
+ /*
+ * Correct for deleted items in non-renumbering
+ * Recno databases.
+ */
+ if (F_ISSET(dbp, DB_RE_RENUMBER))
+ sp->bt_ndata += top;
+ else
+ for (indx = 0; indx < top; indx += O_INDX) {
+ type = GET_BKEYDATA(h, indx)->type;
+ if (!B_DISSET(type))
+ ++sp->bt_ndata;
+ }
+
+ ++sp->bt_leaf_pg;
+ sp->bt_leaf_pgfree += P_FREESPACE(h);
+ } else {
+ sp->bt_ndata += top;
+
+ ++sp->bt_dup_pg;
+ sp->bt_dup_pgfree += P_FREESPACE(h);
+ }
+ break;
+ case P_LDUP:
+ /* Correct for deleted items. */
+ for (indx = 0; indx < top; indx += O_INDX)
+ if (!B_DISSET(GET_BKEYDATA(h, indx)->type))
+ ++sp->bt_ndata;
+
+ ++sp->bt_dup_pg;
+ sp->bt_dup_pgfree += P_FREESPACE(h);
+ break;
+ case P_OVERFLOW:
+ ++sp->bt_over_pg;
+ sp->bt_over_pgfree += P_OVFLSPACE(dbp->pgsize, h);
+ break;
+ default:
+ return (__db_pgfmt(dbp, h->pgno));
+ }
+ return (0);
+}
+
+/*
+ * __bam_key_range --
+ * Return proportion of keys relative to given key. The numbers are
+ * slightly skewed due to on page duplicates.
+ *
+ * PUBLIC: int __bam_key_range __P((DB *,
+ * PUBLIC: DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ */
+int
+__bam_key_range(dbp, txn, dbt, kp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *dbt;
+ DB_KEY_RANGE *kp;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DBC *dbc;
+ EPG *sp;
+ double factor;
+ int exact, ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->key_range");
+
+ if (flags != 0)
+ return (__db_ferr(dbp->dbenv, "DB->key_range", 0));
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, NULL, "bam_key_range", NULL, NULL, 0);
+
+ if ((ret = __bam_search(dbc, dbt, S_STK_ONLY, 1, NULL, &exact)) != 0)
+ goto err;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ kp->less = kp->greater = 0.0;
+
+ factor = 1.0;
+ /* Correct the leaf page. */
+ cp->csp->entries /= 2;
+ cp->csp->indx /= 2;
+ for (sp = cp->sp; sp <= cp->csp; ++sp) {
+ /*
+ * At each level we know that pages greater than indx contain
+ * keys greater than what we are looking for and those less
+ * than indx are less than. The one pointed to by indx may
+ * have some less, some greater or even equal. If indx is
+ * equal to the number of entries, then the key is out of range
+ * and everything is less.
+ */
+ if (sp->indx == 0)
+ kp->greater += factor * (sp->entries - 1)/sp->entries;
+ else if (sp->indx == sp->entries)
+ kp->less += factor;
+ else {
+ kp->less += factor * sp->indx / sp->entries;
+ kp->greater += factor *
+ (sp->entries - sp->indx - 1) / sp->entries;
+ }
+ factor *= 1.0/sp->entries;
+ }
+
+ /*
+ * If there was an exact match then assign 1 n'th to the key itself.
+ * Otherwise that factor belongs to those greater than the key, unless
+ * the key was out of range.
+ */
+ if (exact)
+ kp->equal = factor;
+ else {
+ if (kp->less != 1)
+ kp->greater += factor;
+ kp->equal = 0;
+ }
+
+ BT_STK_CLR(cp);
+
+err: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
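+
+/*
+ * __bam_key_range backs the public DB->key_range method.  A minimal usage
+ * sketch, assuming an open btree handle "dbp" and an initialized DBT "key"
+ * (error handling omitted):
+ *
+ *	DB_KEY_RANGE range;
+ *	int ret;
+ *
+ *	if ((ret = dbp->key_range(dbp, NULL, &key, &range, 0)) == 0)
+ *		printf("less %.2f  equal %.2f  greater %.2f\n",
+ *		    range.less, range.equal, range.greater);
+ */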
diff --git a/bdb/btree/bt_upgrade.c b/bdb/btree/bt_upgrade.c
new file mode 100644
index 00000000000..4032dba3b36
--- /dev/null
+++ b/bdb/btree/bt_upgrade.c
@@ -0,0 +1,164 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_upgrade.c,v 11.19 2000/11/30 00:58:29 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "btree.h"
+#include "db_am.h"
+#include "db_upgrade.h"
+
+/*
+ * __bam_30_btreemeta --
+ * Upgrade the metadata pages from version 6 to version 7.
+ *
+ * PUBLIC: int __bam_30_btreemeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__bam_30_btreemeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ BTMETA30 *newmeta;
+ BTMETA2X *oldmeta;
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ newmeta = (BTMETA30 *)buf;
+ oldmeta = (BTMETA2X *)buf;
+
+ /*
+ * Move things from the end up, so we do not overwrite things.
+ * We are going to create a new uid, so we can move the stuff
+ * at the end of the structure first, overwriting the uid.
+ */
+
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->minkey = oldmeta->minkey;
+ newmeta->maxkey = oldmeta->maxkey;
+ newmeta->dbmeta.free = oldmeta->free;
+ newmeta->dbmeta.flags = oldmeta->flags;
+ newmeta->dbmeta.type = P_BTREEMETA;
+
+ newmeta->dbmeta.version = 7;
+ /* Replace the unique ID. */
+ if ((ret = __os_fileid(dbenv, real_name, 1, buf + 36)) != 0)
+ return (ret);
+
+ newmeta->root = 1;
+
+ return (0);
+}
+
+/*
+ * __bam_31_btreemeta --
+ * Upgrade the database from version 7 to version 8.
+ *
+ * PUBLIC: int __bam_31_btreemeta
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__bam_31_btreemeta(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ BTMETA31 *newmeta;
+ BTMETA30 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+ COMPQUIET(fhp, NULL);
+
+ newmeta = (BTMETA31 *)h;
+ oldmeta = (BTMETA30 *)h;
+
+ /*
+	 * Copy the affected fields down the page.
+ * The fields may overlap each other so we
+ * start at the bottom and use memmove.
+ */
+ newmeta->root = oldmeta->root;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->minkey = oldmeta->minkey;
+ newmeta->maxkey = oldmeta->maxkey;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Set the version number. */
+ newmeta->dbmeta.version = 8;
+
+ /* Upgrade the flags. */
+ if (LF_ISSET(DB_DUPSORT))
+ F_SET(&newmeta->dbmeta, BTM_DUPSORT);
+
+ *dirtyp = 1;
+ return (0);
+}
+
+/*
+ * __bam_31_lbtree --
+ * Upgrade the database btree leaf pages.
+ *
+ * PUBLIC: int __bam_31_lbtree
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__bam_31_lbtree(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ BKEYDATA *bk;
+ db_pgno_t pgno;
+ db_indx_t indx;
+ int ret;
+
+ ret = 0;
+ for (indx = O_INDX; indx < NUM_ENT(h); indx += P_INDX) {
+ bk = GET_BKEYDATA(h, indx);
+ if (B_TYPE(bk->type) == B_DUPLICATE) {
+ pgno = GET_BOVERFLOW(h, indx)->pgno;
+ if ((ret = __db_31_offdup(dbp, real_name, fhp,
+ LF_ISSET(DB_DUPSORT) ? 1 : 0, &pgno)) != 0)
+ break;
+ if (pgno != GET_BOVERFLOW(h, indx)->pgno) {
+ *dirtyp = 1;
+ GET_BOVERFLOW(h, indx)->pgno = pgno;
+ }
+ }
+ }
+
+ return (ret);
+}
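+
+/*
+ * These upgrade routines are driven by the public DB->upgrade method; the
+ * DB_DUPSORT flag tested above is the flag an application passes when it
+ * knows a pre-3.1 database was built with sorted duplicates.  A minimal
+ * usage sketch (the file name is illustrative; error handling omitted):
+ *
+ *	DB *dbp;
+ *
+ *	(void)db_create(&dbp, NULL, 0);
+ *	(void)dbp->upgrade(dbp, "mydb.db", 0);
+ *	(void)dbp->close(dbp, 0);
+ */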
diff --git a/bdb/btree/bt_verify.c b/bdb/btree/bt_verify.c
new file mode 100644
index 00000000000..9f8647e7e2a
--- /dev/null
+++ b/bdb/btree/bt_verify.c
@@ -0,0 +1,2237 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: bt_verify.c,v 1.44 2000/12/06 19:55:44 ubell Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_verify.c,v 1.44 2000/12/06 19:55:44 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_verify.h"
+#include "btree.h"
+
+static int __bam_safe_getdata __P((DB *, PAGE *, u_int32_t, int, DBT *, int *));
+static int __bam_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ db_indx_t *, u_int32_t));
+static int __bam_vrfy_treeorder __P((DB *, db_pgno_t, PAGE *, BINTERNAL *,
+ BINTERNAL *, int (*)(DB *, const DBT *, const DBT *), u_int32_t));
+static int __ram_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ db_indx_t *, u_int32_t));
+
+#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)
+
+/*
+ * __bam_vrfy_meta --
+ * Verify the btree-specific part of a metadata page.
+ *
+ * PUBLIC: int __bam_vrfy_meta __P((DB *, VRFY_DBINFO *, BTMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ BTMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, t_ret, ret;
+ db_indx_t ovflsize;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ isbad = 0;
+
+ /*
+ * If VRFY_INCOMPLETE is not set, then we didn't come through
+ * __db_vrfy_pagezero and didn't incompletely
+ * check this page--we haven't checked it at all.
+ * Thus we need to call __db_vrfy_meta and check the common fields.
+ *
+ * If VRFY_INCOMPLETE is set, we've already done all the same work
+ * in __db_vrfy_pagezero, so skip the check.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE) &&
+ (ret = __db_vrfy_meta(dbp, vdp, &meta->dbmeta, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /* bt_minkey: must be >= 2; must produce sensible ovflsize */
+
+ /* avoid division by zero */
+ ovflsize = meta->minkey > 0 ?
+ B_MINKEY_TO_OVFLSIZE(meta->minkey, dbp->pgsize) : 0;
+
+ if (meta->minkey < 2 ||
+ ovflsize > B_MINKEY_TO_OVFLSIZE(DEFMINKEYPAGE, dbp->pgsize)) {
+ pip->bt_minkey = 0;
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Nonsensical bt_minkey value %lu on metadata page %lu",
+ (u_long)meta->minkey, (u_long)pgno));
+ } else
+ pip->bt_minkey = meta->minkey;
+
+ /* bt_maxkey: no constraints (XXX: right?) */
+ pip->bt_maxkey = meta->maxkey;
+
+ /* re_len: no constraints on this (may be zero or huge--we make rope) */
+ pip->re_len = meta->re_len;
+
+ /*
+ * The root must not be current page or 0 and it must be within
+ * database. If this metadata page is the master meta data page
+ * of the file, then the root page had better be page 1.
+ */
+ pip->root = 0;
+ if (meta->root == PGNO_INVALID
+ || meta->root == pgno || !IS_VALID_PGNO(meta->root) ||
+ (pgno == PGNO_BASE_MD && meta->root != 1)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Nonsensical root page %lu on metadata page %lu",
+		    (u_long)meta->root, (u_long)pgno));
+ } else
+ pip->root = meta->root;
+
+ /* Flags. */
+ if (F_ISSET(&meta->dbmeta, BTM_RENUMBER))
+ F_SET(pip, VRFY_IS_RRECNO);
+
+ if (F_ISSET(&meta->dbmeta, BTM_SUBDB)) {
+ /*
+ * If this is a master db meta page, it had better not have
+ * duplicates.
+ */
+ if (F_ISSET(&meta->dbmeta, BTM_DUP) && pgno == PGNO_BASE_MD) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Btree metadata page %lu has both duplicates and multiple databases",
+ (u_long)pgno));
+ }
+ F_SET(pip, VRFY_HAS_SUBDBS);
+ }
+
+ if (F_ISSET(&meta->dbmeta, BTM_DUP))
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (F_ISSET(&meta->dbmeta, BTM_DUPSORT))
+ F_SET(pip, VRFY_HAS_DUPSORT);
+ if (F_ISSET(&meta->dbmeta, BTM_RECNUM))
+ F_SET(pip, VRFY_HAS_RECNUMS);
+ if (F_ISSET(pip, VRFY_HAS_RECNUMS) && F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Btree metadata page %lu illegally has both recnums and dups",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ if (F_ISSET(&meta->dbmeta, BTM_RECNO)) {
+ F_SET(pip, VRFY_IS_RECNO);
+ dbp->type = DB_RECNO;
+ } else if (F_ISSET(pip, VRFY_IS_RRECNO)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Metadata page %lu has renumber flag set but is not recno",
+ (u_long)pgno));
+ }
+
+ if (F_ISSET(pip, VRFY_IS_RECNO) && F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Recno metadata page %lu specifies duplicates",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ if (F_ISSET(&meta->dbmeta, BTM_FIXEDLEN))
+ F_SET(pip, VRFY_IS_FIXEDLEN);
+ else if (pip->re_len > 0) {
+ /*
+ * It's wrong to have an re_len if it's not a fixed-length
+ * database
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "re_len of %lu in non-fixed-length database",
+ (u_long)pip->re_len));
+ }
+
+ /*
+ * We do not check that the rest of the page is 0, because it may
+ * not be and may still be correct.
+ */
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ram_vrfy_leaf --
+ * Verify a recno leaf page.
+ *
+ * PUBLIC: int __ram_vrfy_leaf __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ram_vrfy_leaf(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ VRFY_PAGEINFO *pip;
+ db_indx_t i;
+ int ret, t_ret, isbad;
+ u_int32_t re_len_guess, len;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if ((ret = __db_fchk(dbp->dbenv,
+ "__ram_vrfy_leaf", flags, OKFLAGS)) != 0)
+ goto err;
+
+ if (TYPE(h) != P_LRECNO) {
+ /* We should not have been called. */
+ TYPE_ERR_PRINT(dbp->dbenv, "__ram_vrfy_leaf", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Verify (and, if relevant, save off) page fields common to
+ * all PAGEs.
+ */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * Verify inp[]. Return immediately if it returns DB_VERIFY_BAD;
+ * further checks are dangerous.
+ */
+ if ((ret = __bam_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0)
+ goto err;
+
+ if (F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Recno database has dups on page %lu", (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Walk through inp and see if the lengths of all the records are the
+ * same--if so, this may be a fixed-length database, and we want to
+ * save off this value. We know inp to be safe if we've gotten this
+ * far.
+ */
+ re_len_guess = 0;
+ for (i = 0; i < NUM_ENT(h); i++) {
+ bk = GET_BKEYDATA(h, i);
+ /* KEYEMPTY. Go on. */
+ if (B_DISSET(bk->type))
+ continue;
+ if (bk->type == B_OVERFLOW)
+ len = ((BOVERFLOW *)bk)->tlen;
+ else if (bk->type == B_KEYDATA)
+ len = bk->len;
+ else {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Nonsensical type for item %lu, page %lu",
+ (u_long)i, (u_long)pgno));
+ continue;
+ }
+ if (re_len_guess == 0)
+ re_len_guess = len;
+
+ /*
+ * Is this item's len the same as the last one's? If not,
+ * reset to 0 and break--we don't have a single re_len.
+ * Otherwise, go on to the next item.
+ */
+ if (re_len_guess != len) {
+ re_len_guess = 0;
+ break;
+ }
+ }
+ pip->re_len = re_len_guess;
+
+ /* Save off record count. */
+ pip->rec_cnt = NUM_ENT(h);
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy --
+ * Verify a btree leaf or internal page.
+ *
+ * PUBLIC: int __bam_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_vrfy(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret, isbad;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ break;
+ default:
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Verify (and, if relevant, save off) page fields common to
+ * all PAGEs.
+ */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * The record count is, on internal pages, stored in an overloaded
+ * next_pgno field. Save it off; we'll verify it when we check
+ * overall database structure. We could overload the field
+ * in VRFY_PAGEINFO, too, but this seems gross, and space
+ * is not at such a premium.
+ */
+ pip->rec_cnt = RE_NREC(h);
+
+ /*
+ * Verify inp[].
+ */
+ if (TYPE(h) == P_IRECNO) {
+ if ((ret = __ram_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0)
+ goto err;
+ } else if ((ret = __bam_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ EPRINT((dbp->dbenv,
+ "item order check on page %lu unsafe: skipping",
+ (u_long)pgno));
+ } else if (!LF_ISSET(DB_NOORDERCHK) && (ret =
+ __bam_vrfy_itemorder(dbp, vdp, h, pgno, 0, 0, 0, flags)) != 0) {
+ /*
+ * We know that the elements of inp are reasonable.
+ *
+ * Check that elements fall in the proper order.
+ */
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ram_vrfy_inp --
+ * Verify that all entries in a P_IRECNO inp[] array are reasonable,
+ * and count them. Note that P_LRECNO uses __bam_vrfy_inp;
+ * P_IRECNOs are a special, and simpler, case, since they have
+ * RINTERNALs rather than BKEYDATA/BINTERNALs.
+ */
+static int
+__ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ db_indx_t *nentriesp;
+ u_int32_t flags;
+{
+ RINTERNAL *ri;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret, isbad;
+ u_int32_t himark, i, offset, nentries;
+ u_int8_t *pagelayout, *p;
+
+ isbad = 0;
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ nentries = 0;
+ pagelayout = NULL;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if (TYPE(h) != P_IRECNO) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__ram_vrfy_inp", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ himark = dbp->pgsize;
+ if ((ret =
+ __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &pagelayout)) != 0)
+ goto err;
+ memset(pagelayout, 0, dbp->pgsize);
+ for (i = 0; i < NUM_ENT(h); i++) {
+		if ((u_int8_t *)(h->inp + i) >= (u_int8_t *)h + himark) {
+ EPRINT((dbp->dbenv,
+ "Page %lu entries listing %lu overlaps data",
+ (u_long)pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ offset = h->inp[i];
+ /*
+ * Check that the item offset is reasonable: it points
+ * somewhere after the inp array and before the end of the
+ * page.
+ */
+ if (offset <= (u_int32_t)((u_int8_t *)h->inp + i -
+ (u_int8_t *)h) ||
+ offset > (u_int32_t)(dbp->pgsize - RINTERNAL_SIZE)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Bad offset %lu at page %lu index %lu",
+ (u_long)offset, (u_long)pgno, (u_long)i));
+ continue;
+ }
+
+ /* Update the high-water mark (what HOFFSET should be) */
+ if (offset < himark)
+ himark = offset;
+
+ nentries++;
+
+ /* Make sure this RINTERNAL is not multiply referenced. */
+ ri = GET_RINTERNAL(h, i);
+ if (pagelayout[offset] == 0) {
+ pagelayout[offset] = 1;
+ child.pgno = ri->pgno;
+ child.type = V_RECNO;
+ child.nrecs = ri->nrecs;
+ if ((ret = __db_vrfy_childput(vdp, pgno, &child)) != 0)
+ goto err;
+ } else {
+ EPRINT((dbp->dbenv,
+ "RINTERNAL structure at offset %lu, page %lu referenced twice",
+ (u_long)offset, (u_long)pgno));
+ isbad = 1;
+ }
+ }
+
+ for (p = pagelayout + himark;
+ p < pagelayout + dbp->pgsize;
+ p += RINTERNAL_SIZE)
+ if (*p != 1) {
+ EPRINT((dbp->dbenv,
+ "Gap between items at offset %lu, page %lu",
+ (u_long)(p - pagelayout), (u_long)pgno));
+ isbad = 1;
+ }
+
+ if ((db_indx_t)himark != HOFFSET(h)) {
+ EPRINT((dbp->dbenv, "Bad HOFFSET %lu, appears to be %lu",
+ (u_long)(HOFFSET(h)), (u_long)himark));
+ isbad = 1;
+ }
+
+ *nentriesp = nentries;
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pagelayout != NULL)
+ __os_free(pagelayout, dbp->pgsize);
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_inp --
+ * Verify that all entries in inp[] array are reasonable;
+ * count them.
+ */
+static int
+__bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ db_indx_t *nentriesp;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ int isbad, initem, isdupitem, ret, t_ret;
+ u_int32_t himark, offset; /* These would be db_indx_ts but for algnmt.*/
+ u_int32_t i, endoff, nentries;
+ u_int8_t *pagelayout;
+
+ isbad = isdupitem = 0;
+ nentries = 0;
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ break;
+ default:
+ /*
+ * In the salvager, we might call this from a page which
+ * we merely suspect is a btree page. Otherwise, it
+ * shouldn't get called--if it is, that's a verifier bug.
+ */
+ if (LF_ISSET(DB_SALVAGE))
+ break;
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy_inp", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Loop through inp[], the array of items, until we either
+ * run out of entries or collide with the data. Keep track
+ * of h_offset in himark.
+ *
+ * For each element in inp[i], make sure it references a region
+ * that starts after the end of the inp array (as defined by
+ * NUM_ENT(h)), ends before the beginning of the page, doesn't
+ * overlap any other regions, and doesn't have a gap between
+ * it and the region immediately after it.
+ */
+ himark = dbp->pgsize;
+ if ((ret = __os_malloc(dbp->dbenv,
+ dbp->pgsize, NULL, &pagelayout)) != 0)
+ goto err;
+ memset(pagelayout, 0, dbp->pgsize);
+ for (i = 0; i < NUM_ENT(h); i++) {
+
+ ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 1, flags, &himark, &offset);
+ if (ret == DB_VERIFY_BAD) {
+ isbad = 1;
+ continue;
+ } else if (ret == DB_VERIFY_FATAL) {
+ isbad = 1;
+ goto err;
+ } else if (ret != 0)
+ DB_ASSERT(0);
+
+ /*
+ * We now have a plausible beginning for the item, and we know
+ * its length is safe.
+ *
+ * Mark the beginning and end in pagelayout so we can make sure
+ * items have no overlaps or gaps.
+ */
+ bk = GET_BKEYDATA(h, i);
+#define ITEM_BEGIN 1
+#define ITEM_END 2
+ if (pagelayout[offset] == 0)
+ pagelayout[offset] = ITEM_BEGIN;
+ else if (pagelayout[offset] == ITEM_BEGIN) {
+ /*
+ * Having two inp entries that point at the same patch
+ * of page is legal if and only if the page is
+ * a btree leaf and they're onpage duplicate keys--
+ * that is, if (i % P_INDX) == 0.
+ */
+ if ((i % P_INDX == 0) && (TYPE(h) == P_LBTREE)) {
+ /* Flag for later. */
+ F_SET(pip, VRFY_HAS_DUPS);
+
+ /* Bump up nentries so we don't undercount. */
+ nentries++;
+
+ /*
+ * We'll check to make sure the end is
+ * equal, too.
+ */
+ isdupitem = 1;
+ } else {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Duplicated item %lu on page %lu",
+ (u_long)i, (u_long)pgno));
+ }
+ }
+
+ /*
+ * Mark the end. Its location varies with the page type
+ * and the item type.
+ *
+ * If the end already has a sign other than 0, do nothing--
+ * it's an overlap that we'll catch later.
+ */
+		switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ if (TYPE(h) == P_IBTREE)
+ /* It's a BINTERNAL. */
+ endoff = offset + BINTERNAL_SIZE(bk->len) - 1;
+ else
+ endoff = offset + BKEYDATA_SIZE(bk->len) - 1;
+ break;
+ case B_DUPLICATE:
+ /*
+ * Flag that we have dups; we'll check whether
+ * that's okay during the structure check.
+ */
+ F_SET(pip, VRFY_HAS_DUPS);
+ /* FALLTHROUGH */
+ case B_OVERFLOW:
+ /*
+ * Overflow entries on internal pages are stored
+ * as the _data_ of a BINTERNAL; overflow entries
+ * on leaf pages are stored as the entire entry.
+ */
+ endoff = offset +
+ ((TYPE(h) == P_IBTREE) ?
+ BINTERNAL_SIZE(BOVERFLOW_SIZE) :
+ BOVERFLOW_SIZE) - 1;
+ break;
+ default:
+ /*
+ * We'll complain later; for now, just mark
+ * a minimum.
+ */
+ endoff = offset + BKEYDATA_SIZE(0) - 1;
+ break;
+ }
+
+ /*
+ * If this is an onpage duplicate key we've seen before,
+ * the end had better coincide too.
+ */
+ if (isdupitem && pagelayout[endoff] != ITEM_END) {
+ EPRINT((dbp->dbenv,
+ "Duplicated item %lu on page %lu",
+ (u_long)i, (u_long)pgno));
+ isbad = 1;
+ } else if (pagelayout[endoff] == 0)
+ pagelayout[endoff] = ITEM_END;
+ isdupitem = 0;
+
+ /*
+ * There should be no deleted items in a quiescent tree,
+ * except in recno.
+ */
+ if (B_DISSET(bk->type) && TYPE(h) != P_LRECNO) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Item %lu on page %lu marked deleted",
+ (u_long)i, (u_long)pgno));
+ }
+
+ /*
+ * Check the type and such of bk--make sure it's reasonable
+ * for the pagetype.
+ */
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ /*
+ * This is a normal, non-overflow BKEYDATA or BINTERNAL.
+ * The only thing to check is the len, and that's
+ * already been done.
+ */
+ break;
+ case B_DUPLICATE:
+ if (TYPE(h) == P_IBTREE) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Duplicate page referenced by internal btree page %lu at item %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ } else if (TYPE(h) == P_LRECNO) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Duplicate page referenced by recno page %lu at item %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ }
+ /* FALLTHROUGH */
+ case B_OVERFLOW:
+ bo = (TYPE(h) == P_IBTREE) ?
+ (BOVERFLOW *)(((BINTERNAL *)bk)->data) :
+ (BOVERFLOW *)bk;
+
+ if (B_TYPE(bk->type) == B_OVERFLOW)
+ /* Make sure tlen is reasonable. */
+ if (bo->tlen > dbp->pgsize * vdp->last_pgno) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Impossible tlen %lu, item %lu, page %lu",
+ (u_long)bo->tlen, (u_long)i,
+ (u_long)pgno));
+ /* Don't save as a child. */
+ break;
+ }
+
+ if (!IS_VALID_PGNO(bo->pgno) || bo->pgno == pgno ||
+ bo->pgno == PGNO_INVALID) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Offpage item %lu, page %lu has bad pgno",
+ (u_long)i, (u_long)pgno));
+ /* Don't save as a child. */
+ break;
+ }
+
+ child.pgno = bo->pgno;
+ child.type = (B_TYPE(bk->type) == B_OVERFLOW ?
+ V_OVERFLOW : V_DUPLICATE);
+ child.tlen = bo->tlen;
+ if ((ret = __db_vrfy_childput(vdp, pgno, &child)) != 0)
+ goto err;
+ break;
+ default:
+ isbad = 1;
+			EPRINT((dbp->dbenv,
+			    "Item %lu on page %lu of invalid type %lu",
+			    (u_long)i, (u_long)pgno,
+			    (u_long)B_TYPE(bk->type)));
+ break;
+ }
+ }
+
+ /*
+ * Now, loop through and make sure the items are contiguous and
+ * non-overlapping.
+ */
+ initem = 0;
+ for (i = himark; i < dbp->pgsize; i++)
+ if (initem == 0)
+ switch (pagelayout[i]) {
+ case 0:
+ /* May be just for alignment. */
+ if (i != ALIGN(i, sizeof(u_int32_t)))
+ continue;
+
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Gap between items, page %lu offset %lu",
+ (u_long)pgno, (u_long)i));
+ /* Find the end of the gap */
+				for ( ; (size_t)(i + 1) < dbp->pgsize &&
+				    pagelayout[i + 1] == 0; i++)
+ ;
+ break;
+ case ITEM_BEGIN:
+ /* We've found an item. Check its alignment. */
+ if (i != ALIGN(i, sizeof(u_int32_t))) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Offset %lu page %lu unaligned",
+ (u_long)i, (u_long)pgno));
+ }
+ initem = 1;
+ nentries++;
+ break;
+ case ITEM_END:
+ /*
+ * We've hit the end of an item even though
+ * we don't think we're in one; must
+ * be an overlap.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Overlapping items, page %lu offset %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ default:
+ /* Should be impossible. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+ else
+ switch (pagelayout[i]) {
+ case 0:
+ /* In the middle of an item somewhere. Okay. */
+ break;
+ case ITEM_END:
+ /* End of an item; switch to out-of-item mode.*/
+ initem = 0;
+ break;
+ case ITEM_BEGIN:
+ /*
+ * Hit a second item beginning without an
+ * end. Overlap.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Overlapping items, page %lu offset %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ }
+
+ (void)__os_free(pagelayout, dbp->pgsize);
+
+ /* Verify HOFFSET. */
+ if ((db_indx_t)himark != HOFFSET(h)) {
+ EPRINT((dbp->dbenv, "Bad HOFFSET %lu, appears to be %lu",
+ (u_long)HOFFSET(h), (u_long)himark));
+ isbad = 1;
+ }
+
+err: if (nentriesp != NULL)
+ *nentriesp = nentries;
+
+ if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
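+/*
+ * The inp checks above detect overlapping or duplicated items with a simple
+ * byte map of the page: mark where each item begins and ends, then sweep the
+ * map with a small in-item/out-of-item state machine, looking for gaps and
+ * collisions.  A stripped-down sketch of the marking step (generic code, not
+ * the routine above; nitems, beg[] and end[] stand in for the real offsets):
+ *
+ *	memset(map, 0, pgsize);
+ *	for (i = 0; i < nitems; i++) {
+ *		if (map[beg[i]] != 0 || map[end[i]] != 0)
+ *			return (OVERLAP);
+ *		map[beg[i]] = ITEM_BEGIN;
+ *		map[end[i]] = ITEM_END;
+ *	}
+ */
+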
+/*
+ * __bam_vrfy_itemorder --
+ * Make sure the items on a page sort correctly.
+ *
+ * Assumes that NUM_ENT(h) and inp[0]..inp[NUM_ENT(h) - 1] are
+ * reasonable; be sure that __bam_vrfy_inp has been called first.
+ *
+ * If ovflok is set, it also assumes that overflow page chains
+ * hanging off the current page have been sanity-checked, and so we
+ * can use __bam_cmp to verify their ordering. If it is not set,
+ * and we run into an overflow page, carp and return DB_VERIFY_BAD;
+ * we shouldn't be called if any exist.
+ *
+ * PUBLIC: int __bam_vrfy_itemorder __P((DB *, VRFY_DBINFO *, PAGE *,
+ * PUBLIC: db_pgno_t, u_int32_t, int, int, u_int32_t));
+ */
+int
+__bam_vrfy_itemorder(dbp, vdp, h, pgno, nentries, ovflok, hasdups, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t nentries;
+ int ovflok, hasdups;
+ u_int32_t flags;
+{
+ DBT dbta, dbtb, dup1, dup2, *p1, *p2, *tmp;
+ BTREE *bt;
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ VRFY_PAGEINFO *pip;
+ db_indx_t i;
+ int cmp, freedup1, freedup2, isbad, ret, t_ret;
+ int (*dupfunc) __P((DB *, const DBT *, const DBT *));
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ void *buf1, *buf2, *tmpbuf;
+
+ /*
+ * We need to work in the ORDERCHKONLY environment where we might
+ * not have a pip, but we also may need to work in contexts where
+ * NUM_ENT isn't safe.
+ */
+ if (vdp != NULL) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ nentries = pip->entries;
+ } else
+ pip = NULL;
+
+ ret = isbad = 0;
+ bo = NULL; /* Shut up compiler. */
+
+ memset(&dbta, 0, sizeof(DBT));
+ F_SET(&dbta, DB_DBT_REALLOC);
+
+ memset(&dbtb, 0, sizeof(DBT));
+ F_SET(&dbtb, DB_DBT_REALLOC);
+
+ buf1 = buf2 = NULL;
+
+ DB_ASSERT(!LF_ISSET(DB_NOORDERCHK));
+
+ dupfunc = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+ if (TYPE(h) == P_LDUP)
+ func = dupfunc;
+ else {
+ func = __bam_defcmp;
+ if (dbp->bt_internal != NULL) {
+ bt = (BTREE *)dbp->bt_internal;
+ if (bt->bt_compare != NULL)
+ func = bt->bt_compare;
+ }
+ }
+
+ /*
+ * We alternate our use of dbta and dbtb so that we can walk
+ * through the page key-by-key without copying a dbt twice.
+ * p1 is always the dbt for index i - 1, and p2 for index i.
+ */
+ p1 = &dbta;
+ p2 = &dbtb;
+
+ /*
+ * Loop through the entries. nentries ought to contain the
+ * actual count, and so is a safe way to terminate the loop; whether
+ * we inc. by one or two depends on whether we're a leaf page--
+ * on a leaf page, we care only about keys. On internal pages
+ * and LDUP pages, we want to check the order of all entries.
+ *
+ * Note that on IBTREE pages, we start with item 1, since item
+ * 0 doesn't get looked at by __bam_cmp.
+ */
+ for (i = (TYPE(h) == P_IBTREE) ? 1 : 0; i < nentries;
+ i += (TYPE(h) == P_LBTREE) ? P_INDX : O_INDX) {
+ /*
+ * Put key i-1, now in p2, into p1, by swapping DBTs and bufs.
+ */
+ tmp = p1;
+ p1 = p2;
+ p2 = tmp;
+ tmpbuf = buf1;
+ buf1 = buf2;
+ buf2 = tmpbuf;
+
+ /*
+ * Get key i into p2.
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ bi = GET_BINTERNAL(h, i);
+ if (B_TYPE(bi->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)(bi->data);
+ goto overflow;
+ } else {
+ p2->data = bi->data;
+ p2->size = bi->len;
+ }
+
+ /*
+ * The leftmost key on an internal page must be
+ * len 0, since it's just a placeholder and
+ * automatically sorts less than all keys.
+ *
+ * XXX
+ * This criterion does not currently hold!
+ * See todo list item #1686. Meanwhile, it's harmless
+ * to just not check for it.
+ */
+#if 0
+ if (i == 0 && bi->len != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Lowest key on internal page %lu of nonzero length",
+ (u_long)pgno));
+ }
+#endif
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ bk = GET_BKEYDATA(h, i);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ goto overflow;
+ } else {
+ p2->data = bk->data;
+ p2->size = bk->len;
+ }
+ break;
+ default:
+ /*
+ * This means our caller screwed up and sent us
+ * an inappropriate page.
+ */
+ TYPE_ERR_PRINT(dbp->dbenv,
+			    "__bam_vrfy_itemorder", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (0) {
+ /*
+ * If ovflok != 1, we can't safely go chasing
+ * overflow pages with the normal routines now;
+ * they might be unsafe or nonexistent. Mark this
+ * page as incomplete and return.
+ *
+ * Note that we don't need to worry about freeing
+ * buffers, since they can't have been allocated
+ * if overflow items are unsafe.
+ */
+overflow: if (!ovflok) {
+ F_SET(pip, VRFY_INCOMPLETE);
+ goto err;
+ }
+
+ /*
+ * Overflow items are safe to chase. Do so.
+ * Fetch the overflow item into p2->data,
+ * NULLing it or reallocing it as appropriate.
+ *
+ * (We set p2->data to buf2 before the call
+ * so we're sure to realloc if we can and if p2
+ * was just pointing at a non-overflow item.)
+ */
+ p2->data = buf2;
+ if ((ret = __db_goff(dbp,
+ p2, bo->tlen, bo->pgno, NULL, NULL)) != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Error %lu in fetching overflow item %lu, page %lu",
+ (u_long)ret, (u_long)i, (u_long)pgno));
+ }
+ /* In case it got realloc'ed and thus changed. */
+ buf2 = p2->data;
+ }
+
+ /* Compare with the last key. */
+ if (p1->data != NULL && p2->data != NULL) {
+ cmp = func(dbp, p1, p2);
+
+ /* comparison succeeded */
+ if (cmp > 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Out-of-order key, page %lu item %lu",
+ (u_long)pgno, (u_long)i));
+ /* proceed */
+ } else if (cmp == 0) {
+ /*
+ * If they compared equally, this
+ * had better be a (sub)database with dups.
+ * Mark it so we can check during the
+ * structure check.
+ */
+ if (pip != NULL)
+ F_SET(pip, VRFY_HAS_DUPS);
+ else if (hasdups == 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Database with no duplicates has duplicated keys on page %lu",
+ (u_long)pgno));
+ }
+
+ /*
+ * If we're a btree leaf, check to see
+ * if the data items of these on-page dups are
+ * in sorted order. If not, flag this, so
+ * that we can make sure during the
+ * structure checks that the DUPSORT flag
+ * is unset.
+ *
+ * At this point i points to a duplicate key.
+ * Compare the datum before it (same key)
+ * to the datum after it, i.e. i-1 to i+1.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ /*
+ * Unsafe; continue and we'll pick
+ * up the bogus nentries later.
+ */
+ if (i + 1 >= (db_indx_t)nentries)
+ continue;
+
+ /*
+ * We don't bother with clever memory
+ * management with on-page dups,
+ * as it's only really a big win
+ * in the overflow case, and overflow
+ * dups are probably (?) rare.
+ */
+ if (((ret = __bam_safe_getdata(dbp,
+ h, i - 1, ovflok, &dup1,
+ &freedup1)) != 0) ||
+ ((ret = __bam_safe_getdata(dbp,
+ h, i + 1, ovflok, &dup2,
+ &freedup2)) != 0))
+ goto err;
+
+ /*
+ * If either of the data are NULL,
+ * it's because they're overflows and
+ * it's not safe to chase them now.
+ * Mark an incomplete and return.
+ */
+ if (dup1.data == NULL ||
+ dup2.data == NULL) {
+ DB_ASSERT(!ovflok);
+ F_SET(pip, VRFY_INCOMPLETE);
+ goto err;
+ }
+
+ /*
+ * If the dups are out of order,
+ * flag this. It's not an error
+ * until we do the structure check
+ * and see whether DUPSORT is set.
+ */
+ if (dupfunc(dbp, &dup1, &dup2) > 0)
+ F_SET(pip, VRFY_DUPS_UNSORTED);
+
+ if (freedup1)
+ __os_free(dup1.data, 0);
+ if (freedup2)
+ __os_free(dup2.data, 0);
+ }
+ }
+ }
+ }
+
+err: if (pip != NULL &&
+ ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0) && ret == 0)
+ ret = t_ret;
+
+ if (buf1 != NULL)
+ __os_free(buf1, 0);
+ if (buf2 != NULL)
+ __os_free(buf2, 0);
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
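+/*
+ * The p1/p2 alternation in __bam_vrfy_itemorder above is the usual
+ * "ping-pong" pattern for comparing consecutive items without copying each
+ * one twice: after each comparison the current DBT becomes the previous one
+ * simply by swapping the two pointers (and their backing buffers).  In
+ * outline, where fill() and cmp() stand in for the real fetch and compare:
+ *
+ *	prev = &dbta;
+ *	cur = &dbtb;
+ *	fill(cur, 0);
+ *	for (i = 1; i < n; i++) {
+ *		tmp = prev;
+ *		prev = cur;
+ *		cur = tmp;
+ *		fill(cur, i);
+ *		if (cmp(prev, cur) > 0)
+ *			complain(i);
+ *	}
+ */
+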
+/*
+ * __bam_vrfy_structure --
+ * Verify the tree structure of a btree database (including the master
+ * database containing subdbs).
+ *
+ * PUBLIC: int __bam_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_vrfy_structure(dbp, vdp, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ VRFY_PAGEINFO *mip, *rip;
+ db_pgno_t root, p;
+ int t_ret, ret;
+ u_int32_t nrecs, level, relen, stflags;
+
+ mip = rip = 0;
+ pgset = vdp->pgset;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &mip)) != 0)
+ return (ret);
+
+ if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, (int *)&p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Btree metadata page number %lu observed twice",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
+ goto err;
+
+ root = mip->root;
+
+ if (root == 0) {
+ EPRINT((dbp->dbenv,
+ "Btree metadata page %lu has no root", (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, root, &rip)) != 0)
+ goto err;
+
+ switch (rip->type) {
+ case P_IBTREE:
+ case P_LBTREE:
+ stflags = flags | ST_TOPLEVEL;
+ if (F_ISSET(mip, VRFY_HAS_DUPS))
+ stflags |= ST_DUPOK;
+ if (F_ISSET(mip, VRFY_HAS_DUPSORT))
+ stflags |= ST_DUPSORT;
+ if (F_ISSET(mip, VRFY_HAS_RECNUMS))
+ stflags |= ST_RECNUM;
+ ret = __bam_vrfy_subtree(dbp,
+ vdp, root, NULL, NULL, stflags, NULL, NULL, NULL);
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ stflags = flags | ST_RECNUM | ST_IS_RECNO | ST_TOPLEVEL;
+ if (mip->re_len > 0)
+ stflags |= ST_RELEN;
+ if ((ret = __bam_vrfy_subtree(dbp, vdp,
+ root, NULL, NULL, stflags, &level, &nrecs, &relen)) != 0)
+ goto err;
+ /*
+ * Even if mip->re_len > 0, re_len may come back zero if the
+		 * tree is empty.  It is safe to skip the check in that case:
+		 * relen only comes back zero when the tree has no non-deleted
+		 * keys at all, so there is nothing for re_len to disagree with.
+ */
+ if (mip->re_len > 0 && relen > 0 && mip->re_len != relen) {
+ EPRINT((dbp->dbenv,
+ "Recno database with meta page %lu has bad re_len %lu",
+ (u_long)meta_pgno, (u_long)relen));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ ret = 0;
+ break;
+ case P_LDUP:
+ EPRINT((dbp->dbenv,
+ "Duplicate tree referenced from metadata page %lu",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+ "Btree root of incorrect type %lu on meta page %lu",
+ (u_long)rip->type, (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err:	if (mip != NULL &&
+	    ((t_ret = __db_vrfy_putpageinfo(vdp, mip)) != 0) && ret == 0)
+		ret = t_ret;
+	if (rip != NULL &&
+	    ((t_ret = __db_vrfy_putpageinfo(vdp, rip)) != 0) && ret == 0)
+		ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __bam_vrfy_subtree--
+ * Verify a subtree (or entire) btree with specified root.
+ *
+ * Note that this is public because it must be called to verify
+ * offpage dup trees, including from hash.
+ *
+ * PUBLIC: int __bam_vrfy_subtree __P((DB *, VRFY_DBINFO *, db_pgno_t, void *,
+ * PUBLIC: void *, u_int32_t, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__bam_vrfy_subtree(dbp,
+ vdp, pgno, l, r, flags, levelp, nrecsp, relenp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ void *l, *r;
+ u_int32_t flags, *levelp, *nrecsp, *relenp;
+{
+ BINTERNAL *li, *ri, *lp, *rp;
+ DB *pgset;
+ DBC *cc;
+ PAGE *h;
+ VRFY_CHILDINFO *child;
+ VRFY_PAGEINFO *pip;
+ db_recno_t nrecs, child_nrecs;
+ db_indx_t i;
+ int ret, t_ret, isbad, toplevel, p;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ u_int32_t level, child_level, stflags, child_relen, relen;
+
+ ret = isbad = 0;
+ nrecs = 0;
+ h = NULL;
+ relen = 0;
+ rp = (BINTERNAL *)r;
+ lp = (BINTERNAL *)l;
+
+ /* Provide feedback on our progress to the application. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ cc = NULL;
+ level = pip->bt_level;
+
+ toplevel = LF_ISSET(ST_TOPLEVEL);
+ LF_CLR(ST_TOPLEVEL);
+
+ /*
+ * We are recursively descending a btree, starting from the root
+ * and working our way out to the leaves.
+ *
+ * There are four cases we need to deal with:
+ * 1. pgno is a recno leaf page. Any children are overflows.
+ * 2. pgno is a duplicate leaf page. Any children
+ * are overflow pages; traverse them, and then return
+ * level and nrecs.
+ * 3. pgno is an ordinary leaf page. Check whether dups are
+ * allowed, and if so, traverse any off-page dups or
+ * overflows. Then return nrecs and level.
+ * 4. pgno is a recno internal page. Recursively check any
+ * child pages, making sure their levels are one lower
+ * and their nrecs sum to ours.
+ * 5. pgno is a btree internal page. Same as #4, plus we
+ * must verify that for each pair of BINTERNAL entries
+ * N and N+1, the leftmost item on N's child sorts
+ * greater than N, and the rightmost item on N's child
+ * sorts less than N+1.
+ *
+ * Furthermore, in any sorted page type (P_LDUP, P_LBTREE, P_IBTREE),
+ * we need to verify the internal sort order is correct if,
+ * due to overflow items, we were not able to do so earlier.
+ */
+ switch (pip->type) {
+ case P_LRECNO:
+ case P_LDUP:
+ case P_LBTREE:
+ /*
+ * Cases 1, 2 and 3 (overflow pages are common to all three);
+ * traverse child list, looking for overflows.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_OVERFLOW &&
+ (ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen,
+ flags | ST_OVFL_LEAF)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto done;
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* Case 1 */
+ if (pip->type == P_LRECNO) {
+ if (!LF_ISSET(ST_IS_RECNO) &&
+ !(LF_ISSET(ST_DUPOK) && !LF_ISSET(ST_DUPSORT))) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Recno leaf page %lu in non-recno tree",
+ (u_long)pgno));
+ goto done;
+ }
+ goto leaf;
+ } else if (LF_ISSET(ST_IS_RECNO)) {
+ /*
+ * It's a non-recno leaf. Had better not be a recno
+ * subtree.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Non-recno leaf page %lu in recno tree",
+ (u_long)pgno));
+ goto done;
+ }
+
+ /* Case 2--no more work. */
+ if (pip->type == P_LDUP)
+ goto leaf;
+
+ /* Case 3 */
+
+ /* Check if we have any dups. */
+ if (F_ISSET(pip, VRFY_HAS_DUPS)) {
+ /* If dups aren't allowed in this btree, trouble. */
+ if (!LF_ISSET(ST_DUPOK)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Duplicates on page %lu in non-dup btree",
+ (u_long)pgno));
+ } else {
+ /*
+ * We correctly have dups. If any are off-page,
+ * traverse those btrees recursively.
+ */
+ if ((ret =
+ __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child);
+ ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child)) {
+ stflags = flags | ST_RECNUM | ST_DUPSET;
+ /* Skip any overflow entries. */
+ if (child->type == V_DUPLICATE) {
+ if ((ret = __db_vrfy_duptype(
+ dbp, vdp, child->pgno,
+ stflags)) != 0) {
+ isbad = 1;
+ /* Next child. */
+ continue;
+ }
+ if ((ret = __bam_vrfy_subtree(
+ dbp, vdp, child->pgno, NULL,
+ NULL, stflags, NULL, NULL,
+ NULL)) != 0) {
+ if (ret !=
+ DB_VERIFY_BAD)
+ goto err;
+ else
+ isbad = 1;
+ }
+ }
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /*
+ * If VRFY_DUPS_UNSORTED is set,
+ * ST_DUPSORT had better not be.
+ */
+ if (F_ISSET(pip, VRFY_DUPS_UNSORTED) &&
+ LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbp->dbenv,
+ "Unsorted duplicate set at page %lu in sorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ }
+ }
+ goto leaf;
+ break;
+ case P_IBTREE:
+ case P_IRECNO:
+ /* We handle these below. */
+ break;
+ default:
+ /*
+ * If a P_IBTREE or P_IRECNO contains a reference to an
+ * invalid page, we'll wind up here; handle it gracefully.
+ * Note that the code at the "done" label assumes that the
+ * current page is a btree/recno one of some sort; this
+ * is not the case here, so we goto err.
+ */
+ EPRINT((dbp->dbenv,
+ "Page %lu is of inappropriate type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Cases 4 & 5: This is a btree or recno internal page. For each child,
+ * recurse, keeping a running count of nrecs and making sure the level
+ * is always reasonable.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_RECNO) {
+ if (pip->type != P_IRECNO) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy_subtree",
+ pgno, pip->type);
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+ if ((ret = __bam_vrfy_subtree(dbp, vdp, child->pgno,
+ NULL, NULL, flags, &child_level, &child_nrecs,
+ &child_relen)) != 0) {
+ if (ret != DB_VERIFY_BAD)
+ goto done;
+ else
+ isbad = 1;
+ }
+
+ if (LF_ISSET(ST_RELEN)) {
+ if (relen == 0)
+ relen = child_relen;
+ /*
+ * child_relen may be zero if the child subtree
+ * is empty.
+ */
+ else if (child_relen > 0 &&
+ relen != child_relen) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Recno page %lu returned bad re_len",
+ (u_long)child->pgno));
+ }
+ if (relenp)
+ *relenp = relen;
+ }
+ if (LF_ISSET(ST_RECNUM))
+ nrecs += child_nrecs;
+ if (level != child_level + 1) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "%s%lu%s%lu%s%lu",
+ "Recno level incorrect on page ",
+ (u_long)child->pgno, ": got ",
+ (u_long)child_level, ", expected ",
+ (u_long)(level - 1)));
+ }
+ } else if (child->type == V_OVERFLOW &&
+ (ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto done;
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* We're done with case 4. */
+ if (pip->type == P_IRECNO)
+ goto done;
+
+ /*
+ * Case 5. Btree internal pages.
+ * As described above, we need to iterate through all the
+ * items on the page and make sure that our children sort appropriately
+ * with respect to them.
+ *
+ * For each entry, li will be the "left-hand" key for the entry
+ * itself, which must sort lower than all entries on its child;
+ * ri will be the key to its right, which must sort greater.
+ */
+ if (h == NULL && (ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ goto err;
+ for (i = 0; i < pip->entries; i += O_INDX) {
+ li = GET_BINTERNAL(h, i);
+ ri = (i + O_INDX < pip->entries) ?
+ GET_BINTERNAL(h, i + O_INDX) : NULL;
+
+ /*
+ * The leftmost key is forcibly sorted less than all entries,
+ * so don't bother passing it.
+ */
+ if ((ret = __bam_vrfy_subtree(dbp, vdp, li->pgno,
+ i == 0 ? NULL : li, ri, flags, &child_level,
+ &child_nrecs, NULL)) != 0) {
+ if (ret != DB_VERIFY_BAD)
+ goto done;
+ else
+ isbad = 1;
+ }
+
+ if (LF_ISSET(ST_RECNUM)) {
+ /*
+ * Keep a running tally on the actual record count so
+ * we can return it to our parent (if we have one) or
+ * compare it to the NRECS field if we're a root page.
+ */
+ nrecs += child_nrecs;
+
+ /*
+ * Make sure the actual record count of the child
+ * is equal to the value in the BINTERNAL structure.
+ */
+ if (li->nrecs != child_nrecs) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Item %lu page %lu has incorrect record count of %lu, should be %lu",
+ (u_long)i, (u_long)pgno, (u_long)li->nrecs,
+ (u_long)child_nrecs));
+ }
+ }
+
+ if (level != child_level + 1) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "%s%lu%s%lu%s%lu",
+ "Btree level incorrect on page ", (u_long)li->pgno,
+ ": got ", (u_long)child_level, ", expected ",
+ (u_long)(level - 1)));
+ }
+ }
+
+ if (0) {
+leaf: level = LEAFLEVEL;
+ if (LF_ISSET(ST_RECNUM))
+ nrecs = pip->rec_cnt;
+
+ /* XXX
+ * We should verify that the record count on a leaf page
+ * is the sum of the number of keys and the number of
+ * records in its off-page dups. This requires looking
+ * at the page again, however, and it may all be changing
+ * soon, so for now we don't bother.
+ */
+
+ if (LF_ISSET(ST_RELEN) && relenp)
+ *relenp = pip->re_len;
+ }
+done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) {
+ /*
+ * During the page-by-page pass, item order verification was
+ * not finished due to the presence of overflow items. If
+ * isbad == 0, though, it's now safe to do so, as we've
+ * traversed any child overflow pages. Do it.
+ */
+ if (h == NULL && (ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ goto err;
+ if ((ret = __bam_vrfy_itemorder(dbp,
+ vdp, h, pgno, 0, 1, 0, flags)) != 0)
+ goto err;
+ F_CLR(pip, VRFY_INCOMPLETE);
+ }
+
+ /*
+ * Our parent has sent us BINTERNAL pointers to parent records
+ * so that we can verify our place with respect to them. If it's
+ * appropriate--we have a default sort function--verify this.
+ */
+ if (isbad == 0 && ret == 0 && !LF_ISSET(DB_NOORDERCHK) && lp != NULL) {
+ if (h == NULL && (ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ /*
+ * __bam_vrfy_treeorder needs to know what comparison function
+ * to use. If ST_DUPSET is set, we're in a duplicate tree
+ * and we use the duplicate comparison function; otherwise,
+ * use the btree one. If unset, use the default, of course.
+ */
+ func = LF_ISSET(ST_DUPSET) ? dbp->dup_compare :
+ ((BTREE *)dbp->bt_internal)->bt_compare;
+ if (func == NULL)
+ func = __bam_defcmp;
+
+ if ((ret = __bam_vrfy_treeorder(
+ dbp, pgno, h, lp, rp, func, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ }
+
+ /*
+ * This is guaranteed to succeed for leaf pages, but no harm done.
+ *
+ * Internal pages below the top level do not store their own
+ * record numbers, so we skip them.
+ */
+ if (LF_ISSET(ST_RECNUM) && nrecs != pip->rec_cnt && toplevel) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Bad record count on page %lu: got %lu, expected %lu",
+ (u_long)pgno, (u_long)nrecs, (u_long)pip->rec_cnt));
+ }
+
+ if (levelp)
+ *levelp = level;
+ if (nrecsp)
+ *nrecsp = nrecs;
+
+ pgset = vdp->pgset;
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "Page %lu linked twice", (u_long)pgno));
+ } else if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+
+err: if (h != NULL && (t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
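+/*
+ * __bam_vrfy_subtree is the heart of the structure pass behind the public
+ * DB->verify method.  A minimal sketch of how an application requests a full
+ * verification (the file name is illustrative; error handling omitted):
+ *
+ *	DB *dbp;
+ *
+ *	(void)db_create(&dbp, NULL, 0);
+ *	if (dbp->verify(dbp, "mydb.db", NULL, NULL, 0) != 0)
+ *		fprintf(stderr, "verification failed\n");
+ */
+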
+/*
+ * __bam_vrfy_treeorder --
+ * Verify that the lowest key on a page sorts greater than the
+ * BINTERNAL which points to it (lp), and the highest key
+ * sorts less than the BINTERNAL above that (rp).
+ *
+ * If lp is NULL, this means that it was the leftmost key on the
+ * parent, which (regardless of sort function) sorts less than
+ * all keys. No need to check it.
+ *
+ * If rp is NULL, lp was the highest key on the parent, so there's
+ * no higher key we must sort less than.
+ */
+static int
+__bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags)
+ DB *dbp;
+ db_pgno_t pgno;
+ PAGE *h;
+ BINTERNAL *lp, *rp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ u_int32_t flags;
+{
+ BOVERFLOW *bo;
+ DBT dbt;
+ db_indx_t last;
+ int ret, cmp;
+
+ memset(&dbt, 0, sizeof(DBT));
+ F_SET(&dbt, DB_DBT_MALLOC);
+ ret = 0;
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_LDUP:
+ last = NUM_ENT(h) - O_INDX;
+ break;
+ case P_LBTREE:
+ last = NUM_ENT(h) - P_INDX;
+ break;
+ default:
+ TYPE_ERR_PRINT(dbp->dbenv,
+ "__bam_vrfy_treeorder", pgno, TYPE(h));
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ /*
+ * The key on page h, the child page, is more likely to be
+ * an overflow page, so we pass its offset, rather than lp/rp's,
+ * into __bam_cmp. This will take advantage of __db_moff.
+ */
+
+ /*
+ * Skip first-item check if we're an internal page--the first
+ * entry on an internal page is treated specially by __bam_cmp,
+ * so what's on the page shouldn't matter. (Plus, since we're passing
+ * our page and item 0 as to __bam_cmp, we'll sort before our
+ * parent and falsely report a failure.)
+ */
+ if (lp != NULL && TYPE(h) != P_IBTREE) {
+ if (lp->type == B_KEYDATA) {
+ dbt.data = lp->data;
+ dbt.size = lp->len;
+ } else if (lp->type == B_OVERFLOW) {
+ bo = (BOVERFLOW *)lp->data;
+ if ((ret = __db_goff(dbp, &dbt, bo->tlen, bo->pgno,
+ NULL, NULL)) != 0)
+ return (ret);
+ } else {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Unknown type for internal record"));
+ return (EINVAL);
+ }
+
+		/* On error, fall through, free if needed, and return. */
+ if ((ret = __bam_cmp(dbp, &dbt, h, 0, func, &cmp)) == 0) {
+ if (cmp > 0) {
+ EPRINT((dbp->dbenv,
+	    "First item on page %lu sorted less than parent entry",
+ (u_long)PGNO(h)));
+ ret = DB_VERIFY_BAD;
+ }
+ } else
+ EPRINT((dbp->dbenv,
+ "First item on page %lu had comparison error",
+ (u_long)PGNO(h)));
+
+ if (dbt.data != lp->data)
+ __os_free(dbt.data, 0);
+ if (ret != 0)
+ return (ret);
+ }
+
+ if (rp != NULL) {
+ if (rp->type == B_KEYDATA) {
+ dbt.data = rp->data;
+ dbt.size = rp->len;
+ } else if (rp->type == B_OVERFLOW) {
+ bo = (BOVERFLOW *)rp->data;
+ if ((ret = __db_goff(dbp, &dbt, bo->tlen, bo->pgno,
+ NULL, NULL)) != 0)
+ return (ret);
+ } else {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Unknown type for internal record"));
+ return (EINVAL);
+ }
+
+		/* On error, fall through, free if needed, and return. */
+ if ((ret = __bam_cmp(dbp, &dbt, h, last, func, &cmp)) == 0) {
+ if (cmp < 0) {
+ EPRINT((dbp->dbenv,
+ "Last item on page %lu sorted greater than parent entry",
+ (u_long)PGNO(h)));
+ ret = DB_VERIFY_BAD;
+ }
+ } else
+ EPRINT((dbp->dbenv,
+ "Last item on page %lu had comparison error",
+ (u_long)PGNO(h)));
+
+ if (dbt.data != rp->data)
+ __os_free(dbt.data, 0);
+ }
+
+ return (ret);
+}
+
+/*
+ * __bam_salvage --
+ * Safely dump out anything that looks like a key on an alleged
+ * btree leaf page.
+ *
+ * PUBLIC: int __bam_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t,
+ * PUBLIC: PAGE *, void *, int (*)(void *, const void *), DBT *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ DBT *key;
+ u_int32_t flags;
+{
+ DBT dbt, unkdbt;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ db_indx_t i, beg, end;
+ u_int32_t himark;
+ u_int8_t *pgmap;
+ void *ovflbuf;
+ int t_ret, ret, err_ret;
+
+ /* Shut up lint. */
+ COMPQUIET(end, 0);
+
+ ovflbuf = pgmap = NULL;
+ err_ret = ret = 0;
+
+ memset(&dbt, 0, sizeof(DBT));
+ dbt.flags = DB_DBT_REALLOC;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = strlen("UNKNOWN") + 1;
+ unkdbt.data = "UNKNOWN";
+
+ /*
+ * Allocate a buffer for overflow items. Start at one page;
+ * __db_safe_goff will realloc as needed.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &ovflbuf)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_AGGRESSIVE)) {
+ if ((ret =
+ __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &pgmap)) != 0)
+ goto err;
+ memset(pgmap, 0, dbp->pgsize);
+ }
+
+ /*
+ * Loop through the inp array, spitting out key/data pairs.
+ *
+ * If we're salvaging normally, loop from 0 through NUM_ENT(h).
+ * If we're being aggressive, loop until we hit the end of the page--
+ * NUM_ENT() may be bogus.
+ */
+ himark = dbp->pgsize;
+ for (i = 0;; i += O_INDX) {
+ /* If we're not aggressive, break when we hit NUM_ENT(h). */
+ if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
+ break;
+
+ /* Verify the current item. */
+ ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 1, flags, &himark, NULL);
+ /* If this returned a fatality, it's time to break. */
+ if (ret == DB_VERIFY_FATAL) {
+ /*
+ * Don't return DB_VERIFY_FATAL; it's private
+ * and means only that we can't go on with this
+ * page, not with the whole database. It's
+ * not even an error if we've run into it
+ * after NUM_ENT(h).
+ */
+ ret = (i < NUM_ENT(h)) ? DB_VERIFY_BAD : 0;
+ break;
+ }
+
+ /*
+ * If this returned 0, it's safe to print or (carefully)
+ * try to fetch.
+ */
+ if (ret == 0) {
+ /*
+ * We only want to print deleted items if
+ * DB_AGGRESSIVE is set.
+ */
+ bk = GET_BKEYDATA(h, i);
+ if (!LF_ISSET(DB_AGGRESSIVE) && B_DISSET(bk->type))
+ continue;
+
+ /*
+ * We're going to go try to print the next item. If
+ * key is non-NULL, we're a dup page, so we've got to
+ * print the key first, unless SA_SKIPFIRSTKEY is set
+ * and we're on the first entry.
+ */
+ if (key != NULL &&
+ (i != 0 || !LF_ISSET(SA_SKIPFIRSTKEY)))
+ if ((ret = __db_prdbt(key,
+ 0, " ", handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+
+ beg = h->inp[i];
+ switch (B_TYPE(bk->type)) {
+ case B_DUPLICATE:
+ end = beg + BOVERFLOW_SIZE - 1;
+ /*
+ * If we're not on a normal btree leaf page,
+ * there shouldn't be off-page
+ * dup sets. Something's confused; just
+ * drop it, and the code to pick up unlinked
+ * offpage dup sets will print it out
+ * with key "UNKNOWN" later.
+ */
+ if (pgtype != P_LBTREE)
+ break;
+
+ bo = (BOVERFLOW *)bk;
+
+ /*
+ * If the page number is unreasonable, or
+ * if this is supposed to be a key item,
+ * just spit out "UNKNOWN"--the best we
+ * can do is run into the data items in the
+ * unlinked offpage dup pass.
+ */
+ if (!IS_VALID_PGNO(bo->pgno) ||
+ (i % P_INDX == 0)) {
+ /* Not much to do on failure. */
+ if ((ret = __db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+ break;
+ }
+
+ if ((ret = __db_salvage_duptree(dbp,
+ vdp, bo->pgno, &dbt, handle, callback,
+ flags | SA_SKIPFIRSTKEY)) != 0)
+ err_ret = ret;
+
+ break;
+ case B_KEYDATA:
+ end = ALIGN(beg + bk->len, sizeof(u_int32_t)) - 1;
+ dbt.data = bk->data;
+ dbt.size = bk->len;
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+ break;
+ case B_OVERFLOW:
+ end = beg + BOVERFLOW_SIZE - 1;
+ bo = (BOVERFLOW *)bk;
+ if ((ret = __db_safe_goff(dbp, vdp,
+ bo->pgno, &dbt, &ovflbuf, flags)) != 0) {
+ err_ret = ret;
+ /* We care about err_ret more. */
+ (void)__db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, NULL);
+ break;
+ }
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+ break;
+ default:
+ /*
+ * We should never get here; __db_vrfy_inpitem
+ * should not be returning 0 if bk->type
+ * is unrecognizable.
+ */
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ /*
+ * If we're being aggressive, mark the beginning
+ * and end of the item; we'll come back and print
+ * whatever "junk" is in the gaps in case we had
+ * any bogus inp elements and thereby missed stuff.
+ */
+ if (LF_ISSET(DB_AGGRESSIVE)) {
+ pgmap[beg] = ITEM_BEGIN;
+ pgmap[end] = ITEM_END;
+ }
+ }
+ }
+
+ /*
+ * If i is odd and this is a btree leaf, we've printed out a key but not
+ * a datum; fix this imbalance by printing an "UNKNOWN".
+ */
+ if (pgtype == P_LBTREE && (i % P_INDX == 1) && ((ret =
+ __db_prdbt(&unkdbt, 0, " ", handle, callback, 0, NULL)) != 0))
+ err_ret = ret;
+
+err: if (pgmap != NULL)
+ __os_free(pgmap, 0);
+ __os_free(ovflbuf, 0);
+
+ /* Mark this page as done. */
+ if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ return (t_ret);
+
+ return ((err_ret != 0) ? err_ret : ret);
+}
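
The handle/callback pair threaded through __bam_salvage is opaque to this code: __db_prdbt formats each key or datum and hands the resulting nul-terminated string to the callback together with the caller's handle. Below is a minimal sketch of such a callback, assuming the handle is a FILE *; the name salvage_write_cb and the FILE * assumption are illustrative, not taken from the source.

#include <stdio.h>

/*
 * Illustrative sketch only (not part of the Berkeley DB source): write
 * each formatted salvage item to the FILE * supplied as the opaque
 * handle, returning non-zero on failure as the salvage code expects.
 */
static int
salvage_write_cb(void *handle, const void *str)
{
	return (fputs((const char *)str, (FILE *)handle) < 0 ? EOF : 0);
}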
+
+/*
+ * __bam_salvage_walkdupint --
+ * Walk a known-good btree or recno internal page which is part of
+ * a dup tree, calling __db_salvage_duptree on each child page.
+ *
+ * PUBLIC: int __bam_salvage_walkdupint __P((DB *, VRFY_DBINFO *, PAGE *,
+ * PUBLIC: DBT *, void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__bam_salvage_walkdupint(dbp, vdp, h, key, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ DBT *key;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ RINTERNAL *ri;
+ BINTERNAL *bi;
+ int ret, t_ret;
+ db_indx_t i;
+
+ ret = 0;
+ for (i = 0; i < NUM_ENT(h); i++) {
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ bi = GET_BINTERNAL(h, i);
+ if ((t_ret = __db_salvage_duptree(dbp,
+ vdp, bi->pgno, key, handle, callback, flags)) != 0)
+ ret = t_ret;
+ break;
+ case P_IRECNO:
+ ri = GET_RINTERNAL(h, i);
+ if ((t_ret = __db_salvage_duptree(dbp,
+ vdp, ri->pgno, key, handle, callback, flags)) != 0)
+ ret = t_ret;
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "__bam_salvage_walkdupint called on non-int. page");
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+ /* Pass SA_SKIPFIRSTKEY, if set, on to the 0th child only. */
+ flags &= ~LF_ISSET(SA_SKIPFIRSTKEY);
+ }
+
+ return (ret);
+}
+
+/*
+ * __bam_meta2pgset --
+ * Given a known-good meta page, return in pgsetp a 0-terminated list of
+ * db_pgno_t's corresponding to the pages in the btree.
+ *
+ * We do this by a somewhat sleazy method, to avoid having to traverse the
+ * btree structure neatly: we walk down the left side to the very
+ * first leaf page, then we mark all the pages in the chain of
+ * NEXT_PGNOs (being wary of cycles and invalid ones), then we
+ * consolidate our scratch array into a nice list, and return. This
+ * avoids the memory management hassles of recursion and the
+ * trouble of walking internal pages--they just don't matter, except
+ * for the left branch.
+ *
+ * PUBLIC: int __bam_meta2pgset __P((DB *, VRFY_DBINFO *, BTMETA *,
+ * PUBLIC: u_int32_t, DB *));
+ */
+int
+__bam_meta2pgset(dbp, vdp, btmeta, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ BTMETA *btmeta;
+ u_int32_t flags;
+ DB *pgset;
+{
+ BINTERNAL *bi;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_pgno_t current, p;
+ int err_ret, ret;
+
+ h = NULL;
+ ret = err_ret = 0;
+ DB_ASSERT(pgset != NULL);
+ for (current = btmeta->root;;) {
+ if (!IS_VALID_PGNO(current) || current == PGNO(btmeta)) {
+ err_ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = memp_fget(dbp->mpf, &current, 0, &h)) != 0) {
+ err_ret = ret;
+ goto err;
+ }
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ if ((ret = __bam_vrfy(dbp,
+ vdp, h, current, flags | DB_NOORDERCHK)) != 0) {
+ err_ret = ret;
+ goto err;
+ }
+ if (TYPE(h) == P_IBTREE) {
+ bi = GET_BINTERNAL(h, 0);
+ current = bi->pgno;
+ } else { /* P_IRECNO */
+ ri = GET_RINTERNAL(h, 0);
+ current = ri->pgno;
+ }
+ break;
+ case P_LBTREE:
+ case P_LRECNO:
+ goto traverse;
+ default:
+ err_ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ err_ret = ret;
+ h = NULL;
+ }
+
+ /*
+ * At this point, current is the pgno of leaf page h, the 0th in the
+ * tree we're concerned with.
+ */
+traverse:
+ while (IS_VALID_PGNO(current) && current != PGNO_INVALID) {
+ if (h == NULL &&
+ (ret = memp_fget(dbp->mpf, &current, 0, &h)) != 0) {
+ err_ret = ret;
+ break;
+ }
+
+ if ((ret = __db_vrfy_pgset_get(pgset, current, (int *)&p)) != 0)
+ goto err;
+
+ if (p != 0) {
+ /*
+ * We've found a cycle. Return success anyway--
+ * our caller may as well use however much of
+ * the pgset we've come up with.
+ */
+ break;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, current)) != 0)
+ goto err;
+
+ current = NEXT_PGNO(h);
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ err_ret = ret;
+ h = NULL;
+ }
+
+err: if (h != NULL)
+ (void)memp_fput(dbp->mpf, h, 0);
+
+ return (ret == 0 ? err_ret : ret);
+}
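
The chain walk above can be pictured with a small, self-contained toy (not Berkeley DB code): start from the first leaf, follow next-page links, and record each page number once so that a corrupted cycle terminates the walk instead of looping, which is what the pgset get/increment pair accomplishes. Here 0 stands in for PGNO_INVALID and TOY_MAX_PGNO is an assumed bound.

#include <string.h>

#define TOY_MAX_PGNO	1024	/* assumed bound for the sketch */

/* Collect the leaf chain into out[]; return how many pages were seen. */
static int
toy_collect_leaf_chain(const unsigned int next[TOY_MAX_PGNO],
    unsigned int first, unsigned int out[], int outmax)
{
	unsigned char seen[TOY_MAX_PGNO];
	int n;

	memset(seen, 0, sizeof(seen));
	for (n = 0; first != 0 && first < TOY_MAX_PGNO && n < outmax;
	    first = next[first]) {
		if (seen[first])
			break;		/* Cycle: keep what we have. */
		seen[first] = 1;
		out[n++] = first;
	}
	return (n);
}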
+
+/*
+ * __bam_safe_getdata --
+ *
+ * Utility function for __bam_vrfy_itemorder. Safely gets the datum at
+ * index i, page h, and sticks it in DBT dbt. If ovflok is 1 and i is an
+ * overflow item, we do a __db_goff to get the item and signal that we need
+ * to free dbt->data; if ovflok is 0, we leave the DBT zeroed.
+ */
+static int
+__bam_safe_getdata(dbp, h, i, ovflok, dbt, freedbtp)
+ DB *dbp;
+ PAGE *h;
+ u_int32_t i;
+ int ovflok;
+ DBT *dbt;
+ int *freedbtp;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+
+ memset(dbt, 0, sizeof(DBT));
+ *freedbtp = 0;
+
+ bk = GET_BKEYDATA(h, i);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ if (!ovflok)
+ return (0);
+
+ bo = (BOVERFLOW *)bk;
+ F_SET(dbt, DB_DBT_MALLOC);
+
+ *freedbtp = 1;
+ return (__db_goff(dbp, dbt, bo->tlen, bo->pgno, NULL, NULL));
+ } else {
+ dbt->data = bk->data;
+ dbt->size = bk->len;
+ }
+
+ return (0);
+}
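
A hedged sketch of the calling convention for __bam_safe_getdata: the caller frees dbt.data only when the freedbt flag was set, because only then was the item fetched through __db_goff with DB_DBT_MALLOC. The function name inspect_item and its variables are placeholders, and the usual db_int.h/db_page.h/btree.h includes are assumed.

/* Illustrative fragment, not part of the original source. */
static int
inspect_item(DB *dbp, PAGE *h, u_int32_t i)
{
	DBT dbt;
	int freedbt, ret;

	if ((ret = __bam_safe_getdata(dbp, h, i, 1, &dbt, &freedbt)) != 0)
		return (ret);
	/* ... examine dbt.data / dbt.size here ... */
	if (freedbt)
		__os_free(dbt.data, 0);
	return (0);
}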
diff --git a/bdb/btree/btree.src b/bdb/btree/btree.src
new file mode 100644
index 00000000000..a1eba7d7fc7
--- /dev/null
+++ b/bdb/btree/btree.src
@@ -0,0 +1,296 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: btree.src,v 10.26 2000/12/12 17:40:23 bostic Exp $
+ */
+
+PREFIX bam
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "db_page.h"
+INCLUDE #include "db_dispatch.h"
+INCLUDE #include "db_am.h"
+INCLUDE #include "btree.h"
+INCLUDE #include "txn.h"
+INCLUDE
+
+/*
+ * BTREE-pg_alloc: used to record allocating a new page.
+ *
+ * meta_lsn: the meta-data page's original lsn.
+ * page_lsn: the allocated page's original lsn.
+ * pgno: the page allocated.
+ * next: the next page on the free list.
+ */
+BEGIN pg_alloc 51
+ARG fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+POINTER page_lsn DB_LSN * lu
+ARG pgno db_pgno_t lu
+ARG ptype u_int32_t lu
+ARG next db_pgno_t lu
+END
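
gen_rec.awk turns the pg_alloc record above into __bam_pg_alloc_log(), __bam_pg_alloc_read() and __bam_pg_alloc_print() in btree_auto.c, which appears later in this diff. The fragment below is only a hedged sketch of how the generated logging routine is typically called; apart from __bam_pg_alloc_log itself and the Berkeley DB types, every identifier is a placeholder.

/* Illustrative sketch; assumes the usual db_int.h/db_page.h/btree.h includes. */
static int
log_page_alloc(DB *dbp, DB_TXN *txn, DB_LSN *meta_lsn, DB_LSN *page_lsn,
    db_pgno_t pgno, db_pgno_t next)
{
	DB_LSN lsn;

	/* The arguments map one-to-one onto the ARG/POINTER lines above. */
	return (__bam_pg_alloc_log(dbp->dbenv, txn, &lsn, 0,
	    dbp->log_fileid, meta_lsn, page_lsn, pgno, P_LBTREE, next));
}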
+
+DEPRECATED pg_alloc1 60
+ARG fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+POINTER alloc_lsn DB_LSN * lu
+POINTER page_lsn DB_LSN * lu
+ARG pgno db_pgno_t lu
+ARG ptype u_int32_t lu
+ARG next db_pgno_t lu
+END
+
+/*
+ * BTREE-pg_free: used to record freeing a page.
+ *
+ * pgno: the page being freed.
+ * meta_lsn: the meta-data page's original lsn.
+ * header: the header from the free'd page.
+ * next: the previous next pointer on the metadata page.
+ */
+BEGIN pg_free 52
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER meta_lsn DB_LSN * lu
+DBT header DBT s
+ARG next db_pgno_t lu
+END
+
+DEPRECATED pg_free1 61
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER meta_lsn DB_LSN * lu
+POINTER alloc_lsn DB_LSN * lu
+DBT header DBT s
+ARG next db_pgno_t lu
+END
+
+/*
+ * BTREE-split: used to log a page split.
+ *
+ * left: the page number for the low-order contents.
+ * llsn: the left page's original LSN.
+ * right: the page number for the high-order contents.
+ * rlsn: the right page's original LSN.
+ * indx: the number of entries that went to the left page.
+ * npgno: the next page number
+ * nlsn: the next page's original LSN (or 0 if no next page).
+ * pg: the split page's contents before the split.
+ */
+DEPRECATED split1 53
+ARG fileid int32_t ld
+ARG left db_pgno_t lu
+POINTER llsn DB_LSN * lu
+ARG right db_pgno_t lu
+POINTER rlsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG npgno db_pgno_t lu
+POINTER nlsn DB_LSN * lu
+DBT pg DBT s
+END
+
+/*
+ * BTREE-split: used to log a page split.
+ *
+ * left: the page number for the low-order contents.
+ * llsn: the left page's original LSN.
+ * right: the page number for the high-order contents.
+ * rlsn: the right page's original LSN.
+ * indx: the number of entries that went to the left page.
+ * npgno: the next page number
+ * nlsn: the next page's original LSN (or 0 if no next page).
+ * root_pgno: the root page number
+ * pg: the split page's contents before the split.
+ * opflags: SPL_NRECS: if splitting a tree that maintains a record count.
+ */
+BEGIN split 62
+ARG fileid int32_t ld
+ARG left db_pgno_t lu
+POINTER llsn DB_LSN * lu
+ARG right db_pgno_t lu
+POINTER rlsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG npgno db_pgno_t lu
+POINTER nlsn DB_LSN * lu
+ARG root_pgno db_pgno_t lu
+DBT pg DBT s
+ARG opflags u_int32_t lu
+END
+
+/*
+ * BTREE-rsplit: used to log a reverse-split
+ *
+ * pgno: the page number of the page copied over the root.
+ * pgdbt: the page being copied on the root page.
+ * nrec: the tree's record count.
+ * rootent: last entry on the root page.
+ * rootlsn: the root page's original lsn.
+ */
+DEPRECATED rsplit1 54
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+DBT pgdbt DBT s
+ARG nrec db_pgno_t lu
+DBT rootent DBT s
+POINTER rootlsn DB_LSN * lu
+END
+
+/*
+ * BTREE-rsplit: used to log a reverse-split
+ *
+ * pgno: the page number of the page copied over the root.
+ * pgdbt: the page being copied on the root page.
+ * root_pgno: the root page number.
+ * nrec: the tree's record count.
+ * rootent: last entry on the root page.
+ * rootlsn: the root page's original lsn.
+ */
+BEGIN rsplit 63
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+DBT pgdbt DBT s
+ARG root_pgno db_pgno_t lu
+ARG nrec db_pgno_t lu
+DBT rootent DBT s
+POINTER rootlsn DB_LSN * lu
+END
+
+/*
+ * BTREE-adj: used to log the adjustment of an index.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index adjusted.
+ * indx_copy: the index to copy if inserting.
+ * is_insert: 0 if a delete, 1 if an insert.
+ */
+BEGIN adj 55
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG indx_copy u_int32_t lu
+ARG is_insert u_int32_t lu
+END
+
+/*
+ * BTREE-cadjust: used to adjust the count change in an internal page.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index to be adjusted.
+ * adjust: the signed adjustment.
+ * opflags: CAD_UPDATEROOT: if root page count was adjusted.
+ */
+BEGIN cadjust 56
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG adjust int32_t ld
+ARG opflags u_int32_t lu
+END
+
+/*
+ * BTREE-cdel: used to log the intent-to-delete of a cursor record.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index to be deleted.
+ */
+BEGIN cdel 57
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+END
+
+/*
+ * BTREE-repl: used to log the replacement of an item.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index of the replaced item.
+ * isdeleted: whether the item was marked deleted.
+ * orig: the original data.
+ * repl: the replacement data.
+ * prefix: the length of the prefix the original and replacement share.
+ * suffix: the length of the suffix the original and replacement share.
+ */
+BEGIN repl 58
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG isdeleted u_int32_t lu
+DBT orig DBT s
+DBT repl DBT s
+ARG prefix u_int32_t lu
+ARG suffix u_int32_t lu
+END
+
+/*
+ * BTREE-root: log the assignment of a root btree page.
+ */
+BEGIN root 59
+ARG fileid int32_t ld
+ARG meta_pgno db_pgno_t lu
+ARG root_pgno db_pgno_t lu
+POINTER meta_lsn DB_LSN * lu
+END
+
+/*
+ * BTREE-curadj: undo cursor adjustments on txn abort.
+ * Should only be processed during DB_TXN_ABORT.
+ * NOTE: the first_indx field is used to hold a signed index
+ * adjustment in one case; care should be taken if its size is changed.
+ */
+BEGIN curadj 64
+/* Fileid of db affected. */
+ARG fileid int32_t ld
+/* Which adjustment. */
+ARG mode db_ca_mode ld
+/* Page entry is from. */
+ARG from_pgno db_pgno_t lu
+/* Page entry went to. */
+ARG to_pgno db_pgno_t lu
+/* Left page of root split. */
+ARG left_pgno db_pgno_t lu
+/* First index of dup set. Also used as adjustment. */
+ARG first_indx u_int32_t lu
+/* Index entry is from. */
+ARG from_indx u_int32_t lu
+/* Index where entry went. */
+ARG to_indx u_int32_t lu
+END
+
+/*
+ * BTREE-rcuradj: undo cursor adjustments on txn abort in
+ * renumbering recno trees.
+ * Should only be processed during DB_TXN_ABORT.
+ */
+BEGIN rcuradj 65
+/* Fileid of db affected. */
+ARG fileid int32_t ld
+/* Which adjustment. */
+ARG mode ca_recno_arg ld
+/* Root page number. */
+ARG root db_pgno_t ld
+/* Recno of the adjustment. */
+ARG recno db_recno_t ld
+/* Order number of the adjustment. */
+ARG order u_int32_t ld
+END
diff --git a/bdb/btree/btree_auto.c b/bdb/btree/btree_auto.c
new file mode 100644
index 00000000000..fdb27b7d25e
--- /dev/null
+++ b/bdb/btree/btree_auto.c
@@ -0,0 +1,2284 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "btree.h"
+#include "txn.h"
+
+int
+__bam_pg_alloc_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, meta_lsn, page_lsn, pgno, ptype, next)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ DB_LSN * meta_lsn;
+ DB_LSN * page_lsn;
+ db_pgno_t pgno;
+ u_int32_t ptype;
+ db_pgno_t next;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_pg_alloc;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(*meta_lsn)
+ + sizeof(*page_lsn)
+ + sizeof(pgno)
+ + sizeof(ptype)
+ + sizeof(next);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (meta_lsn != NULL)
+ memcpy(bp, meta_lsn, sizeof(*meta_lsn));
+ else
+ memset(bp, 0, sizeof(*meta_lsn));
+ bp += sizeof(*meta_lsn);
+ if (page_lsn != NULL)
+ memcpy(bp, page_lsn, sizeof(*page_lsn));
+ else
+ memset(bp, 0, sizeof(*page_lsn));
+ bp += sizeof(*page_lsn);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &ptype, sizeof(ptype));
+ bp += sizeof(ptype);
+ memcpy(bp, &next, sizeof(next));
+ bp += sizeof(next);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_pg_alloc_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_pg_alloc_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_pg_alloc_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_pg_alloc: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ printf("\tpage_lsn: [%lu][%lu]\n",
+ (u_long)argp->page_lsn.file, (u_long)argp->page_lsn.offset);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tptype: %lu\n", (u_long)argp->ptype);
+ printf("\tnext: %lu\n", (u_long)argp->next);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_pg_alloc_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_pg_alloc_args **argpp;
+{
+ __bam_pg_alloc_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_pg_alloc_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+ memcpy(&argp->page_lsn, bp, sizeof(argp->page_lsn));
+ bp += sizeof(argp->page_lsn);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->ptype, bp, sizeof(argp->ptype));
+ bp += sizeof(argp->ptype);
+ memcpy(&argp->next, bp, sizeof(argp->next));
+ bp += sizeof(argp->next);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_pg_alloc1_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_pg_alloc1_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_pg_alloc1_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_pg_alloc1: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ printf("\talloc_lsn: [%lu][%lu]\n",
+ (u_long)argp->alloc_lsn.file, (u_long)argp->alloc_lsn.offset);
+ printf("\tpage_lsn: [%lu][%lu]\n",
+ (u_long)argp->page_lsn.file, (u_long)argp->page_lsn.offset);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tptype: %lu\n", (u_long)argp->ptype);
+ printf("\tnext: %lu\n", (u_long)argp->next);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_pg_alloc1_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_pg_alloc1_args **argpp;
+{
+ __bam_pg_alloc1_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_pg_alloc1_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+ memcpy(&argp->alloc_lsn, bp, sizeof(argp->alloc_lsn));
+ bp += sizeof(argp->alloc_lsn);
+ memcpy(&argp->page_lsn, bp, sizeof(argp->page_lsn));
+ bp += sizeof(argp->page_lsn);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->ptype, bp, sizeof(argp->ptype));
+ bp += sizeof(argp->ptype);
+ memcpy(&argp->next, bp, sizeof(argp->next));
+ bp += sizeof(argp->next);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_pg_free_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, meta_lsn, header, next)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN * meta_lsn;
+ const DBT *header;
+ db_pgno_t next;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_pg_free;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(*meta_lsn)
+ + sizeof(u_int32_t) + (header == NULL ? 0 : header->size)
+ + sizeof(next);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (meta_lsn != NULL)
+ memcpy(bp, meta_lsn, sizeof(*meta_lsn));
+ else
+ memset(bp, 0, sizeof(*meta_lsn));
+ bp += sizeof(*meta_lsn);
+ if (header == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &header->size, sizeof(header->size));
+ bp += sizeof(header->size);
+ memcpy(bp, header->data, header->size);
+ bp += header->size;
+ }
+ memcpy(bp, &next, sizeof(next));
+ bp += sizeof(next);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_pg_free_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_pg_free_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_pg_free_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_pg_free: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ printf("\theader: ");
+ for (i = 0; i < argp->header.size; i++) {
+ ch = ((u_int8_t *)argp->header.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tnext: %lu\n", (u_long)argp->next);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_pg_free_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_pg_free_args **argpp;
+{
+ __bam_pg_free_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_pg_free_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+ memset(&argp->header, 0, sizeof(argp->header));
+ memcpy(&argp->header.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->header.data = bp;
+ bp += argp->header.size;
+ memcpy(&argp->next, bp, sizeof(argp->next));
+ bp += sizeof(argp->next);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_pg_free1_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_pg_free1_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_pg_free1_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_pg_free1: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ printf("\talloc_lsn: [%lu][%lu]\n",
+ (u_long)argp->alloc_lsn.file, (u_long)argp->alloc_lsn.offset);
+ printf("\theader: ");
+ for (i = 0; i < argp->header.size; i++) {
+ ch = ((u_int8_t *)argp->header.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tnext: %lu\n", (u_long)argp->next);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_pg_free1_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_pg_free1_args **argpp;
+{
+ __bam_pg_free1_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_pg_free1_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+ memcpy(&argp->alloc_lsn, bp, sizeof(argp->alloc_lsn));
+ bp += sizeof(argp->alloc_lsn);
+ memset(&argp->header, 0, sizeof(argp->header));
+ memcpy(&argp->header.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->header.data = bp;
+ bp += argp->header.size;
+ memcpy(&argp->next, bp, sizeof(argp->next));
+ bp += sizeof(argp->next);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_split1_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_split1_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_split1_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_split1: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tleft: %lu\n", (u_long)argp->left);
+ printf("\tllsn: [%lu][%lu]\n",
+ (u_long)argp->llsn.file, (u_long)argp->llsn.offset);
+ printf("\tright: %lu\n", (u_long)argp->right);
+ printf("\trlsn: [%lu][%lu]\n",
+ (u_long)argp->rlsn.file, (u_long)argp->rlsn.offset);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\tnpgno: %lu\n", (u_long)argp->npgno);
+ printf("\tnlsn: [%lu][%lu]\n",
+ (u_long)argp->nlsn.file, (u_long)argp->nlsn.offset);
+ printf("\tpg: ");
+ for (i = 0; i < argp->pg.size; i++) {
+ ch = ((u_int8_t *)argp->pg.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_split1_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_split1_args **argpp;
+{
+ __bam_split1_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_split1_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->left, bp, sizeof(argp->left));
+ bp += sizeof(argp->left);
+ memcpy(&argp->llsn, bp, sizeof(argp->llsn));
+ bp += sizeof(argp->llsn);
+ memcpy(&argp->right, bp, sizeof(argp->right));
+ bp += sizeof(argp->right);
+ memcpy(&argp->rlsn, bp, sizeof(argp->rlsn));
+ bp += sizeof(argp->rlsn);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->npgno, bp, sizeof(argp->npgno));
+ bp += sizeof(argp->npgno);
+ memcpy(&argp->nlsn, bp, sizeof(argp->nlsn));
+ bp += sizeof(argp->nlsn);
+ memset(&argp->pg, 0, sizeof(argp->pg));
+ memcpy(&argp->pg.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pg.data = bp;
+ bp += argp->pg.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_split_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, left, llsn, right, rlsn, indx,
+ npgno, nlsn, root_pgno, pg, opflags)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t left;
+ DB_LSN * llsn;
+ db_pgno_t right;
+ DB_LSN * rlsn;
+ u_int32_t indx;
+ db_pgno_t npgno;
+ DB_LSN * nlsn;
+ db_pgno_t root_pgno;
+ const DBT *pg;
+ u_int32_t opflags;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_split;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(left)
+ + sizeof(*llsn)
+ + sizeof(right)
+ + sizeof(*rlsn)
+ + sizeof(indx)
+ + sizeof(npgno)
+ + sizeof(*nlsn)
+ + sizeof(root_pgno)
+ + sizeof(u_int32_t) + (pg == NULL ? 0 : pg->size)
+ + sizeof(opflags);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &left, sizeof(left));
+ bp += sizeof(left);
+ if (llsn != NULL)
+ memcpy(bp, llsn, sizeof(*llsn));
+ else
+ memset(bp, 0, sizeof(*llsn));
+ bp += sizeof(*llsn);
+ memcpy(bp, &right, sizeof(right));
+ bp += sizeof(right);
+ if (rlsn != NULL)
+ memcpy(bp, rlsn, sizeof(*rlsn));
+ else
+ memset(bp, 0, sizeof(*rlsn));
+ bp += sizeof(*rlsn);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &npgno, sizeof(npgno));
+ bp += sizeof(npgno);
+ if (nlsn != NULL)
+ memcpy(bp, nlsn, sizeof(*nlsn));
+ else
+ memset(bp, 0, sizeof(*nlsn));
+ bp += sizeof(*nlsn);
+ memcpy(bp, &root_pgno, sizeof(root_pgno));
+ bp += sizeof(root_pgno);
+ if (pg == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &pg->size, sizeof(pg->size));
+ bp += sizeof(pg->size);
+ memcpy(bp, pg->data, pg->size);
+ bp += pg->size;
+ }
+ memcpy(bp, &opflags, sizeof(opflags));
+ bp += sizeof(opflags);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_split_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_split_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_split_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_split: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tleft: %lu\n", (u_long)argp->left);
+ printf("\tllsn: [%lu][%lu]\n",
+ (u_long)argp->llsn.file, (u_long)argp->llsn.offset);
+ printf("\tright: %lu\n", (u_long)argp->right);
+ printf("\trlsn: [%lu][%lu]\n",
+ (u_long)argp->rlsn.file, (u_long)argp->rlsn.offset);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\tnpgno: %lu\n", (u_long)argp->npgno);
+ printf("\tnlsn: [%lu][%lu]\n",
+ (u_long)argp->nlsn.file, (u_long)argp->nlsn.offset);
+ printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno);
+ printf("\tpg: ");
+ for (i = 0; i < argp->pg.size; i++) {
+ ch = ((u_int8_t *)argp->pg.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\topflags: %lu\n", (u_long)argp->opflags);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_split_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_split_args **argpp;
+{
+ __bam_split_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_split_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->left, bp, sizeof(argp->left));
+ bp += sizeof(argp->left);
+ memcpy(&argp->llsn, bp, sizeof(argp->llsn));
+ bp += sizeof(argp->llsn);
+ memcpy(&argp->right, bp, sizeof(argp->right));
+ bp += sizeof(argp->right);
+ memcpy(&argp->rlsn, bp, sizeof(argp->rlsn));
+ bp += sizeof(argp->rlsn);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->npgno, bp, sizeof(argp->npgno));
+ bp += sizeof(argp->npgno);
+ memcpy(&argp->nlsn, bp, sizeof(argp->nlsn));
+ bp += sizeof(argp->nlsn);
+ memcpy(&argp->root_pgno, bp, sizeof(argp->root_pgno));
+ bp += sizeof(argp->root_pgno);
+ memset(&argp->pg, 0, sizeof(argp->pg));
+ memcpy(&argp->pg.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pg.data = bp;
+ bp += argp->pg.size;
+ memcpy(&argp->opflags, bp, sizeof(argp->opflags));
+ bp += sizeof(argp->opflags);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_rsplit1_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_rsplit1_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_rsplit1_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_rsplit1: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tpgdbt: ");
+ for (i = 0; i < argp->pgdbt.size; i++) {
+ ch = ((u_int8_t *)argp->pgdbt.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tnrec: %lu\n", (u_long)argp->nrec);
+ printf("\trootent: ");
+ for (i = 0; i < argp->rootent.size; i++) {
+ ch = ((u_int8_t *)argp->rootent.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\trootlsn: [%lu][%lu]\n",
+ (u_long)argp->rootlsn.file, (u_long)argp->rootlsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_rsplit1_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_rsplit1_args **argpp;
+{
+ __bam_rsplit1_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_rsplit1_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memset(&argp->pgdbt, 0, sizeof(argp->pgdbt));
+ memcpy(&argp->pgdbt.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pgdbt.data = bp;
+ bp += argp->pgdbt.size;
+ memcpy(&argp->nrec, bp, sizeof(argp->nrec));
+ bp += sizeof(argp->nrec);
+ memset(&argp->rootent, 0, sizeof(argp->rootent));
+ memcpy(&argp->rootent.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->rootent.data = bp;
+ bp += argp->rootent.size;
+ memcpy(&argp->rootlsn, bp, sizeof(argp->rootlsn));
+ bp += sizeof(argp->rootlsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_rsplit_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, pgdbt, root_pgno, nrec, rootent,
+ rootlsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ const DBT *pgdbt;
+ db_pgno_t root_pgno;
+ db_pgno_t nrec;
+ const DBT *rootent;
+ DB_LSN * rootlsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_rsplit;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(u_int32_t) + (pgdbt == NULL ? 0 : pgdbt->size)
+ + sizeof(root_pgno)
+ + sizeof(nrec)
+ + sizeof(u_int32_t) + (rootent == NULL ? 0 : rootent->size)
+ + sizeof(*rootlsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (pgdbt == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &pgdbt->size, sizeof(pgdbt->size));
+ bp += sizeof(pgdbt->size);
+ memcpy(bp, pgdbt->data, pgdbt->size);
+ bp += pgdbt->size;
+ }
+ memcpy(bp, &root_pgno, sizeof(root_pgno));
+ bp += sizeof(root_pgno);
+ memcpy(bp, &nrec, sizeof(nrec));
+ bp += sizeof(nrec);
+ if (rootent == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &rootent->size, sizeof(rootent->size));
+ bp += sizeof(rootent->size);
+ memcpy(bp, rootent->data, rootent->size);
+ bp += rootent->size;
+ }
+ if (rootlsn != NULL)
+ memcpy(bp, rootlsn, sizeof(*rootlsn));
+ else
+ memset(bp, 0, sizeof(*rootlsn));
+ bp += sizeof(*rootlsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_rsplit_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_rsplit_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_rsplit_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_rsplit: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tpgdbt: ");
+ for (i = 0; i < argp->pgdbt.size; i++) {
+ ch = ((u_int8_t *)argp->pgdbt.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno);
+ printf("\tnrec: %lu\n", (u_long)argp->nrec);
+ printf("\trootent: ");
+ for (i = 0; i < argp->rootent.size; i++) {
+ ch = ((u_int8_t *)argp->rootent.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\trootlsn: [%lu][%lu]\n",
+ (u_long)argp->rootlsn.file, (u_long)argp->rootlsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_rsplit_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_rsplit_args **argpp;
+{
+ __bam_rsplit_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_rsplit_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memset(&argp->pgdbt, 0, sizeof(argp->pgdbt));
+ memcpy(&argp->pgdbt.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pgdbt.data = bp;
+ bp += argp->pgdbt.size;
+ memcpy(&argp->root_pgno, bp, sizeof(argp->root_pgno));
+ bp += sizeof(argp->root_pgno);
+ memcpy(&argp->nrec, bp, sizeof(argp->nrec));
+ bp += sizeof(argp->nrec);
+ memset(&argp->rootent, 0, sizeof(argp->rootent));
+ memcpy(&argp->rootent.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->rootent.data = bp;
+ bp += argp->rootent.size;
+ memcpy(&argp->rootlsn, bp, sizeof(argp->rootlsn));
+ bp += sizeof(argp->rootlsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_adj_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, lsn, indx, indx_copy, is_insert)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ u_int32_t indx;
+ u_int32_t indx_copy;
+ u_int32_t is_insert;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_adj;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(*lsn)
+ + sizeof(indx)
+ + sizeof(indx_copy)
+ + sizeof(is_insert);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &indx_copy, sizeof(indx_copy));
+ bp += sizeof(indx_copy);
+ memcpy(bp, &is_insert, sizeof(is_insert));
+ bp += sizeof(is_insert);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_adj_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_adj_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_adj_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_adj: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\tindx_copy: %lu\n", (u_long)argp->indx_copy);
+ printf("\tis_insert: %lu\n", (u_long)argp->is_insert);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_adj_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_adj_args **argpp;
+{
+ __bam_adj_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_adj_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->indx_copy, bp, sizeof(argp->indx_copy));
+ bp += sizeof(argp->indx_copy);
+ memcpy(&argp->is_insert, bp, sizeof(argp->is_insert));
+ bp += sizeof(argp->is_insert);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_cadjust_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, lsn, indx, adjust, opflags)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ u_int32_t indx;
+ int32_t adjust;
+ u_int32_t opflags;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_cadjust;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(*lsn)
+ + sizeof(indx)
+ + sizeof(adjust)
+ + sizeof(opflags);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &adjust, sizeof(adjust));
+ bp += sizeof(adjust);
+ memcpy(bp, &opflags, sizeof(opflags));
+ bp += sizeof(opflags);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_cadjust_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_cadjust_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_cadjust_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_cadjust: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\tadjust: %ld\n", (long)argp->adjust);
+ printf("\topflags: %lu\n", (u_long)argp->opflags);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_cadjust_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_cadjust_args **argpp;
+{
+ __bam_cadjust_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_cadjust_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->adjust, bp, sizeof(argp->adjust));
+ bp += sizeof(argp->adjust);
+ memcpy(&argp->opflags, bp, sizeof(argp->opflags));
+ bp += sizeof(argp->opflags);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_cdel_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, lsn, indx)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ u_int32_t indx;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_cdel;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(*lsn)
+ + sizeof(indx);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_cdel_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_cdel_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_cdel_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_cdel: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_cdel_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_cdel_args **argpp;
+{
+ __bam_cdel_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_cdel_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__bam_repl_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, lsn, indx, isdeleted, orig,
+ repl, prefix, suffix)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ u_int32_t indx;
+ u_int32_t isdeleted;
+ const DBT *orig;
+ const DBT *repl;
+ u_int32_t prefix;
+ u_int32_t suffix;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_repl;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(*lsn)
+ + sizeof(indx)
+ + sizeof(isdeleted)
+ + sizeof(u_int32_t) + (orig == NULL ? 0 : orig->size)
+ + sizeof(u_int32_t) + (repl == NULL ? 0 : repl->size)
+ + sizeof(prefix)
+ + sizeof(suffix);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &isdeleted, sizeof(isdeleted));
+ bp += sizeof(isdeleted);
+ if (orig == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &orig->size, sizeof(orig->size));
+ bp += sizeof(orig->size);
+ memcpy(bp, orig->data, orig->size);
+ bp += orig->size;
+ }
+ if (repl == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &repl->size, sizeof(repl->size));
+ bp += sizeof(repl->size);
+ memcpy(bp, repl->data, repl->size);
+ bp += repl->size;
+ }
+ memcpy(bp, &prefix, sizeof(prefix));
+ bp += sizeof(prefix);
+ memcpy(bp, &suffix, sizeof(suffix));
+ bp += sizeof(suffix);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_repl_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_repl_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_repl_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_repl: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\tisdeleted: %lu\n", (u_long)argp->isdeleted);
+ printf("\torig: ");
+ for (i = 0; i < argp->orig.size; i++) {
+ ch = ((u_int8_t *)argp->orig.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\trepl: ");
+ for (i = 0; i < argp->repl.size; i++) {
+ ch = ((u_int8_t *)argp->repl.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tprefix: %lu\n", (u_long)argp->prefix);
+ printf("\tsuffix: %lu\n", (u_long)argp->suffix);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
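+/*
+ * __bam_repl_read --
+ *	Unmarshal a DB_bam_repl log record; note that the orig and repl
+ *	DBTs are set to point directly into the caller's record buffer
+ *	rather than being copied.
+ */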
+int
+__bam_repl_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_repl_args **argpp;
+{
+ __bam_repl_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_repl_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->isdeleted, bp, sizeof(argp->isdeleted));
+ bp += sizeof(argp->isdeleted);
+ memset(&argp->orig, 0, sizeof(argp->orig));
+ memcpy(&argp->orig.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->orig.data = bp;
+ bp += argp->orig.size;
+ memset(&argp->repl, 0, sizeof(argp->repl));
+ memcpy(&argp->repl.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->repl.data = bp;
+ bp += argp->repl.size;
+ memcpy(&argp->prefix, bp, sizeof(argp->prefix));
+ bp += sizeof(argp->prefix);
+ memcpy(&argp->suffix, bp, sizeof(argp->suffix));
+ bp += sizeof(argp->suffix);
+ *argpp = argp;
+ return (0);
+}
+
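+/*
+ * __bam_root_log --
+ *	Write a DB_bam_root log record recording the root page assigned to
+ *	a btree metadata page, along with the metadata page's prior LSN.
+ */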
+int
+__bam_root_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, meta_pgno, root_pgno, meta_lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t meta_pgno;
+ db_pgno_t root_pgno;
+ DB_LSN * meta_lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_root;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(meta_pgno)
+ + sizeof(root_pgno)
+ + sizeof(*meta_lsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &meta_pgno, sizeof(meta_pgno));
+ bp += sizeof(meta_pgno);
+ memcpy(bp, &root_pgno, sizeof(root_pgno));
+ bp += sizeof(root_pgno);
+ if (meta_lsn != NULL)
+ memcpy(bp, meta_lsn, sizeof(*meta_lsn));
+ else
+ memset(bp, 0, sizeof(*meta_lsn));
+ bp += sizeof(*meta_lsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_root_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_root_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_root_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_root: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno);
+ printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_root_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_root_args **argpp;
+{
+ __bam_root_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_root_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->meta_pgno, bp, sizeof(argp->meta_pgno));
+ bp += sizeof(argp->meta_pgno);
+ memcpy(&argp->root_pgno, bp, sizeof(argp->root_pgno));
+ bp += sizeof(argp->root_pgno);
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+ *argpp = argp;
+ return (0);
+}
+
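+/*
+ * __bam_curadj_log --
+ *	Write a DB_bam_curadj log record describing a btree cursor
+ *	adjustment: the adjustment mode plus the pages and indices involved.
+ */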
+int
+__bam_curadj_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, mode, from_pgno, to_pgno, left_pgno, first_indx,
+ from_indx, to_indx)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_ca_mode mode;
+ db_pgno_t from_pgno;
+ db_pgno_t to_pgno;
+ db_pgno_t left_pgno;
+ u_int32_t first_indx;
+ u_int32_t from_indx;
+ u_int32_t to_indx;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_curadj;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(mode)
+ + sizeof(from_pgno)
+ + sizeof(to_pgno)
+ + sizeof(left_pgno)
+ + sizeof(first_indx)
+ + sizeof(from_indx)
+ + sizeof(to_indx);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &mode, sizeof(mode));
+ bp += sizeof(mode);
+ memcpy(bp, &from_pgno, sizeof(from_pgno));
+ bp += sizeof(from_pgno);
+ memcpy(bp, &to_pgno, sizeof(to_pgno));
+ bp += sizeof(to_pgno);
+ memcpy(bp, &left_pgno, sizeof(left_pgno));
+ bp += sizeof(left_pgno);
+ memcpy(bp, &first_indx, sizeof(first_indx));
+ bp += sizeof(first_indx);
+ memcpy(bp, &from_indx, sizeof(from_indx));
+ bp += sizeof(from_indx);
+ memcpy(bp, &to_indx, sizeof(to_indx));
+ bp += sizeof(to_indx);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_curadj_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_curadj_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_curadj_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_curadj: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tmode: %ld\n", (long)argp->mode);
+ printf("\tfrom_pgno: %lu\n", (u_long)argp->from_pgno);
+ printf("\tto_pgno: %lu\n", (u_long)argp->to_pgno);
+ printf("\tleft_pgno: %lu\n", (u_long)argp->left_pgno);
+ printf("\tfirst_indx: %lu\n", (u_long)argp->first_indx);
+ printf("\tfrom_indx: %lu\n", (u_long)argp->from_indx);
+ printf("\tto_indx: %lu\n", (u_long)argp->to_indx);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_curadj_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_curadj_args **argpp;
+{
+ __bam_curadj_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_curadj_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->mode, bp, sizeof(argp->mode));
+ bp += sizeof(argp->mode);
+ memcpy(&argp->from_pgno, bp, sizeof(argp->from_pgno));
+ bp += sizeof(argp->from_pgno);
+ memcpy(&argp->to_pgno, bp, sizeof(argp->to_pgno));
+ bp += sizeof(argp->to_pgno);
+ memcpy(&argp->left_pgno, bp, sizeof(argp->left_pgno));
+ bp += sizeof(argp->left_pgno);
+ memcpy(&argp->first_indx, bp, sizeof(argp->first_indx));
+ bp += sizeof(argp->first_indx);
+ memcpy(&argp->from_indx, bp, sizeof(argp->from_indx));
+ bp += sizeof(argp->from_indx);
+ memcpy(&argp->to_indx, bp, sizeof(argp->to_indx));
+ bp += sizeof(argp->to_indx);
+ *argpp = argp;
+ return (0);
+}
+
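+/*
+ * __bam_rcuradj_log --
+ *	Write a DB_bam_rcuradj log record describing a recno cursor
+ *	adjustment: the mode, root page, record number and order.
+ */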
+int
+__bam_rcuradj_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, mode, root, recno, order)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ ca_recno_arg mode;
+ db_pgno_t root;
+ db_recno_t recno;
+ u_int32_t order;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_bam_rcuradj;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(mode)
+ + sizeof(root)
+ + sizeof(recno)
+ + sizeof(order);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &mode, sizeof(mode));
+ bp += sizeof(mode);
+ memcpy(bp, &root, sizeof(root));
+ bp += sizeof(root);
+ memcpy(bp, &recno, sizeof(recno));
+ bp += sizeof(recno);
+ memcpy(bp, &order, sizeof(order));
+ bp += sizeof(order);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__bam_rcuradj_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_rcuradj_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_rcuradj_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]bam_rcuradj: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tmode: %ld\n", (long)argp->mode);
+ printf("\troot: %ld\n", (long)argp->root);
+ printf("\trecno: %ld\n", (long)argp->recno);
+ printf("\torder: %ld\n", (long)argp->order);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__bam_rcuradj_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_rcuradj_args **argpp;
+{
+ __bam_rcuradj_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__bam_rcuradj_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->mode, bp, sizeof(argp->mode));
+ bp += sizeof(argp->mode);
+ memcpy(&argp->root, bp, sizeof(argp->root));
+ bp += sizeof(argp->root);
+ memcpy(&argp->recno, bp, sizeof(argp->recno));
+ bp += sizeof(argp->recno);
+ memcpy(&argp->order, bp, sizeof(argp->order));
+ bp += sizeof(argp->order);
+ *argpp = argp;
+ return (0);
+}
+
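+/*
+ * __bam_init_print --
+ *	Register the btree log-record print routines with the environment's
+ *	dispatch table via __db_add_recovery().
+ */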
+int
+__bam_init_print(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_pg_alloc_print, DB_bam_pg_alloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_pg_alloc1_print, DB_bam_pg_alloc1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_pg_free_print, DB_bam_pg_free)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_pg_free1_print, DB_bam_pg_free1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_split1_print, DB_bam_split1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_split_print, DB_bam_split)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_rsplit1_print, DB_bam_rsplit1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_rsplit_print, DB_bam_rsplit)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_adj_print, DB_bam_adj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_cadjust_print, DB_bam_cadjust)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_cdel_print, DB_bam_cdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_repl_print, DB_bam_repl)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_root_print, DB_bam_root)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_curadj_print, DB_bam_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_rcuradj_print, DB_bam_rcuradj)) != 0)
+ return (ret);
+ return (0);
+}
+
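+/*
+ * __bam_init_recover --
+ *	Register the btree recovery routines with the environment's dispatch
+ *	table; older record formats (pg_alloc1, pg_free1, split1, rsplit1)
+ *	are routed to __deprecated_recover().
+ */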
+int
+__bam_init_recover(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_pg_alloc_recover, DB_bam_pg_alloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_bam_pg_alloc1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_pg_free_recover, DB_bam_pg_free)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_bam_pg_free1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_bam_split1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_split_recover, DB_bam_split)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_bam_rsplit1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_rsplit_recover, DB_bam_rsplit)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_adj_recover, DB_bam_adj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_cadjust_recover, DB_bam_cadjust)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_cdel_recover, DB_bam_cdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_repl_recover, DB_bam_repl)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_root_recover, DB_bam_root)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_curadj_recover, DB_bam_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __bam_rcuradj_recover, DB_bam_rcuradj)) != 0)
+ return (ret);
+ return (0);
+}
+
diff --git a/bdb/build_unix/.IGNORE_ME b/bdb/build_unix/.IGNORE_ME
new file mode 100644
index 00000000000..558fd496f0c
--- /dev/null
+++ b/bdb/build_unix/.IGNORE_ME
@@ -0,0 +1,3 @@
+Some combinations of the gzip and tar archive exploders found
+on Linux systems ignore directories that don't have any files
+(other than symbolic links) in them. So, here's a file.
diff --git a/bdb/build_vxworks/BerkeleyDB.wpj b/bdb/build_vxworks/BerkeleyDB.wpj
new file mode 100644
index 00000000000..fa8aa61c14e
--- /dev/null
+++ b/bdb/build_vxworks/BerkeleyDB.wpj
@@ -0,0 +1,6066 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O0 \
+ -I$(PRJ_DIR) \
+ -I/export/home/db/include \
+ -DDIAGNOSTIC \
+ -DDEBUG \
+ -DHAVE_RPC
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCdebug_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_CFLAGS
+-mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O2 \
+ -I$(PRJ_DIR) \
+ -I/export/home/db/include \
+ -DHAVE_RPC
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_RPCnodebug_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O0 \
+ -I$(PRJ_DIR) \
+ -I/export/home/db/include \
+ -DDIAGNOSTIC \
+ -DDEBUG
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_CFLAGS
+-mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O2 \
+ -I$(PRJ_DIR) \
+ -I/export/home/db/include
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_nodebug_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB.out
+
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB_sim.out
+
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
+arsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/SIMSPARCSOLARISgnu/BerkeleyDB.a
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
+-g \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -fno-builtin \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS \
+ -O2 \
+ -I$(PRJ_DIR) \
+ -I/export/home/db/include
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
+-g \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
+ccsimso -E -P -xc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
+-N
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
+nmsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
+sizesimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
+::tc_SIMSPARCSOLARISgnu
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM_RPCdebug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM_nodebug \
+ PENTIUM_RPCdebug \
+ PENTIUM_RPCnodebug \
+ PENTIUM_debug \
+ SIMSPARCSOLARISgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_compare.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_compare.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_compare.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_conv.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_conv.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_curadj.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_curadj.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_curadj.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_cursor.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_cursor.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_cursor.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_delete.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_delete.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_delete.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_put.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_put.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_put.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_reclaim.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_reclaim.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_reclaim.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_recno.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_recno.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_recno.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_rsearch.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_rsearch.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_rsearch.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_search.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_search.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_search.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_split.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_split.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_split.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_upgrade.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_upgrade.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_upgrade.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_verify.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_verify.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_verify.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/bt_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/btree_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/btree_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> FILE_/export/home/db/btree/btree_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/getopt.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/getopt.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/getopt.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/snprintf.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/snprintf.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/snprintf.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/strcasecmp.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/strcasecmp.c_dependencies
+$(PRJ_DIR)/db_config.h
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/strcasecmp.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/vsnprintf.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/vsnprintf.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> FILE_/export/home/db/clib/vsnprintf.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_byteorder.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_byteorder.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_byteorder.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_err.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_err.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/clib_ext.h \
+ /export/home/db/include/db_auto.h
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_err.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_getlong.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_getlong.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/clib_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_getlong.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_log2.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_log2.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> FILE_/export/home/db/common/db_log2.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/common/util_log.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/common/util_log.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> FILE_/export/home/db/common/util_log.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/common/util_sig.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/common/util_sig.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> FILE_/export/home/db/common/util_sig.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/crdel_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/crdel_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/crdel_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/crdel_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/crdel_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/crdel_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db.c_objects
+db.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_am.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_am.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_am.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_cam.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_cam.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_cam.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_conv.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_conv.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_dispatch.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_dispatch.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_dispatch.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_dup.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_dup.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_dup.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_iface.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_iface.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_iface.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_join.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_join.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_join.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_join.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_meta.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_meta.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_meta.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_overflow.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_overflow.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_verify.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_overflow.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_pr.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_pr.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/db_verify.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_pr.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_reclaim.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_reclaim.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_reclaim.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_ret.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_ret.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_ret.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_upg.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_upg.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_upg.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_upg_opd.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_upg_opd.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_upg_opd.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_vrfy.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_vrfy.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/db_verify.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_vrfy.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_vrfyutil.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_vrfyutil.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_verify.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> FILE_/export/home/db/db/db_vrfyutil.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/env/db_salloc.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/env/db_salloc.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> FILE_/export/home/db/env/db_salloc.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/env/db_shash.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/env/db_shash.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> FILE_/export/home/db/env/db_shash.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/clib_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_recover.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_recover.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_recover.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> FILE_/export/home/db/env/env_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_conv.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_conv.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_dup.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_dup.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_dup.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_func.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_func.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_func.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_meta.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_meta.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_meta.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_page.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_page.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_page.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_reclaim.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_reclaim.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_reclaim.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_upgrade.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_upgrade.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_upgrade.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_verify.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_verify.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_verify.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hash/hash_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/hsearch/hsearch.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/hsearch/hsearch.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> FILE_/export/home/db/hsearch/hsearch.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/include/tcl_db.h_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_conflict.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_conflict.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_conflict.c_objects
+lock_conflict.o
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_conflict.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_deadlock.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_deadlock.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_deadlock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_util.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_util.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> FILE_/export/home/db/lock/lock_util.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log.c_objects
+log.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_archive.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_archive.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/clib_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_archive.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_auto.c_objects
+log_auto.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_compare.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_compare.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_compare.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_findckp.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_findckp.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_findckp.c_objects
+log_findckp.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_findckp.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_get.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_get.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_get.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_put.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_put.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/hash.h \
+ /export/home/db/include/hash_auto.h \
+ /export/home/db/include/hash_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/clib_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_put.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_rec.c_objects
+log_rec.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_register.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_register.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_register.c_objects
+log_register.o
+<END>
+
+<BEGIN> FILE_/export/home/db/log/log_register.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_alloc.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_alloc.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_alloc.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_bh.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_bh.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/db_page.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_bh.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fget.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fget.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fget.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fopen.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fopen.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fopen.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fput.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fput.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fput.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fset.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fset.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_fset.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_register.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_register.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_register.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_sync.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_sync.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_sync.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_trickle.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_trickle.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mp/mp_trickle.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mutex/mut_tas.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mutex/mut_tas.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mutex/mut_tas.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/mutex/mutex.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/mutex/mutex.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> FILE_/export/home/db/mutex/mutex.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_alloc.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_alloc.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_alloc.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_dir.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_dir.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_dir.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_errno.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_errno.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_errno.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_fid.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_fid.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_fid.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_fsync.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_fsync.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_fsync.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_handle.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_handle.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_handle.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_oflags.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_oflags.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_oflags.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rename.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rename.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rename.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_root.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_root.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_root.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rpath.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rpath.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rpath.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rw.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rw.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_rw.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_seek.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_seek.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_seek.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_sleep.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_sleep.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_sleep.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_spin.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_spin.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_spin.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_tmpdir.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_tmpdir.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_tmpdir.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_unlink.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_unlink.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/os_jump.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os/os_unlink.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_abs.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_abs.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_abs.c_objects
+os_abs.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_abs.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_finit.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_finit.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_finit.c_objects
+os_finit.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_finit.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_map.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_map.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_map.c_objects
+os_map.o
+<END>
+
+<BEGIN> FILE_/export/home/db/os_vxworks/os_map.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_conv.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_conv.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_files.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_files.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/btree.h \
+ /export/home/db/include/btree_auto.h \
+ /export/home/db/include/btree_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_files.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_upgrade.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_upgrade.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_swap.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/db_upgrade.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_verify.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_verify.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_verify.h \
+ /export/home/db/include/qam.h \
+ /export/home/db/include/qam_auto.h \
+ /export/home/db/include/qam_ext.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> FILE_/export/home/db/qam/qam_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/client.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/client.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/client.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/db_server_clnt.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/db_server_clnt.c_dependencies
+$(PRJ_DIR)/db_config.h /export/home/db/include/db_server.h
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/db_server_clnt.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/gen_client.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/gen_client.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/mp.h \
+ /export/home/db/include/mp_ext.h \
+ /export/home/db/include/rpc_client_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/gen_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/gen_client.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/gen_client_ret.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/gen_client_ret.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_client/gen_client_ret.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_server/db_server_xdr.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_server/db_server_xdr.c_dependencies
+$(PRJ_DIR)/db_config.h /export/home/db/include/db_server.h
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_server/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> FILE_/export/home/db/rpc_server/db_server_xdr.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_shash.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/lock.h \
+ /export/home/db/include/lock_ext.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ /export/home/db/include/db_server.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h \
+ /export/home/db/include/gen_client_ext.h \
+ /export/home/db/include/rpc_client_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> FILE_/export/home/db/txn/txn_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/db_page.h \
+ /export/home/db/include/log.h \
+ /export/home/db/include/log_auto.h \
+ /export/home/db/include/log_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/db_am.h \
+ /export/home/db/include/db_dispatch.h \
+ /export/home/db/include/db_auto.h \
+ /export/home/db/include/crdel_auto.h \
+ /export/home/db/include/db_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa_db.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa_db.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/xa_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa_db.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa_map.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa_map.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h \
+ /export/home/db/include/queue.h \
+ /export/home/db/include/shqueue.h \
+ /export/home/db/include/debug.h \
+ /export/home/db/include/mutex.h \
+ /export/home/db/include/region.h \
+ /export/home/db/include/mutex_ext.h \
+ /export/home/db/include/env_ext.h \
+ /export/home/db/include/os.h \
+ /export/home/db/include/os_ext.h \
+ /export/home/db/include/common_ext.h \
+ /export/home/db/include/txn.h \
+ /export/home/db/include/xa.h \
+ /export/home/db/include/txn_auto.h \
+ /export/home/db/include/txn_ext.h \
+ /export/home/db/include/xa_ext.h
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> FILE_/export/home/db/xa/xa_map.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+/export/home/db/hsearch/hsearch.c \
+ /export/home/db/mp/mp_trickle.c \
+ /export/home/db/mp/mp_bh.c \
+ /export/home/db/mp/mp_fget.c \
+ /export/home/db/mp/mp_fopen.c \
+ /export/home/db/mp/mp_fput.c \
+ /export/home/db/mp/mp_fset.c \
+ /export/home/db/mp/mp_method.c \
+ /export/home/db/mp/mp_region.c \
+ /export/home/db/mp/mp_register.c \
+ /export/home/db/mp/mp_stat.c \
+ /export/home/db/mp/mp_sync.c \
+ /export/home/db/mp/mp_alloc.c \
+ /export/home/db/db/crdel_rec.c \
+ /export/home/db/db/db.c \
+ /export/home/db/db/db_am.c \
+ /export/home/db/db/db_auto.c \
+ /export/home/db/db/db_cam.c \
+ /export/home/db/db/db_conv.c \
+ /export/home/db/db/db_dispatch.c \
+ /export/home/db/db/db_dup.c \
+ /export/home/db/db/db_iface.c \
+ /export/home/db/db/db_join.c \
+ /export/home/db/db/db_meta.c \
+ /export/home/db/db/db_method.c \
+ /export/home/db/db/db_overflow.c \
+ /export/home/db/db/db_pr.c \
+ /export/home/db/db/db_rec.c \
+ /export/home/db/db/db_reclaim.c \
+ /export/home/db/db/db_ret.c \
+ /export/home/db/db/crdel_auto.c \
+ /export/home/db/clib/getopt.c \
+ /export/home/db/clib/snprintf.c \
+ /export/home/db/clib/strcasecmp.c \
+ /export/home/db/os/os_unlink.c \
+ /export/home/db/os/os_alloc.c \
+ /export/home/db/os/os_dir.c \
+ /export/home/db/os/os_errno.c \
+ /export/home/db/os/os_fid.c \
+ /export/home/db/os/os_fsync.c \
+ /export/home/db/os/os_handle.c \
+ /export/home/db/os/os_method.c \
+ /export/home/db/os/os_oflags.c \
+ /export/home/db/os/os_open.c \
+ /export/home/db/os/os_region.c \
+ /export/home/db/os/os_rename.c \
+ /export/home/db/os/os_root.c \
+ /export/home/db/os/os_rpath.c \
+ /export/home/db/os/os_rw.c \
+ /export/home/db/os/os_seek.c \
+ /export/home/db/os/os_spin.c \
+ /export/home/db/os/os_stat.c \
+ /export/home/db/os/os_tmpdir.c \
+ /export/home/db/qam/qam_upgrade.c \
+ /export/home/db/qam/qam_auto.c \
+ /export/home/db/qam/qam_conv.c \
+ /export/home/db/qam/qam_method.c \
+ /export/home/db/qam/qam_open.c \
+ /export/home/db/qam/qam_rec.c \
+ /export/home/db/qam/qam_stat.c \
+ /export/home/db/qam/qam.c \
+ /export/home/db/hash/hash_upgrade.c \
+ /export/home/db/hash/hash_auto.c \
+ /export/home/db/hash/hash_conv.c \
+ /export/home/db/hash/hash_dup.c \
+ /export/home/db/hash/hash_func.c \
+ /export/home/db/hash/hash_meta.c \
+ /export/home/db/hash/hash_method.c \
+ /export/home/db/hash/hash_page.c \
+ /export/home/db/hash/hash_rec.c \
+ /export/home/db/hash/hash_reclaim.c \
+ /export/home/db/hash/hash_stat.c \
+ /export/home/db/hash/hash.c \
+ /export/home/db/xa/xa_map.c \
+ /export/home/db/xa/xa_db.c \
+ /export/home/db/xa/xa.c \
+ /export/home/db/btree/btree_auto.c \
+ /export/home/db/btree/bt_conv.c \
+ /export/home/db/btree/bt_curadj.c \
+ /export/home/db/btree/bt_cursor.c \
+ /export/home/db/btree/bt_delete.c \
+ /export/home/db/btree/bt_method.c \
+ /export/home/db/btree/bt_open.c \
+ /export/home/db/btree/bt_put.c \
+ /export/home/db/btree/bt_rec.c \
+ /export/home/db/btree/bt_reclaim.c \
+ /export/home/db/btree/bt_recno.c \
+ /export/home/db/btree/bt_rsearch.c \
+ /export/home/db/btree/bt_search.c \
+ /export/home/db/btree/bt_split.c \
+ /export/home/db/btree/bt_stat.c \
+ /export/home/db/btree/bt_upgrade.c \
+ /export/home/db/btree/bt_compare.c \
+ /export/home/db/common/db_log2.c \
+ /export/home/db/common/db_err.c \
+ /export/home/db/common/db_getlong.c \
+ /export/home/db/common/db_byteorder.c \
+ /export/home/db/env/env_region.c \
+ /export/home/db/env/db_shash.c \
+ /export/home/db/env/env_method.c \
+ /export/home/db/env/env_open.c \
+ /export/home/db/env/env_recover.c \
+ /export/home/db/env/db_salloc.c \
+ /export/home/db/lock/lock_util.c \
+ /export/home/db/lock/lock_conflict.c \
+ /export/home/db/lock/lock_deadlock.c \
+ /export/home/db/lock/lock_region.c \
+ /export/home/db/lock/lock.c \
+ /export/home/db/txn/txn_region.c \
+ /export/home/db/txn/txn_auto.c \
+ /export/home/db/txn/txn_rec.c \
+ /export/home/db/txn/txn.c \
+ /export/home/db/log/log_register.c \
+ /export/home/db/log/log_archive.c \
+ /export/home/db/log/log_auto.c \
+ /export/home/db/log/log_compare.c \
+ /export/home/db/log/log_findckp.c \
+ /export/home/db/log/log_get.c \
+ /export/home/db/log/log_method.c \
+ /export/home/db/log/log_put.c \
+ /export/home/db/log/log_rec.c \
+ /export/home/db/log/log.c \
+ /export/home/db/mutex/mut_tas.c \
+ /export/home/db/mutex/mutex.c \
+ /export/home/db/clib/vsnprintf.c \
+ /export/home/db/common/util_log.c \
+ /export/home/db/common/util_sig.c \
+ /export/home/db/os/os_sleep.c \
+ /export/home/db/btree/bt_verify.c \
+ /export/home/db/hash/hash_verify.c \
+ /export/home/db/qam/qam_verify.c \
+ /export/home/db/db/db_upg_opd.c \
+ /export/home/db/rpc_client/gen_client_ret.c \
+ /export/home/db/rpc_client/db_server_clnt.c \
+ /export/home/db/rpc_client/gen_client.c \
+ /export/home/db/rpc_client/client.c \
+ /export/home/db/include/tcl_db.h \
+ /export/home/db/rpc_server/db_server_xdr.c \
+ /export/home/db/os_vxworks/os_map.c \
+ /export/home/db/db/db_vrfy.c \
+ /export/home/db/db/db_upg.c \
+ /export/home/db/db/db_vrfyutil.c \
+ /export/home/db/os_vxworks/os_finit.c \
+ /export/home/db/os_vxworks/os_abs.c \
+ /export/home/db/lock/lock_method.c \
+ /export/home/db/lock/lock_stat.c \
+ /export/home/db/qam/qam_files.c
+<END>
+
+<BEGIN> userComments
+BerkeleyDB
+<END>
+
diff --git a/bdb/build_vxworks/BerkeleyDB.wsp b/bdb/build_vxworks/BerkeleyDB.wsp
new file mode 100644
index 00000000000..cffcf00dec9
--- /dev/null
+++ b/bdb/build_vxworks/BerkeleyDB.wsp
@@ -0,0 +1,24 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+Workspace
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> projectList
+$(PRJ_DIR)/BerkeleyDB.wpj \
+ $(PRJ_DIR)/ex_access/ex_access.wpj \
+ $(PRJ_DIR)/ex_btrec/ex_btrec.wpj \
+ $(PRJ_DIR)/ex_env/ex_env.wpj \
+ $(PRJ_DIR)/ex_mpool/ex_mpool.wpj \
+ $(PRJ_DIR)/ex_tpcb/ex_tpcb.wpj \
+ $(PRJ_DIR)/ex_dbclient/ex_dbclient.wpj
+<END>
+
+<BEGIN> userComments
+
+<END>
+
diff --git a/bdb/build_vxworks/db_config.h b/bdb/build_vxworks/db_config.h
new file mode 100644
index 00000000000..cccfc4a7cf6
--- /dev/null
+++ b/bdb/build_vxworks/db_config.h
@@ -0,0 +1,264 @@
+/*
+ * $Id: db_config.h,v 1.4 2000/12/12 18:39:26 bostic Exp $
+ */
+
+/* Define if building for VxWorks. */
+#define HAVE_VXWORKS 1
+
+/* Define to empty if the keyword does not work. */
+/* #undef const */
+
+/* Define if your struct stat has st_blksize. */
+#define HAVE_ST_BLKSIZE 1
+
+/* Define to `int' if <sys/types.h> doesn't define. */
+/* #undef mode_t */
+
+/* Define to `long' if <sys/types.h> doesn't define. */
+/* #undef off_t */
+
+/* Define to `int' if <sys/types.h> doesn't define. */
+/* #undef pid_t */
+
+/* Define to `unsigned' if <sys/types.h> doesn't define. */
+/* #undef size_t */
+
+/* Define if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* !!!
+ * WORDS_BIGENDIAN is the ONLY option in this file that may be edited
+ * for VxWorks.
+ *
+ * The user must set this according to VxWorks' target arch. We use an
+ * x86 (little-endian) target.
+ */
+/* Define if your processor stores words with the most significant
+ byte first (like Motorola and SPARC, unlike Intel and VAX). */
+/* #undef WORDS_BIGENDIAN */
+
+/* !!!
+ * The CONFIG_TEST option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define if you are building a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* !!!
+ * The DEBUG option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define if you want a debugging version. */
+/* #undef DEBUG */
+
+/* Define if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* !!!
+ * The DIAGNOSTIC option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define if you want to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Define if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define if building big-file environment (e.g., AIX, HP/UX, Solaris). */
+/* #undef HAVE_FILE_OFFSET_BITS */
+
+/* Mutex possibilities. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_MACOS */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+/* #undef HAVE_MUTEX_PPC_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_PTHREADS */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+#define HAVE_MUTEX_THREADS 1
+/* #undef HAVE_MUTEX_UI_THREADS */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+/* #undef HAVE_MUTEX_VMS */
+#define HAVE_MUTEX_VXWORKS 1
+/* #undef HAVE_MUTEX_WIN16 */
+/* #undef HAVE_MUTEX_WIN32 */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* !!!
+ * The HAVE_RPC option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define if your sprintf returns a pointer, not a length. */
+/* #undef SPRINTF_RET_CHARPNT */
+
+/* Define if you have the getcwd function. */
+#define HAVE_GETCWD 1
+
+/* Define if you have the getopt function. */
+/* #undef HAVE_GETOPT */
+
+/* Define if you have the getuid function. */
+/* #undef HAVE_GETUID */
+
+/* Define if you have the memcmp function. */
+#define HAVE_MEMCMP 1
+
+/* Define if you have the memcpy function. */
+#define HAVE_MEMCPY 1
+
+/* Define if you have the memmove function. */
+#define HAVE_MEMMOVE 1
+
+/* Define if you have the mlock function. */
+#define HAVE_MLOCK 1
+
+/* Define if you have the mmap function. */
+/* #undef HAVE_MMAP */
+
+/* Define if you have the munlock function. */
+#define HAVE_MUNLOCK 1
+
+/* Define if you have the munmap function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define if you have the pread function. */
+/* #undef HAVE_PREAD */
+
+/* Define if you have the pstat_getdynamic function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define if you have the pwrite function. */
+/* #undef HAVE_PWRITE */
+
+/* Define if you have the qsort function. */
+#define HAVE_QSORT 1
+
+/* Define if you have the raise function. */
+#define HAVE_RAISE 1
+
+/* Define if you have the sched_yield function. */
+#define HAVE_SCHED_YIELD 1
+
+/* Define if you have the select function. */
+#define HAVE_SELECT 1
+
+/* Define if you have the shmget function. */
+/* #undef HAVE_SHMGET */
+
+/* Define if you have the snprintf function. */
+/* #undef HAVE_SNPRINTF */
+
+/* Define if you have the strcasecmp function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define if you have the strerror function. */
+#define HAVE_STRERROR 1
+
+/* Define if you have the strtoul function. */
+#define HAVE_STRTOUL 1
+
+/* Define if you have the sysconf function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define if you have the vsnprintf function. */
+/* #undef HAVE_VSNPRINTF */
+
+/* Define if you have the yield function. */
+/* #undef HAVE_YIELD */
+
+/* Define if you have the <dirent.h> header file. */
+#define HAVE_DIRENT_H 1
+
+/* Define if you have the <ndir.h> header file. */
+/* #undef HAVE_NDIR_H */
+
+/* Define if you have the <sys/dir.h> header file. */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define if you have the <sys/ndir.h> header file. */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define if you have the nsl library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/*
+ * !!!
+ * The following is not part of the automatic configuration setup, but
+ * provides necessary VxWorks information.
+ */
+#include "vxWorks.h"
+
+/*
+ * VxWorks does not have getpid().
+ */
+#define getpid() taskIdSelf()
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_GETOPT
+#define getopt __db_Cgetopt
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
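
The renaming block above is resolved entirely by the preprocessor: each libc-style name is redirected to a __db_C* replacement only when the matching HAVE_* macro is left undefined, and getpid() is always mapped onto taskIdSelf(). A minimal sketch of the effect under this configuration (the caller is hypothetical and not part of the imported sources; the replacement snprintf is assumed to be the one built from clib/snprintf.c listed in PROJECT_FILES above):

/* Sketch only -- assumes the generated db_config.h above is on the include
 * path of a VxWorks build. */
#include <stddef.h>			/* size_t, for the sketch only */
#include "db_config.h"

int
log_current_task(char *buf, size_t buflen)
{
	/* getpid() expands to taskIdSelf(); VxWorks has no getpid(). */
	int tid = getpid();

	/*
	 * HAVE_SNPRINTF is #undef'd in this configuration, so the call
	 * below is renamed to __db_Csnprintf(), DB's own replacement.
	 */
	return (snprintf(buf, buflen, "task %d", tid));
}
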
diff --git a/bdb/build_vxworks/db_int.h b/bdb/build_vxworks/db_int.h
new file mode 100644
index 00000000000..99dc2932e92
--- /dev/null
+++ b/bdb/build_vxworks/db_int.h
@@ -0,0 +1,398 @@
+/* DO NOT EDIT: automatically built by dist/s_vxworks. */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_int.src,v 11.42 2001/01/11 17:49:17 krinsky Exp $
+ */
+
+#ifndef _DB_INTERNAL_H_
+#define _DB_INTERNAL_H_
+
+/*******************************************************
+ * General includes.
+ *******************************************************/
+#include "db.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#if defined(__STDC__) || defined(__cplusplus)
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <errno.h>
+#endif
+
+#include "queue.h"
+#include "shqueue.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*******************************************************
+ * General purpose constants and macros.
+ *******************************************************/
+#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */
+#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */
+
+#define MEGABYTE 1048576
+#define GIGABYTE 1073741824
+
+#define MS_PER_SEC 1000 /* Milliseconds in a second. */
+#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */
+
+#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */
+#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */
+
+#define RECNO_OOB 0 /* Illegal record number. */
+
+/*
+ * If we are unable to determine the underlying filesystem block size, use
+ * 8K on the grounds that most OS's use less than 8K for a VM page size.
+ */
+#define DB_DEF_IOSIZE (8 * 1024)
+
+/*
+ * Aligning items to particular sizes or in pages or memory.
+ *
+ * db_align_t --
+ * Largest integral type, used to align structures in memory. We don't store
+ * floating point types in structures, so integral types should be sufficient
+ * (and we don't have to worry about systems that store floats in other than
+ * power-of-2 numbers of bytes). Additionally, this fixes compilers that rewrite
+ * structure assignments and ANSI C memcpy calls to be in-line instructions
+ * that happen to require alignment. Note: this alignment isn't sufficient for
+ * mutexes, which depend on things like cache line alignment. Mutex alignment
+ * is handled separately, in mutex.h.
+ *
+ * db_alignp_t --
+ * Integral type that's the same size as a pointer. There are places where
+ * DB modifies pointers by discarding the bottom bits to guarantee alignment.
+ * We can't use db_align_t, it may be larger than the pointer, and compilers
+ * get upset about that. So far we haven't run on any machine where there
+ * isn't an integral type the same size as a pointer -- here's hoping.
+ */
+typedef unsigned long db_align_t;
+typedef unsigned long db_alignp_t;
+
+/* Align an integer to a specific boundary. */
+#undef ALIGN
+#define ALIGN(value, bound) \
+ (((value) + (bound) - 1) & ~(((u_int)bound) - 1))
+
+/* Align a pointer to a specific boundary. */
+#undef ALIGNP
+#define ALIGNP(value, bound) ALIGN((db_alignp_t)value, bound)
+
+/*
+ * There are several on-page structures that are declared to have a number of
+ * fields followed by a variable length array of items. The structure size
+ * without including the variable length array or the address of the first of
+ * those elements can be found using SSZ.
+ *
+ * This macro can also be used to find the offset of a structure element in a
+ * structure. This is used in various places to copy structure elements from
+ * unaligned memory references, e.g., pointers into a packed page.
+ *
+ * There are two versions because compilers object if you take the address of
+ * an array.
+ */
+#undef SSZ
+#define SSZ(name, field) ((int)&(((name *)0)->field))
+
+#undef SSZA
+#define SSZA(name, field) ((int)&(((name *)0)->field[0]))
+
+/*
+ * Print an address as a u_long (a u_long is the largest type we can print
+ * portably). Most 64-bit systems have made longs 64-bits, so this should
+ * work.
+ */
+#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
+
+/* Structure used to print flag values. */
+typedef struct __fn {
+ u_int32_t mask; /* Flag value. */
+ const char *name; /* Flag name. */
+} FN;
+
+/* Set, clear and test flags. */
+#define FLD_CLR(fld, f) (fld) &= ~(f)
+#define FLD_ISSET(fld, f) ((fld) & (f))
+#define FLD_SET(fld, f) (fld) |= (f)
+#define F_CLR(p, f) (p)->flags &= ~(f)
+#define F_ISSET(p, f) ((p)->flags & (f))
+#define F_SET(p, f) (p)->flags |= (f)
+#define LF_CLR(f) (flags &= ~(f))
+#define LF_ISSET(f) (flags & (f))
+#define LF_SET(f) (flags |= (f))
+
+/* Display separator string. */
+#undef DB_LINE
+#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+
+/* Unused, or not-used-yet variable. "Shut that bloody compiler up!" */
+#define COMPQUIET(n, v) (n) = (v)
+
+/*******************************************************
+ * Files.
+ *******************************************************/
+ /*
+ * We use 1024 as the maximum path length. It's too hard to figure out what
+ * the real path length is, as it was traditionally stored in <sys/param.h>,
+ * and that file isn't always available.
+ */
+#undef MAXPATHLEN
+#define MAXPATHLEN 1024
+
+#define PATH_DOT "." /* Current working directory. */
+#define PATH_SEPARATOR "/\\" /* Path separator character. */
+
+/*
+ * Flags understood by __os_open.
+ */
+#define DB_OSO_CREATE 0x001 /* POSIX: O_CREAT */
+#define DB_OSO_EXCL 0x002 /* POSIX: O_EXCL */
+#define DB_OSO_LOG 0x004 /* Opening a log file. */
+#define DB_OSO_RDONLY 0x008 /* POSIX: O_RDONLY */
+#define DB_OSO_REGION 0x010 /* Opening a region file. */
+#define DB_OSO_SEQ 0x020 /* Expected sequential access. */
+#define DB_OSO_TEMP 0x040 /* Remove after last close. */
+#define DB_OSO_TRUNC 0x080 /* POSIX: O_TRUNC */
+
+/*
+ * Seek options understood by __os_seek.
+ */
+typedef enum {
+ DB_OS_SEEK_CUR, /* POSIX: SEEK_CUR */
+ DB_OS_SEEK_END, /* POSIX: SEEK_END */
+ DB_OS_SEEK_SET /* POSIX: SEEK_SET */
+} DB_OS_SEEK;
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+/* Type passed to __db_appname(). */
+typedef enum {
+ DB_APP_NONE=0, /* No type (region). */
+ DB_APP_DATA, /* Data file. */
+ DB_APP_LOG, /* Log file. */
+ DB_APP_TMP /* Temporary file. */
+} APPNAME;
+
+/*
+ * CDB_LOCKING CDB product locking.
+ * LOCKING_ON Locking has been configured.
+ * LOGGING_ON Logging has been configured.
+ * MPOOL_ON Memory pool has been configured.
+ * TXN_ON Transactions have been configured.
+ */
+#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB)
+#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL)
+#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL)
+#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL)
+#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL)
+
+/*
+ * STD_LOCKING Standard locking, that is, locking was configured and CDB
+ * was not. We do not do locking in off-page duplicate trees,
+ * so we check for that in the cursor first.
+ */
+#define STD_LOCKING(dbc) \
+ (!F_ISSET(dbc, DBC_OPD) && \
+ !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv))
+
+/*
+ * IS_RECOVERING The system is running recovery.
+ */
+#define IS_RECOVERING(dbenv) \
+ (LOGGING_ON(dbenv) && \
+ F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER))
+
+/* Most initialization methods cannot be called after open is called. */
+#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \
+ if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 1));
+
+/* We're not actually user hostile, honest. */
+#define ENV_REQUIRES_CONFIG(dbenv, handle, subsystem) \
+ if (handle == NULL) \
+ return (__db_env_config(dbenv, subsystem));
+
+/*******************************************************
+ * Database Access Methods.
+ *******************************************************/
+/*
+ * DB_IS_THREADED --
+ * The database handle is free-threaded (was opened with DB_THREAD).
+ */
+#define DB_IS_THREADED(dbp) \
+ ((dbp)->mutexp != NULL)
+
+/* Initialization methods are often illegal before/after open is called. */
+#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \
+ if (F_ISSET((dbp), DB_OPEN_CALLED)) \
+ return (__db_mi_open(dbp->dbenv, name, 1));
+#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \
+ if (!F_ISSET((dbp), DB_OPEN_CALLED)) \
+ return (__db_mi_open(dbp->dbenv, name, 0));
+/* Some initialization methods are illegal if the environment isn't local. */
+#define DB_ILLEGAL_IN_ENV(dbp, name) \
+ if (!F_ISSET(dbp->dbenv, DB_ENV_DBLOCAL)) \
+ return (__db_mi_env(dbp->dbenv, name));
+#define DB_ILLEGAL_METHOD(dbp, flags) { \
+ int __ret; \
+ if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \
+ return (__ret); \
+}
+
+/*
+ * Common DBC->internal fields. Each access method adds additional fields
+ * to this list, but the initial fields are common.
+ */
+#define __DBC_INTERNAL \
+ DBC *opd; /* Off-page duplicate cursor. */\
+ \
+ void *page; /* Referenced page. */ \
+ db_pgno_t root; /* Tree root. */ \
+ db_pgno_t pgno; /* Referenced page number. */ \
+ db_indx_t indx; /* Referenced key item index. */\
+ \
+ DB_LOCK lock; /* Cursor lock. */ \
+ db_lockmode_t lock_mode; /* Lock mode. */
+
+struct __dbc_internal {
+ __DBC_INTERNAL
+};
+
+/*
+ * Access-method-common macro for determining whether a cursor
+ * has been initialized.
+ */
+#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID)
+
+/*******************************************************
+ * Mpool.
+ *******************************************************/
+/*
+ * File types for DB access methods. Negative numbers are reserved to DB.
+ */
+#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */
+#define DB_FTYPE_NOTSET 0 /* Don't call... */
+
+/* Structure used as the DB pgin/pgout pgcookie. */
+typedef struct __dbpginfo {
+ size_t db_pagesize; /* Underlying page size. */
+ int needswap; /* If swapping required. */
+} DB_PGINFO;
+
+/*******************************************************
+ * Log.
+ *******************************************************/
+/* Initialize an LSN to 'zero'. */
+#define ZERO_LSN(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 0; \
+} while (0)
+
+/* Return 1 if LSN is a 'zero' lsn, otherwise return 0. */
+#define IS_ZERO_LSN(LSN) ((LSN).file == 0)
+
+/* Test if we need to log a change. */
+#define DB_LOGGING(dbc) \
+ (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET(dbc, DBC_RECOVER))
+
+/* Internal flag for use with internal __log_unregister. */
+#define DB_LOGONLY 0x01
+/*******************************************************
+ * Txn.
+ *******************************************************/
+#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT))
+#define IS_SUBTRANSACTION(txn) \
+ ((txn) != NULL && (txn)->parent != NULL)
+
+/*******************************************************
+ * Global variables.
+ *******************************************************/
+#ifdef HAVE_VXWORKS
+#include "semLib.h"
+#endif
+
+/*
+ * DB global variables. Done in a single structure to minimize the name-space
+ * pollution.
+ */
+typedef struct __db_globals {
+ u_int32_t db_pageyield; /* db_set_pageyield */
+ u_int32_t db_panic; /* db_set_panic */
+ u_int32_t db_region_init; /* db_set_region_init */
+ u_int32_t db_tas_spins; /* db_set_tas_spins */
+#ifdef HAVE_VXWORKS
+ u_int32_t db_global_init; /* VxWorks: inited */
+ SEM_ID db_global_lock; /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ TAILQ_HEAD(__db_envq, __db_env) db_envq;
+} DB_GLOBALS;
+
+#ifdef DB_INITIALIZE_DB_GLOBALS
+DB_GLOBALS __db_global_values = {
+ 0, /* db_set_pageyield */
+ 1, /* db_set_panic */
+ 0, /* db_set_region_init */
+ 0, /* db_set_tas_spins */
+#ifdef HAVE_VXWORKS
+ 0, /* db_global_init */
+ NULL, /* db_global_lock */
+#endif
+ /* XA environment queue */
+ {NULL, &__db_global_values.db_envq.tqh_first}
+};
+#else
+extern DB_GLOBALS __db_global_values;
+#endif
+#define DB_GLOBAL(v) __db_global_values.v
+
+/* Forward structure declarations. */
+struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
+struct __mutex_t; typedef struct __mutex_t MUTEX;
+struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
+struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+typedef enum {
+ TXNLIST_DELETE,
+ TXNLIST_LSN,
+ TXNLIST_TXNID,
+ TXNLIST_PGNO
+} db_txnlist_type;
+
+/*
+ * Currently, region offsets are limited to 32 bits. I expect that's going
+ * to have to be fixed in the not-too-distant future, since we won't want to
+ * split 100Gb memory pools into that many different regions. It's typedef'd
+ * so it won't be too painful to upgrade.
+ */
+typedef u_int32_t roff_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*******************************************************
+ * More general includes.
+ *******************************************************/
+#include "debug.h"
+#include "mutex.h"
+#include "region.h"
+#include "mutex_ext.h"
+#include "env_ext.h"
+#include "os.h"
+#include "os_ext.h"
+#include "common_ext.h"
+
+#endif /* !_DB_INTERNAL_H_ */
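
Several of the general-purpose macros declared above -- ALIGN, SSZ/SSZA, the F_SET/F_ISSET/F_CLR family and COMPQUIET -- are used throughout the access methods. A minimal sketch of how they combine, using a hypothetical structure and flag value that are not part of the imported sources:

/* Sketch only -- assumes the generated db.h and db_int.h above are on the
 * include path; they supply u_int32_t, u_int8_t and the macros used here. */
#include "db_config.h"
#include "db_int.h"

struct example_hdr {
	u_int32_t flags;	/* Manipulated via F_SET/F_ISSET/F_CLR. */
	u_int32_t type;
	u_int8_t  data[1];	/* Variable-length tail, located via SSZA. */
};

#define	EX_DIRTY	0x01	/* Hypothetical flag value. */

void
example(struct example_hdr *hp)
{
	/* Round 37 up to an 8-byte boundary: ALIGN(37, 8) == 40. */
	u_int32_t rounded = ALIGN(37, 8);

	/* Byte offsets within the structure, computed without an instance. */
	int type_off = SSZ(struct example_hdr, type);
	int tail_off = SSZA(struct example_hdr, data);

	F_SET(hp, EX_DIRTY);		/* Set the flag bit... */
	if (F_ISSET(hp, EX_DIRTY))	/* ...test it... */
		F_CLR(hp, EX_DIRTY);	/* ...and clear it again. */

	/* Keep the compiler quiet about otherwise-unused locals. */
	COMPQUIET(rounded, 0);
	COMPQUIET(type_off, 0);
	COMPQUIET(tail_off, 0);
}
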
diff --git a/bdb/build_vxworks/ex_access/ex_access.wpj b/bdb/build_vxworks/ex_access/ex_access.wpj
new file mode 100644
index 00000000000..bbbad47a253
--- /dev/null
+++ b/bdb/build_vxworks/ex_access/ex_access.wpj
@@ -0,0 +1,244 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/ex_access.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I/export/home/db/build_vxworks
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_ex_access.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
+ex_access.out
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
+arsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_access.a
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
+-g \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -fno-builtin \
+ -I/export/home/db/build_vxworks \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
+-g \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
+ccsimso -E -P -xc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
+-N
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
+nmsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
+sizesimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
+::tc_SIMSPARCSOLARISgnu
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+SIMSPARCSOLARISgnu PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_access.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_access.c_dependencies
+/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_access.c_objects
+ex_access.o
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_access.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+/export/home/db/examples_c/ex_access.c
+<END>
+
+<BEGIN> userComments
+ex_access
+<END>
+
diff --git a/bdb/build_vxworks/ex_btrec/ex_btrec.wpj b/bdb/build_vxworks/ex_btrec/ex_btrec.wpj
new file mode 100644
index 00000000000..801ca6808e2
--- /dev/null
+++ b/bdb/build_vxworks/ex_btrec/ex_btrec.wpj
@@ -0,0 +1,250 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/ex_btrec.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I/export/home/db/build_vxworks
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_ex_btrec.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
+ex_btrec.out
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
+arsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_btrec.a
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
+-g \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -fno-builtin \
+ -I/export/home/db/build_vxworks \
+ -I/export/home/db/build_vxworks \
+ -I/export/home/db/include \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
+-g \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
+ccsimso -E -P -xc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
+-N
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
+nmsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
+sizesimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
+::tc_SIMSPARCSOLARISgnu
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+SIMSPARCSOLARISgnu PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_btrec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_btrec.c_dependencies
+/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_btrec.c_objects
+ex_btrec.o
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_btrec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+/export/home/db/examples_c/ex_btrec.c
+<END>
+
+<BEGIN> userComments
+ex_btrec
+<END>
+
diff --git a/bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj b/bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj
new file mode 100644
index 00000000000..fdb721406ad
--- /dev/null
+++ b/bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj
@@ -0,0 +1,266 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+ex_dbclient.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/ex_dbclient.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I/export/home/db/build_vxworks \
+ -I/export/home/db/include \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_ex_dbclient.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
+ex_dbclient.out
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
+arsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_dbclient.a
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
+-g \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -fno-builtin \
+ -I/export/home/db/build_vxworks \
+ -I/export/home/db/include \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
+-g \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
+ccsimso -E -P -xc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
+-N
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
+nmsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
+sizesimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
+::tc_SIMSPARCSOLARISgnu
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+SIMSPARCSOLARISgnu PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_dbclient.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_dbclient.c_dependencies
+/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_dbclient.c_objects
+ex_dbclient.o
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_dbclient.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+/export/home/db/examples_c/ex_dbclient.c
+<END>
+
+<BEGIN> userComments
+RPC Client example
+<END>
+
diff --git a/bdb/build_vxworks/ex_env/ex_env.wpj b/bdb/build_vxworks/ex_env/ex_env.wpj
new file mode 100644
index 00000000000..7229ffa1309
--- /dev/null
+++ b/bdb/build_vxworks/ex_env/ex_env.wpj
@@ -0,0 +1,248 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/ex_env.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I/export/home/db/build_vxworks
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_ex_env.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
+ex_env.out
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
+arsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_env.a
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
+-g \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -fno-builtin \
+ -I/export/home/db/build_vxworks \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
+-g \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
+ccsimso -E -P -xc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
+-N
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
+nmsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
+sizesimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
+::tc_SIMSPARCSOLARISgnu
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+SIMSPARCSOLARISgnu PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_env.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_env.c_dependencies
+/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_env.c_objects
+ex_env.o
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_env.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+/export/home/db/examples_c/ex_env.c
+<END>
+
+<BEGIN> userComments
+ex_env
+<END>
+
diff --git a/bdb/build_vxworks/ex_mpool/ex_mpool.wpj b/bdb/build_vxworks/ex_mpool/ex_mpool.wpj
new file mode 100644
index 00000000000..6dd9ed4db27
--- /dev/null
+++ b/bdb/build_vxworks/ex_mpool/ex_mpool.wpj
@@ -0,0 +1,248 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/ex_mpool.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I/export/home/db/build_vxworks
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_ex_mpool.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
+ex_mpool.out
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
+arsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_mpool.a
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
+-g \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -fno-builtin \
+ -I/export/home/db/build_vxworks \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
+-g \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
+ccsimso -E -P -xc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
+-N
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
+nmsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
+sizesimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
+::tc_SIMSPARCSOLARISgnu
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+SIMSPARCSOLARISgnu PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_mpool.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_mpool.c_dependencies
+/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_mpool.c_objects
+ex_mpool.o
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_mpool.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+/export/home/db/examples_c/ex_mpool.c
+<END>
+
+<BEGIN> userComments
+ex_mpool
+<END>
+
diff --git a/bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj b/bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj
new file mode 100644
index 00000000000..91de499dcf5
--- /dev/null
+++ b/bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj
@@ -0,0 +1,261 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+ex_tpcb.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/ex_tpcb.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I/export/home/db/build_vxworks \
+ -DVERY_TINY
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_ex_tpcb.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
+ex_tpcb.out
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
+arsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_tpcb.a
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
+-g \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -fno-builtin \
+ -I/export/home/db/build_vxworks \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
+-g \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
+ccsimso -E -P -xc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
+ccsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
+-N
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
+nmsimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
+sizesimso
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
+::tc_SIMSPARCSOLARISgnu
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+SIMSPARCSOLARISgnu PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_tpcb.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_tpcb.c_dependencies
+/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_tpcb.c_objects
+ex_tpcb.o
+<END>
+
+<BEGIN> FILE_/export/home/db/examples_c/ex_tpcb.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+/export/home/db/examples_c/ex_tpcb.c
+<END>
+
+<BEGIN> userComments
+ex_tpcb
+<END>
+
diff --git a/bdb/build_win32/Berkeley_DB.dsw b/bdb/build_win32/Berkeley_DB.dsw
new file mode 100644
index 00000000000..482ac7537f0
--- /dev/null
+++ b/bdb/build_win32/Berkeley_DB.dsw
@@ -0,0 +1,569 @@
+Microsoft Developer Studio Workspace File, Format Version 5.00
+# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
+
+###############################################################################
+
+Project: "DB_DLL"=.\db_dll.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Project: "DB_Static"=.\db_static.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Project: "db_archive"=.\db_archive.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_buildall"=.\db_buildall.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_archive
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_checkpoint
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_deadlock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_dump
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_load
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_printlog
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_recover
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_stat
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_upgrade
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_verify
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_access
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_btrec
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_env
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_lock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_mpool
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_tpcb
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_access
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_btrec
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_env
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_lock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_mpool
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_tpcb
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_checkpoint"=.\db_checkpoint.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_deadlock"=.\db_deadlock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_dump"=.\db_dump.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_java"=.\db_java.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_load"=.\db_load.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_printlog"=.\db_printlog.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_recover"=.\db_recover.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_stat"=.\db_stat.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_tcl"=.\db_tcl.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_test"=.\db_test.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_buildall
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_tcl
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_upgrade"=.\db_upgrade.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_verify"=.\db_verify.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_access"=.\ex_access.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_btrec"=.\ex_btrec.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_env"=.\ex_env.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_lock"=.\ex_lock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_mpool"=.\ex_mpool.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_tpcb"=.\ex_tpcb.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_access"=.\excxx_access.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_btrec"=.\excxx_btrec.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_env"=.\excxx_env.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_lock"=.\excxx_lock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_mpool"=.\excxx_mpool.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_tpcb"=.\excxx_tpcb.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Global:
+
+Package=<5>
+{{{
+}}}
+
+Package=<3>
+{{{
+}}}
+
+###############################################################################
+
diff --git a/bdb/build_win32/app_dsp.src b/bdb/build_win32/app_dsp.src
new file mode 100644
index 00000000000..064ea7ef51a
--- /dev/null
+++ b/bdb/build_win32/app_dsp.src
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=@project_name@ - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+# Name "@project_name@ - Win32 Release Static"
+# Name "@project_name@ - Win32 Debug Static"
+@SOURCE_FILES@
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_archive.dsp b/bdb/build_win32/db_archive.dsp
new file mode 100644
index 00000000000..4b8509950ef
--- /dev/null
+++ b/bdb/build_win32/db_archive.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_archive" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_archive - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_archive.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_archive.mak" CFG="db_archive - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_archive - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_archive - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_archive - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_archive - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_archive - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_archive - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_archive - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_archive - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_archive - Win32 Release"
+# Name "db_archive - Win32 Debug"
+# Name "db_archive - Win32 Release Static"
+# Name "db_archive - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_archive\db_archive.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_buildall.dsp b/bdb/build_win32/db_buildall.dsp
new file mode 100644
index 00000000000..58990dbb867
--- /dev/null
+++ b/bdb/build_win32/db_buildall.dsp
@@ -0,0 +1,128 @@
+# Microsoft Developer Studio Project File - Name="db_buildall" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) External Target" 0x0106
+
+CFG=db_buildall - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_buildall.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_buildall.mak" CFG="db_buildall - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_buildall - Win32 Release" (based on "Win32 (x86) External Target")
+!MESSAGE "db_buildall - Win32 Debug" (based on "Win32 (x86) External Target")
+!MESSAGE "db_buildall - Win32 Release Static" (based on\
+ "Win32 (x86) External Target")
+!MESSAGE "db_buildall - Win32 Debug Static" (based on\
+ "Win32 (x86) External Target")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "db_buildall - Win32 Release"
+
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Cmd_Line "NMAKE /f db_buildall.mak"
+# PROP BASE Rebuild_Opt "/a"
+# PROP BASE Target_File "db_buildall.exe"
+# PROP BASE Bsc_Name "db_buildall.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Cmd_Line "echo DB release version built."
+# PROP Rebuild_Opt ""
+# PROP Target_File "db_buildall.exe"
+# PROP Bsc_Name "db_buildall.bsc"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_buildall - Win32 Debug"
+
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Cmd_Line "NMAKE /f db_buildall.mak"
+# PROP BASE Rebuild_Opt "/a"
+# PROP BASE Target_File "db_buildall.exe"
+# PROP BASE Bsc_Name "db_buildall.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Cmd_Line "echo DB debug version built."
+# PROP Rebuild_Opt ""
+# PROP Target_File "db_buildall.exe"
+# PROP Bsc_Name "db_buildall.bsc"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_buildall - Win32 Release Static"
+
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Cmd_Line "echo DB release version built."
+# PROP BASE Rebuild_Opt ""
+# PROP BASE Target_File "db_buildall.exe"
+# PROP BASE Bsc_Name "db_buildall.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Cmd_Line "echo DB release version built."
+# PROP Rebuild_Opt ""
+# PROP Target_File "db_buildall.exe"
+# PROP Bsc_Name "db_buildall.bsc"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_buildall - Win32 Debug Static"
+
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_static"
+# PROP BASE Cmd_Line "echo DB debug version built."
+# PROP BASE Rebuild_Opt ""
+# PROP BASE Target_File "db_buildall.exe"
+# PROP BASE Bsc_Name "db_buildall.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Cmd_Line "echo DB debug version built."
+# PROP Rebuild_Opt ""
+# PROP Target_File "db_buildall.exe"
+# PROP Bsc_Name "db_buildall.bsc"
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_buildall - Win32 Release"
+# Name "db_buildall - Win32 Debug"
+# Name "db_buildall - Win32 Release Static"
+# Name "db_buildall - Win32 Debug Static"
+
+!IF "$(CFG)" == "db_buildall - Win32 Release"
+
+!ELSEIF "$(CFG)" == "db_buildall - Win32 Debug"
+
+!ELSEIF "$(CFG)" == "db_buildall - Win32 Release Static"
+
+!ELSEIF "$(CFG)" == "db_buildall - Win32 Debug Static"
+
+!ENDIF
+
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_checkpoint.dsp b/bdb/build_win32/db_checkpoint.dsp
new file mode 100644
index 00000000000..ac464a07ab8
--- /dev/null
+++ b/bdb/build_win32/db_checkpoint.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_checkpoint" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_checkpoint - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_checkpoint.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_checkpoint.mak" CFG="db_checkpoint - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_checkpoint - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_checkpoint - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_checkpoint - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_checkpoint - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_checkpoint - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_checkpoint - Win32 Release"
+# Name "db_checkpoint - Win32 Debug"
+# Name "db_checkpoint - Win32 Release Static"
+# Name "db_checkpoint - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_checkpoint\db_checkpoint.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_config.h b/bdb/build_win32/db_config.h
new file mode 100644
index 00000000000..8b12d64c3c9
--- /dev/null
+++ b/bdb/build_win32/db_config.h
@@ -0,0 +1,275 @@
+/*
+ * $Id: db_config.h,v 11.24 2000/12/12 18:39:26 bostic Exp $
+ */
+
+/* Define to empty if the keyword does not work. */
+/* #undef const */
+
+/* Define if your struct stat has st_blksize. */
+/* #undef HAVE_ST_BLKSIZE */
+
+/* Define to `int' if <sys/types.h> doesn't define. */
+/* #undef mode_t */
+
+/* Define to `long' if <sys/types.h> doesn't define. */
+/* #undef off_t */
+
+/* Define to `int' if <sys/types.h> doesn't define. */
+/* #undef pid_t */
+
+/* Define to `unsigned' if <sys/types.h> doesn't define. */
+/* #undef size_t */
+
+/* Define if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define if your processor stores words with the most significant
+ byte first (like Motorola and SPARC, unlike Intel and VAX). */
+/* #undef WORDS_BIGENDIAN */
+
+/* Define if you are building a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* Define if you want a debugging version. */
+/* #undef DEBUG */
+#if defined(_DEBUG)
+#if !defined(DEBUG)
+#define DEBUG 1
+#endif
+#endif
+
+/* Define if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* Define if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define if you want to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Define if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define if building big-file environment (e.g., AIX, HP/UX, Solaris). */
+/* #undef HAVE_FILE_OFFSET_BITS */
+
+/* Mutex possibilities. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_MACOS */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+/* #undef HAVE_MUTEX_PPC_GCC_ASSEMBLY */
+/* #undef HAVE_MUTEX_PTHREADS */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+#define HAVE_MUTEX_THREADS 1
+/* #undef HAVE_MUTEX_UI_THREADS */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+/* #undef HAVE_MUTEX_VMS */
+/* #undef HAVE_MUTEX_VXWORKS */
+/* #undef HAVE_MUTEX_WIN16 */
+#define HAVE_MUTEX_WIN32 1
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define if your sprintf returns a pointer, not a length. */
+/* #undef SPRINTF_RET_CHARPNT */
+
+/* Define if you have the getcwd function. */
+#define HAVE_GETCWD 1
+
+/* Define if you have the getopt function. */
+/* #undef HAVE_GETOPT */
+
+/* Define if you have the getuid function. */
+/* #undef HAVE_GETUID */
+
+/* Define if you have the memcmp function. */
+#define HAVE_MEMCMP 1
+
+/* Define if you have the memcpy function. */
+#define HAVE_MEMCPY 1
+
+/* Define if you have the memmove function. */
+#define HAVE_MEMMOVE 1
+
+/* Define if you have the mlock function. */
+/* #undef HAVE_MLOCK */
+
+/* Define if you have the mmap function. */
+/* #undef HAVE_MMAP */
+
+/* Define if you have the munlock function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define if you have the munmap function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define if you have the pread function. */
+/* #undef HAVE_PREAD */
+
+/* Define if you have the pstat_getdynamic function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define if you have the pwrite function. */
+/* #undef HAVE_PWRITE */
+
+/* Define if you have the qsort function. */
+#define HAVE_QSORT 1
+
+/* Define if you have the raise function. */
+#define HAVE_RAISE 1
+
+/* Define if you have the sched_yield function. */
+/* #undef HAVE_SCHED_YIELD */
+
+/* Define if you have the select function. */
+/* #undef HAVE_SELECT */
+
+/* Define if you have the shmget function. */
+/* #undef HAVE_SHMGET */
+
+/* Define if you have the snprintf function. */
+#define HAVE_SNPRINTF 1
+
+/* Define if you have the strcasecmp function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define if you have the strerror function. */
+#define HAVE_STRERROR 1
+
+/* Define if you have the strtoul function. */
+#define HAVE_STRTOUL 1
+
+/* Define if you have the sysconf function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define if you have the vsnprintf function. */
+#define HAVE_VSNPRINTF 1
+
+/* Define if you have the yield function. */
+/* #undef HAVE_YIELD */
+
+/* Define if you have the <dirent.h> header file. */
+/* #undef HAVE_DIRENT_H */
+
+/* Define if you have the <ndir.h> header file. */
+/* #undef HAVE_NDIR_H */
+
+/* Define if you have the <sys/dir.h> header file. */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define if you have the <sys/ndir.h> header file. */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define if you have the nsl library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/*
+ * XXX
+ * The following is not part of the automatic configuration setup,
+ * but provides the information necessary to build DB on Windows.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <direct.h>
+#include <fcntl.h>
+#include <io.h>
+#include <limits.h>
+#include <memory.h>
+#include <process.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <errno.h>
+
+#if defined(__cplusplus)
+#include <iostream.h>
+#endif
+
+/*
+ * To build Tcl interface libraries, the include path must be configured to
+ * use the directory containing <tcl.h>, usually the include directory in
+ * the Tcl distribution.
+ */
+#ifdef DB_TCL_SUPPORT
+#include <tcl.h>
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/*
+ * Win32 has fsync, getcwd, snprintf and vsnprintf, but under different names.
+ */
+#define fsync(fd) _commit(fd)
+#define getcwd(buf, size) _getcwd(buf, size)
+#define snprintf _snprintf
+#define vsnprintf _vsnprintf
+
+/*
+ * Win32 does not define getopt and friends in any header file, so we must.
+ */
+#if defined(__cplusplus)
+extern "C" {
+#endif
+extern int optind;
+extern char *optarg;
+extern int getopt(int, char * const *, const char *);
+#if defined(__cplusplus)
+}
+#endif
+
+#define NO_SYSTEM_INCLUDES
+
+/*
+ * We use DB_WIN32 much as one would use _WIN32, to determine that we're
+ * using an operating system environment that supports Win32 calls
+ * and semantics. We don't use _WIN32 because cygwin/gcc also defines
+ * that, even though it closely emulates the Unix environment.
+ */
+#define DB_WIN32 1
+
+/*
+ * This is a grievous hack -- once we've included windows.h, we have no choice
+ * but to use ANSI-style varargs (because it pulls in stdarg.h for us). DB's
+ * code decides which type of varargs to use based on the state of __STDC__.
+ * Sensible. Unfortunately, Microsoft's compiler _doesn't_ define __STDC__
+ * unless you invoke it with arguments turning OFF all vendor extensions. Even
+ * more unfortunately, if we do that, it fails to parse windows.h! So, we
+ * define __STDC__ here, after windows.h comes in. Note: the compiler knows
+ * we've defined it, and starts enforcing strict ANSI compliance from this point
+ * on.
+ */
+#define __STDC__ 1
diff --git a/bdb/build_win32/db_deadlock.dsp b/bdb/build_win32/db_deadlock.dsp
new file mode 100644
index 00000000000..429896ded04
--- /dev/null
+++ b/bdb/build_win32/db_deadlock.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_deadlock" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_deadlock - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_deadlock.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_deadlock.mak" CFG="db_deadlock - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_deadlock - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_deadlock - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_deadlock - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_deadlock - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_deadlock - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_deadlock - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_deadlock - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_deadlock - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_deadlock - Win32 Release"
+# Name "db_deadlock - Win32 Debug"
+# Name "db_deadlock - Win32 Release Static"
+# Name "db_deadlock - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_deadlock\db_deadlock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_dll.dsp b/bdb/build_win32/db_dll.dsp
new file mode 100644
index 00000000000..d394e0313fd
--- /dev/null
+++ b/bdb/build_win32/db_dll.dsp
@@ -0,0 +1,753 @@
+# Microsoft Developer Studio Project File - Name="db_dll" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=db_dll - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_dll.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_dll.mak" CFG="db_dll - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_dll - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "db_dll - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "db_dll - Win32 Release Static" (based on\
+ "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "db_dll - Win32 Debug Static" (based on\
+ "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_dll - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb32.dll"
+
+!ELSEIF "$(CFG)" == "db_dll - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /D "_MBCS" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb32d.dll" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_dll - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "DB_DLL__"
+# PROP BASE Intermediate_Dir "DB_DLL__"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb32.dll"
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb32.dll"
+
+!ELSEIF "$(CFG)" == "db_dll - Win32 Debug Static"
+
+# PROP BASE Use_MFC 2
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "DB_DLL_0"
+# PROP BASE Intermediate_Dir "DB_DLL_0"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT BASE CPP /Fr
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /D "_MBCS" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb32d.dll" /fixed:no
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb32d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_dll - Win32 Release"
+# Name "db_dll - Win32 Debug"
+# Name "db_dll - Win32 Release Static"
+# Name "db_dll - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\btree\bt_compare.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_curadj.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_cursor.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_delete.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_put.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_recno.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_rsearch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_search.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_split.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\btree_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\dllmain.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\libdb.def
+# End Source File
+# Begin Source File
+
+SOURCE=.\libdb.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\strcasecmp.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_byteorder.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_err.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_getlong.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_log2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_sig.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_app.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_except.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_lock.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_log.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_mpool.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_table.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_txn.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\crdel_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\crdel_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_am.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_cam.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_dispatch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_dup.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_iface.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_join.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_overflow.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_pr.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_ret.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_upg.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_upg_opd.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_vrfy.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_vrfyutil.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbm\dbm.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\db_salloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\db_shash.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_recover.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_dup.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_func.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_page.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hsearch\hsearch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_conflict.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_deadlock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_archive.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_compare.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_findckp.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_get.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_put.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_register.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_alloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_bh.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fget.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fopen.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fput.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fset.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_register.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_sync.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_trickle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mutex\mut_tas.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mutex\mutex.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_alloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_fsync.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_handle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_oflags.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_root.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_rpath.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_rw.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_tmpdir.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_unlink.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_abs.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_dir.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_errno.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_fid.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_finit.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_map.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_rename.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_seek.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_sleep.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_spin.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_type.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_files.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa_db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa_map.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_dump.dsp b/bdb/build_win32/db_dump.dsp
new file mode 100644
index 00000000000..f4808b5138d
--- /dev/null
+++ b/bdb/build_win32/db_dump.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_dump" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_dump - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_dump.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_dump.mak" CFG="db_dump - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_dump - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_dump - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_dump - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_dump - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_dump - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_dump - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_dump - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_dump - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_dump - Win32 Release"
+# Name "db_dump - Win32 Debug"
+# Name "db_dump - Win32 Release Static"
+# Name "db_dump - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_dump\db_dump.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_int.h b/bdb/build_win32/db_int.h
new file mode 100644
index 00000000000..0eb4309fc7c
--- /dev/null
+++ b/bdb/build_win32/db_int.h
@@ -0,0 +1,398 @@
+/* DO NOT EDIT: automatically built by dist/s_win32. */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_int.src,v 11.42 2001/01/11 17:49:17 krinsky Exp $
+ */
+
+#ifndef _DB_INTERNAL_H_
+#define _DB_INTERNAL_H_
+
+/*******************************************************
+ * General includes.
+ *******************************************************/
+#include "db.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#if defined(__STDC__) || defined(__cplusplus)
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <errno.h>
+#endif
+
+#include "queue.h"
+#include "shqueue.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*******************************************************
+ * General purpose constants and macros.
+ *******************************************************/
+#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */
+#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */
+
+#define MEGABYTE 1048576
+#define GIGABYTE 1073741824
+
+#define MS_PER_SEC 1000 /* Milliseconds in a second. */
+#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */
+
+#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */
+#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */
+
+#define RECNO_OOB 0 /* Illegal record number. */
+
+/*
+ * If we are unable to determine the underlying filesystem block size, use
+ * 8K on the grounds that most OS's use less than 8K for a VM page size.
+ */
+#define DB_DEF_IOSIZE (8 * 1024)
+
+/*
+ * Aligning items to particular sizes or in pages or memory.
+ *
+ * db_align_t --
+ * Largest integral type, used to align structures in memory. We don't store
+ * floating point types in structures, so integral types should be sufficient
+ * (and we don't have to worry about systems that store floats in other than
+ * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite
+ * structure assignments and ANSI C memcpy calls into in-line instructions
+ * that happen to require alignment. Note: this alignment isn't sufficient for
+ * mutexes, which depend on things like cache line alignment. Mutex alignment
+ * is handled separately, in mutex.h.
+ *
+ * db_alignp_t --
+ * Integral type that's the same size as a pointer. There are places where
+ * DB modifies pointers by discarding the bottom bits to guarantee alignment.
+ * We can't use db_align_t, it may be larger than the pointer, and compilers
+ * get upset about that. So far we haven't run on any machine where there
+ * isn't an integral type the same size as a pointer -- here's hoping.
+ */
+typedef unsigned long db_align_t;
+typedef unsigned long db_alignp_t;
+
+/* Align an integer to a specific boundary. */
+#undef ALIGN
+#define ALIGN(value, bound) \
+ (((value) + (bound) - 1) & ~(((u_int)bound) - 1))
+
+/* Align a pointer to a specific boundary. */
+#undef ALIGNP
+#define ALIGNP(value, bound) ALIGN((db_alignp_t)value, bound)
+
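+/*
+ * Usage sketch (illustrative, not part of the original header):
+ *	ALIGN(13, 8) evaluates to 16, rounding a 13-byte length up to the
+ *	next 8-byte boundary; ALIGN(16, 8) is already aligned and stays 16.
+ */
+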
+/*
+ * There are several on-page structures that are declared to have a number of
+ * fields followed by a variable length array of items. The structure size
+ * without including the variable length array or the address of the first of
+ * those elements can be found using SSZ.
+ *
+ * This macro can also be used to find the offset of a structure element in a
+ * structure. This is used in various places to copy structure elements from
+ * unaligned memory references, e.g., pointers into a packed page.
+ *
+ * There are two versions because compilers object if you take the address of
+ * an array.
+ */
+#undef SSZ
+#define SSZ(name, field) ((int)&(((name *)0)->field))
+
+#undef SSZA
+#define SSZA(name, field) ((int)&(((name *)0)->field[0]))
+
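+/*
+ * Usage sketch (illustrative; the structure below is hypothetical):
+ *	struct hdr { u_int32_t magic; u_int8_t data[1]; };
+ * SSZ(hdr, data) and SSZA(hdr, data) both yield 4 on a typical layout,
+ * the offset of the variable-length array; SSZA is the form to use for
+ * array fields, since some compilers reject taking an array's address.
+ */
+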
+/*
+ * Print an address as a u_long (a u_long is the largest type we can print
+ * portably). Most 64-bit systems have made longs 64-bits, so this should
+ * work.
+ */
+#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
+
+/* Structure used to print flag values. */
+typedef struct __fn {
+ u_int32_t mask; /* Flag value. */
+ const char *name; /* Flag name. */
+} FN;
+
+/* Set, clear and test flags. */
+#define FLD_CLR(fld, f) (fld) &= ~(f)
+#define FLD_ISSET(fld, f) ((fld) & (f))
+#define FLD_SET(fld, f) (fld) |= (f)
+#define F_CLR(p, f) (p)->flags &= ~(f)
+#define F_ISSET(p, f) ((p)->flags & (f))
+#define F_SET(p, f) (p)->flags |= (f)
+#define LF_CLR(f) (flags &= ~(f))
+#define LF_ISSET(f) (flags & (f))
+#define LF_SET(f) (flags |= (f))
+
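+/*
+ * Usage sketch (illustrative, not part of the original header):
+ *	u_int32_t oflags = 0;
+ *	FLD_SET(oflags, DB_CREATE);
+ *	if (FLD_ISSET(oflags, DB_CREATE))
+ *		... runs, since the bit is now set.
+ * The F_* forms do the same thing on a structure's "flags" member, and
+ * the LF_* forms on a local variable named "flags".
+ */
+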
+/* Display separator string. */
+#undef DB_LINE
+#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+
+/* Unused, or not-used-yet variable. "Shut that bloody compiler up!" */
+#define COMPQUIET(n, v) (n) = (v)
+
+/*******************************************************
+ * Files.
+ *******************************************************/
+ /*
+ * We use 1024 as the maximum path length. It's too hard to figure out what
+ * the real path length is, as it was traditionally stored in <sys/param.h>,
+ * and that file isn't always available.
+ */
+#undef MAXPATHLEN
+#define MAXPATHLEN 1024
+
+#define PATH_DOT "." /* Current working directory. */
+#define PATH_SEPARATOR "\\/:" /* Path separator character. */
+
+/*
+ * Flags understood by __os_open.
+ */
+#define DB_OSO_CREATE 0x001 /* POSIX: O_CREAT */
+#define DB_OSO_EXCL 0x002 /* POSIX: O_EXCL */
+#define DB_OSO_LOG 0x004 /* Opening a log file. */
+#define DB_OSO_RDONLY 0x008 /* POSIX: O_RDONLY */
+#define DB_OSO_REGION 0x010 /* Opening a region file. */
+#define DB_OSO_SEQ 0x020 /* Expected sequential access. */
+#define DB_OSO_TEMP 0x040 /* Remove after last close. */
+#define DB_OSO_TRUNC 0x080 /* POSIX: O_TRUNC */
+
+/*
+ * Seek options understood by __os_seek.
+ */
+typedef enum {
+ DB_OS_SEEK_CUR, /* POSIX: SEEK_CUR */
+ DB_OS_SEEK_END, /* POSIX: SEEK_END */
+ DB_OS_SEEK_SET /* POSIX: SEEK_SET */
+} DB_OS_SEEK;
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+/* Type passed to __db_appname(). */
+typedef enum {
+ DB_APP_NONE=0, /* No type (region). */
+ DB_APP_DATA, /* Data file. */
+ DB_APP_LOG, /* Log file. */
+ DB_APP_TMP /* Temporary file. */
+} APPNAME;
+
+/*
+ * CDB_LOCKING CDB product locking.
+ * LOCKING_ON Locking has been configured.
+ * LOGGING_ON Logging has been configured.
+ * MPOOL_ON Memory pool has been configured.
+ * TXN_ON Transactions have been configured.
+ */
+#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB)
+#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL)
+#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL)
+#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL)
+#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL)
+
+/*
+ * STD_LOCKING Standard locking, that is, locking was configured and CDB
+ * was not. We do not do locking in off-page duplicate trees,
+ * so we check for that in the cursor first.
+ */
+#define STD_LOCKING(dbc) \
+ (!F_ISSET(dbc, DBC_OPD) && \
+ !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv))
+
+/*
+ * IS_RECOVERING The system is running recovery.
+ */
+#define IS_RECOVERING(dbenv) \
+ (LOGGING_ON(dbenv) && \
+ F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER))
+
+/* Most initialization methods cannot be called after open is called. */
+#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \
+ if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 1));
+
+/* We're not actually user hostile, honest. */
+#define ENV_REQUIRES_CONFIG(dbenv, handle, subsystem) \
+ if (handle == NULL) \
+ return (__db_env_config(dbenv, subsystem));
+
+/*******************************************************
+ * Database Access Methods.
+ *******************************************************/
+/*
+ * DB_IS_THREADED --
+ * The database handle is free-threaded (was opened with DB_THREAD).
+ */
+#define DB_IS_THREADED(dbp) \
+ ((dbp)->mutexp != NULL)
+
+/* Initialization methods are often illegal before/after open is called. */
+#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \
+ if (F_ISSET((dbp), DB_OPEN_CALLED)) \
+ return (__db_mi_open(dbp->dbenv, name, 1));
+#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \
+ if (!F_ISSET((dbp), DB_OPEN_CALLED)) \
+ return (__db_mi_open(dbp->dbenv, name, 0));
+/* Some initialization methods are illegal if environment isn't local. */
+#define DB_ILLEGAL_IN_ENV(dbp, name) \
+ if (!F_ISSET(dbp->dbenv, DB_ENV_DBLOCAL)) \
+ return (__db_mi_env(dbp->dbenv, name));
+#define DB_ILLEGAL_METHOD(dbp, flags) { \
+ int __ret; \
+ if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \
+ return (__ret); \
+}
+
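+/*
+ * Usage sketch (illustrative): a configuration method that is only legal
+ * before DB->open typically begins with
+ *	DB_ILLEGAL_AFTER_OPEN(dbp, "set_pagesize");
+ * so a call made after open returns the __db_mi_open error instead of
+ * silently modifying an open handle.
+ */
+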
+/*
+ * Common DBC->internal fields. Each access method adds additional fields
+ * to this list, but the initial fields are common.
+ */
+#define __DBC_INTERNAL \
+ DBC *opd; /* Off-page duplicate cursor. */\
+ \
+ void *page; /* Referenced page. */ \
+ db_pgno_t root; /* Tree root. */ \
+ db_pgno_t pgno; /* Referenced page number. */ \
+ db_indx_t indx; /* Referenced key item index. */\
+ \
+ DB_LOCK lock; /* Cursor lock. */ \
+ db_lockmode_t lock_mode; /* Lock mode. */
+
+struct __dbc_internal {
+ __DBC_INTERNAL
+};
+
+/*
+ * Access-method-common macro for determining whether a cursor
+ * has been initialized.
+ */
+#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID)
+
+/*******************************************************
+ * Mpool.
+ *******************************************************/
+/*
+ * File types for DB access methods. Negative numbers are reserved to DB.
+ */
+#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */
+#define DB_FTYPE_NOTSET 0 /* Don't call... */
+
+/* Structure used as the DB pgin/pgout pgcookie. */
+typedef struct __dbpginfo {
+ size_t db_pagesize; /* Underlying page size. */
+ int needswap; /* If swapping required. */
+} DB_PGINFO;
+
+/*******************************************************
+ * Log.
+ *******************************************************/
+/* Initialize an LSN to 'zero'. */
+#define ZERO_LSN(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 0; \
+} while (0)
+
+/* Return 1 if LSN is a 'zero' lsn, otherwise return 0. */
+#define IS_ZERO_LSN(LSN) ((LSN).file == 0)
+
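+/*
+ * Usage sketch (illustrative, not part of the original header):
+ *	DB_LSN lsn;
+ *	ZERO_LSN(lsn);
+ * IS_ZERO_LSN(lsn) is now true; any LSN actually assigned by the log
+ * manager has a non-zero file number and so compares as non-zero.
+ */
+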
+/* Test if we need to log a change. */
+#define DB_LOGGING(dbc) \
+ (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET(dbc, DBC_RECOVER))
+
+/* Internal flag for use with internal __log_unregister. */
+#define DB_LOGONLY 0x01
+/*******************************************************
+ * Txn.
+ *******************************************************/
+#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT))
+#define IS_SUBTRANSACTION(txn) \
+ ((txn) != NULL && (txn)->parent != NULL)
+
+/*******************************************************
+ * Global variables.
+ *******************************************************/
+#ifdef HAVE_VXWORKS
+#include "semLib.h"
+#endif
+
+/*
+ * DB global variables. Done in a single structure to minimize the name-space
+ * pollution.
+ */
+typedef struct __db_globals {
+ u_int32_t db_pageyield; /* db_set_pageyield */
+ u_int32_t db_panic; /* db_set_panic */
+ u_int32_t db_region_init; /* db_set_region_init */
+ u_int32_t db_tas_spins; /* db_set_tas_spins */
+#ifdef HAVE_VXWORKS
+ u_int32_t db_global_init; /* VxWorks: inited */
+ SEM_ID db_global_lock; /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ TAILQ_HEAD(__db_envq, __db_env) db_envq;
+} DB_GLOBALS;
+
+#ifdef DB_INITIALIZE_DB_GLOBALS
+DB_GLOBALS __db_global_values = {
+ 0, /* db_set_pageyield */
+ 1, /* db_set_panic */
+ 0, /* db_set_region_init */
+ 0, /* db_set_tas_spins */
+#ifdef HAVE_VXWORKS
+ 0, /* db_global_init */
+ NULL, /* db_global_lock */
+#endif
+ /* XA environment queue */
+ {NULL, &__db_global_values.db_envq.tqh_first}
+};
+#else
+extern DB_GLOBALS __db_global_values;
+#endif
+#define DB_GLOBAL(v) __db_global_values.v
+
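+/*
+ * Usage sketch (illustrative, not part of the original header):
+ *	DB_GLOBAL(db_region_init) = 1;
+ * expands to __db_global_values.db_region_init = 1, the same member the
+ * db_set_region_init interface sets.
+ */
+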
+/* Forward structure declarations. */
+struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
+struct __mutex_t; typedef struct __mutex_t MUTEX;
+struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
+struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+typedef enum {
+ TXNLIST_DELETE,
+ TXNLIST_LSN,
+ TXNLIST_TXNID,
+ TXNLIST_PGNO
+} db_txnlist_type;
+
+/*
+ * Currently, region offsets are limited to 32-bits. I expect that's going
+ * to have to be fixed in the not-too-distant future, since we won't want to
+ * split 100Gb memory pools into that many different regions. It's typedef'd
+ * so it won't be too painful to upgrade.
+ */
+typedef u_int32_t roff_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*******************************************************
+ * More general includes.
+ *******************************************************/
+#include "debug.h"
+#include "mutex.h"
+#include "region.h"
+#include "mutex_ext.h"
+#include "env_ext.h"
+#include "os.h"
+#include "os_ext.h"
+#include "common_ext.h"
+
+#endif /* !_DB_INTERNAL_H_ */
diff --git a/bdb/build_win32/db_java.dsp b/bdb/build_win32/db_java.dsp
new file mode 100644
index 00000000000..0a0d9dc0fcb
--- /dev/null
+++ b/bdb/build_win32/db_java.dsp
@@ -0,0 +1,174 @@
+# Microsoft Developer Studio Project File - Name="db_java" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=db_java - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_java.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_java.mak" CFG="db_java - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_java - Win32 Release" (based on\
+ "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "db_java - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_java - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java32.dll"
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Release\libdb_java32.dll
+SOURCE=$(InputPath)
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ cd $(ProjDir)\..\java\src\com\sleepycat\db
+ mkdir ..\..\..\..\classes
+ echo compiling Berkeley DB classes
+ javac -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
+ *.java
+ echo compiling examples
+ cd ..\examples
+ javac -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
+ *.java
+ echo creating jar file
+ cd ..\..\..\..\classes
+ jar cf db.jar com\sleepycat\db\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "db_java - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java32d.dll" /fixed:no
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Debug\libdb_java32d.dll
+SOURCE=$(InputPath)
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ cd $(ProjDir)\..\java\src\com\sleepycat\db
+ mkdir ..\..\..\..\classes
+ echo compiling Berkeley DB classes
+ javac -g -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
+ *.java
+ echo compiling examples
+ cd ..\examples
+ javac -g -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
+ *.java
+ echo creating jar file
+ cd ..\..\..\..\classes
+ jar cf db.jar com\sleepycat\db\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_java - Win32 Release"
+# Name "db_java - Win32 Debug"
+# Begin Source File
+
+SOURCE=..\libdb_java\java_Db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbEnv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbLock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbLsn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbTxn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_Dbc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_Dbt.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_info.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_locked.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_util.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_load.dsp b/bdb/build_win32/db_load.dsp
new file mode 100644
index 00000000000..adec8475a88
--- /dev/null
+++ b/bdb/build_win32/db_load.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_load" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_load - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_load.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_load.mak" CFG="db_load - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_load - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_load - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_load - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_load - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_load - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_load - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_load - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_load - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_load - Win32 Release"
+# Name "db_load - Win32 Debug"
+# Name "db_load - Win32 Release Static"
+# Name "db_load - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_load\db_load.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_printlog.dsp b/bdb/build_win32/db_printlog.dsp
new file mode 100644
index 00000000000..8b3fbd9fe31
--- /dev/null
+++ b/bdb/build_win32/db_printlog.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_printlog" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_printlog - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_printlog.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_printlog.mak" CFG="db_printlog - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_printlog - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_printlog - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_printlog - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_printlog - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_printlog - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_printlog - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_printlog - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_printlog - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_printlog - Win32 Release"
+# Name "db_printlog - Win32 Debug"
+# Name "db_printlog - Win32 Release Static"
+# Name "db_printlog - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_printlog\db_printlog.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_recover.dsp b/bdb/build_win32/db_recover.dsp
new file mode 100644
index 00000000000..bf07631f9b9
--- /dev/null
+++ b/bdb/build_win32/db_recover.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_recover" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_recover - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_recover.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_recover.mak" CFG="db_recover - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_recover - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_recover - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_recover - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_recover - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_recover - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_recover - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_recover - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_recover - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_recover - Win32 Release"
+# Name "db_recover - Win32 Debug"
+# Name "db_recover - Win32 Release Static"
+# Name "db_recover - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_recover\db_recover.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_stat.dsp b/bdb/build_win32/db_stat.dsp
new file mode 100644
index 00000000000..3e906897e68
--- /dev/null
+++ b/bdb/build_win32/db_stat.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_stat" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_stat - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_stat.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_stat.mak" CFG="db_stat - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_stat - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_stat - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_stat - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_stat - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_stat - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_stat - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_stat - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_stat - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_stat - Win32 Release"
+# Name "db_stat - Win32 Debug"
+# Name "db_stat - Win32 Release Static"
+# Name "db_stat - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_stat\db_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_static.dsp b/bdb/build_win32/db_static.dsp
new file mode 100644
index 00000000000..0c355241537
--- /dev/null
+++ b/bdb/build_win32/db_static.dsp
@@ -0,0 +1,714 @@
+# Microsoft Developer Studio Project File - Name="db_static" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+CFG=db_static - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_static.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_static.mak" CFG="db_static - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_static - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "db_static - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE "db_static - Win32 Release Static" (based on\
+ "Win32 (x86) Static Library")
+!MESSAGE "db_static - Win32 Debug Static" (based on\
+ "Win32 (x86) Static Library")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+
+!IF "$(CFG)" == "db_static - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "DB_Stati"
+# PROP BASE Intermediate_Dir "DB_Stati"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo /out:"Release_static/libdb32s.lib"
+
+!ELSEIF "$(CFG)" == "db_static - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "DB_Stat0"
+# PROP BASE Intermediate_Dir "DB_Stat0"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo /out:"Debug_static/libdb32sd.lib"
+
+!ELSEIF "$(CFG)" == "db_static - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "DB_Stati"
+# PROP BASE Intermediate_Dir "DB_Stati"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Release/libdb32s.lib"
+# ADD LIB32 /nologo /out:"Release_static/libdb32s.lib"
+
+!ELSEIF "$(CFG)" == "db_static - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "DB_Stat0"
+# PROP BASE Intermediate_Dir "DB_Stat0"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Debug/libdb32sd.lib"
+# ADD LIB32 /nologo /out:"Debug_static/libdb32sd.lib"
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_static - Win32 Release"
+# Name "db_static - Win32 Debug"
+# Name "db_static - Win32 Release Static"
+# Name "db_static - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\btree\bt_compare.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_curadj.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_cursor.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_delete.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_put.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_recno.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_rsearch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_search.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_split.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\btree_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\strcasecmp.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_byteorder.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_err.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_getlong.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_log2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_sig.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_app.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_except.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_lock.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_log.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_mpool.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_table.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_txn.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\crdel_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\crdel_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_am.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_cam.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_dispatch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_dup.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_iface.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_join.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_overflow.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_pr.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_ret.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_upg.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_upg_opd.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_vrfy.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_vrfyutil.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbm\dbm.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\db_salloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\db_shash.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_recover.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_dup.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_func.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_page.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hsearch\hsearch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_conflict.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_deadlock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_archive.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_compare.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_findckp.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_get.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_put.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_register.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_alloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_bh.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fget.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fopen.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fput.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fset.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_register.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_sync.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_trickle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mutex\mut_tas.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mutex\mutex.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_alloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_fsync.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_handle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_oflags.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_root.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_rpath.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_rw.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_tmpdir.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_unlink.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_abs.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_dir.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_errno.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_fid.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_finit.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_map.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_rename.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_seek.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_sleep.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_spin.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_type.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_files.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa_db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa_map.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_tcl.dsp b/bdb/build_win32/db_tcl.dsp
new file mode 100644
index 00000000000..5a0712c3f82
--- /dev/null
+++ b/bdb/build_win32/db_tcl.dsp
@@ -0,0 +1,135 @@
+# Microsoft Developer Studio Project File - Name="db_tcl" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=db_tcl - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_tcl.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_tcl.mak" CFG="db_tcl - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_tcl - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "db_tcl - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_tcl - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb32.lib tcl83.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl32.dll"
+
+!ELSEIF "$(CFG)" == "db_tcl - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib tcl83d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl32d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_tcl - Win32 Release"
+# Name "db_tcl - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\libdb_tcl.def
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_compat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_db_pkg.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_dbcursor.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_env.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_internal.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_mp.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_txn.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_test.dsp b/bdb/build_win32/db_test.dsp
new file mode 100644
index 00000000000..e1bb9056824
--- /dev/null
+++ b/bdb/build_win32/db_test.dsp
@@ -0,0 +1,99 @@
+# Microsoft Developer Studio Project File - Name="db_test" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_test - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_test.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_test.mak" CFG="db_test - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_test - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_test - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_test - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386
+# Begin Special Build Tool
+SOURCE=$(InputPath)
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Release\*.exe .
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "db_test - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "db_recov"
+# PROP BASE Intermediate_Dir "db_recov"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no
+# Begin Special Build Tool
+SOURCE=$(InputPath)
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Debug\*.exe .
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_test - Win32 Release"
+# Name "db_test - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\dbkill.cpp
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_upgrade.dsp b/bdb/build_win32/db_upgrade.dsp
new file mode 100644
index 00000000000..4bb821f57e2
--- /dev/null
+++ b/bdb/build_win32/db_upgrade.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_upgrade" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_upgrade - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_upgrade.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_upgrade.mak" CFG="db_upgrade - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_upgrade - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_upgrade - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_upgrade - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_upgrade - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_upgrade - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_upgrade - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_upgrade - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_upgrade - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_upgrade - Win32 Release"
+# Name "db_upgrade - Win32 Debug"
+# Name "db_upgrade - Win32 Release Static"
+# Name "db_upgrade - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_upgrade\db_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_verify.dsp b/bdb/build_win32/db_verify.dsp
new file mode 100644
index 00000000000..a8112364a83
--- /dev/null
+++ b/bdb/build_win32/db_verify.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="db_verify" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_verify - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_verify.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_verify.mak" CFG="db_verify - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_verify - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_verify - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_verify - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "db_verify - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_verify - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_verify - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_verify - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_verify - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_verify - Win32 Release"
+# Name "db_verify - Win32 Debug"
+# Name "db_verify - Win32 Release Static"
+# Name "db_verify - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_verify\db_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/dbkill.cpp b/bdb/build_win32/dbkill.cpp
new file mode 100644
index 00000000000..24709f37201
--- /dev/null
+++ b/bdb/build_win32/dbkill.cpp
@@ -0,0 +1,131 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: dbkill.cpp,v 11.4 2000/05/02 17:08:31 dda Exp $
+ */
+/*
+ * Kill -
+ * Simulate Unix kill on Windows/NT and Windows/9X.
+ * This is good enough to support the Berkeley DB test suite,
+ * but may be missing some favorite features.
+ *
+ * Would have used MKS kill, but it didn't seem to work well
+ * on Win/9X. Cygnus kill works within the Gnu/Cygnus environment
+ * (where processes are given small pids, with presumably a translation
+ * table between small pids and actual process handles), but our test
+ * environment, via Tcl, does not use the Cygnus environment.
+ *
+ * Compile this and install it as c:/tools/kill.exe (or as indicated
+ * by build_win32/include.tcl ).
+ */
+
+#include <windows.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+
+/*
+ * Like atol, with specified base. Would use stdlib, but
+ * strtol("0xFFFF1234", NULL, 16) returns 0x7FFFFFFF and
+ * strtol("4294712487", NULL, 16) returns 0x7FFFFFFF w/ VC++
+ */
+long
+myatol(char *s, int base)
+{
+ long result = 0;
+ char ch;
+ int sign = 1; /* + */
+ if (base == 0)
+ base = 10;
+ if (base != 10 && base != 16)
+ return LONG_MAX;
+ while ((ch = *s++) != '\0') {
+ if (ch == '-') {
+ sign = -sign;
+ }
+ else if (ch >= '0' && ch <= '9') {
+ result = result * base + (ch - '0');
+ }
+ else if (ch == 'x' || ch == 'X') {
+ /* Allow leading 0x..., and switch to base 16 */
+ base = 16;
+ }
+ else if (base == 16 && ch >= 'a' && ch <= 'f') {
+ result = result * base + (ch - 'a' + 10);
+ }
+ else if (base == 16 && ch >= 'A' && ch <= 'F') {
+ result = result * base + (ch - 'A' + 10);
+ }
+ else {
+ if (sign > 1)
+ return LONG_MAX;
+ else
+ return LONG_MIN;
+ }
+ }
+ return sign * result;
+}
+
+void
+usage_exit()
+{
+ fprintf(stderr, "Usage: kill [ -sig ] pid\n");
+ fprintf(stderr, " for win32, sig must be or 0, 15 (TERM)\n");
+ exit(1);
+}
+
+int
+main(int argc, char **argv)
+{
+ HANDLE hProcess ;
+ DWORD accessflag;
+ long pid;
+ int sig = 15;
+
+ if (argc > 2) {
+ if (argv[1][0] != '-')
+ usage_exit();
+
+ if (strcmp(argv[1], "-TERM") == 0)
+ sig = 15;
+ else {
+ /* currently sig is more or less ignored,
+ * we only care if it is zero or not
+ */
+ sig = atoi(&argv[1][1]);
+ if (sig < 0)
+ usage_exit();
+ }
+ argc--;
+ argv++;
+ }
+ if (argc < 2)
+ usage_exit();
+
+ pid = myatol(argv[1], 10);
+ /*printf("pid = %ld (0x%lx) (command line %s)\n", pid, pid, argv[1]);*/
+ if (pid == LONG_MAX || pid == LONG_MIN)
+ usage_exit();
+
+ if (sig == 0)
+ accessflag = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
+ else
+ accessflag = STANDARD_RIGHTS_REQUIRED | PROCESS_TERMINATE;
+ hProcess = OpenProcess(accessflag, FALSE, pid);
+ if (hProcess == NULL) {
+ fprintf(stderr, "dbkill: %s: no such process\n", argv[1]);
+ exit(1);
+ }
+ if (sig == 0)
+ exit(0);
+ if (!TerminateProcess(hProcess, 99)) {
+ DWORD err = GetLastError();
+ fprintf(stderr,
+ "dbkill: cannot kill process: error %d (0x%lx)\n", err, err);
+ exit(1);
+ }
+ return 0;
+}
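
As a usage illustration for dbkill.cpp (this caller is not part of the patch; the pid value and the assumption that kill.exe sits on the PATH are hypothetical), a Win32 program could probe a process with signal 0 and then terminate it with the default signal 15:

/*
 * Hypothetical caller of the kill.exe built from dbkill.cpp above.
 * Not part of this patch; pid 1234 and the PATH lookup are assumptions.
 */
#include <process.h>
#include <stdio.h>

int
main(void)
{
	long pid = 1234;	/* target pid, illustration only */
	char pidbuf[32];

	(void)sprintf(pidbuf, "%ld", pid);

	/* "kill -0 <pid>" exits 0 if the process exists, 1 otherwise. */
	if (_spawnlp(_P_WAIT, "kill", "kill", "-0", pidbuf, (char *)NULL) == 0)
		/* No explicit signal means 15 (TERM), i.e. TerminateProcess. */
		(void)_spawnlp(_P_WAIT, "kill", "kill", pidbuf, (char *)NULL);
	return (0);
}

With signal 0 the utility only opens the process with query rights, so the exit status alone reports whether the pid is alive; any non-zero signal takes the TerminateProcess path.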
diff --git a/bdb/build_win32/dllmain.c b/bdb/build_win32/dllmain.c
new file mode 100644
index 00000000000..70c2e849d66
--- /dev/null
+++ b/bdb/build_win32/dllmain.c
@@ -0,0 +1,97 @@
+/*
+ * --------------------------------------------------------------------------
+ * Copyright (C) 1997 Netscape Communications Corporation
+ * --------------------------------------------------------------------------
+ *
+ * dllmain.c
+ *
+ * $Id: dllmain.c,v 1.3 2000/10/26 21:58:48 bostic Exp $
+ */
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+static int ProcessesAttached = 0;
+static HINSTANCE Instance; /* Global library instance handle. */
+
+/*
+ * The following declaration is for the VC++ DLL entry point.
+ */
+
+BOOL APIENTRY DllMain (HINSTANCE hInst,
+ DWORD reason, LPVOID reserved);
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * DllEntryPoint --
+ *
+ * This wrapper function is used by Borland to invoke the
+ * initialization code for Tcl. It simply calls the DllMain
+ * routine.
+ *
+ * Results:
+ * See DllMain.
+ *
+ * Side effects:
+ * See DllMain.
+ *
+ *----------------------------------------------------------------------
+ */
+
+BOOL APIENTRY
+DllEntryPoint(hInst, reason, reserved)
+ HINSTANCE hInst; /* Library instance handle. */
+ DWORD reason; /* Reason this function is being called. */
+ LPVOID reserved; /* Not used. */
+{
+ return DllMain(hInst, reason, reserved);
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * DllMain --
+ *
+ * This routine is called by the VC++ C run time library init
+ * code, or the DllEntryPoint routine. It is responsible for
+ * initializing various dynamically loaded libraries.
+ *
+ * Results:
+ * TRUE on success, FALSE on failure.
+ *
+ * Side effects:
+ * Establishes 32-to-16 bit thunk and initializes sockets library.
+ *
+ *----------------------------------------------------------------------
+ */
+BOOL APIENTRY
+DllMain(hInst, reason, reserved)
+ HINSTANCE hInst; /* Library instance handle. */
+ DWORD reason; /* Reason this function is being called. */
+ LPVOID reserved; /* Not used. */
+{
+ switch (reason) {
+ case DLL_PROCESS_ATTACH:
+
+ /*
+ * Registration of UT needs to be done only once, for the first
+ * attaching process. At that time, set the tclWin32s flag
+ * to indicate whether the DLL is executing under Win32s or not.
+ */
+
+ if (ProcessesAttached++) {
+ return FALSE; /* Not the first initialization. */
+ }
+
+ Instance = hInst;
+ return TRUE;
+
+ case DLL_PROCESS_DETACH:
+
+ ProcessesAttached--;
+ break;
+ }
+
+ return TRUE;
+}
diff --git a/bdb/build_win32/dynamic_dsp.src b/bdb/build_win32/dynamic_dsp.src
new file mode 100644
index 00000000000..d9881eda331
--- /dev/null
+++ b/bdb/build_win32/dynamic_dsp.src
@@ -0,0 +1,154 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Release Static" (based on\
+ "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on\
+ "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /D "_MBCS" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "DB_DLL__"
+# PROP BASE Intermediate_Dir "DB_DLL__"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
+
+# PROP BASE Use_MFC 2
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "DB_DLL_0"
+# PROP BASE Intermediate_Dir "DB_DLL_0"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT BASE CPP /Fr
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /D "_MBCS" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+# Name "@project_name@ - Win32 Release Static"
+# Name "@project_name@ - Win32 Debug Static"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/bdb/build_win32/ex_access.dsp b/bdb/build_win32/ex_access.dsp
new file mode 100644
index 00000000000..8c802b1d774
--- /dev/null
+++ b/bdb/build_win32/ex_access.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="ex_access" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_access - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_access.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_access.mak" CFG="ex_access - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_access - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_access - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_access - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_access - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_access - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_access - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_access - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_access - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_access - Win32 Release"
+# Name "ex_access - Win32 Debug"
+# Name "ex_access - Win32 Release Static"
+# Name "ex_access - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_access.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/ex_btrec.dsp b/bdb/build_win32/ex_btrec.dsp
new file mode 100644
index 00000000000..08bc90752f5
--- /dev/null
+++ b/bdb/build_win32/ex_btrec.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="ex_btrec" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_btrec - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_btrec.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_btrec.mak" CFG="ex_btrec - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_btrec - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_btrec - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_btrec - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_btrec - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_btrec - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_btrec - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_btrec - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_btrec - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_btrec - Win32 Release"
+# Name "ex_btrec - Win32 Debug"
+# Name "ex_btrec - Win32 Release Static"
+# Name "ex_btrec - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_btrec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/ex_env.dsp b/bdb/build_win32/ex_env.dsp
new file mode 100644
index 00000000000..a46e66dac27
--- /dev/null
+++ b/bdb/build_win32/ex_env.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="ex_env" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_env - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_env.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_env.mak" CFG="ex_env - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_env - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_env - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_env - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_env - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_env - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_env - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_env - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_env - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_env - Win32 Release"
+# Name "ex_env - Win32 Debug"
+# Name "ex_env - Win32 Release Static"
+# Name "ex_env - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_env.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/ex_lock.dsp b/bdb/build_win32/ex_lock.dsp
new file mode 100644
index 00000000000..0fa57a960f8
--- /dev/null
+++ b/bdb/build_win32/ex_lock.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="ex_lock" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_lock - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_lock.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_lock.mak" CFG="ex_lock - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_lock - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_lock - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_lock - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_lock - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_lock - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_lock - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_lock - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_lock - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_lock - Win32 Release"
+# Name "ex_lock - Win32 Debug"
+# Name "ex_lock - Win32 Release Static"
+# Name "ex_lock - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/ex_mpool.dsp b/bdb/build_win32/ex_mpool.dsp
new file mode 100644
index 00000000000..3e2ee7205ce
--- /dev/null
+++ b/bdb/build_win32/ex_mpool.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="ex_mpool" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_mpool - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_mpool.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_mpool.mak" CFG="ex_mpool - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_mpool - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_mpool - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_mpool - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_mpool - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_mpool - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_mpool - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_mpool - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_mpool - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_mpool - Win32 Release"
+# Name "ex_mpool - Win32 Debug"
+# Name "ex_mpool - Win32 Release Static"
+# Name "ex_mpool - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_mpool.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/ex_tpcb.dsp b/bdb/build_win32/ex_tpcb.dsp
new file mode 100644
index 00000000000..fbaa67de7ac
--- /dev/null
+++ b/bdb/build_win32/ex_tpcb.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="ex_tpcb" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_tpcb - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_tpcb.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_tpcb.mak" CFG="ex_tpcb - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_tpcb - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_tpcb - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_tpcb - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "ex_tpcb - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_tpcb - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_tpcb - Win32 Release"
+# Name "ex_tpcb - Win32 Debug"
+# Name "ex_tpcb - Win32 Release Static"
+# Name "ex_tpcb - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_tpcb.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/excxx_access.dsp b/bdb/build_win32/excxx_access.dsp
new file mode 100644
index 00000000000..d93894dc5b7
--- /dev/null
+++ b/bdb/build_win32/excxx_access.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="excxx_access" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_access - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_access.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_access.mak" CFG="excxx_access - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_access - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_access - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_access - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_access - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_access - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_access - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_access - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_access - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_access - Win32 Release"
+# Name "excxx_access - Win32 Debug"
+# Name "excxx_access - Win32 Release Static"
+# Name "excxx_access - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\AccessExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/excxx_btrec.dsp b/bdb/build_win32/excxx_btrec.dsp
new file mode 100644
index 00000000000..403e438a6e1
--- /dev/null
+++ b/bdb/build_win32/excxx_btrec.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="excxx_btrec" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_btrec - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_btrec.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_btrec.mak" CFG="excxx_btrec - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_btrec - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_btrec - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_btrec - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_btrec - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_btrec - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_btrec - Win32 Release"
+# Name "excxx_btrec - Win32 Debug"
+# Name "excxx_btrec - Win32 Release Static"
+# Name "excxx_btrec - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\BtRecExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/excxx_env.dsp b/bdb/build_win32/excxx_env.dsp
new file mode 100644
index 00000000000..792358ee3ac
--- /dev/null
+++ b/bdb/build_win32/excxx_env.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="excxx_env" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_env - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_env.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_env.mak" CFG="excxx_env - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_env - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_env - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_env - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_env - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_env - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_env - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_env - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_env - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_env - Win32 Release"
+# Name "excxx_env - Win32 Debug"
+# Name "excxx_env - Win32 Release Static"
+# Name "excxx_env - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\EnvExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/excxx_lock.dsp b/bdb/build_win32/excxx_lock.dsp
new file mode 100644
index 00000000000..d35605cb412
--- /dev/null
+++ b/bdb/build_win32/excxx_lock.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="excxx_lock" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_lock - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_lock.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_lock.mak" CFG="excxx_lock - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_lock - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_lock - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_lock - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_lock - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_lock - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_lock - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_lock - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_lock - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_lock - Win32 Release"
+# Name "excxx_lock - Win32 Debug"
+# Name "excxx_lock - Win32 Release Static"
+# Name "excxx_lock - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\LockExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/excxx_mpool.dsp b/bdb/build_win32/excxx_mpool.dsp
new file mode 100644
index 00000000000..2159e75bb9a
--- /dev/null
+++ b/bdb/build_win32/excxx_mpool.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="excxx_mpool" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_mpool - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_mpool.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_mpool.mak" CFG="excxx_mpool - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_mpool - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_mpool - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_mpool - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_mpool - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_mpool - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_mpool - Win32 Release"
+# Name "excxx_mpool - Win32 Debug"
+# Name "excxx_mpool - Win32 Release Static"
+# Name "excxx_mpool - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\MpoolExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/excxx_tpcb.dsp b/bdb/build_win32/excxx_tpcb.dsp
new file mode 100644
index 00000000000..9033daa6cfe
--- /dev/null
+++ b/bdb/build_win32/excxx_tpcb.dsp
@@ -0,0 +1,151 @@
+# Microsoft Developer Studio Project File - Name="excxx_tpcb" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_tpcb - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_tpcb.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_tpcb.mak" CFG="excxx_tpcb - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_tpcb - Win32 Release" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_tpcb - Win32 Debug" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_tpcb - Win32 Release Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE "excxx_tpcb - Win32 Debug Static" (based on\
+ "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_tpcb - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb32s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb32sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_tpcb - Win32 Release"
+# Name "excxx_tpcb - Win32 Debug"
+# Name "excxx_tpcb - Win32 Release Static"
+# Name "excxx_tpcb - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\TpcbExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/include.tcl b/bdb/build_win32/include.tcl
new file mode 100644
index 00000000000..4f123413020
--- /dev/null
+++ b/bdb/build_win32/include.tcl
@@ -0,0 +1,16 @@
+set tclsh_path SET_YOUR_TCLSH_PATH
+set test_path ../test
+set tcllib ./Debug/libdb_tcl32d.dll
+
+set KILL ./dbkill.exe
+
+# DO NOT EDIT BELOW THIS LINE: automatically built by dist/s_tcl.
+
+global dict
+global testdir
+global util_path
+set testdir ./TESTDIR
+
+global is_hp_test
+global is_qnx_test
+global is_windows_test
diff --git a/bdb/build_win32/java_dsp.src b/bdb/build_win32/java_dsp.src
new file mode 100644
index 00000000000..eff251a44f4
--- /dev/null
+++ b/bdb/build_win32/java_dsp.src
@@ -0,0 +1,135 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on\
+ "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Release\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll
+SOURCE=$(InputPath)
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ cd $(ProjDir)\..\java\src\com\sleepycat\db
+ mkdir ..\..\..\..\classes
+ echo compiling Berkeley DB classes
+ javac -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
+ *.java
+ echo compiling examples
+ cd ..\examples
+ javac -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
+ *.java
+ echo creating jar file
+ cd ..\..\..\..\classes
+ jar cf db.jar com\sleepycat\db\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Debug\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll
+SOURCE=$(InputPath)
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ cd $(ProjDir)\..\java\src\com\sleepycat\db
+ mkdir ..\..\..\..\classes
+ echo compiling Berkeley DB classes
+ javac -g -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
+ *.java
+   echo compiling examples
+   cd ..\examples
+   javac -g -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
+   *.java
+ echo creating jar file
+ cd ..\..\..\..\classes
+ jar cf db.jar com\sleepycat\db\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/bdb/build_win32/libdb.def b/bdb/build_win32/libdb.def
new file mode 100644
index 00000000000..a3b4cb3b26b
--- /dev/null
+++ b/bdb/build_win32/libdb.def
@@ -0,0 +1,151 @@
+; $Id: libdb.def,v 11.21 2001/01/04 15:07:33 dda Exp $
+
+DESCRIPTION 'Berkeley DB 3.2 Library'
+EXPORTS
+ lock_get @1
+ lock_id @2
+ lock_put @3
+ lock_vec @4
+ log_compare @5
+ log_file @6
+ log_flush @7
+ log_get @8
+ log_put @9
+ log_register @10
+ log_unregister @11
+ memp_fclose @12
+ memp_fget @13
+ memp_fopen @14
+ memp_fput @15
+ memp_fset @16
+ memp_fsync @17
+ memp_register @18
+ memp_sync @19
+ txn_abort @20
+ txn_begin @21
+ txn_checkpoint @22
+ txn_commit @23
+ txn_prepare @24
+ db_version @25
+ memp_stat @26
+ log_archive @27
+ lock_detect @28
+ txn_id @29
+ txn_stat @30
+ memp_trickle @31
+ log_stat @32
+ lock_stat @33
+ db_create @34
+ db_env_create @35
+ db_strerror @36
+ db_xa_switch @37
+ db_env_set_func_close @38
+ db_env_set_func_dirfree @39
+ db_env_set_func_dirlist @40
+ db_env_set_func_exists @41
+ db_env_set_func_free @42
+ db_env_set_func_fsync @43
+ db_env_set_func_ioinfo @44
+ db_env_set_func_malloc @45
+ db_env_set_func_map @46
+ db_env_set_func_open @47
+ db_env_set_func_read @48
+ db_env_set_func_realloc @49
+ db_env_set_func_rename @50
+ db_env_set_func_sleep @51
+ db_env_set_func_unlink @52
+ db_env_set_func_unmap @53
+ db_env_set_func_write @54
+ db_env_set_func_yield @55
+; FREE @56
+ db_env_set_pageyield @57
+ db_env_set_panicstate @58
+ db_env_set_region_init @59
+ db_env_set_tas_spins @60
+; these are only for testing
+ __db_loadme @201
+ __ham_func2 @202
+ __ham_func3 @203
+ __ham_func4 @204
+ __ham_func5 @205
+ __db_hcreate @206
+ __db_hsearch @207
+ __db_hdestroy @208
+ __db_dbm_init @209
+ __db_dbm_delete @210
+ __db_dbm_fetch @211
+ __db_dbm_store @212
+ __db_dbm_firstkey @213
+ __db_dbm_nextkey @214
+ __db_dbm_close @215
+ __db_ndbm_open @216
+ __db_ndbm_store @217
+ __db_ndbm_rdonly @218
+ __db_ndbm_pagfno @219
+ __db_ndbm_nextkey @220
+ __db_ndbm_firstkey @221
+ __db_ndbm_fetch @222
+ __db_ndbm_error @223
+ __db_ndbm_dirfno @224
+ __db_ndbm_delete @225
+ __db_ndbm_close @226
+ __db_ndbm_clearerr @227
+ __lock_dump_region @228
+ __memp_dump_region @229
+ __os_closehandle @230
+ __os_openhandle @231
+ __os_strdup @232
+ __db_r_attach @233
+ __db_r_detach @234
+ __db_tas_mutex_init @235
+ __db_tas_mutex_lock @236
+ __db_tas_mutex_unlock @237
+ __os_read @238
+ __os_write @239
+ __os_open @240
+ __os_ioinfo @241
+ __os_free @242
+ __os_malloc @243
+ __os_freestr @244
+ __os_calloc @245
+ __ham_test @246
+; these are needed for linking tools
+ __db_dump @401
+ __db_rpath @402
+ __db_dispatch @403
+ __db_err @404
+ __db_init_print @405
+ __txn_init_print @406
+ __log_init_print @407
+ __ham_init_print @408
+ __bam_init_print @409
+ __db_jump @410
+ __ham_pgin @411
+ __ham_pgout @412
+ __bam_pgin @413
+ __bam_pgout @414
+ __db_omode @415
+ __db_prdbt @416
+ __os_sleep @417
+ __db_e_stat @420
+ __db_getlong @421
+ __os_get_errno @422
+ __os_set_errno @423
+ __ham_get_meta @424
+ __ham_release_meta @425
+ __qam_init_print @426
+ __crdel_init_print @427
+ __qam_pgin_out @428
+ __db_pgin @429
+ __db_pgout @430
+ __db_getulong @431
+ __db_util_sigresend @432
+ __db_util_siginit @433
+ __db_util_interrupted @434
+ __db_util_logset @435
+ __db_prheader @436
+ __db_prfooter @437
+ __db_verify_callback @438
+ __db_verify_internal @439
+ __os_yield @440
+ __db_global_values @441
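
The .def file above assigns fixed ordinals to the C entry points exported from the Berkeley DB DLL, so clients can bind either by name or by ordinal. As a rough sketch only, a Win32 program could resolve one of these exports at run time as shown below; the DLL name libdb32.dll and the int*-triple signature of db_version() are assumptions taken from the surrounding project files and the documented DB 3.x API, not from this diff.

    #include <windows.h>
    #include <stdio.h>

    /* Assumed signature of the exported db_version() entry point (@25 above). */
    typedef char *(*db_version_fn)(int *, int *, int *);

    int
    main(void)
    {
        HMODULE h;
        db_version_fn db_version_p;
        int major, minor, patch;

        /* Assumed DLL name produced by the db_dll project. */
        if ((h = LoadLibraryA("libdb32.dll")) == NULL)
            return (1);

        /* Resolve by name; resolving by ordinal would also work. */
        db_version_p = (db_version_fn)GetProcAddress(h, "db_version");
        if (db_version_p != NULL)
            printf("%s\n", db_version_p(&major, &minor, &patch));

        FreeLibrary(h);
        return (0);
    }
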
diff --git a/bdb/build_win32/libdb.rc b/bdb/build_win32/libdb.rc
new file mode 100644
index 00000000000..2261d69ce14
--- /dev/null
+++ b/bdb/build_win32/libdb.rc
@@ -0,0 +1,33 @@
+1 VERSIONINFO
+ FILEVERSION 3,0,2,9
+ PRODUCTVERSION 3,0,2,9
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x4L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904b0"
+ BEGIN
+ VALUE "CompanyName", "Sleepycat Software\0"
+            VALUE "FileDescription", "Berkeley DB 3.2 DLL\0"
+ VALUE "FileVersion", "3.2.9\0"
+ VALUE "InternalName", "libdb.dll\0"
+ VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997, 1998, 1999, 2000\0"
+ VALUE "OriginalFilename", "libdb.dll\0"
+ VALUE "ProductName", "Sleepycat Software libdb\0"
+ VALUE "ProductVersion", "3.2.9\0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1200
+ END
+END
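
The VERSIONINFO resource above embeds the version strings into the built DLL. For orientation, a minimal sketch of reading them back with the Win32 version APIs follows; the DLL name libdb32.dll is an assumption, the string block "040904b0" matches the resource above, and the #pragma is MSVC-specific.

    #include <windows.h>
    #include <stdio.h>
    #include <stdlib.h>
    #pragma comment(lib, "version.lib")

    int
    main(void)
    {
        const char *dll = "libdb32.dll";    /* assumed DLL name */
        DWORD handle, size;
        void *data, *value;
        UINT len;

        if ((size = GetFileVersionInfoSizeA(dll, &handle)) == 0)
            return (1);
        if ((data = malloc(size)) == NULL)
            return (1);
        /* Query the ProductVersion string from the 040904b0 block. */
        if (GetFileVersionInfoA(dll, 0, size, data) &&
            VerQueryValueA(data,
            "\\StringFileInfo\\040904b0\\ProductVersion", &value, &len))
            printf("ProductVersion: %s\n", (char *)value);
        free(data);
        return (0);
    }
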
diff --git a/bdb/build_win32/libdb_tcl.def b/bdb/build_win32/libdb_tcl.def
new file mode 100644
index 00000000000..a18459beaba
--- /dev/null
+++ b/bdb/build_win32/libdb_tcl.def
@@ -0,0 +1,35 @@
+; $Id: libdb_tcl.def,v 11.2 1999/11/21 23:10:00 bostic Exp $
+
+DESCRIPTION 'Berkeley DB TCL interface Library'
+EXPORTS
+ Db_tcl_Init
+ bdb_DbmCommand
+ bdb_HCommand
+ bdb_NdbmOpen
+ bdb_RandCommand
+ db_Cmd
+ dbc_Cmd
+ env_Cmd
+ ndbm_Cmd
+ tcl_EnvRemove
+ tcl_LockDetect
+ tcl_LockGet
+ tcl_LockStat
+ tcl_LockVec
+ tcl_LogArchive
+ tcl_LogCompare
+ tcl_LogFile
+ tcl_LogFlush
+ tcl_LogGet
+ tcl_LogPut
+ tcl_LogRegister
+ tcl_LogStat
+ tcl_LogUnregister
+ tcl_Mp
+ tcl_MpStat
+ tcl_MpSync
+ tcl_MpTrickle
+ tcl_Txn
+ tcl_TxnCheckpoint
+ tcl_TxnStat
+ txn_Cmd
diff --git a/bdb/build_win32/libdbrc.src b/bdb/build_win32/libdbrc.src
new file mode 100644
index 00000000000..82a93068c8b
--- /dev/null
+++ b/bdb/build_win32/libdbrc.src
@@ -0,0 +1,33 @@
+1 VERSIONINFO
+ FILEVERSION %MAJOR%,0,%MINOR%,%PATCH%
+ PRODUCTVERSION %MAJOR%,0,%MINOR%,%PATCH%
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x4L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904b0"
+ BEGIN
+ VALUE "CompanyName", "Sleepycat Software\0"
+ VALUE "FileDescription", "Berkeley DB 3.0 DLL\0"
+ VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0"
+ VALUE "InternalName", "libdb.dll\0"
+ VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997, 1998, 1999, 2000\0"
+ VALUE "OriginalFilename", "libdb.dll\0"
+ VALUE "ProductName", "Sleepycat Software libdb\0"
+ VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1200
+ END
+END
diff --git a/bdb/build_win32/srcfile_dsp.src b/bdb/build_win32/srcfile_dsp.src
new file mode 100644
index 00000000000..572350e6356
--- /dev/null
+++ b/bdb/build_win32/srcfile_dsp.src
@@ -0,0 +1,4 @@
+# Begin Source File
+
+SOURCE=@srcdir@\@srcfile@
+# End Source File
diff --git a/bdb/build_win32/static_dsp.src b/bdb/build_win32/static_dsp.src
new file mode 100644
index 00000000000..99d00f14291
--- /dev/null
+++ b/bdb/build_win32/static_dsp.src
@@ -0,0 +1,127 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE "@project_name@ - Win32 Release Static" (based on\
+ "Win32 (x86) Static Library")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on\
+ "Win32 (x86) Static Library")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "DB_Stati"
+# PROP BASE Intermediate_Dir "DB_Stati"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo /out:"Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "DB_Stat0"
+# PROP BASE Intermediate_Dir "DB_Stat0"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo /out:"Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "DB_Stati"
+# PROP BASE Intermediate_Dir "DB_Stati"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib"
+# ADD LIB32 /nologo /out:"Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "DB_Stat0"
+# PROP BASE Intermediate_Dir "DB_Stat0"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib"
+# ADD LIB32 /nologo /out:"Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib"
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+# Name "@project_name@ - Win32 Release Static"
+# Name "@project_name@ - Win32 Debug Static"
+@SOURCE_FILES@
+# End Target
+# End Project
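
The static projects generated from this template produce Release_static/libdb32s.lib and Debug_static/libdb32sd.lib, which the ex_* sample projects link against directly. A minimal C program of the kind that could be linked that way is sketched below; it assumes the Berkeley DB 3.x C API (db_create() and the DB handle methods exported in libdb.def above) and a hypothetical database file name, and is not part of this distribution.

    #include <string.h>
    #include <stdio.h>
    #include <db.h>

    int
    main(void)
    {
        DB *dbp;
        DBT key, data;
        int ret;

        /* Create a handle and open (or create) a btree database. */
        if ((ret = db_create(&dbp, NULL, 0)) != 0)
            return (1);
        if ((ret = dbp->open(dbp,
            "example.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
            dbp->err(dbp, ret, "open");
            (void)dbp->close(dbp, 0);
            return (1);
        }

        /* Store a single key/data pair. */
        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        key.data = "fruit";
        key.size = sizeof("fruit");
        data.data = "apple";
        data.size = sizeof("apple");
        if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) == 0)
            printf("stored %s/%s\n", (char *)key.data, (char *)data.data);

        return (dbp->close(dbp, 0) == 0 ? 0 : 1);
    }
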
diff --git a/bdb/build_win32/tcl_dsp.src b/bdb/build_win32/tcl_dsp.src
new file mode 100644
index 00000000000..11a36606e37
--- /dev/null
+++ b/bdb/build_win32/tcl_dsp.src
@@ -0,0 +1,92 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb32.lib tcl83.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb32d.lib tcl83d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/bdb/clib/getcwd.c b/bdb/clib/getcwd.c
new file mode 100644
index 00000000000..630facb4fdb
--- /dev/null
+++ b/bdb/clib/getcwd.c
@@ -0,0 +1,272 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: getcwd.c,v 11.7 2000/11/30 00:58:30 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#if HAVE_DIRENT_H
+# include <dirent.h>
+# define NAMLEN(dirent) strlen((dirent)->d_name)
+#else
+# define dirent direct
+# define NAMLEN(dirent) (dirent)->d_namlen
+# if HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif
+# if HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif
+# if HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#define ISDOT(dp) \
+ (dp->d_name[0] == '.' && (dp->d_name[1] == '\0' || \
+ (dp->d_name[1] == '.' && dp->d_name[2] == '\0')))
+
+#ifndef dirfd
+#define dirfd(dirp) ((dirp)->dd_fd)
+#endif
+
+/*
+ * getcwd --
+ * Get the current working directory.
+ *
+ * PUBLIC: #ifndef HAVE_GETCWD
+ * PUBLIC: char *getcwd __P((char *, size_t));
+ * PUBLIC: #endif
+ */
+char *
+getcwd(pt, size)
+ char *pt;
+ size_t size;
+{
+ register struct dirent *dp;
+ register DIR *dir;
+ register dev_t dev;
+ register ino_t ino;
+ register int first;
+ register char *bpt, *bup;
+ struct stat s;
+ dev_t root_dev;
+ ino_t root_ino;
+ size_t ptsize, upsize;
+ int ret, save_errno;
+ char *ept, *eup, *up;
+
+ /*
+ * If no buffer specified by the user, allocate one as necessary.
+ * If a buffer is specified, the size has to be non-zero. The path
+ * is built from the end of the buffer backwards.
+ */
+ if (pt) {
+ ptsize = 0;
+ if (!size) {
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ if (size == 1) {
+ __os_set_errno(ERANGE);
+ return (NULL);
+ }
+ ept = pt + size;
+ } else {
+ if ((ret =
+ __os_malloc(NULL, ptsize = 1024 - 4, NULL, &pt)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+ ept = pt + ptsize;
+ }
+ bpt = ept - 1;
+ *bpt = '\0';
+
+ /*
+ * Allocate bytes (1024 - malloc space) for the string of "../"'s.
+ * Should always be enough (it's 340 levels). If it's not, allocate
+ * as necessary. Special case the first stat, it's ".", not "..".
+ */
+ if ((ret = __os_malloc(NULL, upsize = 1024 - 4, NULL, &up)) != 0)
+ goto err;
+	eup = up + upsize;
+ bup = up;
+ up[0] = '.';
+ up[1] = '\0';
+
+ /* Save root values, so know when to stop. */
+ if (stat("/", &s))
+ goto err;
+ root_dev = s.st_dev;
+ root_ino = s.st_ino;
+
+ __os_set_errno(0); /* XXX readdir has no error return. */
+
+ for (first = 1;; first = 0) {
+ /* Stat the current level. */
+ if (lstat(up, &s))
+ goto err;
+
+ /* Save current node values. */
+ ino = s.st_ino;
+ dev = s.st_dev;
+
+ /* Check for reaching root. */
+ if (root_dev == dev && root_ino == ino) {
+ *--bpt = PATH_SEPARATOR[0];
+ /*
+ * It's unclear that it's a requirement to copy the
+ * path to the beginning of the buffer, but it's always
+ * been that way and stuff would probably break.
+ */
+ bcopy(bpt, pt, ept - bpt);
+ __os_free(up, upsize);
+ return (pt);
+ }
+
+ /*
+ * Build pointer to the parent directory, allocating memory
+ * as necessary. Max length is 3 for "../", the largest
+ * possible component name, plus a trailing NULL.
+ */
+ if (bup + 3 + MAXNAMLEN + 1 >= eup) {
+ if (__os_realloc(NULL, upsize *= 2, NULL, &up) != 0)
+ goto err;
+ bup = up;
+ eup = up + upsize;
+ }
+ *bup++ = '.';
+ *bup++ = '.';
+ *bup = '\0';
+
+ /* Open and stat parent directory. */
+ if (!(dir = opendir(up)) || fstat(dirfd(dir), &s))
+ goto err;
+
+ /* Add trailing slash for next directory. */
+ *bup++ = PATH_SEPARATOR[0];
+
+ /*
+ * If it's a mount point, have to stat each element because
+ * the inode number in the directory is for the entry in the
+ * parent directory, not the inode number of the mounted file.
+ */
+ save_errno = 0;
+ if (s.st_dev == dev) {
+ for (;;) {
+ if (!(dp = readdir(dir)))
+ goto notfound;
+ if (dp->d_fileno == ino)
+ break;
+ }
+ } else
+ for (;;) {
+ if (!(dp = readdir(dir)))
+ goto notfound;
+ if (ISDOT(dp))
+ continue;
+ bcopy(dp->d_name, bup, dp->d_namlen + 1);
+
+ /* Save the first error for later. */
+ if (lstat(up, &s)) {
+ if (save_errno == 0)
+ save_errno = __os_get_errno();
+ __os_set_errno(0);
+ continue;
+ }
+ if (s.st_dev == dev && s.st_ino == ino)
+ break;
+ }
+
+ /*
+ * Check for length of the current name, preceding slash,
+ * leading slash.
+ */
+ if (bpt - pt < dp->d_namlen + (first ? 1 : 2)) {
+ size_t len, off;
+
+ if (!ptsize) {
+ __os_set_errno(ERANGE);
+ goto err;
+ }
+ off = bpt - pt;
+ len = ept - bpt;
+ if (__os_realloc(NULL, ptsize *= 2, NULL, &pt) != 0)
+ goto err;
+ bpt = pt + off;
+ ept = pt + ptsize;
+ bcopy(bpt, ept - len, len);
+ bpt = ept - len;
+ }
+ if (!first)
+ *--bpt = PATH_SEPARATOR[0];
+ bpt -= dp->d_namlen;
+ bcopy(dp->d_name, bpt, dp->d_namlen);
+ (void)closedir(dir);
+
+ /* Truncate any file name. */
+ *bup = '\0';
+ }
+
+notfound:
+ /*
+ * If readdir set errno, use it, not any saved error; otherwise,
+ * didn't find the current directory in its parent directory, set
+ * errno to ENOENT.
+ */
+ if (__os_get_errno() == 0)
+ __os_set_errno(save_errno == 0 ? ENOENT : save_errno);
+ /* FALLTHROUGH */
+err:
+ if (ptsize)
+ __os_free(pt, ptsize);
+ __os_free(up, upsize);
+ return (NULL);
+}
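
This getcwd() replacement rebuilds the path by walking up through ".." components and matching device/inode pairs until it reaches "/"; callers use it exactly like the system routine. A minimal usage sketch, assuming only the PUBLIC prototype above:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Declaration matching the PUBLIC prototype above. */
    char *getcwd(char *, size_t);

    int
    main(void)
    {
        char buf[1024], *p;

        /* When a buffer is passed, its size must be non-zero and large enough. */
        if ((p = getcwd(buf, sizeof(buf))) == NULL)
            fprintf(stderr, "getcwd: %s\n", strerror(errno));
        else
            printf("%s\n", p);
        return (p == NULL);
    }
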
diff --git a/bdb/clib/getopt.c b/bdb/clib/getopt.c
new file mode 100644
index 00000000000..667fca1d78c
--- /dev/null
+++ b/bdb/clib/getopt.c
@@ -0,0 +1,139 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1987, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: getopt.c,v 11.4 2000/02/14 02:59:40 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+int opterr = 1, /* if error message should be printed */
+ optind = 1, /* index into parent argv vector */
+ optopt, /* character checked for validity */
+ optreset; /* reset getopt */
+char *optarg; /* argument associated with option */
+
+#undef BADCH
+#define BADCH (int)'?'
+#undef BADARG
+#define BADARG (int)':'
+#undef EMSG
+#define EMSG ""
+
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ *
+ * PUBLIC: #ifndef HAVE_GETOPT
+ * PUBLIC: int getopt __P((int, char * const *, const char *));
+ * PUBLIC: #endif
+ */
+int
+getopt(nargc, nargv, ostr)
+ int nargc;
+ char * const *nargv;
+ const char *ostr;
+{
+ static char *progname;
+ static char *place = EMSG; /* option letter processing */
+ char *oli; /* option letter list index */
+
+ if (!progname) {
+ if ((progname = __db_rpath(*nargv)) == NULL)
+ progname = *nargv;
+ else
+ ++progname;
+ }
+
+ if (optreset || !*place) { /* update scanning pointer */
+ optreset = 0;
+ if (optind >= nargc || *(place = nargv[optind]) != '-') {
+ place = EMSG;
+ return (EOF);
+ }
+ if (place[1] && *++place == '-') { /* found "--" */
+ ++optind;
+ place = EMSG;
+ return (EOF);
+ }
+ } /* option letter okay? */
+ if ((optopt = (int)*place++) == (int)':' ||
+ !(oli = strchr(ostr, optopt))) {
+ /*
+ * if the user didn't specify '-' as an option,
+ * assume it means EOF.
+ */
+ if (optopt == (int)'-')
+ return (EOF);
+ if (!*place)
+ ++optind;
+ if (opterr && *ostr != ':')
+ (void)fprintf(stderr,
+ "%s: illegal option -- %c\n", progname, optopt);
+ return (BADCH);
+ }
+ if (*++oli != ':') { /* don't need argument */
+ optarg = NULL;
+ if (!*place)
+ ++optind;
+ }
+ else { /* need an argument */
+ if (*place) /* no white space */
+ optarg = place;
+ else if (nargc <= ++optind) { /* no arg */
+ place = EMSG;
+ if (*ostr == ':')
+ return (BADARG);
+ if (opterr)
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %c\n",
+ progname, optopt);
+ return (BADCH);
+ }
+ else /* white space */
+ optarg = nargv[optind];
+ place = EMSG;
+ ++optind;
+ }
+ return (optopt); /* dump back option letter */
+}
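
This is the getopt() the example projects above pull in via ..\clib\getopt.c; note that it returns EOF, not -1, at the end of the option list. A typical driver loop, for illustration only (the option string "h:v" and the program itself are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    extern char *optarg;
    extern int optind;
    int getopt(int, char * const *, const char *);

    int
    main(int argc, char *argv[])
    {
        int ch, verbose = 0;
        char *home = NULL;

        /* "h:" takes an argument, "v" does not. */
        while ((ch = getopt(argc, argv, "h:v")) != EOF)
            switch (ch) {
            case 'h':
                home = optarg;
                break;
            case 'v':
                verbose = 1;
                break;
            default:
                fprintf(stderr, "usage: prog [-v] [-h home]\n");
                return (EXIT_FAILURE);
            }
        argc -= optind;
        argv += optind;

        printf("home=%s verbose=%d, %d extra arg(s)\n",
            home == NULL ? "(none)" : home, verbose, argc);
        return (EXIT_SUCCESS);
    }
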
diff --git a/bdb/clib/memcmp.c b/bdb/clib/memcmp.c
new file mode 100644
index 00000000000..2aedc3fa6b8
--- /dev/null
+++ b/bdb/clib/memcmp.c
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: memcmp.c,v 11.5 2000/02/24 21:58:12 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+/*
+ * memcmp --
+ *
+ * PUBLIC: #ifndef HAVE_MEMCMP
+ * PUBLIC: int memcmp __P((const void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+int
+memcmp(s1, s2, n)
+ char *s1, *s2;
+ size_t n;
+{
+ if (n != 0) {
+ unsigned char *p1 = (unsigned char *)s1,
+ *p2 = (unsigned char *)s2;
+ do {
+ if (*p1++ != *p2++)
+ return (*--p1 - *--p2);
+ } while (--n != 0);
+ }
+ return (0);
+}
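
The replacement compares byte by byte as unsigned chars; as with the standard routine, only the sign of the result is meaningful. A small usage sketch:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        unsigned char a[] = { 0x01, 0x02, 0x7f };
        unsigned char b[] = { 0x01, 0x02, 0x80 };
        int r;

        /* Only the sign of the result is specified, not its magnitude. */
        r = memcmp(a, b, sizeof(a));
        printf("memcmp: %s\n",
            r < 0 ? "a < b" : r > 0 ? "a > b" : "equal");
        return (0);
    }
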
diff --git a/bdb/clib/memmove.c b/bdb/clib/memmove.c
new file mode 100644
index 00000000000..da6bcfe8b13
--- /dev/null
+++ b/bdb/clib/memmove.c
@@ -0,0 +1,155 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: memmove.c,v 11.4 2000/02/14 02:59:40 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+/*
+ * sizeof(word) MUST BE A POWER OF TWO
+ * SO THAT wmask BELOW IS ALL ONES
+ */
+typedef int word; /* "word" used for optimal copy speed */
+
+#undef wsize
+#define wsize sizeof(word)
+#undef wmask
+#define wmask (wsize - 1)
+
+/*
+ * Copy a block of memory, handling overlap.
+ * This is the routine that actually implements
+ * (the portable versions of) bcopy, memcpy, and memmove.
+ */
+#ifdef MEMCOPY
+/*
+ * PUBLIC: #ifndef HAVE_MEMCPY
+ * PUBLIC: void *memcpy __P((void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+void *
+memcpy(dst0, src0, length)
+#else
+#ifdef MEMMOVE
+/*
+ * PUBLIC: #ifndef HAVE_MEMMOVE
+ * PUBLIC: void *memmove __P((void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+void *
+memmove(dst0, src0, length)
+#else
+void
+bcopy(src0, dst0, length)
+#endif
+#endif
+ void *dst0;
+ const void *src0;
+ register size_t length;
+{
+ register char *dst = dst0;
+ register const char *src = src0;
+ register size_t t;
+
+ if (length == 0 || dst == src) /* nothing to do */
+ goto done;
+
+ /*
+ * Macros: loop-t-times; and loop-t-times, t>0
+ */
+#undef TLOOP
+#define TLOOP(s) if (t) TLOOP1(s)
+#undef TLOOP1
+#define TLOOP1(s) do { s; } while (--t)
+
+ if ((unsigned long)dst < (unsigned long)src) {
+ /*
+ * Copy forward.
+ */
+ t = (int)src; /* only need low bits */
+ if ((t | (int)dst) & wmask) {
+ /*
+ * Try to align operands. This cannot be done
+ * unless the low bits match.
+ */
+ if ((t ^ (int)dst) & wmask || length < wsize)
+ t = length;
+ else
+ t = wsize - (t & wmask);
+ length -= t;
+ TLOOP1(*dst++ = *src++);
+ }
+ /*
+ * Copy whole words, then mop up any trailing bytes.
+ */
+ t = length / wsize;
+ TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize);
+ t = length & wmask;
+ TLOOP(*dst++ = *src++);
+ } else {
+ /*
+ * Copy backwards. Otherwise essentially the same.
+ * Alignment works as before, except that it takes
+ * (t&wmask) bytes to align, not wsize-(t&wmask).
+ */
+ src += length;
+ dst += length;
+ t = (int)src;
+ if ((t | (int)dst) & wmask) {
+ if ((t ^ (int)dst) & wmask || length <= wsize)
+ t = length;
+ else
+ t &= wmask;
+ length -= t;
+ TLOOP1(*--dst = *--src);
+ }
+ t = length / wsize;
+ TLOOP(src -= wsize; dst -= wsize; *(word *)dst = *(word *)src);
+ t = length & wmask;
+ TLOOP(*--dst = *--src);
+ }
+done:
+#if defined(MEMCOPY) || defined(MEMMOVE)
+ return (dst0);
+#else
+ return;
+#endif
+}
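/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * core of the routine above with the word-at-a-time optimization stripped
 * out.  The only decision that matters for correctness is the copy
 * direction -- forward when the destination precedes the source, backward
 * otherwise -- so overlapping regions are handled safely.
 */
#include <assert.h>
#include <stddef.h>
#include <string.h>

static void *
byte_memmove(void *dst0, const void *src0, size_t n)
{
	char *dst = dst0;
	const char *src = src0;

	if (dst < src)
		while (n-- != 0)		/* copy forward */
			*dst++ = *src++;
	else if (dst > src) {
		dst += n;
		src += n;
		while (n-- != 0)		/* copy backward */
			*--dst = *--src;
	}
	return (dst0);
}

int
main(void)
{
	char buf[] = "abcdef";

	/* Shift "abcde" one byte to the right inside the same buffer. */
	byte_memmove(buf + 1, buf, 5);
	assert(strcmp(buf, "aabcde") == 0);
	return (0);
}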
diff --git a/bdb/clib/raise.c b/bdb/clib/raise.c
new file mode 100644
index 00000000000..acec86cd63a
--- /dev/null
+++ b/bdb/clib/raise.c
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: raise.c,v 11.3 2000/02/14 02:59:41 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <signal.h>
+#include <unistd.h>
+#endif
+
+/*
+ * raise --
+ * Send a signal to the current process.
+ *
+ * PUBLIC: #ifndef HAVE_RAISE
+ * PUBLIC: int raise __P((int));
+ * PUBLIC: #endif
+ */
+int
+raise(s)
+ int s;
+{
+ return (kill(getpid(), s));
+}
diff --git a/bdb/clib/snprintf.c b/bdb/clib/snprintf.c
new file mode 100644
index 00000000000..6aa9e3ae66c
--- /dev/null
+++ b/bdb/clib/snprintf.c
@@ -0,0 +1,61 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: snprintf.c,v 11.5 2000/12/22 19:38:37 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * snprintf --
+ * Bounded version of sprintf.
+ *
+ * PUBLIC: #ifndef HAVE_SNPRINTF
+ * PUBLIC: int snprintf __P((char *, size_t, const char *, ...));
+ * PUBLIC: #endif
+ */
+#ifndef HAVE_SNPRINTF
+int
+#ifdef __STDC__
+snprintf(char *str, size_t n, const char *fmt, ...)
+#else
+snprintf(str, n, fmt, va_alist)
+ char *str;
+ size_t n;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ va_list ap;
+ int rval;
+
+ COMPQUIET(n, 0);
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+#ifdef SPRINTF_RET_CHARPNT
+ (void)vsprintf(str, fmt, ap);
+ va_end(ap);
+ return (strlen(str));
+#else
+ rval = vsprintf(str, fmt, ap);
+ va_end(ap);
+ return (rval);
+#endif
+}
+#endif
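/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * fallback above discards the length bound (COMPQUIET) and calls vsprintf(),
 * so it is only safe when the caller has already sized the buffer for the
 * worst case.  Here 32 bytes is always enough for "error " plus a sign, ten
 * digits and the terminating NUL.
 */
#include <stdio.h>

int
main(void)
{
	char buf[32];		/* worst case: "error " + sign + 10 digits + NUL */
	int n;

	n = snprintf(buf, sizeof(buf), "error %d", -2147483647);
	printf("%d bytes: %s\n", n, buf);
	return (0);
}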
diff --git a/bdb/clib/strcasecmp.c b/bdb/clib/strcasecmp.c
new file mode 100644
index 00000000000..6633197bc8c
--- /dev/null
+++ b/bdb/clib/strcasecmp.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 1987, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: strcasecmp.c,v 1.4 2000/03/24 22:31:31 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+/*
+ * This array is designed for mapping upper- and lower-case letters
+ * together for a case-independent comparison.  The mappings are
+ * based upon ascii character sequences.
+ */
+static const unsigned char charmap[] = {
+ '\000', '\001', '\002', '\003', '\004', '\005', '\006', '\007',
+ '\010', '\011', '\012', '\013', '\014', '\015', '\016', '\017',
+ '\020', '\021', '\022', '\023', '\024', '\025', '\026', '\027',
+ '\030', '\031', '\032', '\033', '\034', '\035', '\036', '\037',
+ '\040', '\041', '\042', '\043', '\044', '\045', '\046', '\047',
+ '\050', '\051', '\052', '\053', '\054', '\055', '\056', '\057',
+ '\060', '\061', '\062', '\063', '\064', '\065', '\066', '\067',
+ '\070', '\071', '\072', '\073', '\074', '\075', '\076', '\077',
+ '\100', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
+ '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157',
+ '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167',
+ '\170', '\171', '\172', '\133', '\134', '\135', '\136', '\137',
+ '\140', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
+ '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157',
+ '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167',
+ '\170', '\171', '\172', '\173', '\174', '\175', '\176', '\177',
+ '\200', '\201', '\202', '\203', '\204', '\205', '\206', '\207',
+ '\210', '\211', '\212', '\213', '\214', '\215', '\216', '\217',
+ '\220', '\221', '\222', '\223', '\224', '\225', '\226', '\227',
+ '\230', '\231', '\232', '\233', '\234', '\235', '\236', '\237',
+ '\240', '\241', '\242', '\243', '\244', '\245', '\246', '\247',
+ '\250', '\251', '\252', '\253', '\254', '\255', '\256', '\257',
+ '\260', '\261', '\262', '\263', '\264', '\265', '\266', '\267',
+ '\270', '\271', '\272', '\273', '\274', '\275', '\276', '\277',
+ '\300', '\301', '\302', '\303', '\304', '\305', '\306', '\307',
+ '\310', '\311', '\312', '\313', '\314', '\315', '\316', '\317',
+ '\320', '\321', '\322', '\323', '\324', '\325', '\326', '\327',
+ '\330', '\331', '\332', '\333', '\334', '\335', '\336', '\337',
+ '\340', '\341', '\342', '\343', '\344', '\345', '\346', '\347',
+ '\350', '\351', '\352', '\353', '\354', '\355', '\356', '\357',
+ '\360', '\361', '\362', '\363', '\364', '\365', '\366', '\367',
+ '\370', '\371', '\372', '\373', '\374', '\375', '\376', '\377',
+};
+
+/*
+ * strcasecmp --
+ * Do strcmp(3) in a case-insensitive manner.
+ *
+ * PUBLIC: int strcasecmp __P((const char *, const char *));
+ */
+int
+strcasecmp(s1, s2)
+ const char *s1, *s2;
+{
+ register const unsigned char *cm = charmap,
+ *us1 = (const unsigned char *)s1,
+ *us2 = (const unsigned char *)s2;
+
+ while (cm[*us1] == cm[*us2++])
+ if (*us1++ == '\0')
+ return (0);
+ return (cm[*us1] - cm[*--us2]);
+}
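/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * charmap above folds only 'A'..'Z' onto 'a'..'z' and maps every other byte
 * to itself, so for ASCII input the routine behaves like a byte-wise compare
 * through tolower().
 */
#include <assert.h>
#include <ctype.h>

static int
fold_casecmp(const char *s1, const char *s2)
{
	const unsigned char *u1 = (const unsigned char *)s1;
	const unsigned char *u2 = (const unsigned char *)s2;

	while (tolower(*u1) == tolower(*u2++))
		if (*u1++ == '\0')
			return (0);
	return (tolower(*u1) - tolower(*--u2));
}

int
main(void)
{
	assert(fold_casecmp("Btree", "BTREE") == 0);
	assert(fold_casecmp("hash", "heap") < 0);
	return (0);
}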
diff --git a/bdb/clib/strerror.c b/bdb/clib/strerror.c
new file mode 100644
index 00000000000..0f7447b0419
--- /dev/null
+++ b/bdb/clib/strerror.c
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: strerror.c,v 11.4 2000/02/14 02:59:41 bostic Exp $";
+#endif /* not lint */
+
+/*
+ * strerror --
+ * Return the string associated with an errno.
+ *
+ * PUBLIC: #ifndef HAVE_STRERROR
+ * PUBLIC: char *strerror __P((int));
+ * PUBLIC: #endif
+ */
+char *
+strerror(num)
+ int num;
+{
+ extern int sys_nerr;
+ extern char *sys_errlist[];
+#undef UPREFIX
+#define UPREFIX "Unknown error: "
+ static char ebuf[40] = UPREFIX; /* 64-bit number + slop */
+	unsigned int errnum;
+ char *p, *t, tmp[40];
+
+ errnum = num; /* convert to unsigned */
+ if (errnum < sys_nerr)
+ return(sys_errlist[errnum]);
+
+ /* Do this by hand, so we don't include stdio(3). */
+ t = tmp;
+ do {
+ *t++ = "0123456789"[errnum % 10];
+ } while (errnum /= 10);
+ for (p = ebuf + sizeof(UPREFIX) - 1;;) {
+ *p++ = *--t;
+ if (t <= tmp)
+ break;
+ }
+ return(ebuf);
+}
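/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * hand-rolled decimal conversion above -- extract digits least-significant
 * first into a scratch buffer, then copy them back in reverse -- so that no
 * stdio formatting is needed.
 */
#include <assert.h>
#include <string.h>

static void
format_uint(char *dst, unsigned int num)
{
	char tmp[16], *t;

	t = tmp;
	do {
		*t++ = "0123456789"[num % 10];
	} while ((num /= 10) != 0);
	while (t > tmp)			/* reverse into the destination */
		*dst++ = *--t;
	*dst = '\0';
}

int
main(void)
{
	char buf[16];

	format_uint(buf, 305);
	assert(strcmp(buf, "305") == 0);
	return (0);
}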
diff --git a/bdb/clib/vsnprintf.c b/bdb/clib/vsnprintf.c
new file mode 100644
index 00000000000..3d27bc0d2f8
--- /dev/null
+++ b/bdb/clib/vsnprintf.c
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: vsnprintf.c,v 11.4 2000/05/18 19:24:59 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * vsnprintf --
+ * Bounded version of vsprintf.
+ *
+ * PUBLIC: #ifndef HAVE_VSNPRINTF
+ * PUBLIC: int vsnprintf();
+ * PUBLIC: #endif
+ */
+#ifndef HAVE_VSNPRINTF
+int
+vsnprintf(str, n, fmt, ap)
+ char *str;
+ size_t n;
+ const char *fmt;
+ va_list ap;
+{
+ COMPQUIET(n, 0);
+
+#ifdef SPRINTF_RET_CHARPNT
+ (void)vsprintf(str, fmt, ap);
+ return (strlen(str));
+#else
+ return (vsprintf(str, fmt, ap));
+#endif
+}
+#endif
diff --git a/bdb/common/db_byteorder.c b/bdb/common/db_byteorder.c
new file mode 100644
index 00000000000..d089cfe4c99
--- /dev/null
+++ b/bdb/common/db_byteorder.c
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_byteorder.c,v 11.4 2000/11/30 00:58:31 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_ENDIAN_H
+#include <endian.h>
+#if BYTE_ORDER == BIG_ENDIAN
+#define WORDS_BIGENDIAN 1
+#endif
+#endif
+
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+/*
+ * __db_byteorder --
+ * Return if we need to do byte swapping, checking for illegal
+ * values.
+ *
+ * PUBLIC: int __db_byteorder __P((DB_ENV *, int));
+ */
+int
+__db_byteorder(dbenv, lorder)
+ DB_ENV *dbenv;
+ int lorder;
+{
+ switch (lorder) {
+ case 0:
+ break;
+ case 1234:
+#if defined(WORDS_BIGENDIAN)
+ return (DB_SWAPBYTES);
+#else
+ break;
+#endif
+ case 4321:
+#if defined(WORDS_BIGENDIAN)
+ break;
+#else
+ return (DB_SWAPBYTES);
+#endif
+ default:
+ __db_err(dbenv,
+ "unsupported byte order, only big and little-endian supported");
+ return (EINVAL);
+ }
+ return (0);
+}
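/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * 1234/4321 "lorder" convention used above names the byte order by where the
 * least significant byte of a 4-byte integer lands: 1234 is little-endian,
 * 4321 is big-endian.  The routine above resolves the host order at compile
 * time via WORDS_BIGENDIAN; the probe below makes the same classification at
 * run time.
 */
#include <stdio.h>

static int
host_lorder(void)
{
	unsigned int u = 1;

	/* Low-order byte stored first means little-endian, i.e. 1234. */
	return (*(unsigned char *)&u == 1 ? 1234 : 4321);
}

int
main(void)
{
	printf("host lorder: %d\n", host_lorder());
	return (0);
}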
diff --git a/bdb/common/db_err.c b/bdb/common/db_err.c
new file mode 100644
index 00000000000..d69bd023dfd
--- /dev/null
+++ b/bdb/common/db_err.c
@@ -0,0 +1,544 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_err.c,v 11.38 2001/01/22 21:50:25 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "lock_ext.h"
+#include "log.h"
+#include "log_ext.h"
+#include "mp.h"
+#include "mp_ext.h"
+#include "txn.h"
+#include "txn_ext.h"
+#include "clib_ext.h"
+#include "common_ext.h"
+#include "db_auto.h"
+
+static void __db_errcall __P((const DB_ENV *, int, int, const char *, va_list));
+static void __db_errfile __P((const DB_ENV *, int, int, const char *, va_list));
+
+/*
+ * __db_fchk --
+ * General flags checking routine.
+ *
+ * PUBLIC: int __db_fchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t));
+ */
+int
+__db_fchk(dbenv, name, flags, ok_flags)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags, ok_flags;
+{
+ return (LF_ISSET(~ok_flags) ? __db_ferr(dbenv, name, 0) : 0);
+}
+
+/*
+ * __db_fcchk --
+ * General combination flags checking routine.
+ *
+ * PUBLIC: int __db_fcchk
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, u_int32_t, u_int32_t));
+ */
+int
+__db_fcchk(dbenv, name, flags, flag1, flag2)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags, flag1, flag2;
+{
+ return (LF_ISSET(flag1) &&
+ LF_ISSET(flag2) ? __db_ferr(dbenv, name, 1) : 0);
+}
+
+/*
+ * __db_ferr --
+ * Common flag errors.
+ *
+ * PUBLIC: int __db_ferr __P((const DB_ENV *, const char *, int));
+ */
+int
+__db_ferr(dbenv, name, iscombo)
+ const DB_ENV *dbenv;
+ const char *name;
+ int iscombo;
+{
+ __db_err(dbenv, "illegal flag %sspecified to %s",
+ iscombo ? "combination " : "", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_pgerr --
+ * Error when unable to retrieve a specified page.
+ *
+ * PUBLIC: int __db_pgerr __P((DB *, db_pgno_t));
+ */
+int
+__db_pgerr(dbp, pgno)
+ DB *dbp;
+ db_pgno_t pgno;
+{
+ /*
+ * Three things are certain:
+ * Death, taxes, and lost data.
+ * Guess which has occurred.
+ */
+ __db_err(dbp->dbenv,
+ "unable to create/retrieve page %lu", (u_long)pgno);
+ return (__db_panic(dbp->dbenv, EIO));
+}
+
+/*
+ * __db_pgfmt --
+ * Error when a page has the wrong format.
+ *
+ * PUBLIC: int __db_pgfmt __P((DB *, db_pgno_t));
+ */
+int
+__db_pgfmt(dbp, pgno)
+ DB *dbp;
+ db_pgno_t pgno;
+{
+ __db_err(dbp->dbenv,
+ "page %lu: illegal page type or format", (u_long)pgno);
+ return (__db_panic(dbp->dbenv, EINVAL));
+}
+
+/*
+ * __db_eopnotsup --
+ * Common operation not supported message.
+ *
+ * PUBLIC: int __db_eopnotsup __P((const DB_ENV *));
+ */
+int
+__db_eopnotsup(dbenv)
+ const DB_ENV *dbenv;
+{
+ __db_err(dbenv, "operation not supported");
+#ifdef EOPNOTSUPP
+ return (EOPNOTSUPP);
+#else
+ return (EINVAL);
+#endif
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __db_assert --
+ * Error when an assertion fails. Only checked if #DIAGNOSTIC defined.
+ *
+ * PUBLIC: #ifdef DIAGNOSTIC
+ * PUBLIC: void __db_assert __P((const char *, const char *, int));
+ * PUBLIC: #endif
+ */
+void
+__db_assert(failedexpr, file, line)
+ const char *failedexpr, *file;
+ int line;
+{
+ (void)fprintf(stderr,
+ "__db_assert: \"%s\" failed: file \"%s\", line %d\n",
+ failedexpr, file, line);
+ fflush(stderr);
+
+ /* We want a stack trace of how this could possibly happen. */
+ abort();
+
+ /* NOTREACHED */
+}
+#endif
+
+/*
+ * __db_panic_msg --
+ *	Just report that someone else panicked.
+ *
+ * PUBLIC: int __db_panic_msg __P((DB_ENV *));
+ */
+int
+__db_panic_msg(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "region error detected; run recovery.");
+ return (DB_RUNRECOVERY);
+}
+
+/*
+ * __db_panic --
+ * Lock out the tree due to unrecoverable error.
+ *
+ * PUBLIC: int __db_panic __P((DB_ENV *, int));
+ */
+int
+__db_panic(dbenv, errval)
+ DB_ENV *dbenv;
+ int errval;
+{
+
+ if (dbenv != NULL) {
+ ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->panic = 1;
+
+ dbenv->db_panic = errval;
+
+ __db_err(dbenv, "PANIC: %s", db_strerror(errval));
+
+ if (dbenv->db_paniccall != NULL)
+ dbenv->db_paniccall(dbenv, errval);
+ }
+
+ /*
+ * Chaos reigns within.
+ * Reflect, repent, and reboot.
+ * Order shall return.
+ */
+ return (DB_RUNRECOVERY);
+}
+
+/*
+ * db_strerror --
+ * ANSI C strerror(3) for DB.
+ */
+char *
+db_strerror(error)
+ int error;
+{
+ if (error == 0)
+ return ("Successful return: 0");
+ if (error > 0)
+ return (strerror(error));
+
+ /*
+ * !!!
+ * The Tcl API requires that some of these return strings be compared
+ * against strings stored in application scripts. So, any of these
+ * errors that do not invariably result in a Tcl exception may not be
+ * altered.
+ */
+ switch (error) {
+ case DB_INCOMPLETE:
+ return ("DB_INCOMPLETE: Cache flush was unable to complete");
+ case DB_KEYEMPTY:
+ return ("DB_KEYEMPTY: Non-existent key/data pair");
+ case DB_KEYEXIST:
+ return ("DB_KEYEXIST: Key/data pair already exists");
+ case DB_LOCK_DEADLOCK:
+ return
+ ("DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock");
+ case DB_LOCK_NOTGRANTED:
+ return ("DB_LOCK_NOTGRANTED: Lock not granted");
+ case DB_NOSERVER:
+ return ("DB_NOSERVER: Fatal error, no server");
+ case DB_NOSERVER_HOME:
+ return ("DB_NOSERVER_HOME: Home unrecognized at server");
+ case DB_NOSERVER_ID:
+ return ("DB_NOSERVER_ID: Identifier unrecognized at server");
+ case DB_NOTFOUND:
+ return ("DB_NOTFOUND: No matching key/data pair found");
+ case DB_OLD_VERSION:
+ return ("DB_OLDVERSION: Database requires a version upgrade");
+ case DB_RUNRECOVERY:
+ return ("DB_RUNRECOVERY: Fatal error, run database recovery");
+ case DB_VERIFY_BAD:
+ return ("DB_VERIFY_BAD: Database verification failed");
+ default: {
+ /*
+ * !!!
+ * Room for a 64-bit number + slop. This buffer is only used
+ * if we're given an unknown error, which should never happen.
+ * Note, however, we're no longer thread-safe if it does.
+ */
+ static char ebuf[40];
+
+ (void)snprintf(ebuf, sizeof(ebuf), "Unknown error: %d", error);
+ return (ebuf);
+ }
+ }
+}
+
+/*
+ * __db_err --
+ * Standard DB error routine. The same as db_errx, except that we
+ * don't write to stderr if no output mechanism was specified.
+ *
+ * PUBLIC: void __db_err __P((const DB_ENV *, const char *, ...));
+ */
+void
+#ifdef __STDC__
+__db_err(const DB_ENV *dbenv, const char *fmt, ...)
+#else
+__db_err(dbenv, fmt, va_alist)
+ const DB_ENV *dbenv;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ va_list ap;
+
+/*
+ XXX
+ Log the message.
+
+ It would be nice to automatically log the error into the log files
+ if the application is configured for logging. The problem is that
+ if we currently hold the log region mutex, we will self-deadlock.
+ Leave all the structure in place, but turned off. I'd like to fix
+ this in the future by detecting if we have the log region already
+ locked (e.g., a flag in the environment handle), or perhaps even
+ have a finer granularity so that the only calls to __db_err we
+ can't log are those made while we have the current log buffer
+ locked, or perhaps have a separate buffer into which we log error
+ messages.
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ __db_real_log(dbenv, NULL, "db_err", 0, fmt, ap);
+
+ va_end(ap);
+#endif
+*/
+
+ /* Tell the application. */
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ __db_real_err(dbenv, 0, 0, 0, fmt, ap);
+
+ va_end(ap);
+}
+
+/*
+ * __db_real_err --
+ * All the DB error routines end up here.
+ *
+ * PUBLIC: void __db_real_err
+ * PUBLIC: __P((const DB_ENV *, int, int, int, const char *, va_list));
+ */
+void
+__db_real_err(dbenv, error, error_set, stderr_default, fmt, ap)
+ const DB_ENV *dbenv;
+ int error, error_set, stderr_default;
+ const char *fmt;
+ va_list ap;
+{
+ /* Call the user's callback function, if specified. */
+ if (dbenv != NULL && dbenv->db_errcall != NULL)
+ __db_errcall(dbenv, error, error_set, fmt, ap);
+
+ /* Write to the user's file descriptor, if specified. */
+ if (dbenv != NULL && dbenv->db_errfile != NULL)
+ __db_errfile(dbenv, error, error_set, fmt, ap);
+
+ /*
+ * If we have a default and we didn't do either of the above, write
+ * to the default.
+ */
+ if (stderr_default && (dbenv == NULL ||
+ (dbenv->db_errcall == NULL && dbenv->db_errfile == NULL)))
+ __db_errfile(dbenv, error, error_set, fmt, ap);
+}
+
+/*
+ * __db_errcall --
+ * Do the error message work for callback functions.
+ */
+static void
+__db_errcall(dbenv, error, error_set, fmt, ap)
+ const DB_ENV *dbenv;
+ int error, error_set;
+ const char *fmt;
+ va_list ap;
+{
+ char *p;
+ char __errbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
+
+ p = __errbuf;
+ if (fmt != NULL) {
+ p += vsnprintf(__errbuf, sizeof(__errbuf), fmt, ap);
+ if (error_set) {
+ *p++ = ':';
+ *p++ = ' ';
+ }
+ }
+ if (error_set)
+ (void)strcpy(p, db_strerror(error));
+
+ dbenv->db_errcall(dbenv->db_errpfx, __errbuf);
+}
+
+/*
+ * __db_errfile --
+ * Do the error message work for FILE *s.
+ */
+static void
+__db_errfile(dbenv, error, error_set, fmt, ap)
+ const DB_ENV *dbenv;
+ int error, error_set;
+ const char *fmt;
+ va_list ap;
+{
+ FILE *fp;
+
+ fp = dbenv == NULL ||
+ dbenv->db_errfile == NULL ? stderr : dbenv->db_errfile;
+
+ if (dbenv != NULL && dbenv->db_errpfx != NULL)
+ (void)fprintf(fp, "%s: ", dbenv->db_errpfx);
+ if (fmt != NULL) {
+ (void)vfprintf(fp, fmt, ap);
+ if (error_set)
+ (void)fprintf(fp, ": ");
+ }
+ if (error_set)
+ (void)fprintf(fp, "%s", db_strerror(error));
+ (void)fprintf(fp, "\n");
+ (void)fflush(fp);
+}
+
+/*
+ * __db_logmsg --
+ * Write information into the DB log.
+ *
+ * PUBLIC: void __db_logmsg __P((const DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, const char *, ...));
+ */
+void
+#ifdef __STDC__
+__db_logmsg(const DB_ENV *dbenv,
+ DB_TXN *txnid, const char *opname, u_int32_t flags, const char *fmt, ...)
+#else
+__db_logmsg(dbenv, txnid, opname, flags, fmt, va_alist)
+ const DB_ENV *dbenv;
+ DB_TXN *txnid;
+ const char *opname, *fmt;
+ u_int32_t flags;
+ va_dcl
+#endif
+{
+ va_list ap;
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ __db_real_log(dbenv, txnid, opname, flags, fmt, ap);
+
+ va_end(ap);
+}
+
+/*
+ * __db_real_log --
+ * Write information into the DB log.
+ *
+ * PUBLIC: void __db_real_log __P((const DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, const char *, va_list ap));
+ */
+void
+#ifdef __STDC__
+__db_real_log(const DB_ENV *dbenv, DB_TXN *txnid,
+ const char *opname, u_int32_t flags, const char *fmt, va_list ap)
+#else
+__db_real_log(dbenv, txnid, opname, flags, fmt, ap)
+ const DB_ENV *dbenv;
+ DB_TXN *txnid;
+ const char *opname, *fmt;
+ u_int32_t flags;
+ va_list ap;
+#endif
+{
+ DBT opdbt, msgdbt;
+ DB_LSN lsn;
+ char __logbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
+
+ if (!LOGGING_ON(dbenv))
+ return;
+
+ memset(&opdbt, 0, sizeof(opdbt));
+ opdbt.data = (void *)opname;
+ opdbt.size = strlen(opname) + 1;
+
+ memset(&msgdbt, 0, sizeof(msgdbt));
+ msgdbt.data = __logbuf;
+ msgdbt.size = vsnprintf(__logbuf, sizeof(__logbuf), fmt, ap);
+
+ /*
+ * XXX
+ * Explicitly discard the const. Otherwise, we have to const DB_ENV
+ * references throughout the logging subsystem.
+ */
+ __db_debug_log(
+ (DB_ENV *)dbenv, txnid, &lsn, flags, &opdbt, -1, &msgdbt, NULL, 0);
+}
+
+/*
+ * __db_unknown_flag -- report internal error
+ *
+ * PUBLIC: int __db_unknown_flag __P((DB_ENV *, char *, u_int32_t));
+ */
+int
+__db_unknown_flag(dbenv, routine, flag)
+ DB_ENV *dbenv;
+ char *routine;
+ u_int32_t flag;
+{
+ __db_err(dbenv, "%s: Unknown flag: 0x%x", routine, flag);
+ DB_ASSERT(0);
+ return (EINVAL);
+}
+
+/*
+ * __db_unknown_type -- report internal error
+ *
+ * PUBLIC: int __db_unknown_type __P((DB_ENV *, char *, u_int32_t));
+ */
+int
+__db_unknown_type(dbenv, routine, type)
+ DB_ENV *dbenv;
+ char *routine;
+ u_int32_t type;
+{
+ __db_err(dbenv, "%s: Unknown db type: 0x%x", routine, type);
+ DB_ASSERT(0);
+ return (EINVAL);
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __db_missing_txn_err --
+ * Cannot combine operations with and without transactions.
+ *
+ * PUBLIC: #ifdef DIAGNOSTIC
+ * PUBLIC: int __db_missing_txn_err __P((DB_ENV *));
+ * PUBLIC: #endif
+ */
+int
+__db_missing_txn_err(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv,
+ "DB handle previously used in transaction, missing transaction handle.");
+ return (EINVAL);
+}
+#endif
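/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * dispatch order implemented by __db_real_err above -- a user callback and a
 * user FILE * both receive the message if configured, and stderr is used
 * only as a default when neither is set and the caller asked for a default.
 */
#include <stdio.h>

struct err_cfg {
	void (*errcall)(const char *pfx, const char *msg);
	FILE *errfile;
	const char *errpfx;
};

static void
report(const struct err_cfg *cfg, int use_stderr_default, const char *msg)
{
	if (cfg->errcall != NULL)
		cfg->errcall(cfg->errpfx, msg);
	if (cfg->errfile != NULL)
		fprintf(cfg->errfile, "%s: %s\n",
		    cfg->errpfx == NULL ? "" : cfg->errpfx, msg);
	if (use_stderr_default &&
	    cfg->errcall == NULL && cfg->errfile == NULL)
		fprintf(stderr, "%s\n", msg);
}

int
main(void)
{
	struct err_cfg cfg = { NULL, NULL, "example" };

	cfg.errfile = stdout;
	report(&cfg, 1, "unable to create/retrieve page 12");
	return (0);
}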
diff --git a/bdb/common/db_getlong.c b/bdb/common/db_getlong.c
new file mode 100644
index 00000000000..bead530cd94
--- /dev/null
+++ b/bdb/common/db_getlong.c
@@ -0,0 +1,159 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_getlong.c,v 11.11 2000/12/22 19:16:04 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "clib_ext.h"
+
+/*
+ * __db_getlong --
+ * Return a long value inside of basic parameters.
+ *
+ * PUBLIC: int __db_getlong
+ * PUBLIC: __P((DB *, const char *, char *, long, long, long *));
+ */
+int
+__db_getlong(dbp, progname, p, min, max, storep)
+ DB *dbp;
+ const char *progname;
+ char *p;
+ long min, max, *storep;
+{
+ long val;
+ char *end;
+
+ __os_set_errno(0);
+ val = strtol(p, &end, 10);
+ if ((val == LONG_MIN || val == LONG_MAX) &&
+ __os_get_errno() == ERANGE) {
+ if (dbp == NULL) {
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, p, strerror(ERANGE));
+ exit(1);
+ }
+ dbp->err(dbp, ERANGE, "%s", p);
+ return (1);
+ }
+ if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
+ if (dbp == NULL) {
+ fprintf(stderr,
+ "%s: %s: Invalid numeric argument\n", progname, p);
+ exit(1);
+ }
+ dbp->errx(dbp, "%s: Invalid numeric argument", p);
+ return (1);
+ }
+ if (val < min) {
+ if (dbp == NULL) {
+ fprintf(stderr,
+ "%s: %s: Less than minimum value (%ld)\n",
+ progname, p, min);
+ exit(1);
+ }
+ dbp->errx(dbp, "%s: Less than minimum value (%ld)", p, min);
+ return (1);
+ }
+ if (val > max) {
+ if (dbp == NULL) {
+ fprintf(stderr,
+ "%s: %s: Greater than maximum value (%ld)\n",
+ progname, p, max);
+ exit(1);
+ }
+ dbp->errx(dbp, "%s: Greater than maximum value (%ld)", p, max);
+		return (1);
+ }
+ *storep = val;
+ return (0);
+}
+
+/*
+ * __db_getulong --
+ * Return an unsigned long value inside of basic parameters.
+ *
+ * PUBLIC: int __db_getulong
+ * PUBLIC: __P((DB *, const char *, char *, u_long, u_long, u_long *));
+ */
+int
+__db_getulong(dbp, progname, p, min, max, storep)
+ DB *dbp;
+ const char *progname;
+ char *p;
+ u_long min, max, *storep;
+{
+#if !defined(HAVE_STRTOUL)
+ COMPQUIET(min, 0);
+
+ return (__db_getlong(dbp, progname, p, 0, max, (long *)storep));
+#else
+ u_long val;
+ char *end;
+
+ __os_set_errno(0);
+ val = strtoul(p, &end, 10);
+ if (val == ULONG_MAX && __os_get_errno() == ERANGE) {
+ if (dbp == NULL) {
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, p, strerror(ERANGE));
+ exit(1);
+ }
+ dbp->err(dbp, ERANGE, "%s", p);
+ return (1);
+ }
+ if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
+ if (dbp == NULL) {
+ fprintf(stderr,
+ "%s: %s: Invalid numeric argument\n", progname, p);
+ exit(1);
+ }
+ dbp->errx(dbp, "%s: Invalid numeric argument", p);
+ return (1);
+ }
+ if (val < min) {
+ if (dbp == NULL) {
+ fprintf(stderr,
+ "%s: %s: Less than minimum value (%ld)\n",
+ progname, p, min);
+ exit(1);
+ }
+ dbp->errx(dbp, "%s: Less than minimum value (%ld)", p, min);
+ return (1);
+ }
+
+ /*
+ * We allow a 0 to substitute as a max value for ULONG_MAX because
+ * 1) accepting only a 0 value is unlikely to be necessary, and 2)
+ * we don't want callers to have to use ULONG_MAX explicitly, as it
+ * may not exist on all platforms.
+ */
+ if (max != 0 && val > max) {
+ if (dbp == NULL) {
+ fprintf(stderr,
+ "%s: %s: Greater than maximum value (%ld)\n",
+ progname, p, max);
+ exit(1);
+ }
+ dbp->errx(dbp, "%s: Greater than maximum value (%ld)", p, max);
+		return (1);
+ }
+ *storep = val;
+ return (0);
+#endif /* !defined(HAVE_STRTOUL) */
+}
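/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * same strtol()-based checks as above -- overflow via errno/ERANGE, trailing
 * garbage via the end pointer, then an explicit min/max test -- without the
 * DB handle plumbing.
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int
get_long_arg(const char *p, long min, long max, long *storep)
{
	long val;
	char *end;

	errno = 0;
	val = strtol(p, &end, 10);
	if ((val == LONG_MIN || val == LONG_MAX) && errno == ERANGE)
		return (1);			/* out of long's range */
	if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n'))
		return (1);			/* not a number */
	if (val < min || val > max)
		return (1);			/* outside caller's range */
	*storep = val;
	return (0);
}

int
main(void)
{
	long pagesize;

	if (get_long_arg("4096", 512, 65536, &pagesize) == 0)
		printf("pagesize = %ld\n", pagesize);
	return (0);
}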
diff --git a/bdb/common/db_log2.c b/bdb/common/db_log2.c
new file mode 100644
index 00000000000..95bc69499c6
--- /dev/null
+++ b/bdb/common/db_log2.c
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_log2.c,v 11.4 2000/02/14 02:59:41 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+/*
+ * PUBLIC: u_int32_t __db_log2 __P((u_int32_t));
+ */
+u_int32_t
+__db_log2(num)
+ u_int32_t num;
+{
+ u_int32_t i, limit;
+
+ limit = 1;
+ for (i = 0; limit < num; limit = limit << 1)
+ ++i;
+ return (i);
+}
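/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * loop above computes the ceiling of log2(num), i.e. the smallest i with
 * (1 << i) >= num.
 */
#include <assert.h>

static unsigned int
ceil_log2(unsigned int num)
{
	unsigned int i, limit;

	for (i = 0, limit = 1; limit < num; limit <<= 1)
		++i;
	return (i);
}

int
main(void)
{
	assert(ceil_log2(1) == 0);
	assert(ceil_log2(1024) == 10);
	assert(ceil_log2(1025) == 11);
	return (0);
}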
diff --git a/bdb/common/util_log.c b/bdb/common/util_log.c
new file mode 100644
index 00000000000..a4743cc2cee
--- /dev/null
+++ b/bdb/common/util_log.c
@@ -0,0 +1,63 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: util_log.c,v 1.7 2000/11/30 00:58:31 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+/*
+ * __db_util_logset --
+ * Log that we're running.
+ *
+ * PUBLIC: int __db_util_logset __P((const char *, char *));
+ */
+int
+__db_util_logset(progname, fname)
+ const char *progname;
+ char *fname;
+{
+ FILE *fp;
+ time_t now;
+
+ if ((fp = fopen(fname, "w")) == NULL)
+ goto err;
+
+ (void)time(&now);
+ fprintf(fp, "%s: %lu %s", progname, (u_long)getpid(), ctime(&now));
+
+ if (fclose(fp) == EOF)
+ goto err;
+
+ return (0);
+
+err: fprintf(stderr, "%s: %s: %s\n", progname, fname, strerror(errno));
+ return (1);
+}
diff --git a/bdb/common/util_sig.c b/bdb/common/util_sig.c
new file mode 100644
index 00000000000..6fe0166fe64
--- /dev/null
+++ b/bdb/common/util_sig.c
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: util_sig.c,v 1.3 2000/04/28 19:32:00 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <signal.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+static int interrupt;
+static void onint __P((int));
+
+/*
+ * onint --
+ * Interrupt signal handler.
+ */
+static void
+onint(signo)
+ int signo;
+{
+ if ((interrupt = signo) == 0)
+ interrupt = SIGINT;
+}
+
+/*
+ * __db_util_siginit --
+ *
+ * PUBLIC: void __db_util_siginit __P((void));
+ */
+void
+__db_util_siginit()
+{
+ /*
+ * Initialize the set of signals for which we want to clean up.
+ * Generally, we try not to leave the shared regions locked if
+ * we can.
+ */
+#ifdef SIGHUP
+ (void)signal(SIGHUP, onint);
+#endif
+ (void)signal(SIGINT, onint);
+#ifdef SIGPIPE
+ (void)signal(SIGPIPE, onint);
+#endif
+ (void)signal(SIGTERM, onint);
+}
+
+/*
+ * __db_util_interrupted --
+ * Return if interrupted.
+ *
+ * PUBLIC: int __db_util_interrupted __P((void));
+ */
+int
+__db_util_interrupted()
+{
+ return (interrupt != 0);
+}
+
+/*
+ * __db_util_sigresend --
+ *
+ * PUBLIC: void __db_util_sigresend __P((void));
+ */
+void
+__db_util_sigresend()
+{
+ /* Resend any caught signal. */
+	if (interrupt != 0) {
+ (void)signal(interrupt, SIG_DFL);
+ (void)raise(interrupt);
+ /* NOTREACHED */
+ }
+}
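/*
 * Illustrative sketch (standalone ISO C; names below are hypothetical): the
 * intended pattern for the three helpers above -- catch the signal so shared
 * state can be left clean, poll a flag from the main loop, then re-raise the
 * original signal with the default disposition so the exit status is right.
 * Only SIGINT/SIGTERM are shown here.
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t caught;

static void
onint(int signo)
{
	caught = signo;
}

int
main(void)
{
	(void)signal(SIGINT, onint);
	(void)signal(SIGTERM, onint);

	while (caught == 0) {
		/* ... one unit of work, cleanup-safe ... */
		break;			/* keep the sketch terminating */
	}

	if (caught != 0) {
		(void)signal(caught, SIG_DFL);
		(void)raise(caught);
		/* NOTREACHED */
	}
	return (0);
}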
diff --git a/bdb/cxx/cxx_app.cpp b/bdb/cxx/cxx_app.cpp
new file mode 100644
index 00000000000..1fcf04b5c43
--- /dev/null
+++ b/bdb/cxx/cxx_app.cpp
@@ -0,0 +1,671 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_app.cpp,v 11.38 2000/12/21 20:30:18 dda Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <stdio.h> // needed for set_error_stream
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+#include "db_int.h"
+#include "common_ext.h"
+
+// The reason for a static variable is that some structures
+// (like Dbts) have no connection to any Db or DbEnv, so when
+// errors occur in their methods, we must have some reasonable
+// way to determine whether to throw or return errors.
+//
+// This variable is taken from flags whenever a DbEnv is constructed.
+// Normally there is only one DbEnv per program, and even if not,
+// there is typically a single policy of throwing or returning.
+//
+static int last_known_error_policy = ON_ERROR_UNKNOWN;
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbEnv //
+// //
+////////////////////////////////////////////////////////////////////////
+
+ostream *DbEnv::error_stream_ = 0;
+
+// _destroy_check is called when there is a user error in a
+// destructor, specifically when close has not been called for an
+// object (even if it was never opened). If the DbEnv is being
+// destroyed we cannot always use DbEnv::error_stream_, so we'll
+// use cerr in that case.
+//
+void DbEnv::_destroy_check(const char *str, int isDbEnv)
+{
+ ostream *out;
+
+ out = error_stream_;
+ if (out == NULL || isDbEnv == 1)
+ out = &cerr;
+
+ (*out) << "DbEnv::_destroy_check: open " << str << " object destroyed\n";
+}
+
+// A truism for the DbEnv object is that there is a valid
+// DB_ENV handle from the constructor until close().
+// After the close, the DB_ENV handle is invalid and
+// no operations are permitted on the DbEnv (other than
+// destructor). Leaving the DbEnv handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow DbEnv objects to be closed and reopened.
+// This implied always keeping a valid DB_ENV object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+DbEnv::DbEnv(u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, tx_recover_callback_(0)
+, paniccall_callback_(0)
+{
+ int err;
+
+ COMPQUIET(err, 0);
+ if ((err = initialize(0)) != 0)
+ DB_ERROR("DbEnv::DbEnv", err, error_policy());
+}
+
+DbEnv::DbEnv(DB_ENV *env, u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, tx_recover_callback_(0)
+, paniccall_callback_(0)
+{
+ int err;
+
+ COMPQUIET(err, 0);
+ if ((err = initialize(env)) != 0)
+ DB_ERROR("DbEnv::DbEnv", err, error_policy());
+}
+
+// Note: if the user has not closed, we call _destroy_check
+// to warn against this non-safe programming practice,
+// and call close anyway.
+//
+DbEnv::~DbEnv()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ _destroy_check("DbEnv", 1);
+ (void)env->close(env, 0);
+
+ // extra safety
+ cleanup();
+ }
+}
+
+// called by Db destructor when the DbEnv is owned by DB.
+void DbEnv::cleanup()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ env->cj_internal = 0;
+ imp_ = 0;
+ }
+}
+
+int DbEnv::close(u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ int err, init_err;
+
+ COMPQUIET(init_err, 0);
+
+ // after a close (no matter if success or failure),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((err = env->close(env, flags)) != 0) {
+ DB_ERROR("DbEnv::close", err, error_policy());
+ }
+ return (err);
+}
+
+void DbEnv::err(int error, const char *format, ...)
+{
+ va_list args;
+ DB_ENV *env = unwrap(this);
+
+ va_start(args, format);
+ __db_real_err(env, error, 1, 1, format, args);
+ va_end(args);
+}
+
+void DbEnv::errx(const char *format, ...)
+{
+ va_list args;
+ DB_ENV *env = unwrap(this);
+
+ va_start(args, format);
+ __db_real_err(env, 0, 0, 1, format, args);
+ va_end(args);
+}
+
+// used internally during constructor
+// to associate an existing DB_ENV with this DbEnv,
+// or create a new one. If there is an error,
+// construct_error_ is set; this is examined during open.
+//
+int DbEnv::initialize(DB_ENV *env)
+{
+ int err;
+
+ last_known_error_policy = error_policy();
+
+ if (env == 0) {
+ // Create a new DB_ENV environment.
+ if ((err = ::db_env_create(&env,
+ construct_flags_ & ~DB_CXX_NO_EXCEPTIONS)) != 0) {
+ construct_error_ = err;
+ return (err);
+ }
+ }
+ imp_ = wrap(env);
+ env->cj_internal = this; // for DB_ENV* to DbEnv* conversion
+ return (0);
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int DbEnv::error_policy()
+{
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+}
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB->open method.
+//
+int DbEnv::open(const char *db_home, u_int32_t flags, int mode)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = construct_error_) != 0)
+		DB_ERROR("DbEnv::open", err, error_policy());
+ else if ((err = env->open(env, db_home, flags, mode)) != 0)
+ DB_ERROR("DbEnv::open", err, error_policy());
+
+ return (err);
+}
+
+int DbEnv::remove(const char *db_home, u_int32_t flags)
+{
+ DB_ENV *env;
+ int ret;
+
+ env = unwrap(this);
+
+ // after a remove (no matter if success or failure),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((ret = env->remove(env, db_home, flags)) != 0)
+ DB_ERROR("DbEnv::remove", ret, error_policy());
+
+ return (ret);
+}
+
+// Report an error associated with the DbEnv.
+// error_policy is one of:
+// ON_ERROR_THROW throw an error
+// ON_ERROR_RETURN do nothing here, the caller will return an error
+// ON_ERROR_UNKNOWN defer the policy to policy saved in DbEnv::DbEnv
+//
+void DbEnv::runtime_error(const char *caller, int error, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbException except(caller, error);
+ throw except;
+ }
+}
+
+// static method
+char *DbEnv::strerror(int error)
+{
+ return (db_strerror(error));
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+void _stream_error_function_c(const char *prefix, char *message)
+{
+ DbEnv::_stream_error_function(prefix, message);
+}
+
+void DbEnv::_stream_error_function(const char *prefix, char *message)
+{
+ // HP compilers need the extra casts, we don't know why.
+ if (error_stream_) {
+ if (prefix) {
+ (*error_stream_) << prefix << (const char *)": ";
+ }
+ if (message) {
+ (*error_stream_) << (const char *)message;
+ }
+ (*error_stream_) << (const char *)"\n";
+ }
+}
+
+// Note: This actually behaves a bit like a static function,
+// since DB_ENV.db_errcall has no information about which
+// db_env triggered the call. A user that has multiple DB_ENVs
+// will simply not be able to have different streams for each one.
+//
+void DbEnv::set_error_stream(ostream *stream)
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ error_stream_ = stream;
+ dbenv->set_errcall(dbenv, (stream == 0) ? 0 :
+ _stream_error_function_c);
+}
+
+// static method
+char *DbEnv::version(int *major, int *minor, int *patch)
+{
+ return (db_version(major, minor, patch));
+}
+
+// This is a variant of the DB_WO_ACCESS macro to define a simple set_
+// method calling the underlying C method, but unlike a simple
+// set method, it may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g. "char *arg") defined in terms of "arg".
+//
+#define DB_DBENV_ACCESS(_name, _argspec) \
+ \
+int DbEnv::set_##_name(_argspec) \
+{ \
+ int ret; \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ if ((ret = (*(dbenv->set_##_name))(dbenv, arg)) != 0) {\
+ DB_ERROR("DbEnv::set_" # _name, ret, error_policy()); \
+ } \
+ return (ret); \
+}
+
+#define DB_DBENV_ACCESS_NORET(_name, _argspec) \
+ \
+void DbEnv::set_##_name(_argspec) \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ (*(dbenv->set_##_name))(dbenv, arg); \
+ return; \
+}
+
+DB_DBENV_ACCESS_NORET(errfile, FILE *arg)
+DB_DBENV_ACCESS_NORET(errpfx, const char *arg)
+
+// We keep these alphabetical by field name,
+// for comparison with Java's list.
+//
+DB_DBENV_ACCESS(data_dir, const char *arg)
+DB_DBENV_ACCESS(lg_bsize, u_int32_t arg)
+DB_DBENV_ACCESS(lg_dir, const char *arg)
+DB_DBENV_ACCESS(lg_max, u_int32_t arg)
+DB_DBENV_ACCESS(lk_detect, u_int32_t arg)
+DB_DBENV_ACCESS(lk_max, u_int32_t arg)
+DB_DBENV_ACCESS(lk_max_lockers, u_int32_t arg)
+DB_DBENV_ACCESS(lk_max_locks, u_int32_t arg)
+DB_DBENV_ACCESS(lk_max_objects, u_int32_t arg)
+DB_DBENV_ACCESS(mp_mmapsize, size_t arg)
+DB_DBENV_ACCESS(mutexlocks, int arg)
+DB_DBENV_ACCESS(tmp_dir, const char *arg)
+DB_DBENV_ACCESS(tx_max, u_int32_t arg)
+
+// Here are the set methods that don't fit the above mold.
+//
+extern "C" {
+ typedef void (*db_errcall_fcn_type)
+ (const char *, char *);
+};
+
+void DbEnv::set_errcall(void (*arg)(const char *, char *))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ // XXX
+ // We are casting from a function ptr declared with C++
+ // linkage to one (same arg types) declared with C
+ // linkage. It's hard to imagine a pair of C/C++
+ // compilers from the same vendor for which this
+ // won't work. Unfortunately, we can't use a
+ // intercept function like the others since the
+ // function does not have a (DbEnv*) as one of
+ // the args. If this causes trouble, we can pull
+ // the same trick we use in Java, namely stuffing
+ // a (DbEnv*) pointer into the prefix. We're
+ // avoiding this for the moment because it obfuscates.
+ //
+ (*(dbenv->set_errcall))(dbenv, (db_errcall_fcn_type)arg);
+}
+
+int DbEnv::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret =
+ (*(dbenv->set_cachesize))(dbenv, gbytes, bytes, ncache)) != 0)
+ DB_ERROR("DbEnv::set_cachesize", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_flags(u_int32_t flags, int onoff)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = (dbenv->set_flags)(dbenv, flags, onoff)) != 0)
+ DB_ERROR("DbEnv::set_flags", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_lk_conflicts(u_int8_t *lk_conflicts, int lk_max)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = (*(dbenv->set_lk_conflicts))
+ (dbenv, lk_conflicts, lk_max)) != 0)
+ DB_ERROR("DbEnv::set_lk_conflicts", ret, error_policy());
+
+ return (ret);
+}
+
+// static method
+int DbEnv::set_pageyield(int arg)
+{
+ int ret;
+
+ if ((ret = db_env_set_pageyield(arg)) != 0)
+ DB_ERROR("DbEnv::set_pageyield", ret, last_known_error_policy);
+
+ return (ret);
+}
+
+// static method
+int DbEnv::set_panicstate(int arg)
+{
+ int ret;
+
+ if ((ret = db_env_set_panicstate(arg)) != 0)
+ DB_ERROR("DbEnv::set_panicstate", ret, last_known_error_policy);
+
+ return (ret);
+}
+
+// static method
+int DbEnv::set_region_init(int arg)
+{
+ int ret;
+
+ if ((ret = db_env_set_region_init(arg)) != 0)
+ DB_ERROR("DbEnv::set_region_init", ret, last_known_error_policy);
+
+ return (ret);
+}
+
+int DbEnv::set_server(char *host, long tsec, long ssec, u_int32_t flags)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = dbenv->set_server(dbenv, host, tsec, ssec, flags)) != 0)
+ DB_ERROR("DbEnv::set_server", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_shm_key(long shm_key)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = dbenv->set_shm_key(dbenv, shm_key)) != 0)
+ DB_ERROR("DbEnv::set_shm_key", ret, error_policy());
+
+ return (ret);
+}
+
+// static method
+int DbEnv::set_tas_spins(u_int32_t arg)
+{
+ int ret;
+
+ if ((ret = db_env_set_tas_spins(arg)) != 0)
+ DB_ERROR("DbEnv::set_tas_spins", ret, last_known_error_policy);
+
+ return (ret);
+}
+
+int DbEnv::set_verbose(u_int32_t which, int onoff)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = (*(dbenv->set_verbose))(dbenv, which, onoff)) != 0)
+ DB_ERROR("DbEnv::set_verbose", ret, error_policy());
+
+ return (ret);
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+int _tx_recover_intercept_c(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ return (DbEnv::_tx_recover_intercept(env, dbt, lsn, op));
+}
+
+int DbEnv::_tx_recover_intercept(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::tx_recover_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::tx_recover_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ if (cxxenv->tx_recover_callback_ == 0) {
+ DB_ERROR("DbEnv::tx_recover_callback", EINVAL, cxxenv->error_policy());
+ return (EINVAL);
+ }
+ Dbt *cxxdbt = (Dbt *)dbt;
+ DbLsn *cxxlsn = (DbLsn *)lsn;
+ return ((*cxxenv->tx_recover_callback_)(cxxenv, cxxdbt, cxxlsn, op));
+}
+
+int DbEnv::set_tx_recover
+ (int (*arg)(DbEnv *, Dbt *, DbLsn *, db_recops))
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ tx_recover_callback_ = arg;
+ if ((ret =
+ (*(dbenv->set_tx_recover))(dbenv, _tx_recover_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_tx_recover", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_tx_timestamp(time_t *timestamp)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = dbenv->set_tx_timestamp(dbenv, timestamp)) != 0)
+ DB_ERROR("DbEnv::set_tx_timestamp", ret, error_policy());
+
+ return (ret);
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+void _paniccall_intercept_c(DB_ENV *env, int errval)
+{
+ DbEnv::_paniccall_intercept(env, errval);
+}
+
+void DbEnv::_paniccall_intercept(DB_ENV *env, int errval)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN);
+ }
+ if (cxxenv->paniccall_callback_ == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL, cxxenv->error_policy());
+ }
+ (*cxxenv->paniccall_callback_)(cxxenv, errval);
+}
+
+int DbEnv::set_paniccall(void (*arg)(DbEnv *, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ paniccall_callback_ = arg;
+
+ return ((*(dbenv->set_paniccall))(dbenv, _paniccall_intercept_c));
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+int _recovery_init_intercept_c(DB_ENV *env)
+{
+ return (DbEnv::_recovery_init_intercept(env));
+}
+
+int DbEnv::_recovery_init_intercept(DB_ENV *env)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ if (cxxenv->recovery_init_callback_ == 0) {
+ DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
+ cxxenv->error_policy());
+ }
+ return ((*cxxenv->recovery_init_callback_)(cxxenv));
+}
+
+int DbEnv::set_recovery_init(int (*arg)(DbEnv *))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ recovery_init_callback_ = arg;
+
+ return ((*(dbenv->set_recovery_init))(dbenv, _recovery_init_intercept_c));
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+void _feedback_intercept_c(DB_ENV *env, int opcode, int pct)
+{
+ DbEnv::_feedback_intercept(env, opcode, pct);
+}
+
+void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ if (cxxenv->feedback_callback_ == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL,
+ cxxenv->error_policy());
+ return;
+ }
+ (*cxxenv->feedback_callback_)(cxxenv, opcode, pct);
+}
+
+int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ feedback_callback_ = arg;
+
+ return ((*(dbenv->set_feedback))(dbenv, _feedback_intercept_c));
+}
diff --git a/bdb/cxx/cxx_except.cpp b/bdb/cxx/cxx_except.cpp
new file mode 100644
index 00000000000..a62e21a767d
--- /dev/null
+++ b/bdb/cxx/cxx_except.cpp
@@ -0,0 +1,132 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_except.cpp,v 11.7 2000/09/21 15:05:45 dda Exp $";
+#endif /* not lint */
+
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+// tmpString is used to create strings on the stack
+//
+class tmpString
+{
+public:
+ tmpString(const char *str1,
+ const char *str2 = 0,
+ const char *str3 = 0,
+ const char *str4 = 0,
+ const char *str5 = 0)
+ {
+ int len = strlen(str1);
+ if (str2)
+ len += strlen(str2);
+ if (str3)
+ len += strlen(str3);
+ if (str4)
+ len += strlen(str4);
+ if (str5)
+ len += strlen(str5);
+
+ s_ = new char[len+1];
+
+ strcpy(s_, str1);
+ if (str2)
+ strcat(s_, str2);
+ if (str3)
+ strcat(s_, str3);
+ if (str4)
+ strcat(s_, str4);
+ if (str5)
+ strcat(s_, str5);
+ }
+ ~tmpString() { delete [] s_; }
+ operator const char *() { return (s_); }
+
+private:
+ char *s_;
+};
+
+// Note: would not be needed if we could inherit from exception
+// It does not appear to be possible to inherit from exception
+// with the current Microsoft library (VC5.0).
+//
+static char *dupString(const char *s)
+{
+ char *r = new char[strlen(s)+1];
+ strcpy(r, s);
+ return (r);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbException::~DbException()
+{
+ if (what_)
+ delete [] what_;
+}
+
+DbException::DbException(int err)
+: err_(err)
+{
+ what_ = dupString(db_strerror(err));
+}
+
+DbException::DbException(const char *description)
+: err_(0)
+{
+ what_ = dupString(tmpString(description));
+}
+
+DbException::DbException(const char *prefix, int err)
+: err_(err)
+{
+ what_ = dupString(tmpString(prefix, ": ", db_strerror(err)));
+}
+
+DbException::DbException(const char *prefix1, const char *prefix2, int err)
+: err_(err)
+{
+ what_ = dupString(tmpString(prefix1, ": ", prefix2, ": ", db_strerror(err)));
+}
+
+DbException::DbException(const DbException &that)
+: err_(that.err_)
+{
+ what_ = dupString(that.what_);
+}
+
+DbException &DbException::operator = (const DbException &that)
+{
+ if (this != &that) {
+ err_ = that.err_;
+ if (what_)
+ delete [] what_;
+ what_ = 0; // in case new throws exception
+ what_ = dupString(that.what_);
+ }
+ return (*this);
+}
+
+int DbException::get_errno() const
+{
+ return (err_);
+}
+
+const char *DbException::what() const
+{
+ return (what_);
+}
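
A minimal sketch of how callers typically consume this class, assuming <db_cxx.h> and a Db opened without DB_CXX_NO_EXCEPTIONS; checked_put is an illustrative name:

#include <stdio.h>
#include <db_cxx.h>

static int checked_put(Db *db, DbTxn *txn, Dbt *key, Dbt *data)
{
    try {
        return (db->put(txn, key, data, 0));
    } catch (DbException &dbe) {
        // what() returns the composed message, get_errno() the DB error.
        fprintf(stderr, "put failed: %s (errno %d)\n",
            dbe.what(), dbe.get_errno());
        return (dbe.get_errno());
    }
}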
diff --git a/bdb/cxx/cxx_lock.cpp b/bdb/cxx/cxx_lock.cpp
new file mode 100644
index 00000000000..e8ce2aa9d30
--- /dev/null
+++ b/bdb/cxx/cxx_lock.cpp
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_lock.cpp,v 11.9 2000/09/21 15:05:45 dda Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+int DbEnv::lock_detect(u_int32_t flags, u_int32_t atype, int *aborted)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = ::lock_detect(env, flags, atype, aborted)) != 0) {
+ DB_ERROR("DbEnv::lock_detect", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = ::lock_get(env, locker, flags, obj,
+ lock_mode, &lock->lock_)) != 0) {
+ DB_ERROR("DbEnv::lock_get", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::lock_id(u_int32_t *idp)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = ::lock_id(env, idp)) != 0) {
+ DB_ERROR("DbEnv::lock_id", err, error_policy());
+ }
+ return (err);
+}
+
+int DbEnv::lock_stat(DB_LOCK_STAT **statp,
+ db_malloc_fcn_type db_malloc_fcn)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = ::lock_stat(env, statp, db_malloc_fcn)) != 0) {
+ DB_ERROR("DbEnv::lock_stat", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::lock_vec(u_int32_t locker, u_int32_t flags,
+ DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elist_returned)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = ::lock_vec(env, locker, flags, list,
+ nlist, elist_returned)) != 0) {
+ DB_ERROR("DbEnv::lock_vec", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbLock //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbLock::DbLock(DB_LOCK value)
+: lock_(value)
+{
+}
+
+DbLock::DbLock()
+{
+ memset(&lock_, 0, sizeof(DB_LOCK));
+}
+
+DbLock::DbLock(const DbLock &that)
+: lock_(that.lock_)
+{
+}
+
+DbLock &DbLock::operator = (const DbLock &that)
+{
+ lock_ = that.lock_;
+ return (*this);
+}
+
+int DbLock::put(DbEnv *env)
+{
+ DB_ENV *envp = unwrap(env);
+
+ if (!env) {
+ return (EINVAL); // handle never assigned
+ }
+
+ int err;
+ if ((err = lock_put(envp, &lock_)) != 0) {
+ DB_ERROR("DbLock::put", err, env->error_policy());
+ }
+ return (err);
+}
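
A minimal sketch of the lock wrappers above, assuming <db_cxx.h>, an opened DbEnv with the lock subsystem initialized, and a Dbt naming the object to lock; lock_and_release is an illustrative name:

#include <db_cxx.h>

static int lock_and_release(DbEnv *env, const Dbt *obj)
{
    u_int32_t locker;
    DbLock lock;
    int err;

    // Allocate a locker id, take a write lock on obj, then release it.
    if ((err = env->lock_id(&locker)) != 0)
        return (err);
    if ((err = env->lock_get(locker, 0, obj, DB_LOCK_WRITE, &lock)) != 0)
        return (err);
    return (lock.put(env));
}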
diff --git a/bdb/cxx/cxx_log.cpp b/bdb/cxx/cxx_log.cpp
new file mode 100644
index 00000000000..336b9d337f0
--- /dev/null
+++ b/bdb/cxx/cxx_log.cpp
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_log.cpp,v 11.9 2000/09/21 15:05:45 dda Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbLog //
+// //
+////////////////////////////////////////////////////////////////////////
+
+int DbEnv::log_archive(char **list[], u_int32_t flags,
+ db_malloc_fcn_type db_malloc_fcn)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = ::log_archive(env, list, flags, db_malloc_fcn)) != 0) {
+ DB_ERROR("DbEnv::log_archive", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1)
+{
+ return (::log_compare(lsn0, lsn1));
+}
+
+int DbEnv::log_file(DbLsn *lsn, char *namep, size_t len)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = ::log_file(env, lsn, namep, len)) != 0) {
+ DB_ERROR("DbEnv::log_file", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_flush(const DbLsn *lsn)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = ::log_flush(env, lsn)) != 0) {
+ DB_ERROR("DbEnv::log_flush", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_get(DbLsn *lsn, Dbt *data, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = ::log_get(env, lsn, data, flags)) != 0) {
+ DB_ERROR("DbEnv::log_get", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags)
+{
+ int err = 0;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = ::log_put(env, lsn, data, flags)) != 0) {
+ DB_ERROR("DbEnv::log_put", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_register(Db *dbp, const char *name)
+{
+ int err = 0;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = ::log_register(env, unwrap(dbp), name)) != 0) {
+ DB_ERROR("DbEnv::log_register", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_stat(DB_LOG_STAT **spp, db_malloc_fcn_type db_malloc_fcn)
+{
+ int err = 0;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = ::log_stat(env, spp, db_malloc_fcn)) != 0) {
+ DB_ERROR("DbEnv::log_stat", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_unregister(Db *dbp)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = ::log_unregister(env, unwrap(dbp))) != 0) {
+ DB_ERROR("DbEnv::log_unregister", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
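
A minimal sketch of DbEnv::log_archive() as wrapped above, assuming <db_cxx.h> and an opened DbEnv with logging initialized; print_removable_logs is an illustrative name:

#include <stdio.h>
#include <db_cxx.h>

static int print_removable_logs(DbEnv *env)
{
    char **list;
    int err;

    // Flags 0 asks for log files no longer needed for recovery; with a
    // NULL allocator, DB allocates the returned list itself.
    if ((err = env->log_archive(&list, 0, NULL)) != 0)
        return (err);
    if (list != NULL)
        for (char **p = list; *p != NULL; ++p)
            printf("%s\n", *p);
    // Freeing the returned list is omitted from this sketch.
    return (0);
}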
diff --git a/bdb/cxx/cxx_mpool.cpp b/bdb/cxx/cxx_mpool.cpp
new file mode 100644
index 00000000000..22f4735e333
--- /dev/null
+++ b/bdb/cxx/cxx_mpool.cpp
@@ -0,0 +1,180 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_mpool.cpp,v 11.11 2000/09/21 15:05:45 dda Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbMpoolFile //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbMpoolFile::DbMpoolFile()
+: imp_(0)
+{
+}
+
+DbMpoolFile::~DbMpoolFile()
+{
+}
+
+int DbMpoolFile::open(DbEnv *envp, const char *file,
+ u_int32_t flags, int mode, size_t pagesize,
+ DB_MPOOL_FINFO *finfop, DbMpoolFile **result)
+{
+ int err;
+
+ DB_MPOOLFILE *mpf;
+ DB_ENV *env = unwrap(envp);
+
+ if ((err = ::memp_fopen(env, file, flags, mode, pagesize,
+ finfop, &mpf)) != 0) {
+ DB_ERROR("DbMpoolFile::open", err, envp->error_policy());
+ return (err);
+ }
+ *result = new DbMpoolFile();
+ (*result)->imp_ = wrap(mpf);
+ return (0);
+}
+
+int DbMpoolFile::close()
+{
+ DB_MPOOLFILE *mpf = unwrap(this);
+ int err = 0;
+ if (!mpf) {
+ err = EINVAL;
+ }
+ else if ((err = ::memp_fclose(mpf)) != 0) {
+ DB_ERROR("DbMpoolFile::close", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ imp_ = 0; // extra safety
+
+ // This may seem weird, but is legal as long as we don't access
+ // any data before returning.
+ //
+ delete this;
+ return (0);
+}
+
+int DbMpoolFile::get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep)
+{
+ DB_MPOOLFILE *mpf = unwrap(this);
+ int err = 0;
+ if (!mpf) {
+ err = EINVAL;
+ }
+ else if ((err = ::memp_fget(mpf, pgnoaddr, flags, pagep)) != 0) {
+ DB_ERROR("DbMpoolFile::get", err, ON_ERROR_UNKNOWN);
+ }
+ return (err);
+}
+
+int DbMpoolFile::put(void *pgaddr, u_int32_t flags)
+{
+ DB_MPOOLFILE *mpf = unwrap(this);
+ int err = 0;
+ if (!mpf) {
+ err = EINVAL;
+ }
+ else if ((err = ::memp_fput(mpf, pgaddr, flags)) != 0) {
+ DB_ERROR("DbMpoolFile::put", err, ON_ERROR_UNKNOWN);
+ }
+ return (err);
+}
+
+int DbMpoolFile::set(void *pgaddr, u_int32_t flags)
+{
+ DB_MPOOLFILE *mpf = unwrap(this);
+ int err = 0;
+ if (!mpf) {
+ err = EINVAL;
+ }
+ else if ((err = ::memp_fset(mpf, pgaddr, flags)) != 0) {
+ DB_ERROR("DbMpoolFile::set", err, ON_ERROR_UNKNOWN);
+ }
+ return (err);
+}
+
+int DbMpoolFile::sync()
+{
+ DB_MPOOLFILE *mpf = unwrap(this);
+ int err = 0;
+ if (!mpf) {
+ err = EINVAL;
+ }
+ else if ((err = ::memp_fsync(mpf)) != 0 && err != DB_INCOMPLETE) {
+ DB_ERROR("DbMpoolFile::sync", err, ON_ERROR_UNKNOWN);
+ }
+ return (err);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbMpool //
+// //
+////////////////////////////////////////////////////////////////////////
+
+int DbEnv::memp_register(int ftype,
+ pgin_fcn_type pgin_fcn,
+ pgout_fcn_type pgout_fcn)
+{
+ DB_ENV *env = unwrap(this);
+ int err = 0;
+
+ if ((err = ::memp_register(env, ftype, pgin_fcn, pgout_fcn)) != 0) {
+ DB_ERROR("DbEnv::memp_register", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::memp_stat(DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp,
+ db_malloc_fcn_type db_malloc_fcn)
+{
+ DB_ENV *env = unwrap(this);
+ int err = 0;
+
+ if ((err = ::memp_stat(env, gsp, fsp, db_malloc_fcn)) != 0) {
+ DB_ERROR("DbEnv::memp_stat", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::memp_sync(DbLsn *sn)
+{
+ DB_ENV *env = unwrap(this);
+ int err = 0;
+
+ if ((err = ::memp_sync(env, sn)) != 0 && err != DB_INCOMPLETE) {
+ DB_ERROR("DbEnv::memp_sync", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::memp_trickle(int pct, int *nwrotep)
+{
+ DB_ENV *env = unwrap(this);
+ int err = 0;
+
+ if ((err = ::memp_trickle(env, pct, nwrotep)) != 0) {
+ DB_ERROR("DbEnv::memp_trickle", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
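
A minimal sketch of the DbMpoolFile wrappers above, assuming <db_cxx.h>, an opened DbEnv with the memory pool initialized, that open() is declared as a class-level (static) method in db_cxx.h, and an illustrative file name and page size:

#include <db_cxx.h>

static int touch_first_page(DbEnv *env)
{
    DbMpoolFile *mpf;
    db_pgno_t pgno = 0;
    void *page;
    int err;

    // Open (creating if necessary) a file with 1KB pages in the pool.
    if ((err = DbMpoolFile::open(env, "example.mpool", DB_CREATE, 0644,
        1024, NULL, &mpf)) != 0)
        return (err);

    // Pin page 0, creating it if it does not yet exist, then release it.
    if ((err = mpf->get(&pgno, DB_MPOOL_CREATE, &page)) == 0)
        (void)mpf->put(page, 0);

    (void)mpf->close();    // close() also deletes the DbMpoolFile object
    return (err);
}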
diff --git a/bdb/cxx/cxx_table.cpp b/bdb/cxx/cxx_table.cpp
new file mode 100644
index 00000000000..b7b335d26e9
--- /dev/null
+++ b/bdb/cxx/cxx_table.cpp
@@ -0,0 +1,808 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_table.cpp,v 11.35 2001/01/11 18:19:49 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_ext.h"
+#include "common_ext.h"
+
+////////////////////////////////////////////////////////////////////////
+// //
+// Db //
+// //
+////////////////////////////////////////////////////////////////////////
+
+// A truism for the Db object is that there is a valid
+// DB handle from the constructor until close().
+// After the close, the DB handle is invalid and
+// no operations are permitted on the Db (other than
+// destructor). Leaving the Db handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow Db objects to be closed and reopened.
+// This implied always keeping a valid DB object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+Db::Db(DbEnv *env, u_int32_t flags)
+: imp_(0)
+, env_(env)
+, construct_error_(0)
+, flags_(0)
+, construct_flags_(flags)
+{
+ if (env_ == 0)
+ flags_ |= DB_CXX_PRIVATE_ENV;
+ initialize();
+}
+
+// Note: if the user has not closed, we call _destroy_check
+// to warn against this non-safe programming practice.
+// We can't close, because the environment may already
+// be closed/destroyed.
+//
+Db::~Db()
+{
+ DB *db;
+
+ db = unwrap(this);
+ if (db != NULL) {
+ DbEnv::_destroy_check("Db", 0);
+ cleanup();
+ }
+}
+
+// private method to initialize during constructor.
+// initialize must create a backing DB object,
+// and if that creates a new DB_ENV, it must be tied to a new DbEnv.
+// If there is an error, construct_error_ is set; this is examined
+// during open.
+//
+int Db::initialize()
+{
+ u_int32_t cxx_flags;
+ DB *db;
+ int err;
+ DB_ENV *cenv = unwrap(env_);
+
+ cxx_flags = construct_flags_ & DB_CXX_NO_EXCEPTIONS;
+
+ // Create a new underlying DB object.
+ // We rely on the fact that if a NULL DB_ENV* is given,
+ // one is allocated by DB.
+ //
+ if ((err = db_create(&db, cenv,
+ construct_flags_ & ~cxx_flags)) != 0) {
+ construct_error_ = err;
+ return (err);
+ }
+
+ // Associate the DB with this object
+ imp_ = wrap(db);
+ db->cj_internal = this;
+
+ // Create a new DbEnv from a DB_ENV* if it was created locally.
+ // It is deleted in Db::close().
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0)
+ env_ = new DbEnv(db->dbenv, cxx_flags);
+
+ return (0);
+}
+
+// private method to cleanup after destructor or during close.
+// If the environment was created by this Db object, we optionally
+// delete it, or return it so the caller can delete it after
+// last use.
+//
+void Db::cleanup()
+{
+ DB *db = unwrap(this);
+
+ if (db != NULL) {
+ // extra safety
+ db->cj_internal = 0;
+ imp_ = 0;
+
+ // we must dispose of the DbEnv object if
+ // we created it. This will be the case
+ // if a NULL DbEnv was passed into the constructor.
+ // The underlying DB_ENV object will be inaccessible
+ // after the close, so we must clean it up now.
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0) {
+ env_->cleanup();
+ delete env_;
+ env_ = 0;
+ }
+ }
+ construct_error_ = 0;
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int Db::error_policy()
+{
+ if (env_ != NULL)
+ return (env_->error_policy());
+ else {
+ // If the env_ is null, that means that the user
+ // did not attach an environment, so the correct error
+ // policy can be deduced from constructor flags
+ // for this Db.
+ //
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+ }
+}
+
+int Db::close(u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ // after a DB->close (no matter if success or failure),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((err = db->close(db, flags)) != 0 && err != DB_INCOMPLETE)
+ DB_ERROR("Db::close", err, error_policy());
+
+ return (err);
+}
+
+int Db::cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ DBC *dbc = 0;
+ int err;
+
+ if ((err = db->cursor(db, unwrap(txnid), &dbc, flags)) != 0) {
+ DB_ERROR("Db::cursor", err, error_policy());
+ return (err);
+ }
+
+ // The following cast implies that Dbc can be no larger than DBC
+ *cursorp = (Dbc*)dbc;
+ return (0);
+}
+
+int Db::del(DbTxn *txnid, Dbt *key, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->del(db, unwrap(txnid), key, flags)) != 0) {
+ // DB_NOTFOUND is a "normal" return, so should not be
+ // thrown as an error
+ //
+ if (err != DB_NOTFOUND) {
+ DB_ERROR("Db::del", err, error_policy());
+ return (err);
+ }
+ }
+ return (err);
+}
+
+void Db::err(int error, const char *format, ...)
+{
+ va_list args;
+ DB *db = unwrap(this);
+
+ va_start(args, format);
+ __db_real_err(db->dbenv, error, 1, 1, format, args);
+ va_end(args);
+}
+
+void Db::errx(const char *format, ...)
+{
+ va_list args;
+ DB *db = unwrap(this);
+
+ va_start(args, format);
+ __db_real_err(db->dbenv, 0, 0, 1, format, args);
+ va_end(args);
+}
+
+int Db::fd(int *fdp)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->fd(db, fdp)) != 0) {
+ DB_ERROR("Db::fd", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+void _db_feedback_intercept_c(DB *db, int opcode, int pct)
+{
+ Db::_feedback_intercept(db, opcode, pct);
+}
+
+//static
+void Db::_feedback_intercept(DB *db, int opcode, int pct)
+{
+ if (db == 0) {
+ DB_ERROR("Db::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ Db *cxxdb = (Db *)db->cj_internal;
+ if (cxxdb == 0) {
+ DB_ERROR("Db::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ if (cxxdb->feedback_callback_ == 0) {
+ DB_ERROR("Db::feedback_callback", EINVAL, cxxdb->error_policy());
+ return;
+ }
+ (*cxxdb->feedback_callback_)(cxxdb, opcode, pct);
+}
+
+int Db::set_feedback(void (*arg)(Db *, int, int))
+{
+ DB *db = unwrap(this);
+
+ feedback_callback_ = arg;
+
+ return ((*(db->set_feedback))(db, _db_feedback_intercept_c));
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+int _db_append_recno_intercept_c(DB *db, DBT *data, db_recno_t recno)
+{
+ return (Db::_append_recno_intercept(db, data, recno));
+}
+
+//static
+int Db::_append_recno_intercept(DB *db, DBT *data, db_recno_t recno)
+{
+ int err;
+
+ if (db == 0) {
+ DB_ERROR("Db::append_recno_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ Db *cxxdb = (Db *)db->cj_internal;
+ if (cxxdb == 0) {
+ DB_ERROR("Db::append_recno_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ if (cxxdb->append_recno_callback_ == 0) {
+ DB_ERROR("Db::append_recno_callback", EINVAL, cxxdb->error_policy());
+ return (EINVAL);
+ }
+
+ // making these copies is slow but portable.
+ // Another alternative is to cast the DBT* manufactured
+ // by the C layer to a Dbt*. It 'should be' safe since
+ // Dbt is a thin shell over DBT, adding no extra data,
+ // but is nonportable, and could lead to errors if anything
+ // were added to the Dbt class.
+ //
+ Dbt cxxdbt;
+ memcpy((DBT *)&cxxdbt, data, sizeof(DBT));
+ err = (*cxxdb->append_recno_callback_)(cxxdb, &cxxdbt, recno);
+ memcpy(data, (DBT *)&cxxdbt, sizeof(DBT));
+ return (err);
+}
+
+int Db::set_append_recno(int (*arg)(Db *, Dbt *, db_recno_t))
+{
+ DB *db = unwrap(this);
+
+ append_recno_callback_ = arg;
+
+ return ((*(db->set_append_recno))(db, _db_append_recno_intercept_c));
+}
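
A sketch of a user append_recno callback registered through the wrapper above, assuming <db_cxx.h>, a recno or queue Db used with DB_APPEND puts, and the get_data()/get_size() accessors generated by the DB_RW_ACCESS macros at the end of this file; my_append_recno is an illustrative name:

#include <string.h>
#include <db_cxx.h>

static int my_append_recno(Db *db, Dbt *data, db_recno_t recno)
{
    (void)db;
    // The Dbt seen here is the copy described above; changes made to it
    // are copied back into the C layer's DBT before the put completes.
    if (data->get_size() >= sizeof(recno))
        memcpy(data->get_data(), &recno, sizeof(recno));
    return (0);
}

// Registered once on the Db before appending:
//     (void)dbp->set_append_recno(my_append_recno);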
+
+int Db::get(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->get(db, unwrap(txnid), key, value, flags)) != 0) {
+ // DB_NOTFOUND and DB_KEYEMPTY are "normal" returns,
+ // so should not be thrown as an error
+ //
+ if (err != DB_NOTFOUND && err != DB_KEYEMPTY) {
+ DB_ERROR("Db::get", err, error_policy());
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Db::get_byteswapped() const
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_byteswapped(db));
+}
+
+DBTYPE Db::get_type() const
+{
+ DB *db = (DB *)unwrapConst(this);
+ return ((DBTYPE)db->get_type(db));
+}
+
+int Db::join(Dbc **curslist, Dbc **cursorp, u_int32_t flags)
+{
+ // Dbc is a "compatible" subclass of DBC -
+ // that is, no virtual functions or even extra data members,
+ // so this cast, although technically non-portable,
+ // "should" always be okay.
+ //
+ DBC **list = (DBC **)(curslist);
+ DB *db = unwrap(this);
+ DBC *dbc = 0;
+ int err;
+
+ if ((err = db->join(db, list, &dbc, flags)) != 0) {
+ DB_ERROR("Db::join_cursor", err, error_policy());
+ return (err);
+ }
+ *cursorp = (Dbc*)dbc;
+ return (0);
+}
+
+int Db::key_range(DbTxn *txnid, Dbt *key,
+ DB_KEY_RANGE *results, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->key_range(db, unwrap(txnid), key,
+ results, flags)) != 0) {
+ DB_ERROR("Db::key_range", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB->open method.
+//
+int Db::open(const char *file, const char *database,
+ DBTYPE type, u_int32_t flags, int mode)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if ((err = construct_error_) != 0)
+ DB_ERROR("Db::open", construct_error_, error_policy());
+ else if ((err = db->open(db, file, database, type, flags, mode)) != 0)
+ DB_ERROR("Db::open", err, error_policy());
+
+ return (err);
+}
+
+int Db::put(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if ((err = db->put(db, unwrap(txnid), key, value, flags)) != 0) {
+
+ // DB_KEYEXIST is a "normal" return, so should not be
+ // thrown as an error
+ //
+ if (err != DB_KEYEXIST) {
+ DB_ERROR("Db::put", err, error_policy());
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Db::rename(const char *file, const char *database,
+ const char *newname, u_int32_t flags)
+{
+ int err = 0;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::rename", EINVAL, error_policy());
+ return (EINVAL);
+ }
+
+ // after a DB->rename (no matter if success or failure),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((err = db->rename(db, file, database, newname, flags)) != 0) {
+ DB_ERROR("Db::rename", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int Db::remove(const char *file, const char *database, u_int32_t flags)
+{
+ int err = 0;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::remove", EINVAL, error_policy());
+ return (EINVAL);
+ }
+
+ // after a DB->remove (no matter if success or failure),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((err = db->remove(db, file, database, flags)) != 0)
+ DB_ERROR("Db::remove", err, error_policy());
+
+ return (err);
+}
+
+int Db::stat(void *sp, db_malloc_fcn_type db_malloc_fcn, u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::stat", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = db->stat(db, sp, db_malloc_fcn, flags)) != 0) {
+ DB_ERROR("Db::stat", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int Db::sync(u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::sync", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = db->sync(db, flags)) != 0 && err != DB_INCOMPLETE) {
+ DB_ERROR("Db::sync", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int Db::upgrade(const char *name, u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::upgrade", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = db->upgrade(db, name, flags)) != 0) {
+ DB_ERROR("Db::upgrade", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+static int _verify_callback_cxx(void *handle, const void *str_arg)
+{
+ char *str;
+ ostream *out;
+
+ str = (char *)str_arg;
+ out = (ostream *)handle;
+
+ (*out) << str;
+ if (out->fail())
+ return (EIO);
+
+ return (0);
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+int _verify_callback_c(void *handle, const void *str_arg)
+{
+ return (_verify_callback_cxx(handle, str_arg));
+}
+
+int Db::verify(const char *name, const char *subdb,
+ ostream *ostr, u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::verify", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = __db_verify_internal(db, name, subdb, ostr,
+ _verify_callback_c, flags)) != 0) {
+ DB_ERROR("Db::verify", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+// This is a variant of the DB_WO_ACCESS macro to define a simple set_
+// method calling the underlying C method, but unlike a simple
+// set method, it may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g. "char *arg") defined in terms of "arg".
+//
+#define DB_DB_ACCESS(_name, _argspec) \
+\
+int Db::set_##_name(_argspec) \
+{ \
+ int ret; \
+ DB *db = unwrap(this); \
+ \
+ if ((ret = (*(db->set_##_name))(db, arg)) != 0) { \
+ DB_ERROR("Db::set_" # _name, ret, error_policy()); \
+ } \
+ return (ret); \
+}
+
+#define DB_DB_ACCESS_NORET(_name, _argspec) \
+ \
+void Db::set_##_name(_argspec) \
+{ \
+ DB *db = unwrap(this); \
+ \
+ (*(db->set_##_name))(db, arg); \
+ return; \
+}
+
+DB_DB_ACCESS(bt_compare, bt_compare_fcn_type arg)
+DB_DB_ACCESS(bt_maxkey, u_int32_t arg)
+DB_DB_ACCESS(bt_minkey, u_int32_t arg)
+DB_DB_ACCESS(bt_prefix, bt_prefix_fcn_type arg)
+DB_DB_ACCESS(dup_compare, dup_compare_fcn_type arg)
+DB_DB_ACCESS_NORET(errfile, FILE *arg)
+DB_DB_ACCESS_NORET(errpfx, const char *arg)
+DB_DB_ACCESS(flags, u_int32_t arg)
+DB_DB_ACCESS(h_ffactor, u_int32_t arg)
+DB_DB_ACCESS(h_hash, h_hash_fcn_type arg)
+DB_DB_ACCESS(h_nelem, u_int32_t arg)
+DB_DB_ACCESS(lorder, int arg)
+DB_DB_ACCESS(malloc, db_malloc_fcn_type arg)
+DB_DB_ACCESS(pagesize, u_int32_t arg)
+DB_DB_ACCESS(realloc, db_realloc_fcn_type arg)
+DB_DB_ACCESS(re_delim, int arg)
+DB_DB_ACCESS(re_len, u_int32_t arg)
+DB_DB_ACCESS(re_pad, int arg)
+DB_DB_ACCESS(re_source, char *arg)
+DB_DB_ACCESS(q_extentsize, u_int32_t arg)
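
For reference, the DB_DB_ACCESS(bt_minkey, u_int32_t arg) line above expands to roughly the following method (whitespace added):

int Db::set_bt_minkey(u_int32_t arg)
{
    int ret;
    DB *db = unwrap(this);

    if ((ret = (*(db->set_bt_minkey))(db, arg)) != 0) {
        DB_ERROR("Db::set_bt_minkey", ret, error_policy());
    }
    return (ret);
}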
+
+// Here are the set methods that don't fit the above mold.
+//
+
+void Db::set_errcall(void (*arg)(const char *, char *))
+{
+ env_->set_errcall(arg);
+}
+
+int Db::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache)
+{
+ int ret;
+ DB *db = unwrap(this);
+
+ if ((ret = (*(db->set_cachesize))(db, gbytes, bytes, ncache)) != 0) {
+ DB_ERROR("Db::set_cachesize", ret, error_policy());
+ }
+ return (ret);
+}
+
+int Db::set_paniccall(void (*callback)(DbEnv *, int))
+{
+ return (env_->set_paniccall(callback));
+}
+
+void Db::set_error_stream(ostream *error_stream)
+{
+ env_->set_error_stream(error_stream);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// Dbc //
+// //
+////////////////////////////////////////////////////////////////////////
+
+// It's private, and should never be called, but VC4.0 needs it resolved
+//
+Dbc::~Dbc()
+{
+}
+
+int Dbc::close()
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_close(cursor)) != 0) {
+ DB_ERROR("Dbc::close", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ return (0);
+}
+
+int Dbc::count(db_recno_t *countp, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_count(cursor, countp, flags_arg)) != 0) {
+ DB_ERROR("Dbc::count", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ return (0);
+}
+
+int Dbc::del(u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_del(cursor, flags_arg)) != 0) {
+
+ // DB_KEYEMPTY is a "normal" return, so should not be
+ // thrown as an error
+ //
+ if (err != DB_KEYEMPTY) {
+ DB_ERROR("Dbc::del", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Dbc::dup(Dbc** cursorp, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ DBC *new_cursor = 0;
+ int err;
+
+ if ((err = cursor->c_dup(cursor, &new_cursor, flags_arg)) != 0) {
+ DB_ERROR("Dbc::dup", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+
+ // The following cast implies that Dbc can be no larger than DBC
+ *cursorp = (Dbc*)new_cursor;
+ return (0);
+}
+
+int Dbc::get(Dbt* key, Dbt *data, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_get(cursor, key, data, flags_arg)) != 0) {
+
+ // DB_NOTFOUND and DB_KEYEMPTY are "normal" returns,
+ // so should not be thrown as an error
+ //
+ if (err != DB_NOTFOUND && err != DB_KEYEMPTY) {
+ DB_ERROR("Dbc::get", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Dbc::put(Dbt* key, Dbt *data, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_put(cursor, key, data, flags_arg)) != 0) {
+
+ // DB_KEYEXIST is a "normal" return, so should not be
+ // thrown as an error
+ //
+ if (err != DB_KEYEXIST) {
+ DB_ERROR("Dbc::put", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ }
+ return (err);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// Dbt //
+// //
+////////////////////////////////////////////////////////////////////////
+
+Dbt::Dbt()
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+}
+
+Dbt::Dbt(void *data_arg, size_t size_arg)
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+ set_data(data_arg);
+ set_size(size_arg);
+}
+
+Dbt::~Dbt()
+{
+}
+
+Dbt::Dbt(const Dbt &that)
+{
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+}
+
+Dbt &Dbt::operator = (const Dbt &that)
+{
+ if (this != &that) {
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+ }
+ return (*this);
+}
+
+DB_RW_ACCESS(Dbt, void *, data, data)
+DB_RW_ACCESS(Dbt, u_int32_t, size, size)
+DB_RW_ACCESS(Dbt, u_int32_t, ulen, ulen)
+DB_RW_ACCESS(Dbt, u_int32_t, dlen, dlen)
+DB_RW_ACCESS(Dbt, u_int32_t, doff, doff)
+DB_RW_ACCESS(Dbt, u_int32_t, flags, flags)
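
A minimal end-to-end sketch of the Db and Dbt wrappers above, assuming <db_cxx.h> and non-transactional use; the file name, key, and data are illustrative:

#include <db_cxx.h>

static int put_and_get(DbEnv *env)
{
    Db db(env, 0);
    int err;

    if ((err = db.open("example.db", NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
        return (err);

    char keybuf[] = "fruit", databuf[] = "apple";
    Dbt key(keybuf, sizeof(keybuf)), data(databuf, sizeof(databuf));
    if ((err = db.put(NULL, &key, &data, 0)) == 0) {
        // With flags 0 the returned data points at DB-owned memory,
        // valid until the next operation on this handle.
        Dbt result;
        err = db.get(NULL, &key, &result, 0);
    }
    (void)db.close(0);
    return (err);
}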
diff --git a/bdb/cxx/cxx_txn.cpp b/bdb/cxx/cxx_txn.cpp
new file mode 100644
index 00000000000..0abae982644
--- /dev/null
+++ b/bdb/cxx/cxx_txn.cpp
@@ -0,0 +1,136 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_txn.cpp,v 11.13 2000/12/21 16:24:33 dda Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbTxnMgr //
+// //
+////////////////////////////////////////////////////////////////////////
+
+int DbEnv::txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ DB_TXN *txn;
+
+ if ((err = ::txn_begin(env, unwrap(pid), &txn, flags)) != 0) {
+ DB_ERROR("DbEnv::txn_begin", err, error_policy());
+ return (err);
+ }
+ DbTxn *result = new DbTxn();
+ result->imp_ = wrap(txn);
+ *tid = result;
+ return (err);
+}
+
+int DbEnv::txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ if ((err = ::txn_checkpoint(env, kbyte, min, flags)) != 0 &&
+ err != DB_INCOMPLETE) {
+ DB_ERROR("DbEnv::txn_checkpoint", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::txn_stat(DB_TXN_STAT **statp, db_malloc_fcn_type db_malloc_fcn)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ if ((err = ::txn_stat(env, statp, db_malloc_fcn)) != 0) {
+ DB_ERROR("DbEnv::txn_stat", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbTxn //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbTxn::DbTxn()
+: imp_(0)
+{
+}
+
+DbTxn::~DbTxn()
+{
+}
+
+int DbTxn::abort()
+{
+ int err;
+ DB_TXN *txn;
+
+ txn = unwrap(this);
+ err = txn_abort(txn);
+
+ // It may seem weird to delete this, but is legal as long
+ // as we don't access any of its data before returning.
+ //
+ delete this;
+
+ if (err != 0)
+ DB_ERROR("DbTxn::abort", err, ON_ERROR_UNKNOWN);
+
+ return (err);
+}
+
+int DbTxn::commit(u_int32_t flags)
+{
+ int err;
+ DB_TXN *txn;
+
+ txn = unwrap(this);
+ err = txn_commit(txn, flags);
+
+ // It may seem weird to delete this, but is legal as long
+ // as we don't access any of its data before returning.
+ //
+ delete this;
+
+ if (err != 0)
+ DB_ERROR("DbTxn::commit", err, ON_ERROR_UNKNOWN);
+
+ return (err);
+}
+
+u_int32_t DbTxn::id()
+{
+ DB_TXN *txn;
+
+ txn = unwrap(this);
+ return (txn_id(txn)); // no error
+}
+
+int DbTxn::prepare()
+{
+ int err;
+ DB_TXN *txn;
+
+ txn = unwrap(this);
+ if ((err = txn_prepare(txn)) != 0) {
+ DB_ERROR("DbTxn::prepare", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ return (0);
+}
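
A minimal sketch of the transaction wrappers above, assuming <db_cxx.h>, a transactional DbEnv, and an already-opened Db; txn_put is an illustrative name:

#include <db_cxx.h>

static int txn_put(DbEnv *env, Db *db, Dbt *key, Dbt *data)
{
    DbTxn *txn;
    int err;

    if ((err = env->txn_begin(NULL, &txn, 0)) != 0)
        return (err);
    if ((err = db->put(txn, key, data, 0)) != 0) {
        (void)txn->abort();     // abort() deletes the DbTxn object
        return (err);
    }
    return (txn->commit(0));    // commit() deletes the DbTxn object
}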
diff --git a/bdb/cxx/namemap.txt b/bdb/cxx/namemap.txt
new file mode 100644
index 00000000000..75207718577
--- /dev/null
+++ b/bdb/cxx/namemap.txt
@@ -0,0 +1,21 @@
+$Id: namemap.txt,v 10.4 2000/02/19 20:57:54 bostic Exp $
+
+The C++ API to DB consists largely of wrapper classes with appropriately named
+methods that call into DB. For the most part, there is a straightforward mapping
+of names. For the purposes of referencing documentation, this chart shows the
+underlying C structure name for each C++ class. In some cases, using the
+given C prefix with a C++ method name gives the underlying C function name.
+For example, DbMpoolFile::close() is implemented by memp_fclose().
+
+C++             C                C prefix
+
+Db              DB
+DbEnv           DB_ENV
+Dbc             DBC
+DbException     none
+DbInfo          DB_INFO
+DbLock          DB_LOCK          lock_
+DbLsn           DB_LSN
+DbMpoolFile     DB_MPOOLFILE     memp_
+Dbt             DBT
+DbTxn           DB_TXN           txn_
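
A few concrete pairings drawn from the C++ wrappers earlier in this diff illustrate the C prefix column:

    DbLock::put()        calls  lock_put()
    DbMpoolFile::sync()  calls  memp_fsync()
    DbTxn::commit()      calls  txn_commit()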
diff --git a/bdb/db/Design.fileop b/bdb/db/Design.fileop
new file mode 100644
index 00000000000..187f1ffaf22
--- /dev/null
+++ b/bdb/db/Design.fileop
@@ -0,0 +1,452 @@
+# $Id: Design.fileop,v 11.4 2000/02/19 20:57:54 bostic Exp $
+
+The design of file operation recovery.
+
+Keith has asked me to write up notes on our current status of database
+create and delete and recovery, why it's so hard, and how we've violated
+all the cornerstone assumptions on which our recovery framework is based.
+
+I am including two documents at the end of this one. The first is the
+initial design of the recoverability of file create and delete (there is
+no talk of subdatabases there, because we didn't think we'd have to do
+anything special there). I will annotate this document on where things
+changed.
+
+The second is the design of recd007 which is supposed to test our ability
+to recover these operations regardless of where one crashes. This test
+is fundamentally different from our other recovery tests in the following
+manner. Normally, the application controls transaction boundaries.
+Therefore, we can perform an operation and then decide whether to commit
+or abort it. In the normal recovery tests, we force the database into
+each of the four possible states from a recovery perspective:
+
+ database is pre-op, undo (do nothing)
+ database is pre-op, redo
+ database is post-op, undo
+ database is post-op, redo (do nothing)
+
+By copying databases at various points and initiating txn_commit and abort
+appropriately, we can make all these things happen. Notice that the one
+case we don't handle is where page A is in one state (e.g., pre-op) and
+page B is in another state (e.g., post-op). I will argue that these don't
+matter because each page is recovered independently. If anyone can poke
+holes in this, I'm interested.
+
+The problem with create/delete recovery testing is that the transaction
+is begun and ended all inside the library. Therefore, there is never any
+point (outside the library) where we can copy files and or initiate
+abort/commit. In order to still put the recovery code through its paces,
+Sue designed an infrastructure that lets you tell the library where to
+make copies of things and where to suddenly inject errors so that the
+transaction gets aborted. This level of detail allows us to push the
+create/delete recovery code through just about every recovery path
+possible (although I'm sure Mike will tell me I'm wrong when he starts to
+run code coverage tools).
+
+OK, so that's all preamble and a brief discussion of the documents I'm
+enclosing.
+
+Why was this so hard and painful and why is the code so Q@#$!% complicated?
+The following is a discussion/explanation, but to the best of my knowledge,
+the structure we have in place now works. The key question we need to be
+asking is, "Does this need to be so complex or should we redesign
+portions to simplify it?" At this point, there is no obvious way to simplify
+it in my book, but I may be having difficulty seeing this because my mind is
+too polluted at this point.
+
+Our overall strategy for recovery is that we do write-ahead logging,
+that is we log an operation and make sure it is on disk before any
+data corresponding to the data that log record describes is on disk.
+Typically we use log sequence numbers (LSNs) to mark the data so that
+during recovery, we can look at the data and determine if it is in a
+state before a particular log record or after a particular log record.
+
+In the good old days, opens were not transaction protected, so we could
+do regular old opens during recovery and if the file existed, we opened
+it and if it didn't (or appeared corrupt), we didn't and treated it like
+a missing file. As will be discussed below in detail, our states are much
+more complicated and recovery can't make such simplistic assumptions.
+
+Also, since we are now dealing with file system operations, we have less
+control about when they actually happen and what the state of the system
+can be. That is, we have to write create log records synchronously, because
+the create/open system call may force a newly created (0-length) file to
+disk. This file now has to be identified as being in the "being-created"
+state.
+
+A. We used to make a number of assumptions during recovery:
+
+1. We could call db_open at any time and one of three things would happen:
+ a) the file would be opened cleanly
+ b) the file would not exist
+ c) we would encounter an error while opening the file
+
+Case a posed no difficulty.
+In Case b, we simply spit out a warning that a file was missing and then
+ ignored all subsequent operations to that file.
+In Case c, we reported a fatal error.
+
+2. We can always generate a warning if a file is missing.
+
+3. We never encounter NULL file names in the log.
+
+B. We also made some assumptions in the main-line library:
+
+1. If you try to open a file and it exists but is 0-length, then
+someone else is trying to open it.
+
+2. You can write pages anywhere in a file and any non-existent pages
+are 0-filled. [This breaks on Windows.]
+
+3. If you have proper permissions then you can always evict pages from
+the buffer pool.
+
+4. During open, we can close the master database handle as soon as
+we're done with it since all the rest of the activity will take place
+on the subdatabase handle.
+
+In our brave new world, most of these assumptions are no longer valid.
+Let's address them one at a time.
+
+A.1 We could call db_open at any time and one of three things would happen:
+ a) the file would be opened cleanly
+ b) the file would not exist
+ c) we would encounter an error while opening the file
+There are now additional states. Since we are trying to make file
+operations recoverable, you can now die in the middle of such an
+operation and we have to be able to pick up the pieces. What this
+now means is that:
+
+ * a 0-length file can be an indication of a create in-progress
+ * you can have a meta-data page but no root page (of a btree)
+ * if a file doesn't exist, it could mean that it was just about
+ to be created and needs to be rolled forward.
+ * if you encounter an error in a file (e.g., the meta-data page
+ is all 0's) you could still be in mid-open.
+
+I have now made this all work, but it required significant changes to the
+db_open code and error handling and this is the sort of change that makes
+everyone nervous.
+
+A.2. We can always generate a warning if a file is missing.
+
+Now that we have a delete file method in the API, we need to make sure
+that we do not generate warning messages for files that don't exist if
+we see that they were explicitly deleted.
+
+This means that we need to save state during recovery, determine which
+files were missing and were not being recreated and were not deleted and
+only complain about those.
+
+A.3. We never encounter NULL file names in the log.
+
+Now that we allow transaction protection on memory-resident files, we write
+log messages for files with NULL file names. This means that our assumption
+of always being able to call "db_open" on any log_register OPEN message found
+in the log is no longer valid.
+
+B.1. If you try to open a file and it exists but is 0-length, then
+someone else is trying to open it.
+
+As discussed for A.1, this is no longer true. It may be instead that you
+are in the process of recovering a create.
+
+B.2. You can write pages anywhere in a file and any non-existent pages
+are 0-filled.
+
+It turns out that this is not true on Windows. This means that places
+we do group allocation (hash) must explicitly allocate each page, because
+we can't count on recognizing the uninitialized pages later.
+
+B.3. If you have proper permissions then you can always evict pages from
+the buffer pool.
+
+In the brave new world though, files can be deleted and they may
+have pages in the mpool. If you later try to evict these, you
+discover that the file doesn't exist. We'd get here when we had
+to dirty pages during a remove operation.
+
+B.4. You can close files any time you want.
+
+However, if the file takes part in the open/remove transaction,
+then we had better not close it until after the transaction
+commits/aborts, because we need to be able to get our hands on the
+dbp and the open happened in a different transaction.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+Design for recovering file create and delete in the presence of subdatabases.
+
+Assumptions:
+ Remove the O_TRUNCATE flag.
+ Single-thread all open/create/delete operations.
+ (Well, almost all; we'll optimize opens without DB_CREATE set.)
+ The reasoning for this is that with two simultaneous
+ open/creators, during recovery, we cannot identify which
+ transaction successfully created files and therefore cannot
+ recover correctly.
+ File system creates/deletes are synchronous
+ Once the file is open, subdatabase creates look like regular
+ get/put operations and a metadata page creation.
+
+There are 4 cases to deal with:
+ 1. Open/create file
+ 2. Open/create subdatabase
+ 3. Delete
+ 4. Recovery records
+
+ __db_fileopen_recover
+ __db_metapage_recover
+ __db_delete_recover
+ existing c_put and c_get routines for subdatabase creation
+
+ Note that the open/create of the file and the open/create of the
+ subdatabase need to be in the same transaction.
+
+1. Open/create (full file and subdb version)
+
+If create
+ LOCK_FILEOP
+ txn_begin
+ log create message (open message below)
+ do file system open/create
+ if we did not create
+ abort transaction (before going to open_only)
+ if (!subdb)
+ set dbp->open_txn = NULL
+ else
+ txn_begin a new transaction for the subdb open
+
+ construct meta-data page
+ log meta-data page (see metapage)
+ write the meta-data page
+ * It may be the case that btrees need to log both meta-data pages
+ and root pages. If that is the case, I believe that we can use
+ this same record and recovery routines for both
+
+ txn_commit
+ UNLOCK_FILEOP
+
+2. Delete
+ LOCK_FILEOP
+ txn_begin
+ log delete message (delete message below)
+ mv file __db.file.lsn
+ txn_commit
+ unlink __db.file.lsn
+ UNLOCK_FILEOP
+
+3. Recovery Routines
+
+__db_fileopen_recover
+ if (argp->name.size == 0
+ done;
+
+ if (redo) /* Commit */
+ __os_open(argp->name, DB_OSO_CREATE, argp->mode, &fh)
+ __os_closehandle(fh)
+ if (undo) /* Abort */
+ if (argp->name exists)
+ unlink(argp->name);
+
+__db_metapage_recover
+ if (redo)
+ __os_open(argp->name, 0, 0, &fh)
+ __os_lseek(meta data page)
+ __os_write(meta data page)
+ __os_closehandle(fh);
+ if (undo)
+ done = 0;
+ if (argp->name exists)
+ if (length of argp->name != 0)
+ __os_open(argp->name, 0, 0, &fh)
+ __os_lseek(meta data page)
+ __os_read(meta data page)
+ if (read succeeds && page lsn != current_lsn)
+ done = 1
+ __os_closehandle(fh);
+ if (!done)
+ unlink(argp->name)
+
+__db_delete_recover
+ if (redo)
+ Check if the backup file still exists and if so, delete it.
+
+ if (undo)
+ if (__db_appname(__db.file.lsn exists))
+ mv __db_appname(__db.file.lsn) __db_appname(file)
+
+__db_metasub_recover
+ /* This is like a normal recovery routine */
+ Get the metadata page
+ if (cmp_n && redo)
+ copy the log page onto the page
+ update the lsn
+ make sure page gets put dirty
+ else if (cmp_p && undo)
+ update the lsn to the lsn in the log record
+ make sure page gets put dirty
+
+ if the page was modified, put it back dirty
+
+In db.src
+
+# name: filename (before call to __db_appname)
+# mode: file system mode
+BEGIN open
+DBT name DBT s
+ARG mode u_int32_t o
+END
+
+# opcode: indicate if it is a create/delete and if it is a subdatabase
+# pgsize: page size on which we're going to write the meta-data page
+# pgno: page number on which to write this meta-data page
+# page: the actual meta-data page
+# lsn: LSN of the meta-data page -- 0 for new databases, may be non-0
+# for subdatabases.
+
+BEGIN metapage
+ARG opcode u_int32_t x
+DBT name DBT s
+ARG pgno db_pgno_t d
+DBT page DBT s
+POINTER lsn DB_LSN * lu
+END
+
+# We do not need a subdatabase name here because removing a subdatabase
+# name is simply a regular bt_delete operation from the master database.
+# It will get logged normally.
+# name: filename
+BEGIN delete
+DBT name DBT s
+END
+
+# We also need to reclaim pages, but we can use the existing
+# bt_pg_alloc routines.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+Testing recoverability of create/delete.
+
+These tests are unlike other tests in that they are going to
+require hooks in the library. The reason is that the create
+and delete calls are internally wrapped in a transaction, so
+that if the call returns, the transaction has already either
+commited or aborted. Using only that interface limits what
+kind of testing we can do. To match our other recovery testing
+efforts, we need to add hooks to trigger aborts at particular
+times in the create/delete path.
+
+The general recovery testing strategy is that we wish to
+execute every path through every recovery routine. That
+means that we try to:
+ catch each operation in its pre-operation state
+ call the recovery function with redo
+ call the recovery function with undo
+ catch each operation in its post-operation state
+ call the recovery function with redo
+ call the recovery function with undo
+
+In addition, there are a few critical points in the create and
+delete path that we want to make sure we capture.
+
+1. Test Structure
+
+The test structure should be similar to the existing recovery
+tests. We will want to have a structure in place where we
+can execute different commands:
+ create a file/database
+ create a file that will contain subdatabases.
+ create a subdatabase
+ remove a subdatabase (that contains valid data)
+ remove a subdatabase (that does not contain any data)
+ remove a file that used to contain subdatabases
+ remove a file that contains a database
+
+The tricky part is capturing the state of the world at the
+various points in the create/delete process.
+
+The critical points in the create process are:
+
+ 1. After we've logged the create, but before we've done anything.
+ in db/db.c
+ after the open_retry
+ after the __crdel_fileopen_log call (and before we've
+ called __os_open).
+
+ 2. Immediately after the __os_open
+
+ 3. Immediately after each __db_log_page call
+ in bt_open.c
+ log meta-data page
+ log root page
+ in hash.c
+ log meta-data page
+
+ 4. With respect to the log records above, shortly after each
+ log write is a memp_fput. We need to do a sync after
+ each memp_fput and trigger a point after that sync.
+
+The critical points in the remove process are:
+
+ 1. Right after the crdel_delete_log in db/db.c
+
+ 2. Right after the __os_rename call (below the crdel_delete_log)
+
+ 3. After the __db_remove_callback call.
+
+I believe that these are the places where we'll need some sort of hook.
+
+2. Adding hooks to the library.
+
+The hooks need two components. One component is to capture the state of
+the database at the hook point and the other is to trigger a txn_abort at
+the hook point. The second part is fairly trivial.
+
+The first part requires more thought. Let me explain what we do in a
+"normal" recovery test. In a normal recovery test, we save an initial
+copy of the database (this copy is called init). Then we execute one
+or more operations. Then, right before the commit/abort, we sync the
+file, and save another copy (the afterop copy). Finally, we call txn_commit
+or txn_abort, sync the file again, and save the database one last time (the
+final copy).
+
+Then we run recovery. The first time, this should be a no-op, because
+we've either committed the transaction and are checking to redo it or
+we aborted the transaction, undid it on the abort and are checking to
+undo it again.
+
+We then run recovery again on whatever database will force us through
+the path that requires work. In the commit case, this means we start
+with the init copy of the database and run recovery. This pushes us
+through all the redo paths. In the abort case, we start with the afterop
+copy which pushes us through all the undo cases.
+
+In some sense, we're asking the create/delete test to be more exhaustive
+by defining all the trigger points, but I think that's the correct thing
+to do, since the create/delete is not initiated by a user transaction.
+
+So, what do we have to do at the hook points?
+ 1. sync the file to disk.
+ 2. save the file itself
+ 3. save any files named __db_backup_name(name, &backup_name, lsn)
+ Since we may not know the right lsns, I think we should save
+ every file of the form __db.name.0xNNNNNNNN.0xNNNNNNNN into
+ some temporary files from which we can restore it to run
+ recovery.
+
+3. Putting it all together
+
+So, the three pieces are writing the test structure, putting in the hooks
+and then writing the recovery portions so that we restore the right thing
+that the hooks saved in order to initiate recovery.
+
+Some of the technical issues that need to be solved are:
+ How does the hook code become active (i.e., we don't
+ want it in there normally, but it's got to be
+ there when you configure for testing)?
+ How do you (the test) tell the library that you want a
+ particular hook to abort?
+ How do you (the test) tell the library that you want the
+ hook code doing its copies (do we really want
+ *every* test doing these copies during testing?
+ Maybe it's not a big deal, but maybe it is; we
+ should at least think about it).
diff --git a/bdb/db/crdel.src b/bdb/db/crdel.src
new file mode 100644
index 00000000000..17c061d6887
--- /dev/null
+++ b/bdb/db/crdel.src
@@ -0,0 +1,103 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: crdel.src,v 11.12 2000/12/12 17:41:48 bostic Exp $
+ */
+
+PREFIX crdel
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "db_page.h"
+INCLUDE #include "db_dispatch.h"
+INCLUDE #include "db_am.h"
+INCLUDE #include "txn.h"
+INCLUDE
+
+/*
+ * Fileopen -- log a potential file create operation
+ *
+ * name: filename
+ * mode: file system mode
+ */
+BEGIN fileopen 141
+DBT name DBT s
+ARG mode u_int32_t o
+END
+
+/*
+ * Metasub: log the creation of a subdatabase meta data page.
+ *
+ * fileid: identifies the file being acted upon.
+ * pgno: page number on which to write this meta-data page
+ * page: the actual meta-data page
+ * lsn: lsn of the page.
+ */
+BEGIN metasub 142
+ARG fileid int32_t ld
+ARG pgno db_pgno_t d
+DBT page DBT s
+POINTER lsn DB_LSN * lu
+END
+
+/*
+ * Metapage: log the creation of a meta data page for a new file.
+ *
+ * fileid: identifies the file being acted upon.
+ * name: file containing the page.
+ * pgno: page number on which to write this meta-data page
+ * page: the actual meta-data page
+ */
+BEGIN metapage 143
+ARG fileid int32_t ld
+DBT name DBT s
+ARG pgno db_pgno_t d
+DBT page DBT s
+END
+
+/*
+ * Delete: remove a file.
+ * Note that we don't need a special log record for subdatabase
+ * removes, because we use normal btree operations to remove them.
+ *
+ * name: name of the file being removed (relative to DBHOME).
+ */
+DEPRECATED old_delete 144
+DBT name DBT s
+END
+
+/*
+ * Rename: rename a file
+ * We do not need this for subdatabases
+ *
+ * name: name of the file being renamed (relative to DBHOME).
+ * newname: new name of the file (relative to DBHOME).
+ */
+BEGIN rename 145
+ARG fileid int32_t ld
+DBT name DBT s
+DBT newname DBT s
+END
+/*
+ * Delete: remove a file.
+ * Note that we don't need a special log record for subdatabase
+ * removes, because we use normal btree operations to remove them.
+ *
+ * name: name of the file being removed (relative to DBHOME).
+ */
+BEGIN delete 146
+ARG fileid int32_t ld
+DBT name DBT s
+END
diff --git a/bdb/db/crdel_auto.c b/bdb/db/crdel_auto.c
new file mode 100644
index 00000000000..f2204410ee8
--- /dev/null
+++ b/bdb/db/crdel_auto.c
@@ -0,0 +1,900 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "txn.h"
+
+int
+__crdel_fileopen_log(dbenv, txnid, ret_lsnp, flags,
+ name, mode)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ u_int32_t mode;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_crdel_fileopen;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(mode);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+ memcpy(bp, &mode, sizeof(mode));
+ bp += sizeof(mode);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__crdel_fileopen_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __crdel_fileopen_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __crdel_fileopen_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]crdel_fileopen: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tmode: %o\n", argp->mode);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__crdel_fileopen_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __crdel_fileopen_args **argpp;
+{
+ __crdel_fileopen_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__crdel_fileopen_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ memcpy(&argp->mode, bp, sizeof(argp->mode));
+ bp += sizeof(argp->mode);
+ *argpp = argp;
+ return (0);
+}
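The matching *_read routines walk the same buffer back out, but the variable-length fields are not copied: the DBT's data pointer is aimed straight into the record buffer, so the decoded arguments stay valid only as long as that buffer does. A sketch of that zero-copy step, using a hypothetical helper rather than any DB API:

#include <stdint.h>
#include <string.h>

struct dbt_view { uint32_t size; const unsigned char *data; };

/* Decode one length-prefixed field without copying it. */
static const unsigned char *
get_dbt(const unsigned char *bp, struct dbt_view *out)
{
	memcpy(&out->size, bp, sizeof(out->size));
	bp += sizeof(out->size);
	out->data = bp;		/* points into the caller's record buffer */
	return (bp + out->size);
}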
+
+int
+__crdel_metasub_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, page, lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ const DBT *page;
+ DB_LSN * lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_crdel_metasub;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(u_int32_t) + (page == NULL ? 0 : page->size)
+ + sizeof(*lsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (page == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &page->size, sizeof(page->size));
+ bp += sizeof(page->size);
+ memcpy(bp, page->data, page->size);
+ bp += page->size;
+ }
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__crdel_metasub_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __crdel_metasub_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __crdel_metasub_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]crdel_metasub: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %d\n", argp->pgno);
+ printf("\tpage: ");
+ for (i = 0; i < argp->page.size; i++) {
+ ch = ((u_int8_t *)argp->page.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__crdel_metasub_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __crdel_metasub_args **argpp;
+{
+ __crdel_metasub_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__crdel_metasub_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memset(&argp->page, 0, sizeof(argp->page));
+ memcpy(&argp->page.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->page.data = bp;
+ bp += argp->page.size;
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__crdel_metapage_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, name, pgno, page)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ const DBT *name;
+ db_pgno_t pgno;
+ const DBT *page;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_crdel_metapage;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(pgno)
+ + sizeof(u_int32_t) + (page == NULL ? 0 : page->size);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (page == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &page->size, sizeof(page->size));
+ bp += sizeof(page->size);
+ memcpy(bp, page->data, page->size);
+ bp += page->size;
+ }
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__crdel_metapage_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __crdel_metapage_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __crdel_metapage_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]crdel_metapage: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tpgno: %d\n", argp->pgno);
+ printf("\tpage: ");
+ for (i = 0; i < argp->page.size; i++) {
+ ch = ((u_int8_t *)argp->page.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__crdel_metapage_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __crdel_metapage_args **argpp;
+{
+ __crdel_metapage_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__crdel_metapage_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memset(&argp->page, 0, sizeof(argp->page));
+ memcpy(&argp->page.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->page.data = bp;
+ bp += argp->page.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__crdel_old_delete_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __crdel_old_delete_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __crdel_old_delete_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]crdel_old_delete: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__crdel_old_delete_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __crdel_old_delete_args **argpp;
+{
+ __crdel_old_delete_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__crdel_old_delete_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__crdel_rename_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, name, newname)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ const DBT *name;
+ const DBT *newname;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_crdel_rename;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t) + (newname == NULL ? 0 : newname->size);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+ if (newname == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &newname->size, sizeof(newname->size));
+ bp += sizeof(newname->size);
+ memcpy(bp, newname->data, newname->size);
+ bp += newname->size;
+ }
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__crdel_rename_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __crdel_rename_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __crdel_rename_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]crdel_rename: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tnewname: ");
+ for (i = 0; i < argp->newname.size; i++) {
+ ch = ((u_int8_t *)argp->newname.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__crdel_rename_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __crdel_rename_args **argpp;
+{
+ __crdel_rename_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__crdel_rename_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ memset(&argp->newname, 0, sizeof(argp->newname));
+ memcpy(&argp->newname.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->newname.data = bp;
+ bp += argp->newname.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__crdel_delete_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, name)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ const DBT *name;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_crdel_delete;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__crdel_delete_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __crdel_delete_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __crdel_delete_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]crdel_delete: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__crdel_delete_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __crdel_delete_args **argpp;
+{
+ __crdel_delete_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__crdel_delete_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__crdel_init_print(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_fileopen_print, DB_crdel_fileopen)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_metasub_print, DB_crdel_metasub)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_metapage_print, DB_crdel_metapage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_old_delete_print, DB_crdel_old_delete)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_rename_print, DB_crdel_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_delete_print, DB_crdel_delete)) != 0)
+ return (ret);
+ return (0);
+}
+
+int
+__crdel_init_recover(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_fileopen_recover, DB_crdel_fileopen)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_metasub_recover, DB_crdel_metasub)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_metapage_recover, DB_crdel_metapage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_crdel_old_delete)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_rename_recover, DB_crdel_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __crdel_delete_recover, DB_crdel_delete)) != 0)
+ return (ret);
+ return (0);
+}
+
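The two __crdel_init_* functions above register one callback per record type with the recovery dispatcher, once for printing and once for recovery proper. Conceptually this builds a table mapping record-type ids to handler functions; a simplified sketch with invented types, not the dispatcher's real data structures:

#include <stddef.h>

/* Signature shape of a recovery/print callback (simplified). */
typedef int (*rec_func)(void *dbenv, void *rec, void *lsn, int op, void *info);

struct rec_entry { unsigned type; rec_func func; };

/* Register one handler for one record type, as each __db_add_recovery
 * call above conceptually does. */
static int
add_recovery(struct rec_entry *table, size_t cap, size_t *used,
    unsigned type, rec_func func)
{
	if (*used >= cap)
		return (-1);		/* table full */
	table[*used].type = type;
	table[*used].func = func;
	(*used)++;
	return (0);
}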
diff --git a/bdb/db/crdel_rec.c b/bdb/db/crdel_rec.c
new file mode 100644
index 00000000000..495b92a0ad7
--- /dev/null
+++ b/bdb/db/crdel_rec.c
@@ -0,0 +1,646 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: crdel_rec.c,v 11.43 2000/12/13 08:06:34 krinsky Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "log.h"
+#include "hash.h"
+#include "mp.h"
+#include "db_dispatch.h"
+
+/*
+ * __crdel_fileopen_recover --
+ * Recovery function for fileopen.
+ *
+ * PUBLIC: int __crdel_fileopen_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_fileopen_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_fileopen_args *argp;
+ DBMETA ondisk;
+ DB_FH fh;
+ size_t nr;
+ int do_unlink, ret;
+ u_int32_t b, mb, io;
+ char *real_name;
+
+ COMPQUIET(info, NULL);
+
+ real_name = NULL;
+ REC_PRINT(__crdel_fileopen_print);
+
+ if ((ret = __crdel_fileopen_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+ /*
+ * If this is an in-memory database, then the name is going to
+ * be NULL, which looks like a 0-length name in recovery.
+ */
+ if (argp->name.size == 0)
+ goto done;
+
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+ if (DB_REDO(op)) {
+ /*
+		 * The create committed, so we need to make sure that the file
+ * exists. A simple open should suffice.
+ */
+ if ((ret = __os_open(dbenv, real_name,
+ DB_OSO_CREATE, argp->mode, &fh)) != 0)
+ goto out;
+ if ((ret = __os_closehandle(&fh)) != 0)
+ goto out;
+ } else if (DB_UNDO(op)) {
+ /*
+ * If the file is 0-length then it was in the process of being
+ * created, so we should unlink it. If it is non-0 length, then
+ * either someone else created it and we need to leave it
+ * untouched or we were in the process of creating it, allocated
+ * the first page on a system that requires you to actually
+ * write pages as you allocate them, but never got any data
+ * on it.
+ * If the file doesn't exist, we never got around to creating
+ * it, so that's fine.
+ */
+ if (__os_exists(real_name, NULL) != 0)
+ goto done;
+
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
+ goto out;
+ if ((ret = __os_ioinfo(dbenv,
+ real_name, &fh, &mb, &b, &io)) != 0)
+ goto out;
+ do_unlink = 0;
+ if (mb != 0 || b != 0) {
+ /*
+ * We need to read the first page
+			 * to see if it's got valid data on it.
+ */
+ if ((ret = __os_read(dbenv, &fh,
+ &ondisk, sizeof(ondisk), &nr)) != 0 ||
+ nr != sizeof(ondisk))
+ goto out;
+ if (ondisk.magic == 0)
+ do_unlink = 1;
+ }
+ if ((ret = __os_closehandle(&fh)) != 0)
+ goto out;
+ /* Check for 0-length and if it is, delete it. */
+ if (do_unlink || (mb == 0 && b == 0))
+ if ((ret = __os_unlink(dbenv, real_name)) != 0)
+ goto out;
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (argp != NULL)
+ __os_free(argp, 0);
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ return (ret);
+}
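Each recovery routine in this file follows the same skeleton: decode the record, apply its effect forward under DB_REDO or roll it back under DB_UNDO, and on success hand back the record's prev_lsn so the dispatcher can keep walking the transaction's chain backwards. A compressed sketch of that shape, with invented stand-in types:

enum recop { RECOP_REDO, RECOP_UNDO };

struct xlsn { unsigned file, offset; };

struct rec_hdr { unsigned type, txnid; struct xlsn prev_lsn; };

static int
recover_one(const struct rec_hdr *rec, enum recop op, struct xlsn *lsnp)
{
	int ret = 0;

	if (op == RECOP_REDO) {
		/* Re-apply the committed effect, e.g. recreate the file. */
	} else {
		/* Roll the effect back, e.g. unlink a half-created file. */
	}
	if (ret == 0)
		*lsnp = rec->prev_lsn;	/* follow the txn's backward chain */
	return (ret);
}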
+
+/*
+ * __crdel_metasub_recover --
+ * Recovery function for metasub.
+ *
+ * PUBLIC: int __crdel_metasub_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_metasub_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int8_t *file_uid, ptype;
+ int cmp_p, modified, reopen, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__crdel_metasub_print);
+ REC_INTRO(__crdel_metasub_read, 0);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+ }
+
+ modified = 0;
+ reopen = 0;
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+
+ if (cmp_p == 0 && DB_REDO(op)) {
+ memcpy(pagep, argp->page.data, argp->page.size);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ /*
+ * If this is a meta-data page, then we must reopen;
+ * if it was a root page, then we do not.
+ */
+ ptype = ((DBMETA *)argp->page.data)->type;
+ if (ptype == P_HASHMETA || ptype == P_BTREEMETA ||
+ ptype == P_QAMMETA)
+ reopen = 1;
+ } else if (DB_UNDO(op)) {
+ /*
+ * We want to undo this page creation. The page creation
+ * happened in two parts. First, we called __bam_new which
+ * was logged separately. Then we wrote the meta-data onto
+ * the page. So long as we restore the LSN, then the recovery
+ * for __bam_new will do everything else.
+ * Don't bother checking the lsn on the page. If we
+ * are rolling back the next thing is that this page
+ * will get freed. Opening the subdb will have reinitialized
+ * the page, but not the lsn.
+ */
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ /*
+ * If we are redoing a subdatabase create, we must close and reopen the
+ * file to be sure that we have the proper meta information in the
+	 * in-memory structures.
+ */
+ if (reopen) {
+ /* Close cursor if it's open. */
+ if (dbc != NULL) {
+ dbc->c_close(dbc);
+ dbc = NULL;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ DB_FILE_ID_LEN, NULL, &file_uid)) != 0)
+ goto out;
+ memcpy(file_uid, &file_dbp->fileid[0], DB_FILE_ID_LEN);
+ ret = __log_reopen_file(dbenv,
+ NULL, argp->fileid, file_uid, argp->pgno);
+ (void)__os_free(file_uid, DB_FILE_ID_LEN);
+ if (ret != 0)
+ goto out;
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
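The redo path above only copies the logged page image in when the page still carries the LSN the log record expected to find (argp->lsn), and then stamps the page with the record's own LSN; that comparison is what makes replay idempotent. A minimal sketch of the check, with invented helper names:

struct xlsn { unsigned file, offset; };

/* Same ordering log_compare provides: file first, then offset. */
static int
lsn_cmp(const struct xlsn *a, const struct xlsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}

static int
maybe_redo(struct xlsn *page_lsn, const struct xlsn *expected,
    const struct xlsn *rec_lsn, void (*apply)(void))
{
	if (lsn_cmp(page_lsn, expected) != 0)
		return (0);		/* page is already past this record */
	apply();			/* replay the logged page image */
	*page_lsn = *rec_lsn;		/* stamp the page with this record */
	return (1);
}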
+
+/*
+ * __crdel_metapage_recover --
+ * Recovery function for metapage.
+ *
+ * PUBLIC: int __crdel_metapage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_metapage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_metapage_args *argp;
+ DB *dbp;
+ DBMETA *meta, ondisk;
+ DB_FH fh;
+ size_t nr;
+ u_int32_t b, io, mb, pagesize;
+ int is_done, ret;
+ char *real_name;
+
+ COMPQUIET(info, NULL);
+
+ real_name = NULL;
+ memset(&fh, 0, sizeof(fh));
+ REC_PRINT(__crdel_metapage_print);
+
+ if ((ret = __crdel_metapage_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ /*
+ * If this is an in-memory database, then the name is going to
+ * be NULL, which looks like a 0-length name in recovery.
+ */
+ if (argp->name.size == 0)
+ goto done;
+
+ meta = (DBMETA *)argp->page.data;
+ __ua_memcpy(&pagesize, &meta->pagesize, sizeof(pagesize));
+
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+ if (DB_REDO(op)) {
+ if ((ret = __db_fileid_to_db(dbenv,
+ &dbp, argp->fileid, 0)) != 0) {
+ if (ret == DB_DELETED)
+ goto done;
+ else
+ goto out;
+ }
+
+ /*
+ * We simply read the first page and if the LSN is 0, we
+ * write the meta-data page.
+ */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
+ goto out;
+ if ((ret = __os_seek(dbenv, &fh,
+ pagesize, argp->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto out;
+ /*
+		 * If the read succeeds, then the page exists, and we need
+		 * to verify that the page has actually been written, because
+ * on some systems (e.g., Windows) we preallocate pages because
+ * files aren't allowed to have holes in them. If the page
+ * looks good then we're done.
+ */
+ if ((ret = __os_read(dbenv, &fh, &ondisk,
+ sizeof(ondisk), &nr)) == 0 && nr == sizeof(ondisk)) {
+ if (ondisk.magic != 0)
+ goto done;
+ if ((ret = __os_seek(dbenv, &fh,
+ pagesize, argp->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto out;
+ }
+
+ /*
+ * Page didn't exist, update the LSN and write a new one.
+ * (seek pointer shouldn't have moved)
+ */
+ __ua_memcpy(&meta->lsn, lsnp, sizeof(DB_LSN));
+ if ((ret = __os_write(dbp->dbenv, &fh,
+ argp->page.data, argp->page.size, &nr)) != 0)
+ goto out;
+ if (nr != (size_t)argp->page.size) {
+ __db_err(dbenv, "Write failed during recovery");
+ ret = EIO;
+ goto out;
+ }
+
+ /*
+ * We must close and reopen the file to be sure
+ * that we have the proper meta information
+ * in the in memory structures
+		 * in the in-memory structures.
+
+ if ((ret = __log_reopen_file(dbenv,
+ argp->name.data, argp->fileid,
+ meta->uid, argp->pgno)) != 0)
+ goto out;
+
+ /* Handle will be closed on exit. */
+ } else if (DB_UNDO(op)) {
+ is_done = 0;
+
+ /* If file does not exist, there is nothing to undo. */
+ if (__os_exists(real_name, NULL) != 0)
+ goto done;
+
+ /*
+ * Before we can look at anything on disk, we have to check
+ * if there is a valid dbp for this, and if there is, we'd
+ * better flush it.
+ */
+ dbp = NULL;
+ if ((ret =
+ __db_fileid_to_db(dbenv, &dbp, argp->fileid, 0)) == 0)
+ (void)dbp->sync(dbp, 0);
+
+ /*
+ * We need to make sure that we do not remove a file that
+ * someone else created. If the file is 0-length, then we
+ * can assume that we created it and remove it. If it is
+ * not 0-length, then we need to check the LSN and make
+ * sure that it's the file we created.
+ */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
+ goto out;
+ if ((ret = __os_ioinfo(dbenv,
+ real_name, &fh, &mb, &b, &io)) != 0)
+ goto out;
+ if (mb != 0 || b != 0) {
+ /* The file has something in it. */
+ if ((ret = __os_seek(dbenv, &fh,
+ pagesize, argp->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto out;
+ if ((ret = __os_read(dbenv, &fh,
+ &ondisk, sizeof(ondisk), &nr)) != 0)
+ goto out;
+ if (log_compare(&ondisk.lsn, lsnp) != 0)
+ is_done = 1;
+ }
+
+ /*
+ * Must close here, because unlink with the file open fails
+ * on some systems.
+ */
+ if ((ret = __os_closehandle(&fh)) != 0)
+ goto out;
+
+ if (!is_done) {
+ /*
+ * On some systems, you cannot unlink an open file so
+ * we close the fd in the dbp here and make sure we
+ * don't try to close it again. First, check for a
+ * saved_open_fhp, then close down the mpool.
+ */
+ if (dbp != NULL && dbp->saved_open_fhp != NULL &&
+ F_ISSET(dbp->saved_open_fhp, DB_FH_VALID) &&
+ (ret = __os_closehandle(dbp->saved_open_fhp)) != 0)
+ goto out;
+ if (dbp != NULL && dbp->mpf != NULL) {
+ (void)__memp_fremove(dbp->mpf);
+ if ((ret = memp_fclose(dbp->mpf)) != 0)
+ goto out;
+ F_SET(dbp, DB_AM_DISCARD);
+ dbp->mpf = NULL;
+ }
+ if ((ret = __os_unlink(dbenv, real_name)) != 0)
+ goto out;
+ }
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (argp != NULL)
+ __os_free(argp, 0);
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ if (F_ISSET(&fh, DB_FH_VALID))
+ (void)__os_closehandle(&fh);
+ return (ret);
+}
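The undo path above refuses to remove a file it cannot prove it created: a zero-length file is ours, and a non-empty one is only removed when the LSN stored in its meta page still matches this log record. A rough stand-alone version of that test using plain stdio in place of __os_open/__os_ioinfo/__os_read; the field layout and integer types here are illustrative assumptions:

#include <stdio.h>
#include <string.h>

struct xlsn { unsigned int file, offset; };

/* Return 1 if the file at `path` looks like the one this record created. */
static int
file_is_ours(const char *path, const struct xlsn *rec_lsn)
{
	FILE *fp;
	long size;
	unsigned char hdr[8];
	struct xlsn ondisk;

	if ((fp = fopen(path, "rb")) == NULL)
		return (0);		/* never created: nothing to undo */
	if (fseek(fp, 0L, SEEK_END) != 0 || (size = ftell(fp)) < 0) {
		fclose(fp);
		return (0);
	}
	if (size == 0) {		/* empty file: ours */
		fclose(fp);
		return (1);
	}
	/* Assume the meta page begins with the LSN that last wrote it. */
	rewind(fp);
	if (fread(hdr, 1, sizeof(hdr), fp) != sizeof(hdr)) {
		fclose(fp);
		return (0);
	}
	fclose(fp);
	memcpy(&ondisk, hdr, sizeof(ondisk));
	return (ondisk.file == rec_lsn->file && ondisk.offset == rec_lsn->offset);
}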
+
+/*
+ * __crdel_delete_recover --
+ * Recovery function for delete.
+ *
+ * PUBLIC: int __crdel_delete_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_delete_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB *dbp;
+ __crdel_delete_args *argp;
+ int ret;
+ char *backup, *real_back, *real_name;
+
+ REC_PRINT(__crdel_delete_print);
+
+ backup = real_back = real_name = NULL;
+ if ((ret = __crdel_delete_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ if (DB_REDO(op)) {
+ /*
+ * On a recovery, as we recreate what was going on, we
+ * recreate the creation of the file. And so, even though
+ * it committed, we need to delete it. Try to delete it,
+ * but it is not an error if that delete fails.
+ */
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+ if (__os_exists(real_name, NULL) == 0) {
+ /*
+ * If a file is deleted and then recreated, it's
+ * possible for the __os_exists call above to
+ * return success and for us to get here, but for
+ * the fileid we're looking for to be marked
+ * deleted. In that case, we needn't redo the
+ * unlink even though the file exists, and it's
+ * not an error.
+ */
+ ret = __db_fileid_to_db(dbenv, &dbp, argp->fileid, 0);
+ if (ret == 0) {
+ /*
+ * On Windows, the underlying file must be
+ * closed to perform a remove.
+ */
+ (void)__memp_fremove(dbp->mpf);
+ if ((ret = memp_fclose(dbp->mpf)) != 0)
+ goto out;
+ dbp->mpf = NULL;
+ if ((ret = __os_unlink(dbenv, real_name)) != 0)
+ goto out;
+ } else if (ret != DB_DELETED)
+ goto out;
+ }
+ /*
+ * The transaction committed, so the only thing that might
+ * be true is that the backup file is still around. Try
+ * to delete it, but it's not an error if that delete fails.
+ */
+ if ((ret = __db_backup_name(dbenv, argp->name.data,
+ &backup, lsnp)) != 0)
+ goto out;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
+ goto out;
+ if (__os_exists(real_back, NULL) == 0)
+ if ((ret = __os_unlink(dbenv, real_back)) != 0)
+ goto out;
+ if ((ret = __db_txnlist_delete(dbenv, info,
+ argp->name.data, TXNLIST_INVALID_ID, 1)) != 0)
+ goto out;
+ } else if (DB_UNDO(op)) {
+ /*
+ * Trying to undo. File may or may not have been deleted.
+ * Try to move the backup to the original. If the backup
+ * exists, then this is right. If it doesn't exist, then
+ * nothing will happen and that's OK.
+ */
+ if ((ret = __db_backup_name(dbenv, argp->name.data,
+ &backup, lsnp)) != 0)
+ goto out;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
+ goto out;
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+ if (__os_exists(real_back, NULL) == 0)
+ if ((ret =
+ __os_rename(dbenv, real_back, real_name)) != 0)
+ goto out;
+ }
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (argp != NULL)
+ __os_free(argp, 0);
+ if (backup != NULL)
+ __os_freestr(backup);
+ if (real_back != NULL)
+ __os_freestr(real_back);
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ return (ret);
+}
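Undoing a remove relies on the backup copy the delete path left behind: if the backup still exists, moving it back over the original name restores the database, and if it is gone there is nothing to undo. A sketch using plain POSIX calls in place of __db_backup_name/__os_rename, with hypothetical path arguments:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int
undo_remove(const char *backup_path, const char *orig_path)
{
	if (access(backup_path, F_OK) != 0)
		return (0);		/* no backup left: nothing to undo */
	if (rename(backup_path, orig_path) != 0)
		return (errno);
	return (0);
}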
+/*
+ * __crdel_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __crdel_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB *dbp;
+ __crdel_rename_args *argp;
+ char *new_name, *real_name;
+ int ret, set;
+
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__crdel_rename_print);
+
+ new_name = real_name = NULL;
+
+ if ((ret = __crdel_rename_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ if ((ret = __db_fileid_to_db(dbenv, &dbp, argp->fileid, 0)) != 0)
+ goto out;
+ if (DB_REDO(op)) {
+ /*
+ * We don't use the dbp parameter to __log_filelist_update
+ * in the rename case, so passing NULL for it is OK.
+ */
+ if ((ret = __log_filelist_update(dbenv, NULL,
+ argp->fileid, argp->newname.data, &set)) != 0)
+ goto out;
+ if (set != 0) {
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+ if (__os_exists(real_name, NULL) == 0) {
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, argp->newname.data,
+ 0, NULL, &new_name)) != 0)
+ goto out;
+ /*
+ * On Windows, the underlying file
+ * must be closed to perform a remove.
+ * The db will be closed by a
+ * log_register record. Rename
+ * has exclusive access to the db.
+ */
+ (void)__memp_fremove(dbp->mpf);
+ if ((ret = memp_fclose(dbp->mpf)) != 0)
+ goto out;
+ dbp->mpf = NULL;
+ if ((ret = __os_rename(dbenv,
+ real_name, new_name)) != 0)
+ goto out;
+ }
+ }
+ } else {
+ /*
+ * We don't use the dbp parameter to __log_filelist_update
+ * in the rename case, so passing NULL for it is OK.
+ */
+ if ((ret = __log_filelist_update(dbenv, NULL,
+ argp->fileid, argp->name.data, &set)) != 0)
+ goto out;
+ if (set != 0) {
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->newname.data, 0, NULL, &new_name)) != 0)
+ goto out;
+ if (__os_exists(new_name, NULL) == 0) {
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, argp->name.data,
+ 0, NULL, &real_name)) != 0)
+ goto out;
+ /*
+ * On Windows, the underlying file
+ * must be closed to perform a remove.
+ * The file may have already been closed
+ * if we are aborting the transaction.
+ */
+ if (dbp->mpf != NULL) {
+ (void)__memp_fremove(dbp->mpf);
+ if ((ret = memp_fclose(dbp->mpf)) != 0)
+ goto out;
+ dbp->mpf = NULL;
+ }
+ if ((ret = __os_rename(dbenv,
+ new_name, real_name)) != 0)
+ goto out;
+ }
+ }
+ }
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (argp != NULL)
+ __os_free(argp, 0);
+
+ if (new_name != NULL)
+ __os_free(new_name, 0);
+
+ if (real_name != NULL)
+ __os_free(real_name, 0);
+
+ return (ret);
+}
diff --git a/bdb/db/db.c b/bdb/db/db.c
new file mode 100644
index 00000000000..6e74b4b21bd
--- /dev/null
+++ b/bdb/db/db.c
@@ -0,0 +1,2325 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db.c,v 11.117 2001/01/11 18:19:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "db_swap.h"
+#include "btree.h"
+#include "db_am.h"
+#include "hash.h"
+#include "lock.h"
+#include "log.h"
+#include "mp.h"
+#include "qam.h"
+#include "common_ext.h"
+
+/* Actions that __db_master_update can take. */
+typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action;
+
+/* Flag values that __db_file_setup can return. */
+#define DB_FILE_SETUP_CREATE 0x01
+#define DB_FILE_SETUP_ZERO 0x02
+
+static int __db_file_setup __P((DB *,
+ const char *, u_int32_t, int, db_pgno_t, int *));
+static int __db_master_update __P((DB *,
+ const char *, u_int32_t,
+ db_pgno_t *, mu_action, const char *, u_int32_t));
+static int __db_refresh __P((DB *));
+static int __db_remove_callback __P((DB *, void *));
+static int __db_set_pgsize __P((DB *, DB_FH *, char *));
+static int __db_subdb_remove __P((DB *, const char *, const char *));
+static int __db_subdb_rename __P(( DB *,
+ const char *, const char *, const char *));
+#if CONFIG_TEST
+static void __db_makecopy __P((const char *, const char *));
+static int __db_testdocopy __P((DB *, const char *));
+static int __qam_testdocopy __P((DB *, const char *));
+#endif
+
+/*
+ * __db_open --
+ * Main library interface to the DB access methods.
+ *
+ * PUBLIC: int __db_open __P((DB *,
+ * PUBLIC: const char *, const char *, DBTYPE, u_int32_t, int));
+ */
+int
+__db_open(dbp, name, subdb, type, flags, mode)
+ DB *dbp;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ DB_ENV *dbenv;
+ DB_LOCK open_lock;
+ DB *mdbp;
+ db_pgno_t meta_pgno;
+ u_int32_t ok_flags;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+ mdbp = NULL;
+
+ /* Validate arguments. */
+#define OKFLAGS \
+ (DB_CREATE | DB_EXCL | DB_FCNTL_LOCKING | \
+ DB_NOMMAP | DB_RDONLY | DB_RDWRMASTER | DB_THREAD | DB_TRUNCATE)
+ if ((ret = __db_fchk(dbenv, "DB->open", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_EXCL) && !LF_ISSET(DB_CREATE))
+ return (__db_ferr(dbenv, "DB->open", 1));
+ if (LF_ISSET(DB_RDONLY) && LF_ISSET(DB_CREATE))
+ return (__db_ferr(dbenv, "DB->open", 1));
+#ifdef HAVE_VXWORKS
+ if (LF_ISSET(DB_TRUNCATE)) {
+ __db_err(dbenv, "DB_TRUNCATE unsupported in VxWorks");
+ return (__db_eopnotsup(dbenv));
+ }
+#endif
+ switch (type) {
+ case DB_UNKNOWN:
+ if (LF_ISSET(DB_CREATE|DB_TRUNCATE)) {
+ __db_err(dbenv,
+ "%s: DB_UNKNOWN type specified with DB_CREATE or DB_TRUNCATE",
+ name);
+ return (EINVAL);
+ }
+ ok_flags = 0;
+ break;
+ case DB_BTREE:
+ ok_flags = DB_OK_BTREE;
+ break;
+ case DB_HASH:
+ ok_flags = DB_OK_HASH;
+ break;
+ case DB_QUEUE:
+ ok_flags = DB_OK_QUEUE;
+ break;
+ case DB_RECNO:
+ ok_flags = DB_OK_RECNO;
+ break;
+ default:
+ __db_err(dbenv, "unknown type: %lu", (u_long)type);
+ return (EINVAL);
+ }
+ if (ok_flags)
+ DB_ILLEGAL_METHOD(dbp, ok_flags);
+
+ /* The environment may have been created, but never opened. */
+ if (!F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_OPEN_CALLED)) {
+ __db_err(dbenv, "environment not yet opened");
+ return (EINVAL);
+ }
+
+ /*
+ * Historically, you could pass in an environment that didn't have a
+ * mpool, and DB would create a private one behind the scenes. This
+ * no longer works.
+ */
+ if (!F_ISSET(dbenv, DB_ENV_DBLOCAL) && !MPOOL_ON(dbenv)) {
+ __db_err(dbenv, "environment did not include a memory pool.");
+ return (EINVAL);
+ }
+
+ /*
+ * You can't specify threads during DB->open if subsystems in the
+ * environment weren't configured with them.
+ */
+ if (LF_ISSET(DB_THREAD) &&
+ !F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_THREAD)) {
+ __db_err(dbenv, "environment not created using DB_THREAD");
+ return (EINVAL);
+ }
+
+ /*
+ * If the environment was configured with threads, the DB handle
+ * must also be free-threaded, so we force the DB_THREAD flag on.
+ * (See SR #2033 for why this is a requirement--recovery needs
+ * to be able to grab a dbp using __db_fileid_to_dbp, and it has
+ * no way of knowing which dbp goes with which thread, so whichever
+ * one it finds has to be usable in any of them.)
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ LF_SET(DB_THREAD);
+
+ /* DB_TRUNCATE is not transaction recoverable. */
+ if (LF_ISSET(DB_TRUNCATE) && TXN_ON(dbenv)) {
+ __db_err(dbenv,
+ "DB_TRUNCATE illegal in a transaction protected environment");
+ return (EINVAL);
+ }
+
+ /* Subdatabase checks. */
+ if (subdb != NULL) {
+ /* Subdatabases must be created in named files. */
+ if (name == NULL) {
+ __db_err(dbenv,
+ "multiple databases cannot be created in temporary files");
+ return (EINVAL);
+ }
+
+ /* QAM can't be done as a subdatabase. */
+ if (type == DB_QUEUE) {
+ __db_err(dbenv, "Queue databases must be one-per-file");
+ return (EINVAL);
+ }
+ }
+
+ /* Convert any DB->open flags. */
+ if (LF_ISSET(DB_RDONLY))
+ F_SET(dbp, DB_AM_RDONLY);
+
+ /* Fill in the type. */
+ dbp->type = type;
+
+ /*
+ * If we're potentially creating a database, wrap the open inside of
+ * a transaction.
+ */
+ if (TXN_ON(dbenv) && LF_ISSET(DB_CREATE))
+ if ((ret = __db_metabegin(dbp, &open_lock)) != 0)
+ return (ret);
+
+ /*
+ * If we're opening a subdatabase, we have to open (and potentially
+ * create) the main database, and then get (and potentially store)
+ * our base page number in that database. Then, we can finally open
+ * the subdatabase.
+ */
+ if (subdb == NULL)
+ meta_pgno = PGNO_BASE_MD;
+ else {
+ /*
+ * Open the master database, optionally creating or updating
+ * it, and retrieve the metadata page number.
+ */
+ if ((ret =
+ __db_master_open(dbp, name, flags, mode, &mdbp)) != 0)
+ goto err;
+
+ /* Copy the page size and file id from the master. */
+ dbp->pgsize = mdbp->pgsize;
+ F_SET(dbp, DB_AM_SUBDB);
+ memcpy(dbp->fileid, mdbp->fileid, DB_FILE_ID_LEN);
+
+ if ((ret = __db_master_update(mdbp,
+ subdb, type, &meta_pgno, MU_OPEN, NULL, flags)) != 0)
+ goto err;
+
+ /*
+ * Clear the exclusive open and truncation flags, they only
+ * apply to the open of the master database.
+ */
+ LF_CLR(DB_EXCL | DB_TRUNCATE);
+ }
+
+ ret = __db_dbopen(dbp, name, flags, mode, meta_pgno);
+
+ /*
+ * You can open the database that describes the subdatabases in the
+ * rest of the file read-only. The content of each key's data is
+ * unspecified and applications should never be adding new records
+ * or updating existing records. However, during recovery, we need
+ * to open these databases R/W so we can redo/undo changes in them.
+ * Likewise, we need to open master databases read/write during
+ * rename and remove so we can be sure they're fully sync'ed, so
+ * we provide an override flag for the purpose.
+ */
+ if (subdb == NULL && !IS_RECOVERING(dbenv) && !LF_ISSET(DB_RDONLY) &&
+ !LF_ISSET(DB_RDWRMASTER) && F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "files containing multiple databases may only be opened read-only");
+ ret = EINVAL;
+ goto err;
+ }
+
+err: /*
+ * End any transaction, committing if we were successful, aborting
+ * otherwise.
+ */
+ if (TXN_ON(dbenv) && LF_ISSET(DB_CREATE))
+ if ((t_ret = __db_metaend(dbp,
+ &open_lock, ret == 0, NULL, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* If we were successful, don't discard the file on close. */
+ if (ret == 0)
+ F_CLR(dbp, DB_AM_DISCARD);
+
+ /* If we were unsuccessful, destroy the DB handle. */
+ if (ret != 0) {
+ /* In recovery we set log_fileid early. */
+ if (IS_RECOVERING(dbenv))
+ dbp->log_fileid = DB_LOGFILEID_INVALID;
+ __db_refresh(dbp);
+ }
+
+ if (mdbp != NULL) {
+ /* If we were successful, don't discard the file on close. */
+ if (ret == 0)
+ F_CLR(mdbp, DB_AM_DISCARD);
+ if ((t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_dbopen --
+ * Open a database.
+ * PUBLIC: int __db_dbopen __P((DB *, const char *, u_int32_t, int, db_pgno_t));
+ */
+int
+__db_dbopen(dbp, name, flags, mode, meta_pgno)
+ DB *dbp;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ db_pgno_t meta_pgno;
+{
+ DB_ENV *dbenv;
+ int ret, retinfo;
+
+ dbenv = dbp->dbenv;
+
+ /* Set up the underlying file. */
+ if ((ret = __db_file_setup(dbp,
+ name, flags, mode, meta_pgno, &retinfo)) != 0)
+ return (ret);
+
+ /*
+ * If we created the file, set the truncate flag for the mpool. This
+ * isn't for anything we've done, it's protection against stupid user
+ * tricks: if the user deleted a file behind Berkeley DB's back, we
+ * may still have pages in the mpool that match the file's "unique" ID.
+ */
+ if (retinfo & DB_FILE_SETUP_CREATE)
+ flags |= DB_TRUNCATE;
+
+ /* Set up the underlying environment. */
+ if ((ret = __db_dbenv_setup(dbp, name, flags)) != 0)
+ return (ret);
+
+ /*
+ * Do access method specific initialization.
+ *
+ * !!!
+ * Set the open flag. (The underlying access method open functions
+ * may want to do things like acquire cursors, so the open flag has
+ * to be set before calling them.)
+ */
+ F_SET(dbp, DB_OPEN_CALLED);
+
+ if (retinfo & DB_FILE_SETUP_ZERO)
+ return (0);
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ ret = __bam_open(dbp, name, meta_pgno, flags);
+ break;
+ case DB_HASH:
+ ret = __ham_open(dbp, name, meta_pgno, flags);
+ break;
+ case DB_RECNO:
+ ret = __ram_open(dbp, name, meta_pgno, flags);
+ break;
+ case DB_QUEUE:
+ ret = __qam_open(dbp, name, meta_pgno, mode, flags);
+ break;
+ case DB_UNKNOWN:
+ return (__db_unknown_type(dbp->dbenv,
+ "__db_dbopen", dbp->type));
+ break;
+ }
+ return (ret);
+}
+
+/*
+ * __db_master_open --
+ * Open up a handle on a master database.
+ *
+ * PUBLIC: int __db_master_open __P((DB *,
+ * PUBLIC: const char *, u_int32_t, int, DB **));
+ */
+int
+__db_master_open(subdbp, name, flags, mode, dbpp)
+ DB *subdbp;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB **dbpp;
+{
+ DB *dbp;
+ int ret;
+
+ /* Open up a handle on the main database. */
+ if ((ret = db_create(&dbp, subdbp->dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * It's always a btree.
+ * Run in the transaction we've created.
+ * Set the pagesize in case we're creating a new database.
+ * Flag that we're creating a database with subdatabases.
+ */
+ dbp->type = DB_BTREE;
+ dbp->open_txn = subdbp->open_txn;
+ dbp->pgsize = subdbp->pgsize;
+ F_SET(dbp, DB_AM_SUBDB);
+
+ if ((ret = __db_dbopen(dbp, name, flags, mode, PGNO_BASE_MD)) != 0) {
+ if (!F_ISSET(dbp, DB_AM_DISCARD))
+ dbp->close(dbp, 0);
+ return (ret);
+ }
+
+ *dbpp = dbp;
+ return (0);
+}
+
+/*
+ * __db_master_update --
+ * Add/Remove a subdatabase from a master database.
+ */
+static int
+__db_master_update(mdbp, subdb, type, meta_pgnop, action, newname, flags)
+ DB *mdbp;
+ const char *subdb;
+ u_int32_t type;
+ db_pgno_t *meta_pgnop; /* may be NULL on MU_RENAME */
+ mu_action action;
+ const char *newname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc, *ndbc;
+ DBT key, data, ndata;
+ PAGE *p;
+ db_pgno_t t_pgno;
+ int modify, ret, t_ret;
+
+ dbenv = mdbp->dbenv;
+ dbc = ndbc = NULL;
+ p = NULL;
+
+ /* Might we modify the master database? If so, we'll need to lock. */
+ modify = (action != MU_OPEN || LF_ISSET(DB_CREATE)) ? 1 : 0;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * Open up a cursor. If this is CDB and we're creating the database,
+ * make it an update cursor.
+ */
+ if ((ret = mdbp->cursor(mdbp, mdbp->open_txn, &dbc,
+ (CDB_LOCKING(dbenv) && modify) ? DB_WRITECURSOR : 0)) != 0)
+ goto err;
+
+ /*
+ * Try to point the cursor at the record.
+ *
+ * If we're removing or potentially creating an entry, lock the page
+ * with DB_RMW.
+ *
+ * !!!
+ * We don't include the name's nul termination in the database.
+ */
+ key.data = (char *)subdb;
+ key.size = strlen(subdb);
+ /* In the rename case, we do multiple cursor ops, so MALLOC is safer. */
+ F_SET(&data, DB_DBT_MALLOC);
+ ret = dbc->c_get(dbc, &key, &data,
+ DB_SET | ((STD_LOCKING(dbc) && modify) ? DB_RMW : 0));
+
+ /*
+ * What we do next--whether or not we found a record for the
+ * specified subdatabase--depends on what the specified action is.
+ * Handle ret appropriately as the first statement of each case.
+ */
+ switch (action) {
+ case MU_REMOVE:
+ /*
+ * We should have found something if we're removing it. Note
+ * that in the common case where the DB we're asking to remove
+ * doesn't exist, we won't get this far; __db_subdb_remove
+ * will already have returned an error from __db_open.
+ */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * Delete the subdatabase entry first; if this fails,
+ * we don't want to touch the actual subdb pages.
+ */
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+
+ /*
+ * We're handling actual data, not on-page meta-data,
+ * so it hasn't been converted to/from opposite
+ * endian architectures. Do it explicitly, now.
+ */
+ memcpy(meta_pgnop, data.data, sizeof(db_pgno_t));
+ DB_NTOHL(meta_pgnop);
+ if ((ret = memp_fget(mdbp->mpf, meta_pgnop, 0, &p)) != 0)
+ goto err;
+
+ /* Free and put the page. */
+ if ((ret = __db_free(dbc, p)) != 0) {
+ p = NULL;
+ goto err;
+ }
+ p = NULL;
+ break;
+ case MU_RENAME:
+ /* We should have found something if we're renaming it. */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * Before we rename, we need to make sure we're not
+ * overwriting another subdatabase, or else this operation
+ * won't be undoable. Open a second cursor and check
+ * for the existence of newname; it shouldn't appear under
+ * us since we hold the metadata lock.
+ */
+ if ((ret = mdbp->cursor(mdbp, mdbp->open_txn, &ndbc, 0)) != 0)
+ goto err;
+ DB_ASSERT(newname != NULL);
+ key.data = (void *) newname;
+ key.size = strlen(newname);
+
+ /*
+ * We don't actually care what the meta page of the potentially-
+ * overwritten DB is; we just care about existence.
+ */
+ memset(&ndata, 0, sizeof(ndata));
+ F_SET(&ndata, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ if ((ret = ndbc->c_get(ndbc, &key, &ndata, DB_SET)) == 0) {
+ /* A subdb called newname exists. Bail. */
+ ret = EEXIST;
+ __db_err(dbenv, "rename: database %s exists", newname);
+ goto err;
+ } else if (ret != DB_NOTFOUND)
+ goto err;
+
+ /*
+ * Now do the put first; we don't want to lose our
+ * sole reference to the subdb. Use the second cursor
+ * so that the first one continues to point to the old record.
+ */
+ if ((ret = ndbc->c_put(ndbc, &key, &data, DB_KEYFIRST)) != 0)
+ goto err;
+ if ((ret = dbc->c_del(dbc, 0)) != 0) {
+ /*
+ * If the delete fails, try to delete the record
+ * we just put, in case we're not txn-protected.
+ */
+ (void)ndbc->c_del(ndbc, 0);
+ goto err;
+ }
+
+ break;
+ case MU_OPEN:
+ /*
+ * Get the subdatabase information. If it already exists,
+ * copy out the page number and we're done.
+ */
+ switch (ret) {
+ case 0:
+ memcpy(meta_pgnop, data.data, sizeof(db_pgno_t));
+ DB_NTOHL(meta_pgnop);
+ goto done;
+ case DB_NOTFOUND:
+ if (LF_ISSET(DB_CREATE))
+ break;
+ /*
+ * No db_err, it is reasonable to remove a
+ * nonexistent db.
+ */
+ ret = ENOENT;
+ goto err;
+ default:
+ goto err;
+ }
+
+ if ((ret = __db_new(dbc,
+ type == DB_HASH ? P_HASHMETA : P_BTREEMETA, &p)) != 0)
+ goto err;
+ *meta_pgnop = PGNO(p);
+
+ /*
+ * XXX
+ * We're handling actual data, not on-page meta-data, so it
+ * hasn't been converted to/from opposite endian architectures.
+ * Do it explicitly, now.
+ */
+ t_pgno = PGNO(p);
+ DB_HTONL(&t_pgno);
+ memset(&ndata, 0, sizeof(ndata));
+ ndata.data = &t_pgno;
+ ndata.size = sizeof(db_pgno_t);
+ if ((ret = dbc->c_put(dbc, &key, &ndata, DB_KEYLAST)) != 0)
+ goto err;
+ break;
+ }
+
+err:
+done: /*
+ * If we allocated a page: if we're successful, mark the page dirty
+ * and return it to the cache, otherwise, discard/free it.
+ */
+ if (p != NULL) {
+ if (ret == 0) {
+ if ((t_ret =
+ memp_fput(mdbp->mpf, p, DB_MPOOL_DIRTY)) != 0)
+ ret = t_ret;
+ /*
+ * Since we cannot close this file until after
+ * transaction commit, we need to sync the dirty
+ * pages, because we'll read these directly from
+ * disk to open.
+ */
+ if ((t_ret = mdbp->sync(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ } else
+ (void)__db_free(dbc, p);
+ }
+
+ /* Discard the cursor(s) and data. */
+ if (data.data != NULL)
+ __os_free(data.data, data.size);
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ndbc != NULL && (t_ret = ndbc->c_close(ndbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
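The memcpy/DB_NTOHL and DB_HTONL/memcpy pairs above exist because the subdatabase's meta page number is stored as ordinary key/data payload, which the paging layer never byte-swaps; the value therefore has to be kept in a canonical byte order and converted by hand on each side. A generic round-trip sketch using the standard htonl/ntohl routines rather than the DB_HTONL/DB_NTOHL macros:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static void
store_pgno(unsigned char *rec, uint32_t pgno)
{
	uint32_t canon = htonl(pgno);		/* write in canonical order */

	memcpy(rec, &canon, sizeof(canon));
}

static uint32_t
load_pgno(const unsigned char *rec)
{
	uint32_t canon;

	memcpy(&canon, rec, sizeof(canon));
	return (ntohl(canon));			/* back to host order */
}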
+
+/*
+ * __db_dbenv_setup --
+ * Set up the underlying environment during a db_open.
+ *
+ * PUBLIC: int __db_dbenv_setup __P((DB *, const char *, u_int32_t));
+ */
+int
+__db_dbenv_setup(dbp, name, flags)
+ DB *dbp;
+ const char *name;
+ u_int32_t flags;
+{
+ DB *ldbp;
+ DB_ENV *dbenv;
+ DBT pgcookie;
+ DB_MPOOL_FINFO finfo;
+ DB_PGINFO pginfo;
+ int ret;
+ u_int32_t maxid;
+
+ dbenv = dbp->dbenv;
+
+ /* If we don't yet have an environment, it's time to create it. */
+ if (!F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ /* Make sure we have at least DB_MINCACHE pages in our cache. */
+ if (dbenv->mp_gbytes == 0 &&
+ dbenv->mp_bytes < dbp->pgsize * DB_MINPAGECACHE &&
+ (ret = dbenv->set_cachesize(
+ dbenv, 0, dbp->pgsize * DB_MINPAGECACHE, 0)) != 0)
+ return (ret);
+
+ if ((ret = dbenv->open(dbenv, NULL, DB_CREATE |
+ DB_INIT_MPOOL | DB_PRIVATE | LF_ISSET(DB_THREAD), 0)) != 0)
+ return (ret);
+ }
+
+ /* Register DB's pgin/pgout functions. */
+ if ((ret =
+ memp_register(dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0)
+ return (ret);
+
+ /*
+ * Open a backing file in the memory pool.
+ *
+ * If we need to pre- or post-process a file's pages on I/O, set the
+ * file type. If it's a hash file, always call the pgin and pgout
+ * routines. This means that hash files can never be mapped into
+ * process memory. If it's a btree file and requires swapping, we
+ * need to page the file in and out. This has to be right -- we can't
+ * mmap files that are being paged in and out.
+ */
+ memset(&finfo, 0, sizeof(finfo));
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ finfo.ftype =
+ F_ISSET(dbp, DB_AM_SWAP) ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
+ finfo.clear_len = DB_PAGE_DB_LEN;
+ break;
+ case DB_HASH:
+ finfo.ftype = DB_FTYPE_SET;
+ finfo.clear_len = DB_PAGE_DB_LEN;
+ break;
+ case DB_QUEUE:
+ finfo.ftype =
+ F_ISSET(dbp, DB_AM_SWAP) ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
+ finfo.clear_len = DB_PAGE_QUEUE_LEN;
+ break;
+ case DB_UNKNOWN:
+ /*
+ * If we're running in the verifier, our database might
+ * be corrupt and we might not know its type--but we may
+ * still want to be able to verify and salvage.
+ *
+ * If we can't identify the type, it's not going to be safe
+ * to call __db_pgin--we pretty much have to give up all
+ * hope of salvaging cross-endianness. Proceed anyway;
+ * at worst, the database will just appear more corrupt
+ * than it actually is, but at best, we may be able
+ * to salvage some data even with no metadata page.
+ */
+ if (F_ISSET(dbp, DB_AM_VERIFYING)) {
+ finfo.ftype = DB_FTYPE_NOTSET;
+ finfo.clear_len = DB_PAGE_DB_LEN;
+ break;
+ }
+ return (__db_unknown_type(dbp->dbenv,
+ "__db_dbenv_setup", dbp->type));
+ }
+ finfo.pgcookie = &pgcookie;
+ finfo.fileid = dbp->fileid;
+ finfo.lsn_offset = 0;
+
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.needswap = F_ISSET(dbp, DB_AM_SWAP);
+ pgcookie.data = &pginfo;
+ pgcookie.size = sizeof(DB_PGINFO);
+
+ if ((ret = memp_fopen(dbenv, name,
+ LF_ISSET(DB_RDONLY | DB_NOMMAP | DB_ODDFILESIZE | DB_TRUNCATE),
+ 0, dbp->pgsize, &finfo, &dbp->mpf)) != 0)
+ return (ret);
+
+ /*
+	 * We may need a per-thread mutex. Allocate it from the environment
+	 * region; there's supposed to be extra space there for that purpose.
+ */
+ if (LF_ISSET(DB_THREAD)) {
+ if ((ret = __db_mutex_alloc(
+ dbenv, dbenv->reginfo, (MUTEX **)&dbp->mutexp)) != 0)
+ return (ret);
+ if ((ret = __db_mutex_init(
+ dbenv, dbp->mutexp, 0, MUTEX_THREAD)) != 0) {
+ __db_mutex_free(dbenv, dbenv->reginfo, dbp->mutexp);
+ return (ret);
+ }
+ }
+
+ /* Get a log file id. */
+ if (LOGGING_ON(dbenv) && !IS_RECOVERING(dbenv) &&
+#if !defined(DEBUG_ROP)
+ !F_ISSET(dbp, DB_AM_RDONLY) &&
+#endif
+ (ret = log_register(dbenv, dbp, name)) != 0)
+ return (ret);
+
+ /*
+ * Insert ourselves into the DB_ENV's dblist. We allocate a
+ * unique ID to each {fileid, meta page number} pair, and to
+ * each temporary file (since they all have a zero fileid).
+ * This ID gives us something to use to tell which DB handles
+ * go with which databases in all the cursor adjustment
+ * routines, where we don't want to do a lot of ugly and
+ * expensive memcmps.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (maxid = 0, ldbp = LIST_FIRST(&dbenv->dblist);
+	    ldbp != NULL; ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ if (name != NULL &&
+ memcmp(ldbp->fileid, dbp->fileid, DB_FILE_ID_LEN) == 0 &&
+ ldbp->meta_pgno == dbp->meta_pgno)
+ break;
+ if (ldbp->adj_fileid > maxid)
+ maxid = ldbp->adj_fileid;
+ }
+
+ /*
+ * If ldbp is NULL, we didn't find a match, or we weren't
+ * really looking because name is NULL. Assign the dbp an
+ * adj_fileid one higher than the largest we found, and
+ * insert it at the head of the master dbp list.
+ *
+ * If ldbp is not NULL, it is a match for our dbp. Give dbp
+ * the same ID that ldbp has, and add it after ldbp so they're
+ * together in the list.
+ */
+ if (ldbp == NULL) {
+ dbp->adj_fileid = maxid + 1;
+ LIST_INSERT_HEAD(&dbenv->dblist, dbp, dblistlinks);
+ } else {
+ dbp->adj_fileid = ldbp->adj_fileid;
+ LIST_INSERT_AFTER(ldbp, dbp, dblistlinks);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (0);
+}
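+
+/*
+ * For illustration only: the adj_fileid assignment above, reduced to its
+ * essentials.  A handle open on the same {fileid, meta page} pair reuses
+ * that handle's ID; otherwise it gets one greater than the largest ID in
+ * use.  The struct, the helper name and the DB_EXAMPLE_SKETCHES guard are
+ * hypothetical and exist only for this sketch.
+ */
+#ifdef DB_EXAMPLE_SKETCHES
+struct example_handle {
+	u_int8_t fileid[DB_FILE_ID_LEN];
+	db_pgno_t meta_pgno;
+	u_int32_t adj_fileid;
+};
+
+static u_int32_t
+example_assign_adj_fileid(list, n, fileid, meta_pgno)
+	struct example_handle *list;
+	int n;
+	u_int8_t *fileid;
+	db_pgno_t meta_pgno;
+{
+	u_int32_t maxid;
+	int i;
+
+	for (maxid = 0, i = 0; i < n; i++) {
+		/* Reuse the ID of a matching open handle. */
+		if (memcmp(list[i].fileid, fileid, DB_FILE_ID_LEN) == 0 &&
+		    list[i].meta_pgno == meta_pgno)
+			return (list[i].adj_fileid);
+		if (list[i].adj_fileid > maxid)
+			maxid = list[i].adj_fileid;
+	}
+	return (maxid + 1);
+}
+#endif /* DB_EXAMPLE_SKETCHES */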
+
+/*
+ * __db_file_setup --
+ * Setup the file or in-memory data.
+ * Read the database metadata and resolve it with our arguments.
+ */
+static int
+__db_file_setup(dbp, name, flags, mode, meta_pgno, retflags)
+ DB *dbp;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ db_pgno_t meta_pgno;
+ int *retflags;
+{
+ DB *mdb;
+ DBT namedbt;
+ DB_ENV *dbenv;
+ DB_FH *fhp, fh;
+ DB_LSN lsn;
+ DB_TXN *txn;
+ size_t nr;
+ u_int32_t magic, oflags;
+ int ret, retry_cnt, t_ret;
+ char *real_name, mbuf[DBMETASIZE];
+
+#define IS_SUBDB_SETUP (meta_pgno != PGNO_BASE_MD)
+
+ dbenv = dbp->dbenv;
+ dbp->meta_pgno = meta_pgno;
+ txn = NULL;
+ *retflags = 0;
+
+ /*
+ * If we open a file handle and our caller is doing fcntl(2) locking,
+ * we can't close it because that would discard the caller's lock.
+ * Save it until we close the DB handle.
+ */
+ if (LF_ISSET(DB_FCNTL_LOCKING)) {
+ if ((ret = __os_malloc(dbenv, sizeof(*fhp), NULL, &fhp)) != 0)
+ return (ret);
+ } else
+ fhp = &fh;
+ memset(fhp, 0, sizeof(*fhp));
+
+ /*
+ * If the file is in-memory, set up is simple. Otherwise, do the
+ * hard work of opening and reading the file.
+ *
+ * If we have a file name, try and read the first page, figure out
+ * what type of file it is, and initialize everything we can based
+ * on that file's meta-data page.
+ *
+ * !!!
+ * There's a reason we don't push this code down into the buffer cache.
+ * The problem is that there's no information external to the file that
+ * we can use as a unique ID. UNIX has dev/inode pairs, but they are
+ * not necessarily unique after reboot, if the file was mounted via NFS.
+ * Windows has similar problems, as the FAT filesystem doesn't maintain
+ * dev/inode numbers across reboot. So, we must get something from the
+ * file we can use to ensure that, even after a reboot, the file we're
+ * joining in the cache is the right file for us to join. The solution
+ * we use is to maintain a file ID that's stored in the database, and
+ * that's why we have to open and read the file before calling into the
+ * buffer cache.
+ *
+ * The secondary reason is that there's additional information that
+ * we want to have before instantiating a file in the buffer cache:
+ * the page size, file type (btree/hash), if swapping is required,
+ * and flags (DB_RDONLY, DB_CREATE, DB_TRUNCATE). We could handle
+ * needing this information by allowing it to be set for a file in
+ * the buffer cache even after the file has been opened, and, of
+ * course, supporting the ability to flush a file from the cache as
+ * necessary, e.g., if we guessed wrongly about the page size. Given
+ * that we have to read the file anyway to get the file ID, we might
+ * as well get the rest, too.
+ *
+ * Get the real file name.
+ */
+ if (name == NULL) {
+ F_SET(dbp, DB_AM_INMEM);
+
+ if (dbp->type == DB_UNKNOWN) {
+ __db_err(dbenv,
+ "DBTYPE of unknown without existing file");
+ return (EINVAL);
+ }
+ real_name = NULL;
+
+ /* Set the page size if we don't have one yet. */
+ if (dbp->pgsize == 0)
+ dbp->pgsize = DB_DEF_IOSIZE;
+
+ /*
+ * If the file is a temporary file and we're doing locking,
+ * then we have to create a unique file ID. We can't use our
+ * normal dev/inode pair (or whatever this OS uses in place of
+ * dev/inode pairs) because no backing file will be created
+ * until the mpool cache is filled forcing the buffers to disk.
+ * Grab a random locker ID to use as a file ID. The created
+ * ID must never match a potential real file ID -- we know it
+ * won't because real file IDs contain a time stamp after the
+ * dev/inode pair, and we're simply storing a 4-byte value.
+ *
+ * !!!
+ * Store the locker in the file id structure -- we can get it
+ * from there as necessary, and it saves having two copies.
+ */
+ if (LOCKING_ON(dbenv) &&
+ (ret = lock_id(dbenv, (u_int32_t *)dbp->fileid)) != 0)
+ return (ret);
+
+ return (0);
+ }
+
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, name, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ /*
+ * Open the backing file. We need to make sure that multiple processes
+ * attempting to create the file at the same time are properly ordered
+ * so that only one of them creates the "unique" file ID, so we open it
+	 * O_EXCL and O_CREAT so that two simultaneous attempts to create the
+	 * file will fail in one of the attempts. If we're the one that
+	 * fails, simply retry without the O_CREAT flag, which requires that
+	 * the meta-data page already exist.
+ */
+
+ /* Fill in the default file mode. */
+ if (mode == 0)
+ mode = __db_omode("rwrw--");
+
+ oflags = 0;
+ if (LF_ISSET(DB_RDONLY))
+ oflags |= DB_OSO_RDONLY;
+ if (LF_ISSET(DB_TRUNCATE))
+ oflags |= DB_OSO_TRUNC;
+
+ retry_cnt = 0;
+open_retry:
+ *retflags = 0;
+ ret = 0;
+ if (!IS_SUBDB_SETUP && LF_ISSET(DB_CREATE)) {
+ if (dbp->open_txn != NULL) {
+ /*
+ * Start a child transaction to wrap this individual
+ * create.
+ */
+ if ((ret =
+ txn_begin(dbenv, dbp->open_txn, &txn, 0)) != 0)
+ goto err_msg;
+
+ memset(&namedbt, 0, sizeof(namedbt));
+ namedbt.data = (char *)name;
+ namedbt.size = strlen(name) + 1;
+ if ((ret = __crdel_fileopen_log(dbenv, txn,
+ &lsn, DB_FLUSH, &namedbt, mode)) != 0)
+ goto err_msg;
+ }
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREOPEN, ret, name);
+ if ((ret = __os_open(dbenv, real_name,
+ oflags | DB_OSO_CREATE | DB_OSO_EXCL, mode, fhp)) == 0) {
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTOPEN, ret, name);
+
+ /* Commit the file create. */
+ if (dbp->open_txn != NULL) {
+ if ((ret = txn_commit(txn, DB_TXN_SYNC)) != 0)
+ goto err_msg;
+ txn = NULL;
+ }
+
+ /*
+ * We created the file. This means that if we later
+ * fail, we need to delete the file and if we're going
+ * to do that, we need to trash any pages in the
+ * memory pool. Since we only know here that we
+ * created the file, we're going to set the flag here
+ * and clear it later if we commit successfully.
+ */
+ F_SET(dbp, DB_AM_DISCARD);
+ *retflags |= DB_FILE_SETUP_CREATE;
+ } else {
+ /*
+ * Abort the file create. If the abort fails, report
+ * the error returned by txn_abort(), rather than the
+ * open error, for no particular reason.
+ */
+ if (dbp->open_txn != NULL) {
+ if ((t_ret = txn_abort(txn)) != 0) {
+ ret = t_ret;
+ goto err_msg;
+ }
+ txn = NULL;
+ }
+
+ /*
+ * If we were not doing an exclusive open, try again
+ * without the create flag.
+ */
+ if (ret == EEXIST && !LF_ISSET(DB_EXCL)) {
+ LF_CLR(DB_CREATE);
+ DB_TEST_RECOVERY(dbp,
+ DB_TEST_POSTOPEN, ret, name);
+ goto open_retry;
+ }
+ }
+ } else
+ ret = __os_open(dbenv, real_name, oflags, mode, fhp);
+
+ /*
+	 * Be quiet if we couldn't open the file because it didn't exist or
+	 * we did not have permission; customers don't like those messages
+	 * appearing in the logs. Otherwise, complain loudly.
+ */
+ if (ret != 0) {
+ if (ret == EACCES || ret == ENOENT)
+ goto err;
+ goto err_msg;
+ }
+
+ /* Set the page size if we don't have one yet. */
+ if (dbp->pgsize == 0) {
+ if (IS_SUBDB_SETUP) {
+ if ((ret = __db_master_open(dbp,
+ name, flags, mode, &mdb)) != 0)
+ goto err;
+ dbp->pgsize = mdb->pgsize;
+ (void)mdb->close(mdb, 0);
+ } else if ((ret = __db_set_pgsize(dbp, fhp, real_name)) != 0)
+ goto err;
+ }
+
+ /*
+ * Seek to the metadata offset; if it's a master database open or a
+ * database without subdatabases, we're seeking to 0, but that's OK.
+ */
+ if ((ret = __os_seek(dbenv, fhp,
+ dbp->pgsize, meta_pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err_msg;
+
+ /*
+ * Read the metadata page. We read DBMETASIZE bytes, which is larger
+ * than any access method's metadata page and smaller than any disk
+ * sector.
+ */
+ if ((ret = __os_read(dbenv, fhp, mbuf, sizeof(mbuf), &nr)) != 0)
+ goto err_msg;
+
+ if (nr == sizeof(mbuf)) {
+ /*
+ * Figure out what access method we're dealing with, and then
+ * call access method specific code to check error conditions
+ * based on conflicts between the found file and application
+ * arguments. A found file overrides some user information --
+ * we don't consider it an error, for example, if the user set
+ * an expected byte order and the found file doesn't match it.
+ */
+ F_CLR(dbp, DB_AM_SWAP);
+ magic = ((DBMETA *)mbuf)->magic;
+
+swap_retry: switch (magic) {
+ case DB_BTREEMAGIC:
+ if ((ret =
+ __bam_metachk(dbp, name, (BTMETA *)mbuf)) != 0)
+ goto err;
+ break;
+ case DB_HASHMAGIC:
+ if ((ret =
+ __ham_metachk(dbp, name, (HMETA *)mbuf)) != 0)
+ goto err;
+ break;
+ case DB_QAMMAGIC:
+ if ((ret =
+ __qam_metachk(dbp, name, (QMETA *)mbuf)) != 0)
+ goto err;
+ break;
+ case 0:
+ /*
+ * There are two ways we can get a 0 magic number.
+ * If we're creating a subdatabase, then the magic
+ * number will be 0. We allocate a page as part of
+ * finding out what the base page number will be for
+ * the new subdatabase, but it's not initialized in
+ * any way.
+ *
+		 * The second case happens if we are in recovery and
+		 * we are going to recreate a database; it's possible
+		 * that its page was created (on systems where pages
+		 * must be created explicitly to avoid holes in files)
+		 * but is still 0.
+ */
+ if (IS_SUBDB_SETUP) { /* Case 1 */
+ if ((IS_RECOVERING(dbenv)
+ && F_ISSET((DB_LOG *)
+ dbenv->lg_handle, DBLOG_FORCE_OPEN))
+ || ((DBMETA *)mbuf)->pgno != PGNO_INVALID)
+ goto empty;
+
+ ret = EINVAL;
+ goto err;
+ }
+ /* Case 2 */
+ if (IS_RECOVERING(dbenv)) {
+ *retflags |= DB_FILE_SETUP_ZERO;
+ goto empty;
+ }
+ goto bad_format;
+ default:
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ goto bad_format;
+
+ M_32_SWAP(magic);
+ F_SET(dbp, DB_AM_SWAP);
+ goto swap_retry;
+ }
+ } else {
+ /*
+ * Only newly created files are permitted to fail magic
+ * number tests.
+ */
+ if (nr != 0 || (!IS_RECOVERING(dbenv) && IS_SUBDB_SETUP))
+ goto bad_format;
+
+ /* Let the caller know that we had a 0-length file. */
+ if (!LF_ISSET(DB_CREATE | DB_TRUNCATE))
+ *retflags |= DB_FILE_SETUP_ZERO;
+
+ /*
+ * The only way we can reach here with the DB_CREATE flag set
+ * is if we created the file. If that's not the case, then
+ * either (a) someone else created the file but has not yet
+ * written out the metadata page, or (b) we truncated the file
+ * (DB_TRUNCATE) leaving it zero-length. In the case of (a),
+ * we want to sleep and give the file creator time to write
+ * the metadata page. In the case of (b), we want to continue.
+ *
+ * !!!
+ * There's a race in the case of two processes opening the file
+ * with the DB_TRUNCATE flag set at roughly the same time, and
+ * they could theoretically hurt each other. Sure hope that's
+ * unlikely.
+ */
+ if (!LF_ISSET(DB_CREATE | DB_TRUNCATE) &&
+ !IS_RECOVERING(dbenv)) {
+ if (retry_cnt++ < 3) {
+ __os_sleep(dbenv, 1, 0);
+ goto open_retry;
+ }
+bad_format: if (!IS_RECOVERING(dbenv))
+ __db_err(dbenv,
+ "%s: unexpected file type or format", name);
+ ret = EINVAL;
+ goto err;
+ }
+
+		DB_ASSERT(dbp->type != DB_UNKNOWN);
+
+empty: /*
+ * The file is empty, and that's OK. If it's not a subdatabase,
+ * though, we do need to generate a unique file ID for it. The
+ * unique file ID includes a timestamp so that we can't collide
+ * with any other files, even when the file IDs (dev/inode pair)
+ * are reused.
+ */
+ if (!IS_SUBDB_SETUP) {
+ if (*retflags & DB_FILE_SETUP_ZERO)
+ memset(dbp->fileid, 0, DB_FILE_ID_LEN);
+ else if ((ret = __os_fileid(dbenv,
+ real_name, 1, dbp->fileid)) != 0)
+ goto err_msg;
+ }
+ }
+
+ if (0) {
+err_msg: __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ }
+
+ /*
+ * Abort any running transaction -- it can only exist if something
+ * went wrong.
+ */
+err:
+DB_TEST_RECOVERY_LABEL
+
+ /*
+ * If we opened a file handle and our caller is doing fcntl(2) locking,
+ * then we can't close it because that would discard the caller's lock.
+ * Otherwise, close the handle.
+ */
+ if (F_ISSET(fhp, DB_FH_VALID)) {
+ if (ret == 0 && LF_ISSET(DB_FCNTL_LOCKING))
+ dbp->saved_open_fhp = fhp;
+ else
+ if ((t_ret = __os_closehandle(fhp)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /*
+	 * This must be done after the file is closed, since txn_abort()
+	 * may remove the file, and an open file cannot be removed on
+	 * Windows platforms.
+ */
+ if (txn != NULL)
+ (void)txn_abort(txn);
+
+ if (real_name != NULL)
+ __os_freestr(real_name);
+
+ return (ret);
+}
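+
+/*
+ * A minimal sketch of the swap_retry logic above: a magic number that is
+ * not recognized in native byte order is byte-swapped and tried once more
+ * before the file is declared to be in an unexpected format.  The helper
+ * name and the DB_EXAMPLE_SKETCHES guard are hypothetical; the real work
+ * is done inline in __db_file_setup.
+ */
+#ifdef DB_EXAMPLE_SKETCHES
+static int
+example_detect_swap(magic, swappedp)
+	u_int32_t magic;
+	int *swappedp;
+{
+	int pass;
+
+	for (*swappedp = 0, pass = 0; pass < 2; pass++)
+		switch (magic) {
+		case DB_BTREEMAGIC:
+		case DB_HASHMAGIC:
+		case DB_QAMMAGIC:
+			return (0);	/* Recognized; *swappedp is set. */
+		default:
+			M_32_SWAP(magic);	/* Try the other byte order. */
+			*swappedp = 1;
+			break;
+		}
+	return (EINVAL);	/* Unknown in either byte order. */
+}
+#endif /* DB_EXAMPLE_SKETCHES */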
+
+/*
+ * __db_set_pgsize --
+ * Set the page size based on file information.
+ */
+static int
+__db_set_pgsize(dbp, fhp, name)
+ DB *dbp;
+ DB_FH *fhp;
+ char *name;
+{
+ DB_ENV *dbenv;
+ u_int32_t iopsize;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+	 * Use the filesystem's optimum I/O size as the pagesize if a pagesize
+	 * was not specified. Some filesystems have 64K as their optimum I/O
+	 * size, but as that results in fairly large default caches, we limit
+	 * the default pagesize to 16K.
+ */
+ if ((ret = __os_ioinfo(dbenv, name, fhp, NULL, NULL, &iopsize)) != 0) {
+ __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ return (ret);
+ }
+ if (iopsize < 512)
+ iopsize = 512;
+ if (iopsize > 16 * 1024)
+ iopsize = 16 * 1024;
+
+ /*
+ * Sheer paranoia, but we don't want anything that's not a power-of-2
+ * (we rely on that for alignment of various types on the pages), and
+ * we want a multiple of the sector size as well.
+ */
+ OS_ROUNDOFF(iopsize, 512);
+
+ dbp->pgsize = iopsize;
+ F_SET(dbp, DB_AM_PGDEF);
+
+ return (0);
+}
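+
+/*
+ * For illustration only (hypothetical helper and guard): the sizing rule
+ * used above.  The filesystem's preferred I/O size is clamped to the
+ * range [512, 16K] and rounded to a multiple of the sector size, so a
+ * reported 8K yields an 8K page, 64K yields 16K, and 100 yields 512.
+ */
+#ifdef DB_EXAMPLE_SKETCHES
+static u_int32_t
+example_default_pgsize(iopsize)
+	u_int32_t iopsize;
+{
+	if (iopsize < 512)
+		iopsize = 512;
+	if (iopsize > 16 * 1024)
+		iopsize = 16 * 1024;
+	OS_ROUNDOFF(iopsize, 512);	/* Multiple of the sector size. */
+	return (iopsize);
+}
+#endif /* DB_EXAMPLE_SKETCHES */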
+
+/*
+ * __db_close --
+ * DB destructor.
+ *
+ * PUBLIC: int __db_close __P((DB *, u_int32_t));
+ */
+int
+__db_close(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ int ret, t_ret;
+
+ ret = 0;
+
+ dbenv = dbp->dbenv;
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if ((ret = __db_closechk(dbp, flags)) != 0)
+ goto err;
+
+ /* If never opened, or not currently open, it's easy. */
+ if (!F_ISSET(dbp, DB_OPEN_CALLED))
+ goto never_opened;
+
+ /* Sync the underlying access method. */
+ if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
+ (t_ret = dbp->sync(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Go through the active cursors and call the cursor recycle routine,
+ * which resolves pending operations and moves the cursors onto the
+ * free list. Then, walk the free list and call the cursor destroy
+ * routine.
+ */
+ while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ if ((t_ret = __db_c_destroy(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Close any outstanding join cursors. Join cursors destroy
+ * themselves on close and have no separate destroy routine.
+ */
+ while ((dbc = TAILQ_FIRST(&dbp->join_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Remove this DB handle from the DB_ENV's dblist. */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ LIST_REMOVE(dbp, dblistlinks);
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ /* Sync the memory pool. */
+ if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
+ (t_ret = memp_fsync(dbp->mpf)) != 0 &&
+ t_ret != DB_INCOMPLETE && ret == 0)
+ ret = t_ret;
+
+ /* Close any handle we've been holding since the open. */
+ if (dbp->saved_open_fhp != NULL &&
+ F_ISSET(dbp->saved_open_fhp, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(dbp->saved_open_fhp)) != 0 && ret == 0)
+ ret = t_ret;
+
+never_opened:
+ /*
+ * Call the access specific close function.
+ *
+ * !!!
+ * Because of where the function is called in the close process,
+ * these routines can't do anything that would dirty pages or
+ * otherwise affect closing down the database.
+ */
+ if ((t_ret = __ham_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __bam_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __qam_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+
+err:
+ /* Refresh the structure and close any local environment. */
+ if ((t_ret = __db_refresh(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+ if (F_ISSET(dbenv, DB_ENV_DBLOCAL) &&
+ --dbenv->dblocal_ref == 0 &&
+ (t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ memset(dbp, CLEAR_BYTE, sizeof(*dbp));
+ __os_free(dbp, sizeof(*dbp));
+
+ return (ret);
+}
+
+/*
+ * __db_refresh --
+ * Refresh the DB structure, releasing any allocated resources.
+ */
+static int
+__db_refresh(dbp)
+ DB *dbp;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ int ret, t_ret;
+
+ ret = 0;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Go through the active cursors and call the cursor recycle routine,
+ * which resolves pending operations and moves the cursors onto the
+ * free list. Then, walk the free list and call the cursor destroy
+ * routine.
+ */
+ while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ if ((t_ret = __db_c_destroy(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ dbp->type = 0;
+
+ /* Close the memory pool file handle. */
+ if (dbp->mpf != NULL) {
+ if (F_ISSET(dbp, DB_AM_DISCARD))
+ (void)__memp_fremove(dbp->mpf);
+ if ((t_ret = memp_fclose(dbp->mpf)) != 0 && ret == 0)
+ ret = t_ret;
+ dbp->mpf = NULL;
+ }
+
+ /* Discard the thread mutex. */
+ if (dbp->mutexp != NULL) {
+ __db_mutex_free(dbenv, dbenv->reginfo, dbp->mutexp);
+ dbp->mutexp = NULL;
+ }
+
+ /* Discard the log file id. */
+ if (!IS_RECOVERING(dbenv)
+ && dbp->log_fileid != DB_LOGFILEID_INVALID)
+ (void)log_unregister(dbenv, dbp);
+
+ F_CLR(dbp, DB_AM_DISCARD);
+ F_CLR(dbp, DB_AM_INMEM);
+ F_CLR(dbp, DB_AM_RDONLY);
+ F_CLR(dbp, DB_AM_SWAP);
+ F_CLR(dbp, DB_DBM_ERROR);
+ F_CLR(dbp, DB_OPEN_CALLED);
+
+ return (ret);
+}
+
+/*
+ * __db_remove
+ * Remove method for DB.
+ *
+ * PUBLIC: int __db_remove __P((DB *, const char *, const char *, u_int32_t));
+ */
+int
+__db_remove(dbp, name, subdb, flags)
+ DB *dbp;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ DBT namedbt;
+ DB_ENV *dbenv;
+ DB_LOCK remove_lock;
+ DB_LSN newlsn;
+ int ret, t_ret, (*callback_func) __P((DB *, void *));
+ char *backup, *real_back, *real_name;
+ void *cookie;
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+ backup = real_back = real_name = NULL;
+
+ PANIC_CHECK(dbenv);
+ /*
+	 * Cannot use DB_ILLEGAL_AFTER_OPEN here because that macro returns;
+	 * we cannot simply return here, but must deal with the error and
+	 * destroy the handle anyway.
+ */
+ if (F_ISSET(dbp, DB_OPEN_CALLED)) {
+ ret = __db_mi_open(dbp->dbenv, "remove", 1);
+ goto err_close;
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_removechk(dbp, flags)) != 0)
+ goto err_close;
+
+ /*
+ * Subdatabases.
+ */
+ if (subdb != NULL) {
+ /* Subdatabases must be created in named files. */
+ if (name == NULL) {
+ __db_err(dbenv,
+ "multiple databases cannot be created in temporary files");
+			ret = EINVAL;
+			goto err_close;
+ }
+ return (__db_subdb_remove(dbp, name, subdb));
+ }
+
+ if ((ret = dbp->open(dbp,
+ name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0)) != 0)
+ goto err_close;
+
+ if (LOGGING_ON(dbenv) && (ret = __log_file_lock(dbp)) != 0)
+ goto err_close;
+
+ if ((ret = dbp->sync(dbp, 0)) != 0)
+ goto err_close;
+
+ /* Start the transaction and log the delete. */
+ if (TXN_ON(dbenv) && (ret = __db_metabegin(dbp, &remove_lock)) != 0)
+ goto err_close;
+
+ if (LOGGING_ON(dbenv)) {
+ memset(&namedbt, 0, sizeof(namedbt));
+ namedbt.data = (char *)name;
+ namedbt.size = strlen(name) + 1;
+
+ if ((ret = __crdel_delete_log(dbenv,
+ dbp->open_txn, &newlsn, DB_FLUSH,
+ dbp->log_fileid, &namedbt)) != 0) {
+ __db_err(dbenv,
+ "%s: %s", name, db_strerror(ret));
+ goto err;
+ }
+ }
+
+ /* Find the real name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /*
+ * XXX
+ * We don't bother to open the file and call __memp_fremove on the mpf.
+ * There is a potential race here. It is at least possible that, if
+ * the unique filesystem ID (dev/inode pair on UNIX) is reallocated
+ * within a second (the granularity of the fileID timestamp), a new
+ * file open will get the same fileID as the file being "removed".
+ * We may actually want to open the file and call __memp_fremove on
+ * the mpf to get around this.
+ */
+
+ /* Create name for backup file. */
+ if (TXN_ON(dbenv)) {
+ if ((ret =
+ __db_backup_name(dbenv, name, &backup, &newlsn)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
+ goto err;
+ }
+
+ callback_func = __db_remove_callback;
+ cookie = real_back;
+ DB_TEST_RECOVERY(dbp, DB_TEST_PRERENAME, ret, name);
+ if (dbp->db_am_remove != NULL &&
+ (ret = dbp->db_am_remove(dbp,
+ name, subdb, &newlsn, &callback_func, &cookie)) != 0)
+ goto err;
+ /*
+ * On Windows, the underlying file must be closed to perform a remove.
+ * Nothing later in __db_remove requires that it be open, and the
+ * dbp->close closes it anyway, so we just close it early.
+ */
+ (void)__memp_fremove(dbp->mpf);
+ if ((ret = memp_fclose(dbp->mpf)) != 0)
+ goto err;
+ dbp->mpf = NULL;
+
+ if (TXN_ON(dbenv))
+ ret = __os_rename(dbenv, real_name, real_back);
+ else
+ ret = __os_unlink(dbenv, real_name);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTRENAME, ret, name);
+
+err:
+DB_TEST_RECOVERY_LABEL
+ /*
+ * End the transaction, committing the transaction if we were
+ * successful, aborting otherwise.
+ */
+ if (dbp->open_txn != NULL && (t_ret = __db_metaend(dbp, &remove_lock,
+ ret == 0, callback_func, cookie)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* FALLTHROUGH */
+
+err_close:
+ if (real_back != NULL)
+ __os_freestr(real_back);
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ if (backup != NULL)
+ __os_freestr(backup);
+
+ /* We no longer have an mpool, so syncing would be disastrous. */
+ if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_subdb_remove --
+ * Remove a subdatabase.
+ */
+static int
+__db_subdb_remove(dbp, name, subdb)
+ DB *dbp;
+ const char *name, *subdb;
+{
+ DB *mdbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK remove_lock;
+ db_pgno_t meta_pgno;
+ int ret, t_ret;
+
+ mdbp = NULL;
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+
+ /* Start the transaction. */
+ if (TXN_ON(dbenv) && (ret = __db_metabegin(dbp, &remove_lock)) != 0)
+ goto err_close;
+
+ /*
+ * Open the subdatabase. We can use the user's DB handle for this
+ * purpose, I think.
+ */
+ if ((ret = __db_open(dbp, name, subdb, DB_UNKNOWN, 0, 0)) != 0)
+ goto err;
+
+ /* Free up the pages in the subdatabase. */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_reclaim(dbp, dbp->open_txn)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_reclaim(dbp, dbp->open_txn)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv,
+ "__db_subdb_remove", dbp->type);
+ goto err;
+ }
+
+ /*
+ * Remove the entry from the main database and free the subdatabase
+ * metadata page.
+ */
+ if ((ret = __db_master_open(dbp, name, 0, 0, &mdbp)) != 0)
+ goto err;
+
+ if ((ret = __db_master_update(mdbp,
+ subdb, dbp->type, &meta_pgno, MU_REMOVE, NULL, 0)) != 0)
+ goto err;
+
+err: /*
+ * End the transaction, committing the transaction if we were
+ * successful, aborting otherwise.
+ */
+ if (dbp->open_txn != NULL && (t_ret = __db_metaend(dbp,
+ &remove_lock, ret == 0, NULL, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+err_close:
+ /*
+	 * Close the user's DB handle -- do this LAST to avoid smashing
+	 * the transaction information.
+ */
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (mdbp != NULL && (t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_rename
+ * Rename method for DB.
+ *
+ * PUBLIC: int __db_rename __P((DB *,
+ * PUBLIC: const char *, const char *, const char *, u_int32_t));
+ */
+int
+__db_rename(dbp, filename, subdb, newname, flags)
+ DB *dbp;
+ const char *filename, *subdb, *newname;
+ u_int32_t flags;
+{
+ DBT namedbt, newnamedbt;
+ DB_ENV *dbenv;
+ DB_LOCK remove_lock;
+ DB_LSN newlsn;
+ char *real_name, *real_newname;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+ real_name = real_newname = NULL;
+
+ PANIC_CHECK(dbenv);
+ /*
+	 * Cannot use DB_ILLEGAL_AFTER_OPEN here because that macro returns;
+	 * we cannot simply return here, but must deal with the error and
+	 * destroy the handle anyway.
+ */
+ if (F_ISSET(dbp, DB_OPEN_CALLED)) {
+ ret = __db_mi_open(dbp->dbenv, "rename", 1);
+ goto err_close;
+ }
+
+ /* Validate arguments -- has same rules as remove. */
+ if ((ret = __db_removechk(dbp, flags)) != 0)
+ goto err_close;
+
+ /*
+ * Subdatabases.
+ */
+ if (subdb != NULL) {
+ if (filename == NULL) {
+ __db_err(dbenv,
+ "multiple databases cannot be created in temporary files");
+			ret = EINVAL;
+			goto err_close;
+ }
+ return (__db_subdb_rename(dbp, filename, subdb, newname));
+ }
+
+ if ((ret = dbp->open(dbp,
+ filename, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0)) != 0)
+ goto err_close;
+
+ if (LOGGING_ON(dbenv) && (ret = __log_file_lock(dbp)) != 0)
+ goto err_close;
+
+ if ((ret = dbp->sync(dbp, 0)) != 0)
+ goto err_close;
+
+ /* Start the transaction and log the rename. */
+ if (TXN_ON(dbenv) && (ret = __db_metabegin(dbp, &remove_lock)) != 0)
+ goto err_close;
+
+ if (LOGGING_ON(dbenv)) {
+ memset(&namedbt, 0, sizeof(namedbt));
+ namedbt.data = (char *)filename;
+ namedbt.size = strlen(filename) + 1;
+
+		memset(&newnamedbt, 0, sizeof(newnamedbt));
+ newnamedbt.data = (char *)newname;
+ newnamedbt.size = strlen(newname) + 1;
+
+ if ((ret = __crdel_rename_log(dbenv, dbp->open_txn,
+ &newlsn, 0, dbp->log_fileid, &namedbt, &newnamedbt)) != 0) {
+ __db_err(dbenv, "%s: %s", filename, db_strerror(ret));
+ goto err;
+ }
+
+ if ((ret = __log_filelist_update(dbenv, dbp,
+ dbp->log_fileid, newname, NULL)) != 0)
+ goto err;
+ }
+
+ /* Find the real name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, filename, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /* Find the real newname of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, newname, 0, NULL, &real_newname)) != 0)
+ goto err;
+
+ /*
+ * It is an error to rename a file over one that already exists,
+ * as that wouldn't be transaction-safe.
+ */
+ if (__os_exists(real_newname, NULL) == 0) {
+ ret = EEXIST;
+ __db_err(dbenv, "rename: file %s exists", real_newname);
+ goto err;
+ }
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PRERENAME, ret, filename);
+ if (dbp->db_am_rename != NULL &&
+ (ret = dbp->db_am_rename(dbp, filename, subdb, newname)) != 0)
+ goto err;
+ /*
+ * We have to flush the cache for a couple of reasons. First, the
+ * underlying MPOOLFILE maintains a "name" that unrelated processes
+ * can use to open the file in order to flush pages, and that name
+ * is about to be wrong. Second, on Windows the unique file ID is
+ * generated from the file's name, not other file information as is
+ * the case on UNIX, and so a subsequent open of the old file name
+ * could conceivably result in a matching "unique" file ID.
+ */
+ if ((ret = __memp_fremove(dbp->mpf)) != 0)
+ goto err;
+
+ /*
+ * On Windows, the underlying file must be closed to perform a rename.
+ * Nothing later in __db_rename requires that it be open, and the call
+ * to dbp->close closes it anyway, so we just close it early.
+ */
+ if ((ret = memp_fclose(dbp->mpf)) != 0)
+ goto err;
+ dbp->mpf = NULL;
+
+ ret = __os_rename(dbenv, real_name, real_newname);
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTRENAME, ret, newname);
+
+DB_TEST_RECOVERY_LABEL
+err: if (dbp->open_txn != NULL && (t_ret = __db_metaend(dbp,
+ &remove_lock, ret == 0, NULL, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+err_close:
+ /* We no longer have an mpool, so syncing would be disastrous. */
+ dbp->close(dbp, DB_NOSYNC);
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ if (real_newname != NULL)
+ __os_freestr(real_newname);
+
+ return (ret);
+}
+
+/*
+ * __db_subdb_rename --
+ * Rename a subdatabase.
+ */
+static int
+__db_subdb_rename(dbp, name, subdb, newname)
+ DB *dbp;
+ const char *name, *subdb, *newname;
+{
+ DB *mdbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK remove_lock;
+ int ret, t_ret;
+
+ mdbp = NULL;
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+
+ /* Start the transaction. */
+ if (TXN_ON(dbenv) && (ret = __db_metabegin(dbp, &remove_lock)) != 0)
+ goto err_close;
+
+ /*
+ * Open the subdatabase. We can use the user's DB handle for this
+ * purpose, I think.
+ */
+ if ((ret = __db_open(dbp, name, subdb, DB_UNKNOWN, 0, 0)) != 0)
+ goto err;
+
+ /*
+ * Rename the entry in the main database.
+ */
+ if ((ret = __db_master_open(dbp, name, 0, 0, &mdbp)) != 0)
+ goto err;
+
+ if ((ret = __db_master_update(mdbp,
+ subdb, dbp->type, NULL, MU_RENAME, newname, 0)) != 0)
+ goto err;
+
+err: /*
+ * End the transaction, committing the transaction if we were
+ * successful, aborting otherwise.
+ */
+ if (dbp->open_txn != NULL && (t_ret = __db_metaend(dbp,
+ &remove_lock, ret == 0, NULL, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+err_close:
+ /*
+	 * Close the user's DB handle -- do this LAST to avoid smashing
+	 * the transaction information.
+ */
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (mdbp != NULL && (t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_metabegin --
+ *
+ * Begin a meta-data operation. This involves doing any required locking,
+ * potentially beginning a transaction and then telling the caller if you
+ * did or did not begin the transaction.
+ *
+ * Callers only need this when they may actually be creating files and/or
+ * doing deletes (i.e., if the caller is opening and not creating, then we
+ * don't need to do any of this).
+ * PUBLIC: int __db_metabegin __P((DB *, DB_LOCK *));
+ */
+int
+__db_metabegin(dbp, lockp)
+ DB *dbp;
+ DB_LOCK *lockp;
+{
+ DB_ENV *dbenv;
+ DBT dbplock;
+ u_int32_t locker, lockval;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ lockp->off = LOCK_INVALID;
+
+ /*
+ * There is no single place where we can know that we are or are not
+ * going to be creating any files and/or subdatabases, so we will
+	 * always begin a transaction when we start creating one. If we later
+ * discover that this was unnecessary, we will abort the transaction.
+ * Recovery is written so that if we log a file create, but then
+ * discover that we didn't have to do it, we recover correctly. The
+ * file recovery design document has details.
+ *
+ * We need to single thread all create and delete operations, so if we
+ * are running with locking, we must obtain a lock. We use lock_id to
+ * generate a unique locker id and use a handcrafted DBT as the object
+ * on which we are locking.
+ */
+ if (LOCKING_ON(dbenv)) {
+ if ((ret = lock_id(dbenv, &locker)) != 0)
+ return (ret);
+ lockval = 0;
+ dbplock.data = &lockval;
+ dbplock.size = sizeof(lockval);
+ if ((ret = lock_get(dbenv,
+ locker, 0, &dbplock, DB_LOCK_WRITE, lockp)) != 0)
+ return (ret);
+ }
+
+ return (txn_begin(dbenv, NULL, &dbp->open_txn, 0));
+}
+
+/*
+ * __db_metaend --
+ * End a meta-data operation.
+ * PUBLIC: int __db_metaend __P((DB *,
+ * PUBLIC: DB_LOCK *, int, int (*)(DB *, void *), void *));
+ */
+int
+__db_metaend(dbp, lockp, commit, callback, cookie)
+ DB *dbp;
+ DB_LOCK *lockp;
+ int commit, (*callback) __P((DB *, void *));
+ void *cookie;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+
+ /* End the transaction. */
+ if (commit) {
+ if ((ret = txn_commit(dbp->open_txn, DB_TXN_SYNC)) == 0) {
+ /*
+ * Unlink any underlying file, we've committed the
+ * transaction.
+ */
+ if (callback != NULL)
+ ret = callback(dbp, cookie);
+ }
+ } else if ((t_ret = txn_abort(dbp->open_txn)) && ret == 0)
+ ret = t_ret;
+
+ /* Release our lock. */
+ if (lockp->off != LOCK_INVALID &&
+ (t_ret = lock_put(dbenv, lockp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
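+
+/*
+ * Sketch of how the two routines above pair up, mirroring the calls made
+ * by __db_remove and __db_rename; it assumes the environment is running
+ * with transactions (TXN_ON), as those callers check.  The helper name
+ * and the DB_EXAMPLE_SKETCHES guard are hypothetical.
+ */
+#ifdef DB_EXAMPLE_SKETCHES
+static int
+example_meta_op(dbp)
+	DB *dbp;
+{
+	DB_LOCK lock;
+	int ret, t_ret;
+
+	/* Take the single-threading lock and begin dbp->open_txn. */
+	if ((ret = __db_metabegin(dbp, &lock)) != 0)
+		return (ret);
+
+	/* ... the actual create/remove/rename work would go here ... */
+
+	/* Commit on success, abort on failure, then release the lock. */
+	if ((t_ret = __db_metaend(dbp, &lock, ret == 0, NULL, NULL)) != 0 &&
+	    ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+#endif /* DB_EXAMPLE_SKETCHES */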
+
+/*
+ * __db_log_page
+ * Log a meta-data or root page during a create operation.
+ *
+ * PUBLIC: int __db_log_page __P((DB *,
+ * PUBLIC: const char *, DB_LSN *, db_pgno_t, PAGE *));
+ */
+int
+__db_log_page(dbp, name, lsn, pgno, page)
+ DB *dbp;
+ const char *name;
+ DB_LSN *lsn;
+ db_pgno_t pgno;
+ PAGE *page;
+{
+ DBT name_dbt, page_dbt;
+ DB_LSN new_lsn;
+ int ret;
+
+ if (dbp->open_txn == NULL)
+ return (0);
+
+ memset(&page_dbt, 0, sizeof(page_dbt));
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = page;
+ if (pgno == PGNO_BASE_MD) {
+ /*
+ * !!!
+ * Make sure that we properly handle a null name. The old
+ * Tcl sent us pathnames of the form ""; it may be the case
+ * that the new Tcl doesn't do that, so we can get rid of
+ * the second check here.
+ */
+ memset(&name_dbt, 0, sizeof(name_dbt));
+ name_dbt.data = (char *)name;
+ if (name == NULL || *name == '\0')
+ name_dbt.size = 0;
+ else
+ name_dbt.size = strlen(name) + 1;
+
+ ret = __crdel_metapage_log(dbp->dbenv,
+ dbp->open_txn, &new_lsn, DB_FLUSH,
+ dbp->log_fileid, &name_dbt, pgno, &page_dbt);
+ } else
+ ret = __crdel_metasub_log(dbp->dbenv, dbp->open_txn,
+ &new_lsn, 0, dbp->log_fileid, pgno, &page_dbt, lsn);
+
+ if (ret == 0)
+ page->lsn = new_lsn;
+ return (ret);
+}
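+
+/*
+ * Caller-side sketch (hypothetical helper and guard): an access method
+ * creating its metadata page logs it through the routine above before
+ * handing the page back to the cache; on success the page's LSN is
+ * updated to the LSN of the new log record.
+ */
+#ifdef DB_EXAMPLE_SKETCHES
+static int
+example_log_meta(dbp, name, meta)
+	DB *dbp;
+	const char *name;
+	PAGE *meta;
+{
+	/* LSN(meta) is the page's current LSN. */
+	return (__db_log_page(dbp, name, &LSN(meta), PGNO_BASE_MD, meta));
+}
+#endif /* DB_EXAMPLE_SKETCHES */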
+
+/*
+ * __db_backup_name
+ * Create the backup file name for a given file.
+ *
+ * PUBLIC: int __db_backup_name __P((DB_ENV *,
+ * PUBLIC: const char *, char **, DB_LSN *));
+ */
+#undef BACKUP_PREFIX
+#define BACKUP_PREFIX "__db."
+
+#undef MAX_LSN_TO_TEXT
+#define MAX_LSN_TO_TEXT 21
+int
+__db_backup_name(dbenv, name, backup, lsn)
+ DB_ENV *dbenv;
+ const char *name;
+ char **backup;
+ DB_LSN *lsn;
+{
+ size_t len;
+ int plen, ret;
+ char *p, *retp;
+
+ len = strlen(name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT + 1;
+
+ if ((ret = __os_malloc(dbenv, len, NULL, &retp)) != 0)
+ return (ret);
+
+ /*
+ * Create the name. Backup file names are of the form:
+ *
+ * __db.name.0x[lsn-file].0x[lsn-offset]
+ *
+ * which guarantees uniqueness.
+ *
+ * However, name may contain an env-relative path in it.
+ * In that case, put the __db. after the last portion of
+ * the pathname.
+ */
+ if ((p = __db_rpath(name)) == NULL)
+ snprintf(retp, len,
+ "%s%s.0x%x0x%x", BACKUP_PREFIX, name,
+ lsn->file, lsn->offset);
+ else {
+ plen = p - name + 1;
+ p++;
+ snprintf(retp, len,
+ "%.*s%s%s.0x%x0x%x", plen, name, BACKUP_PREFIX, p,
+ lsn->file, lsn->offset);
+ }
+
+ *backup = retp;
+ return (0);
+}
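+
+/*
+ * Usage sketch (hypothetical values, helper and guard): for the name
+ * "a/b.db" and an LSN of file 3, offset 0x1f4, the routine above builds
+ * "a/__db.b.db.0x30x1f4" -- the prefix goes after the last path component
+ * so the backup lands in the same directory as the original file.
+ */
+#ifdef DB_EXAMPLE_SKETCHES
+static int
+example_backup_name(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_LSN lsn;
+	char *backup;
+	int ret;
+
+	lsn.file = 3;
+	lsn.offset = 0x1f4;
+	if ((ret = __db_backup_name(dbenv, "a/b.db", &backup, &lsn)) == 0)
+		__os_freestr(backup);	/* backup is "a/__db.b.db.0x30x1f4" */
+	return (ret);
+}
+#endif /* DB_EXAMPLE_SKETCHES */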
+
+/*
+ * __db_remove_callback --
+ * Callback function -- on file remove commit, it unlinks the backing
+ * file.
+ */
+static int
+__db_remove_callback(dbp, cookie)
+ DB *dbp;
+ void *cookie;
+{
+ return (__os_unlink(dbp->dbenv, cookie));
+}
+
+/*
+ * __dblist_get --
+ * Get the first element of dbenv->dblist with
+ * dbp->adj_fileid matching adjid.
+ *
+ * PUBLIC: DB *__dblist_get __P((DB_ENV *, u_int32_t));
+ */
+DB *
+__dblist_get(dbenv, adjid)
+ DB_ENV *dbenv;
+ u_int32_t adjid;
+{
+ DB *dbp;
+
+ for (dbp = LIST_FIRST(&dbenv->dblist);
+ dbp != NULL && dbp->adj_fileid != adjid;
+ dbp = LIST_NEXT(dbp, dblistlinks))
+ ;
+
+ return (dbp);
+}
+
+#if CONFIG_TEST
+/*
+ * __db_testcopy
+ * Create a copy of all backup files and our "main" DB.
+ *
+ * PUBLIC: int __db_testcopy __P((DB *, const char *));
+ */
+int
+__db_testcopy(dbp, name)
+ DB *dbp;
+ const char *name;
+{
+ if (dbp->type == DB_QUEUE)
+ return (__qam_testdocopy(dbp, name));
+ else
+ return (__db_testdocopy(dbp, name));
+}
+
+static int
+__qam_testdocopy(dbp, name)
+ DB *dbp;
+ const char *name;
+{
+ QUEUE_FILELIST *filelist, *fp;
+ char buf[256], *dir;
+ int ret;
+
+ filelist = NULL;
+ if ((ret = __db_testdocopy(dbp, name)) != 0)
+ return (ret);
+ if (dbp->mpf != NULL &&
+ (ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ return (ret);
+
+ if (filelist == NULL)
+ return (0);
+ dir = ((QUEUE *)dbp->q_internal)->dir;
+ for (fp = filelist; fp->mpf != NULL; fp++) {
+ snprintf(buf, sizeof(buf), QUEUE_EXTENT, dir, name, fp->id);
+ if ((ret = __db_testdocopy(dbp, buf)) != 0)
+ return (ret);
+ }
+
+ __os_free(filelist, 0);
+ return (0);
+}
+
+/*
+ * __db_testdocopy
+ * Create a copy of all backup files and our "main" DB.
+ *
+ */
+static int
+__db_testdocopy(dbp, name)
+ DB *dbp;
+ const char *name;
+{
+ size_t len;
+ int dircnt, i, ret;
+	char **namesp, *backup, *copy, *dir, *p, *real_name;
+
+	real_name = NULL;
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbp->dbenv,
+ DB_APP_DATA, NULL, name, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ copy = backup = NULL;
+ namesp = NULL;
+
+ /*
+ * Maximum size of file, including adding a ".afterop".
+ */
+ len = strlen(real_name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT + 9;
+
+ if ((ret = __os_malloc(dbp->dbenv, len, NULL, &copy)) != 0)
+ goto out;
+
+ if ((ret = __os_malloc(dbp->dbenv, len, NULL, &backup)) != 0)
+ goto out;
+
+ /*
+ * First copy the file itself.
+ */
+ snprintf(copy, len, "%s.afterop", real_name);
+ __db_makecopy(real_name, copy);
+
+ if ((ret = __os_strdup(dbp->dbenv, real_name, &dir)) != 0)
+ goto out;
+ __os_freestr(real_name);
+ real_name = NULL;
+ /*
+ * Create the name. Backup file names are of the form:
+ *
+ * __db.name.0x[lsn-file].0x[lsn-offset]
+ *
+ * which guarantees uniqueness. We want to look for the
+ * backup name, followed by a '.0x' (so that if they have
+ * files named, say, 'a' and 'abc' we won't match 'abc' when
+	 * looking for 'a').
+ */
+ snprintf(backup, len, "%s%s.0x", BACKUP_PREFIX, name);
+
+ /*
+ * We need the directory path to do the __os_dirlist.
+ */
+ p = __db_rpath(dir);
+ if (p != NULL)
+ *p = '\0';
+ ret = __os_dirlist(dbp->dbenv, dir, &namesp, &dircnt);
+#if DIAGNOSTIC
+ /*
+ * XXX
+	 * Restore the string for the memory guard code: it uses strlen, and
+	 * we just moved the end of the string somewhere sooner, which makes
+	 * the guard code fail because it looks at one byte past the end of
+	 * the string.
+ */
+ *p = '/';
+#endif
+ __os_freestr(dir);
+ if (ret != 0)
+ goto out;
+ for (i = 0; i < dircnt; i++) {
+ /*
+ * Need to check if it is a backup file for this.
+ * No idea what namesp[i] may be or how long, so
+ * must use strncmp and not memcmp. We don't want
+ * to use strcmp either because we are only matching
+		 * the first part of the real file's name. We don't
+		 * know its LSNs.
+ */
+ if (strncmp(namesp[i], backup, strlen(backup)) == 0) {
+ if ((ret = __db_appname(dbp->dbenv, DB_APP_DATA,
+ NULL, namesp[i], 0, NULL, &real_name)) != 0)
+ goto out;
+
+ /*
+ * This should not happen. Check that old
+ * .afterop files aren't around.
+ * If so, just move on.
+ */
+ if (strstr(real_name, ".afterop") != NULL) {
+ __os_freestr(real_name);
+ real_name = NULL;
+ continue;
+ }
+ snprintf(copy, len, "%s.afterop", real_name);
+ __db_makecopy(real_name, copy);
+ __os_freestr(real_name);
+ real_name = NULL;
+ }
+ }
+out:
+ if (backup != NULL)
+ __os_freestr(backup);
+ if (copy != NULL)
+ __os_freestr(copy);
+ if (namesp != NULL)
+ __os_dirfree(namesp, dircnt);
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ return (ret);
+}
+
+static void
+__db_makecopy(src, dest)
+ const char *src, *dest;
+{
+ DB_FH rfh, wfh;
+ size_t rcnt, wcnt;
+ char *buf;
+
+ memset(&rfh, 0, sizeof(rfh));
+ memset(&wfh, 0, sizeof(wfh));
+
+ if (__os_malloc(NULL, 1024, NULL, &buf) != 0)
+ return;
+
+ if (__os_open(NULL,
+ src, DB_OSO_RDONLY, __db_omode("rw----"), &rfh) != 0)
+ goto err;
+ if (__os_open(NULL, dest,
+ DB_OSO_CREATE | DB_OSO_TRUNC, __db_omode("rw----"), &wfh) != 0)
+ goto err;
+
+ for (;;)
+ if (__os_read(NULL, &rfh, buf, 1024, &rcnt) < 0 || rcnt == 0 ||
+ __os_write(NULL, &wfh, buf, rcnt, &wcnt) < 0 || wcnt != rcnt)
+ break;
+
+err: __os_free(buf, 1024);
+ if (F_ISSET(&rfh, DB_FH_VALID))
+ __os_closehandle(&rfh);
+ if (F_ISSET(&wfh, DB_FH_VALID))
+ __os_closehandle(&wfh);
+}
+#endif
diff --git a/bdb/db/db.src b/bdb/db/db.src
new file mode 100644
index 00000000000..b695e1360c5
--- /dev/null
+++ b/bdb/db/db.src
@@ -0,0 +1,178 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db.src,v 11.8 2000/02/17 20:24:07 bostic Exp $
+ */
+
+PREFIX db
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "db_page.h"
+INCLUDE #include "db_dispatch.h"
+INCLUDE #include "db_am.h"
+INCLUDE #include "txn.h"
+INCLUDE
+
+/*
+ * addrem -- Add or remove an entry from a duplicate page.
+ *
+ * opcode: identifies if this is an add or delete.
+ * fileid: file identifier of the file being modified.
+ * pgno: duplicate page number.
+ * indx: location at which to insert or delete.
+ * nbytes: number of bytes added/removed to/from the page.
+ * hdr: header for the data item.
+ * dbt: data that is deleted or is to be added.
+ * pagelsn: former lsn of the page.
+ *
+ * If the hdr was NULL, then the dbt is a regular B_KEYDATA.
+ * If the dbt was NULL, then the hdr is a complete item to be
+ * pasted on the page.
+ */
+BEGIN addrem 41
+ARG opcode u_int32_t lu
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG nbytes size_t lu
+DBT hdr DBT s
+DBT dbt DBT s
+POINTER pagelsn DB_LSN * lu
+END
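+
+/*
+ * For illustration (a sketch, not generated output): gen_rec.awk turns the
+ * description above into __db_addrem_log() in db_auto.c, which takes one
+ * argument per ARG/DBT/POINTER line, in order.  A caller adding an item
+ * would log roughly as follows; the opcode constant and the DBT variables
+ * are assumed here:
+ *
+ *	ret = __db_addrem_log(dbenv, txn, &new_lsn, 0,
+ *	    DB_ADD_DUP, dbp->log_fileid, pgno, indx, nbytes,
+ *	    &hdr, &data, &LSN(pagep));
+ */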
+
+/*
+ * split -- Handles the split of a duplicate page.
+ *
+ * opcode: defines whether we are splitting from or splitting onto.
+ * fileid: file identifier of the file being modified.
+ * pgno: page number being split.
+ * pageimage: entire page contents.
+ * pagelsn: former lsn of the page.
+ */
+DEPRECATED split 42
+ARG opcode u_int32_t lu
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+DBT pageimage DBT s
+POINTER pagelsn DB_LSN * lu
+END
+
+/*
+ * big -- Handles addition and deletion of big key/data items.
+ *
+ * opcode: identifies get/put.
+ * fileid: file identifier of the file being modified.
+ * pgno: page onto which data is being added/removed.
+ * prev_pgno: the page before the one we are logging.
+ * next_pgno: the page after the one we are logging.
+ * dbt: data being written onto the page.
+ * pagelsn: former lsn of the orig_page.
+ * prevlsn: former lsn of the prev_pgno.
+ * nextlsn: former lsn of the next_pgno. This is not currently used, but
+ * may be used later if we actually do overwrites of big key/
+ * data items in place.
+ */
+BEGIN big 43
+ARG opcode u_int32_t lu
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG prev_pgno db_pgno_t lu
+ARG next_pgno db_pgno_t lu
+DBT dbt DBT s
+POINTER pagelsn DB_LSN * lu
+POINTER prevlsn DB_LSN * lu
+POINTER nextlsn DB_LSN * lu
+END
+
+/*
+ * ovref -- Handles increment/decrement of overflow page reference count.
+ *
+ * fileid: identifies the file being modified.
+ * pgno: page number whose ref count is being incremented/decremented.
+ * adjust: the adjustment being made.
+ * lsn: the page's original lsn.
+ */
+BEGIN ovref 44
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG adjust int32_t ld
+POINTER lsn DB_LSN * lu
+END
+
+/*
+ * relink -- Handles relinking around a page.
+ *
+ * opcode: indicates if this is an addpage or a delete page.
+ * pgno: the page being changed.
+ * lsn: the page's original lsn.
+ * prev: the previous page.
+ * lsn_prev: the previous page's original lsn.
+ * next: the next page.
+ * lsn_next: the next page's original lsn.
+ */
+BEGIN relink 45
+ARG opcode u_int32_t lu
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG prev db_pgno_t lu
+POINTER lsn_prev DB_LSN * lu
+ARG next db_pgno_t lu
+POINTER lsn_next DB_LSN * lu
+END
+
+/*
+ * Addpage -- Handles adding a new duplicate page onto the end of
+ * an existing duplicate page.
+ * fileid: identifies the file being changed.
+ * pgno: page number to which a new page is being added.
+ * lsn: lsn of pgno.
+ * nextpgno: new page number being added.
+ * nextlsn: lsn of nextpgno.
+ */
+DEPRECATED addpage 46
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG nextpgno db_pgno_t lu
+POINTER nextlsn DB_LSN * lu
+END
+
+/*
+ * Debug -- log an operation upon entering an access method.
+ * op: Operation (cursor, c_close, c_get, c_put, c_del,
+ * get, put, delete).
+ * fileid: identifies the file being acted upon.
+ * key: key parameter
+ * data: data parameter
+ * flags: flags parameter
+ */
+BEGIN debug 47
+DBT op DBT s
+ARG fileid int32_t ld
+DBT key DBT s
+DBT data DBT s
+ARG arg_flags u_int32_t lu
+END
+
+/*
+ * noop -- do nothing, but get an LSN.
+ */
+BEGIN noop 48
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER prevlsn DB_LSN * lu
+END
diff --git a/bdb/db/db_am.c b/bdb/db/db_am.c
new file mode 100644
index 00000000000..2d224566904
--- /dev/null
+++ b/bdb/db/db_am.c
@@ -0,0 +1,511 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_am.c,v 11.42 2001/01/11 18:19:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+#include "lock.h"
+#include "mp.h"
+#include "txn.h"
+#include "db_am.h"
+#include "db_ext.h"
+
+/*
+ * __db_cursor --
+ * Allocate and return a cursor.
+ *
+ * PUBLIC: int __db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ */
+int
+__db_cursor(dbp, txn, dbcp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ db_lockmode_t mode;
+ u_int32_t op;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->cursor");
+
+ /* Check for invalid flags. */
+ if ((ret = __db_cursorchk(dbp, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
+ return (ret);
+
+ if ((ret =
+ __db_icursor(dbp, txn, dbp->type, PGNO_INVALID, 0, dbcp)) != 0)
+ return (ret);
+ dbc = *dbcp;
+
+ /*
+ * If this is CDB, do all the locking in the interface, which is
+ * right here.
+ */
+ if (CDB_LOCKING(dbenv)) {
+ op = LF_ISSET(DB_OPFLAGS_MASK);
+ mode = (op == DB_WRITELOCK) ? DB_LOCK_WRITE :
+ ((op == DB_WRITECURSOR) ? DB_LOCK_IWRITE : DB_LOCK_READ);
+ if ((ret = lock_get(dbenv, dbc->locker, 0,
+ &dbc->lock_dbt, mode, &dbc->mylock)) != 0) {
+ (void)__db_c_close(dbc);
+ return (ret);
+ }
+ if (op == DB_WRITECURSOR)
+ F_SET(dbc, DBC_WRITECURSOR);
+ if (op == DB_WRITELOCK)
+ F_SET(dbc, DBC_WRITER);
+ }
+
+ return (0);
+}
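+
+/*
+ * Caller-side sketch of the CDB flag handled above (hypothetical helper
+ * and guard): under Concurrent Data Store, a cursor that will be used to
+ * update the database is opened with DB_WRITECURSOR so the interface-level
+ * IWRITE lock is taken here, at cursor-create time.
+ */
+#ifdef DB_EXAMPLE_SKETCHES
+static int
+example_cdb_write_cursor(dbp)
+	DB *dbp;
+{
+	DBC *dbc;
+	int ret, t_ret;
+
+	if ((ret = dbp->cursor(dbp, NULL, &dbc, DB_WRITECURSOR)) != 0)
+		return (ret);
+
+	/* ... c_get/c_put/c_del calls on dbc go here ... */
+
+	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+#endif /* DB_EXAMPLE_SKETCHES */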
+
+/*
+ * __db_icursor --
+ * Internal version of __db_cursor. If dbcp is
+ * non-NULL it is assumed to point to an area to
+ * initialize as a cursor.
+ *
+ * PUBLIC: int __db_icursor
+ * PUBLIC: __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, DBC **));
+ */
+int
+__db_icursor(dbp, txn, dbtype, root, is_opd, dbcp)
+ DB *dbp;
+ DB_TXN *txn;
+ DBTYPE dbtype;
+ db_pgno_t root;
+ int is_opd;
+ DBC **dbcp;
+{
+ DBC *dbc, *adbc;
+ DBC_INTERNAL *cp;
+ DB_ENV *dbenv;
+ int allocated, ret;
+
+ dbenv = dbp->dbenv;
+ allocated = 0;
+
+ /*
+ * Take one from the free list if it's available. Take only the
+ * right type. With off page dups we may have different kinds
+ * of cursors on the queue for a single database.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&dbp->free_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if (dbtype == dbc->dbtype) {
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ dbc->flags = 0;
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ if (dbc == NULL) {
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(DBC), &dbc)) != 0)
+ return (ret);
+ allocated = 1;
+ dbc->flags = 0;
+
+ dbc->dbp = dbp;
+
+ /* Set up locking information. */
+ if (LOCKING_ON(dbenv)) {
+ /*
+ * If we are not threaded, then there is no need to
+ * create new locker ids. We know that no one else
+ * is running concurrently using this DB, so we can
+ * take a peek at any cursors on the active queue.
+ */
+ if (!DB_IS_THREADED(dbp) &&
+ (adbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ dbc->lid = adbc->lid;
+ else
+ if ((ret = lock_id(dbenv, &dbc->lid)) != 0)
+ goto err;
+
+ memcpy(dbc->lock.fileid, dbp->fileid, DB_FILE_ID_LEN);
+ if (CDB_LOCKING(dbenv)) {
+ if (F_ISSET(dbenv, DB_ENV_CDB_ALLDB)) {
+ /*
+ * If we are doing a single lock per
+ * environment, set up the global
+ * lock object just like we do to
+ * single thread creates.
+ */
+ DB_ASSERT(sizeof(db_pgno_t) ==
+ sizeof(u_int32_t));
+ dbc->lock_dbt.size = sizeof(u_int32_t);
+ dbc->lock_dbt.data = &dbc->lock.pgno;
+ dbc->lock.pgno = 0;
+ } else {
+ dbc->lock_dbt.size = DB_FILE_ID_LEN;
+ dbc->lock_dbt.data = dbc->lock.fileid;
+ }
+ } else {
+ dbc->lock.type = DB_PAGE_LOCK;
+ dbc->lock_dbt.size = sizeof(dbc->lock);
+ dbc->lock_dbt.data = &dbc->lock;
+ }
+ }
+ /* Init the DBC internal structure. */
+ switch (dbtype) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_c_init(dbc, dbtype)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_c_init(dbc)) != 0)
+ goto err;
+ break;
+ case DB_QUEUE:
+ if ((ret = __qam_c_init(dbc)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv,
+ "__db_icursor", dbtype);
+ goto err;
+ }
+
+ cp = dbc->internal;
+ }
+
+ /* Refresh the DBC structure. */
+ dbc->dbtype = dbtype;
+
+ if ((dbc->txn = txn) == NULL)
+ dbc->locker = dbc->lid;
+ else {
+ dbc->locker = txn->txnid;
+ txn->cursors++;
+ }
+
+ if (is_opd)
+ F_SET(dbc, DBC_OPD);
+ if (F_ISSET(dbp, DB_AM_RECOVER))
+ F_SET(dbc, DBC_RECOVER);
+
+ /* Refresh the DBC internal structure. */
+ cp = dbc->internal;
+ cp->opd = NULL;
+
+ cp->indx = 0;
+ cp->page = NULL;
+ cp->pgno = PGNO_INVALID;
+ cp->root = root;
+
+ switch (dbtype) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_c_refresh(dbc)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ case DB_QUEUE:
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv, "__db_icursor", dbp->type);
+ goto err;
+ }
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
+ F_SET(dbc, DBC_ACTIVE);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ *dbcp = dbc;
+ return (0);
+
+err: if (allocated)
+ __os_free(dbc, sizeof(*dbc));
+ return (ret);
+}
+
+#ifdef DEBUG
+/*
+ * __db_cprint --
+ * Display the current cursor list.
+ *
+ * PUBLIC: int __db_cprint __P((DB *));
+ */
+int
+__db_cprint(dbp)
+ DB *dbp;
+{
+ static const FN fn[] = {
+ { DBC_ACTIVE, "active" },
+ { DBC_OPD, "off-page-dup" },
+ { DBC_RECOVER, "recover" },
+ { DBC_RMW, "read-modify-write" },
+ { DBC_WRITECURSOR, "write cursor" },
+ { DBC_WRITEDUP, "internally dup'ed write cursor" },
+ { DBC_WRITER, "short-term write cursor" },
+ { 0, NULL }
+ };
+ DBC *dbc;
+ DBC_INTERNAL *cp;
+ char *s;
+
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&dbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ switch (dbc->dbtype) {
+ case DB_BTREE:
+ s = "btree";
+ break;
+ case DB_HASH:
+ s = "hash";
+ break;
+ case DB_RECNO:
+ s = "recno";
+ break;
+ case DB_QUEUE:
+ s = "queue";
+ break;
+ default:
+ DB_ASSERT(0);
+ return (1);
+ }
+ cp = dbc->internal;
+ fprintf(stderr, "%s/%#0lx: opd: %#0lx\n",
+ s, P_TO_ULONG(dbc), P_TO_ULONG(cp->opd));
+ fprintf(stderr, "\ttxn: %#0lx lid: %lu locker: %lu\n",
+ P_TO_ULONG(dbc->txn),
+ (u_long)dbc->lid, (u_long)dbc->locker);
+ fprintf(stderr, "\troot: %lu page/index: %lu/%lu",
+ (u_long)cp->root, (u_long)cp->pgno, (u_long)cp->indx);
+ __db_prflags(dbc->flags, fn, stderr);
+ fprintf(stderr, "\n");
+
+ if (dbp->type == DB_BTREE)
+ __bam_cprint(dbc);
+ }
+ for (dbc = TAILQ_FIRST(&dbp->free_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ fprintf(stderr, "free: %#0lx ", P_TO_ULONG(dbc));
+ fprintf(stderr, "\n");
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ return (0);
+}
+#endif /* DEBUG */
+
+/*
+ * db_fd --
+ * Return a file descriptor for flock'ing.
+ *
+ * PUBLIC: int __db_fd __P((DB *, int *));
+ */
+int
+__db_fd(dbp, fdp)
+ DB *dbp;
+ int *fdp;
+{
+ DB_FH *fhp;
+ int ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->fd");
+
+ /*
+ * XXX
+ * Truly spectacular layering violation.
+ */
+ if ((ret = __mp_xxx_fh(dbp->mpf, &fhp)) != 0)
+ return (ret);
+
+ if (F_ISSET(fhp, DB_FH_VALID)) {
+ *fdp = fhp->fd;
+ return (0);
+ } else {
+ *fdp = -1;
+ __db_err(dbp->dbenv, "DB does not have a valid file handle.");
+ return (ENOENT);
+ }
+}
+
+/*
+ * __db_get --
+ * Return a key/data pair.
+ *
+ * PUBLIC: int __db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_get(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ int mode, ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get");
+
+ if ((ret = __db_getchk(dbp, key, data, flags)) != 0)
+ return (ret);
+
+ mode = 0;
+ if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ mode = DB_WRITELOCK;
+ if ((ret = dbp->cursor(dbp, txn, &dbc, mode)) != 0)
+ return (ret);
+
+ DEBUG_LREAD(dbc, txn, "__db_get", key, NULL, flags);
+
+ /*
+ * The DBC_TRANSIENT flag indicates that we're just doing a
+ * single operation with this cursor, and that in case of
+ * error we don't need to restore it to its old position--we're
+ * going to close it right away. Thus, we can perform the get
+ * without duplicating the cursor, saving some cycles in this
+ * common case.
+ */
+ F_SET(dbc, DBC_TRANSIENT);
+
+ ret = dbc->c_get(dbc, key, data,
+ flags == 0 || flags == DB_RMW ? flags | DB_SET : flags);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
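+
+/*
+ * Caller-side sketch (hypothetical helper and guard): DB_CONSUME on a
+ * queue database both reads and deletes the head record, which is why
+ * the code above opens the cursor with DB_WRITELOCK for that flag.
+ */
+#ifdef DB_EXAMPLE_SKETCHES
+static int
+example_consume_head(dbp, recnop, buf, buflen)
+	DB *dbp;
+	db_recno_t *recnop;
+	void *buf;
+	u_int32_t buflen;
+{
+	DBT key, data;
+
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	key.data = recnop;
+	key.ulen = sizeof(db_recno_t);
+	key.flags = DB_DBT_USERMEM;
+	data.data = buf;
+	data.ulen = buflen;
+	data.flags = DB_DBT_USERMEM;
+
+	/* On success, *recnop is the consumed record's number. */
+	return (dbp->get(dbp, NULL, &key, &data, DB_CONSUME));
+}
+#endif /* DB_EXAMPLE_SKETCHES */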
+
+/*
+ * __db_put --
+ * Store a key/data pair.
+ *
+ * PUBLIC: int __db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_put(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DBT tdata;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->put");
+
+ if ((ret = __db_putchk(dbp, key, data,
+ flags, F_ISSET(dbp, DB_AM_RDONLY),
+ F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))) != 0)
+ return (ret);
+
+ DB_CHECK_TXN(dbp, txn);
+
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ return (ret);
+
+ /*
+ * See the comment in __db_get().
+ *
+ * Note that the c_get in the DB_NOOVERWRITE case is safe to
+ * do with this flag set; if it errors in any way other than
+ * DB_NOTFOUND, we're going to close the cursor without doing
+ * anything else, and if it returns DB_NOTFOUND then it's safe
+ * to do a c_put(DB_KEYLAST) even if an access method moved the
+ * cursor, since that's not position-dependent.
+ */
+ F_SET(dbc, DBC_TRANSIENT);
+
+ DEBUG_LWRITE(dbc, txn, "__db_put", key, data, flags);
+
+ if (flags == DB_NOOVERWRITE) {
+ flags = 0;
+ /*
+		 * Set DB_DBT_USERMEM; this might be a threaded application, and
+		 * the flags checking will catch us otherwise. We don't want the
+		 * actual data, so request a partial of length 0.
+ */
+ memset(&tdata, 0, sizeof(tdata));
+ F_SET(&tdata, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ /*
+ * If we're doing page-level locking, set the read-modify-write
+		 * flag; we're going to overwrite immediately.
+ */
+ if ((ret = dbc->c_get(dbc, key, &tdata,
+ DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0))) == 0)
+ ret = DB_KEYEXIST;
+ else if (ret == DB_NOTFOUND)
+ ret = 0;
+ }
+ if (ret == 0)
+ ret = dbc->c_put(dbc,
+ key, data, flags == 0 ? DB_KEYLAST : flags);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
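+
+/*
+ * The sketch below is guarded by DB_USAGE_SKETCHES, a symbol introduced
+ * here purely for illustration and defined nowhere, so it is never
+ * compiled.  It mirrors the zero-length partial get used above: an
+ * application can test whether a key exists without copying any data by
+ * requesting a 0-byte partial into user memory.  The function name is
+ * illustrative only.
+ */
+#ifdef DB_USAGE_SKETCHES
+static int
+__db_key_exists_sketch(dbp, txn, key)
+	DB *dbp;
+	DB_TXN *txn;
+	DBT *key;
+{
+	DBT data;
+	int ret;
+
+	memset(&data, 0, sizeof(data));
+	data.flags = DB_DBT_USERMEM | DB_DBT_PARTIAL;
+	data.dlen = 0;				/* Copy 0 bytes of the item. */
+	data.doff = 0;
+
+	ret = dbp->get(dbp, txn, key, &data, 0);
+	return (ret == 0 ? DB_KEYEXIST : ret);
+}
+#endif /* DB_USAGE_SKETCHES */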
+
+/*
+ * __db_sync --
+ * Flush the database cache.
+ *
+ * PUBLIC: int __db_sync __P((DB *, u_int32_t));
+ */
+int
+__db_sync(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync");
+
+ if ((ret = __db_syncchk(dbp, flags)) != 0)
+ return (ret);
+
+ /* Read-only trees never need to be sync'd. */
+ if (F_ISSET(dbp, DB_AM_RDONLY))
+ return (0);
+
+ /* If it's a Recno tree, write the backing source text file. */
+ if (dbp->type == DB_RECNO)
+ ret = __ram_writeback(dbp);
+
+ /* If the tree was never backed by a database file, we're done. */
+ if (F_ISSET(dbp, DB_AM_INMEM))
+ return (0);
+
+ /* Flush any dirty pages from the cache to the backing file. */
+ if ((t_ret = memp_fsync(dbp->mpf)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
diff --git a/bdb/db/db_auto.c b/bdb/db/db_auto.c
new file mode 100644
index 00000000000..23540adc2e6
--- /dev/null
+++ b/bdb/db/db_auto.c
@@ -0,0 +1,1270 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "txn.h"
+
+int
+__db_addrem_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, fileid, pgno, indx, nbytes, hdr,
+ dbt, pagelsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ size_t nbytes;
+ const DBT *hdr;
+ const DBT *dbt;
+ DB_LSN * pagelsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_db_addrem;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(indx)
+ + sizeof(nbytes)
+ + sizeof(u_int32_t) + (hdr == NULL ? 0 : hdr->size)
+ + sizeof(u_int32_t) + (dbt == NULL ? 0 : dbt->size)
+ + sizeof(*pagelsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &nbytes, sizeof(nbytes));
+ bp += sizeof(nbytes);
+ if (hdr == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &hdr->size, sizeof(hdr->size));
+ bp += sizeof(hdr->size);
+ memcpy(bp, hdr->data, hdr->size);
+ bp += hdr->size;
+ }
+ if (dbt == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &dbt->size, sizeof(dbt->size));
+ bp += sizeof(dbt->size);
+ memcpy(bp, dbt->data, dbt->size);
+ bp += dbt->size;
+ }
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
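+
+/*
+ * Record layout note: every *_log routine in this generated file writes a
+ * fixed header of { record type, transaction id, previous LSN } followed
+ * by its arguments in declaration order; DBT arguments are stored as a
+ * 4-byte length followed by that many bytes, and NULL LSN/DBT arguments
+ * are stored as zeroes.  The matching *_read routine walks the same layout
+ * in the same order to rebuild the corresponding *_args structure.
+ */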
+
+int
+__db_addrem_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_addrem_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_addrem_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]db_addrem: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\tnbytes: %lu\n", (u_long)argp->nbytes);
+ printf("\thdr: ");
+ for (i = 0; i < argp->hdr.size; i++) {
+ ch = ((u_int8_t *)argp->hdr.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tdbt: ");
+ for (i = 0; i < argp->dbt.size; i++) {
+ ch = ((u_int8_t *)argp->dbt.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__db_addrem_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_addrem_args **argpp;
+{
+ __db_addrem_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__db_addrem_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->nbytes, bp, sizeof(argp->nbytes));
+ bp += sizeof(argp->nbytes);
+ memset(&argp->hdr, 0, sizeof(argp->hdr));
+ memcpy(&argp->hdr.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->hdr.data = bp;
+ bp += argp->hdr.size;
+ memset(&argp->dbt, 0, sizeof(argp->dbt));
+ memcpy(&argp->dbt.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->dbt.data = bp;
+ bp += argp->dbt.size;
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__db_split_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_split_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_split_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]db_split: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tpageimage: ");
+ for (i = 0; i < argp->pageimage.size; i++) {
+ ch = ((u_int8_t *)argp->pageimage.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__db_split_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_split_args **argpp;
+{
+ __db_split_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__db_split_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memset(&argp->pageimage, 0, sizeof(argp->pageimage));
+ memcpy(&argp->pageimage.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pageimage.data = bp;
+ bp += argp->pageimage.size;
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__db_big_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, fileid, pgno, prev_pgno, next_pgno, dbt,
+ pagelsn, prevlsn, nextlsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+ const DBT *dbt;
+ DB_LSN * pagelsn;
+ DB_LSN * prevlsn;
+ DB_LSN * nextlsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_db_big;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(prev_pgno)
+ + sizeof(next_pgno)
+ + sizeof(u_int32_t) + (dbt == NULL ? 0 : dbt->size)
+ + sizeof(*pagelsn)
+ + sizeof(*prevlsn)
+ + sizeof(*nextlsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &prev_pgno, sizeof(prev_pgno));
+ bp += sizeof(prev_pgno);
+ memcpy(bp, &next_pgno, sizeof(next_pgno));
+ bp += sizeof(next_pgno);
+ if (dbt == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &dbt->size, sizeof(dbt->size));
+ bp += sizeof(dbt->size);
+ memcpy(bp, dbt->data, dbt->size);
+ bp += dbt->size;
+ }
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+ if (prevlsn != NULL)
+ memcpy(bp, prevlsn, sizeof(*prevlsn));
+ else
+ memset(bp, 0, sizeof(*prevlsn));
+ bp += sizeof(*prevlsn);
+ if (nextlsn != NULL)
+ memcpy(bp, nextlsn, sizeof(*nextlsn));
+ else
+ memset(bp, 0, sizeof(*nextlsn));
+ bp += sizeof(*nextlsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__db_big_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_big_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_big_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]db_big: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tprev_pgno: %lu\n", (u_long)argp->prev_pgno);
+ printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno);
+ printf("\tdbt: ");
+ for (i = 0; i < argp->dbt.size; i++) {
+ ch = ((u_int8_t *)argp->dbt.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\tprevlsn: [%lu][%lu]\n",
+ (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset);
+ printf("\tnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__db_big_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_big_args **argpp;
+{
+ __db_big_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__db_big_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->prev_pgno, bp, sizeof(argp->prev_pgno));
+ bp += sizeof(argp->prev_pgno);
+ memcpy(&argp->next_pgno, bp, sizeof(argp->next_pgno));
+ bp += sizeof(argp->next_pgno);
+ memset(&argp->dbt, 0, sizeof(argp->dbt));
+ memcpy(&argp->dbt.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->dbt.data = bp;
+ bp += argp->dbt.size;
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ memcpy(&argp->prevlsn, bp, sizeof(argp->prevlsn));
+ bp += sizeof(argp->prevlsn);
+ memcpy(&argp->nextlsn, bp, sizeof(argp->nextlsn));
+ bp += sizeof(argp->nextlsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__db_ovref_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, adjust, lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ int32_t adjust;
+ DB_LSN * lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_db_ovref;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(adjust)
+ + sizeof(*lsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &adjust, sizeof(adjust));
+ bp += sizeof(adjust);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__db_ovref_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_ovref_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_ovref_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]db_ovref: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tadjust: %ld\n", (long)argp->adjust);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__db_ovref_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_ovref_args **argpp;
+{
+ __db_ovref_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__db_ovref_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->adjust, bp, sizeof(argp->adjust));
+ bp += sizeof(argp->adjust);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__db_relink_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, fileid, pgno, lsn, prev, lsn_prev,
+ next, lsn_next)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ db_pgno_t prev;
+ DB_LSN * lsn_prev;
+ db_pgno_t next;
+ DB_LSN * lsn_next;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_db_relink;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(*lsn)
+ + sizeof(prev)
+ + sizeof(*lsn_prev)
+ + sizeof(next)
+ + sizeof(*lsn_next);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ memcpy(bp, &prev, sizeof(prev));
+ bp += sizeof(prev);
+ if (lsn_prev != NULL)
+ memcpy(bp, lsn_prev, sizeof(*lsn_prev));
+ else
+ memset(bp, 0, sizeof(*lsn_prev));
+ bp += sizeof(*lsn_prev);
+ memcpy(bp, &next, sizeof(next));
+ bp += sizeof(next);
+ if (lsn_next != NULL)
+ memcpy(bp, lsn_next, sizeof(*lsn_next));
+ else
+ memset(bp, 0, sizeof(*lsn_next));
+ bp += sizeof(*lsn_next);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__db_relink_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_relink_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_relink_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]db_relink: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tprev: %lu\n", (u_long)argp->prev);
+ printf("\tlsn_prev: [%lu][%lu]\n",
+ (u_long)argp->lsn_prev.file, (u_long)argp->lsn_prev.offset);
+ printf("\tnext: %lu\n", (u_long)argp->next);
+ printf("\tlsn_next: [%lu][%lu]\n",
+ (u_long)argp->lsn_next.file, (u_long)argp->lsn_next.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__db_relink_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_relink_args **argpp;
+{
+ __db_relink_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__db_relink_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->prev, bp, sizeof(argp->prev));
+ bp += sizeof(argp->prev);
+ memcpy(&argp->lsn_prev, bp, sizeof(argp->lsn_prev));
+ bp += sizeof(argp->lsn_prev);
+ memcpy(&argp->next, bp, sizeof(argp->next));
+ bp += sizeof(argp->next);
+ memcpy(&argp->lsn_next, bp, sizeof(argp->lsn_next));
+ bp += sizeof(argp->lsn_next);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__db_addpage_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_addpage_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_addpage_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]db_addpage: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tnextpgno: %lu\n", (u_long)argp->nextpgno);
+ printf("\tnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__db_addpage_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_addpage_args **argpp;
+{
+ __db_addpage_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__db_addpage_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->nextpgno, bp, sizeof(argp->nextpgno));
+ bp += sizeof(argp->nextpgno);
+ memcpy(&argp->nextlsn, bp, sizeof(argp->nextlsn));
+ bp += sizeof(argp->nextlsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__db_debug_log(dbenv, txnid, ret_lsnp, flags,
+ op, fileid, key, data, arg_flags)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *op;
+ int32_t fileid;
+ const DBT *key;
+ const DBT *data;
+ u_int32_t arg_flags;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_db_debug;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (op == NULL ? 0 : op->size)
+ + sizeof(fileid)
+ + sizeof(u_int32_t) + (key == NULL ? 0 : key->size)
+ + sizeof(u_int32_t) + (data == NULL ? 0 : data->size)
+ + sizeof(arg_flags);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ if (op == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &op->size, sizeof(op->size));
+ bp += sizeof(op->size);
+ memcpy(bp, op->data, op->size);
+ bp += op->size;
+ }
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (key == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &key->size, sizeof(key->size));
+ bp += sizeof(key->size);
+ memcpy(bp, key->data, key->size);
+ bp += key->size;
+ }
+ if (data == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &data->size, sizeof(data->size));
+ bp += sizeof(data->size);
+ memcpy(bp, data->data, data->size);
+ bp += data->size;
+ }
+ memcpy(bp, &arg_flags, sizeof(arg_flags));
+ bp += sizeof(arg_flags);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__db_debug_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_debug_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_debug_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]db_debug: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\top: ");
+ for (i = 0; i < argp->op.size; i++) {
+ ch = ((u_int8_t *)argp->op.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tkey: ");
+ for (i = 0; i < argp->key.size; i++) {
+ ch = ((u_int8_t *)argp->key.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tdata: ");
+ for (i = 0; i < argp->data.size; i++) {
+ ch = ((u_int8_t *)argp->data.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\targ_flags: %lu\n", (u_long)argp->arg_flags);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__db_debug_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_debug_args **argpp;
+{
+ __db_debug_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__db_debug_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memset(&argp->op, 0, sizeof(argp->op));
+ memcpy(&argp->op.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->op.data = bp;
+ bp += argp->op.size;
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memset(&argp->key, 0, sizeof(argp->key));
+ memcpy(&argp->key.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->key.data = bp;
+ bp += argp->key.size;
+ memset(&argp->data, 0, sizeof(argp->data));
+ memcpy(&argp->data.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->data.data = bp;
+ bp += argp->data.size;
+ memcpy(&argp->arg_flags, bp, sizeof(argp->arg_flags));
+ bp += sizeof(argp->arg_flags);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__db_noop_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, prevlsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN * prevlsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_db_noop;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(*prevlsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (prevlsn != NULL)
+ memcpy(bp, prevlsn, sizeof(*prevlsn));
+ else
+ memset(bp, 0, sizeof(*prevlsn));
+ bp += sizeof(*prevlsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__db_noop_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_noop_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_noop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]db_noop: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tprevlsn: [%lu][%lu]\n",
+ (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__db_noop_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_noop_args **argpp;
+{
+ __db_noop_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__db_noop_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->prevlsn, bp, sizeof(argp->prevlsn));
+ bp += sizeof(argp->prevlsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__db_init_print(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __db_addrem_print, DB_db_addrem)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_split_print, DB_db_split)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_big_print, DB_db_big)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_ovref_print, DB_db_ovref)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_relink_print, DB_db_relink)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_addpage_print, DB_db_addpage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_debug_print, DB_db_debug)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_noop_print, DB_db_noop)) != 0)
+ return (ret);
+ return (0);
+}
+
+int
+__db_init_recover(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __db_addrem_recover, DB_db_addrem)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_db_split)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_big_recover, DB_db_big)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_ovref_recover, DB_db_ovref)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_relink_recover, DB_db_relink)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_db_addpage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_debug_recover, DB_db_debug)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __db_noop_recover, DB_db_noop)) != 0)
+ return (ret);
+ return (0);
+}
+
diff --git a/bdb/db/db_cam.c b/bdb/db/db_cam.c
new file mode 100644
index 00000000000..708d4cbda4d
--- /dev/null
+++ b/bdb/db/db_cam.c
@@ -0,0 +1,974 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_cam.c,v 11.52 2001/01/18 15:11:16 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+#include "txn.h"
+#include "db_ext.h"
+
+static int __db_c_cleanup __P((DBC *, DBC *, int));
+static int __db_c_idup __P((DBC *, DBC **, u_int32_t));
+static int __db_wrlock_err __P((DB_ENV *));
+
+#define CDB_LOCKING_INIT(dbp, dbc) \
+ /* \
+ * If we are running CDB, this had better be either a write \
+ * cursor or an immediate writer. If it's a regular writer, \
+ * that means we have an IWRITE lock and we need to upgrade \
+ * it to a write lock. \
+ */ \
+ if (CDB_LOCKING((dbp)->dbenv)) { \
+ if (!F_ISSET(dbc, DBC_WRITECURSOR | DBC_WRITER)) \
+ return (__db_wrlock_err(dbp->dbenv)); \
+ \
+ if (F_ISSET(dbc, DBC_WRITECURSOR) && \
+ (ret = lock_get((dbp)->dbenv, (dbc)->locker, \
+ DB_LOCK_UPGRADE, &(dbc)->lock_dbt, DB_LOCK_WRITE, \
+ &(dbc)->mylock)) != 0) \
+ return (ret); \
+ }
+#define CDB_LOCKING_DONE(dbp, dbc) \
+ /* Release the upgraded lock. */ \
+ if (F_ISSET(dbc, DBC_WRITECURSOR)) \
+ (void)__lock_downgrade( \
+ (dbp)->dbenv, &(dbc)->mylock, DB_LOCK_IWRITE, 0);
+/*
+ * Copy the lock info from one cursor to another, so that locking
+ * in CDB can be done in the context of an internally-duplicated
+ * or off-page-duplicate cursor.
+ */
+#define CDB_LOCKING_COPY(dbp, dbc_o, dbc_n) \
+ if (CDB_LOCKING((dbp)->dbenv) && \
+ F_ISSET((dbc_o), DBC_WRITECURSOR | DBC_WRITEDUP)) { \
+ memcpy(&(dbc_n)->mylock, &(dbc_o)->mylock, \
+ sizeof((dbc_o)->mylock)); \
+ (dbc_n)->locker = (dbc_o)->locker; \
+ /* This lock isn't ours to put--just discard it on close. */ \
+ F_SET((dbc_n), DBC_WRITEDUP); \
+ }
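+
+/*
+ * The routines below use these macros in a fixed pattern: a writing
+ * operation is bracketed by CDB_LOCKING_INIT and CDB_LOCKING_DONE, and
+ * CDB_LOCKING_COPY is applied whenever a cursor is duplicated internally
+ * (see __db_c_idup and __db_c_newopd), roughly:
+ *
+ *	CDB_LOCKING_INIT(dbp, dbc);
+ *	ret = dbc->c_am_del(dbc);	(or another write through the cursor)
+ *	CDB_LOCKING_DONE(dbp, dbc);
+ */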
+
+/*
+ * __db_c_close --
+ * Close the cursor.
+ *
+ * PUBLIC: int __db_c_close __P((DBC *));
+ */
+int
+__db_c_close(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBC *opd;
+ DBC_INTERNAL *cp;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ ret = 0;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /*
+ * If the cursor is already closed we have a serious problem, and we
+ * assume that the cursor isn't on the active queue. Don't do any of
+ * the remaining cursor close processing.
+ */
+ if (!F_ISSET(dbc, DBC_ACTIVE)) {
+ if (dbp != NULL)
+ __db_err(dbp->dbenv, "Closing closed cursor");
+
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ cp = dbc->internal;
+ opd = cp->opd;
+
+ /*
+ * Remove the cursor(s) from the active queue. We may be closing two
+ * cursors at once here, a top-level one and a lower-level, off-page
+	 * duplicate one. The access-method specific cursor close routine must
+ * close both of them in a single call.
+ *
+ * !!!
+ * Cursors must be removed from the active queue before calling the
+ * access specific cursor close routine, btree depends on having that
+	 * access-specific cursor close routine; btree depends on having that
+ * can fail and cause __db_c_close to return an error, or else calls
+ * here from __db_close may loop indefinitely.
+ */
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+
+ if (opd != NULL) {
+ F_CLR(opd, DBC_ACTIVE);
+ TAILQ_REMOVE(&dbp->active_queue, opd, links);
+ }
+ F_CLR(dbc, DBC_ACTIVE);
+ TAILQ_REMOVE(&dbp->active_queue, dbc, links);
+
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ /* Call the access specific cursor close routine. */
+ if ((t_ret =
+ dbc->c_am_close(dbc, PGNO_INVALID, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Release the lock after calling the access method specific close
+	 * routine; a Btree cursor may have had pending deletes.
+ */
+ if (CDB_LOCKING(dbc->dbp->dbenv)) {
+ /*
+ * If DBC_WRITEDUP is set, the cursor is an internally
+ * duplicated write cursor and the lock isn't ours to put.
+ */
+ if (!F_ISSET(dbc, DBC_WRITEDUP) &&
+ dbc->mylock.off != LOCK_INVALID) {
+ if ((t_ret = lock_put(dbc->dbp->dbenv,
+ &dbc->mylock)) != 0 && ret == 0)
+ ret = t_ret;
+ dbc->mylock.off = LOCK_INVALID;
+ }
+
+ /* For safety's sake, since this is going on the free queue. */
+ memset(&dbc->mylock, 0, sizeof(dbc->mylock));
+ F_CLR(dbc, DBC_WRITEDUP);
+ }
+
+ if (dbc->txn != NULL)
+ dbc->txn->cursors--;
+
+ /* Move the cursor(s) to the free queue. */
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ if (opd != NULL) {
+ if (dbc->txn != NULL)
+ dbc->txn->cursors--;
+ TAILQ_INSERT_TAIL(&dbp->free_queue, opd, links);
+ opd = NULL;
+ }
+ TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __db_c_destroy --
+ * Destroy the cursor, called after DBC->c_close.
+ *
+ * PUBLIC: int __db_c_destroy __P((DBC *));
+ */
+int
+__db_c_destroy(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBC_INTERNAL *cp;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = dbc->internal;
+
+ /* Remove the cursor from the free queue. */
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ /* Free up allocated memory. */
+ if (dbc->rkey.data != NULL)
+ __os_free(dbc->rkey.data, dbc->rkey.ulen);
+ if (dbc->rdata.data != NULL)
+ __os_free(dbc->rdata.data, dbc->rdata.ulen);
+
+ /* Call the access specific cursor destroy routine. */
+ ret = dbc->c_am_destroy == NULL ? 0 : dbc->c_am_destroy(dbc);
+
+ __os_free(dbc, sizeof(*dbc));
+
+ return (ret);
+}
+
+/*
+ * __db_c_count --
+ * Return a count of duplicate data items.
+ *
+ * PUBLIC: int __db_c_count __P((DBC *, db_recno_t *, u_int32_t));
+ */
+int
+__db_c_count(dbc, recnop, flags)
+ DBC *dbc;
+ db_recno_t *recnop;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+
+ /*
+ * Cursor Cleanup Note:
+	 * None of the cursors passed to the underlying access methods by this
+	 * routine are duplicated, and they will not be cleaned up on return.
+ * So, pages/locks that the cursor references must be resolved by the
+ * underlying functions.
+ */
+ dbp = dbc->dbp;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_ccountchk(dbp, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ switch (dbc->dbtype) {
+ case DB_QUEUE:
+ case DB_RECNO:
+ *recnop = 1;
+ break;
+ case DB_HASH:
+ if (dbc->internal->opd == NULL) {
+ if ((ret = __ham_c_count(dbc, recnop)) != 0)
+ return (ret);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_BTREE:
+ if ((ret = __bam_c_count(dbc, recnop)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__db_c_count", dbp->type));
+ }
+ return (0);
+}
+
+/*
+ * __db_c_del --
+ * Delete using a cursor.
+ *
+ * PUBLIC: int __db_c_del __P((DBC *, u_int32_t));
+ */
+int
+__db_c_del(dbc, flags)
+ DBC *dbc;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *opd;
+ int ret;
+
+ /*
+ * Cursor Cleanup Note:
+	 * None of the cursors passed to the underlying access methods by this
+	 * routine are duplicated, and they will not be cleaned up on return.
+ * So, pages/locks that the cursor references must be resolved by the
+ * underlying functions.
+ */
+ dbp = dbc->dbp;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_CHECK_TXN(dbp, dbc->txn);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_cdelchk(dbp, flags,
+ F_ISSET(dbp, DB_AM_RDONLY), IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, dbc->txn, "db_c_del", NULL, NULL, flags);
+
+ CDB_LOCKING_INIT(dbp, dbc);
+
+ /*
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the del operation is done in an off-page
+ * duplicate tree, call the primary cursor's upgrade routine first.
+ */
+ opd = dbc->internal->opd;
+ if (opd == NULL)
+ ret = dbc->c_am_del(dbc);
+ else
+ if ((ret = dbc->c_am_writelock(dbc)) == 0)
+ ret = opd->c_am_del(opd);
+
+ CDB_LOCKING_DONE(dbp, dbc);
+
+ return (ret);
+}
+
+/*
+ * __db_c_dup --
+ * Duplicate a cursor
+ *
+ * PUBLIC: int __db_c_dup __P((DBC *, DBC **, u_int32_t));
+ */
+int
+__db_c_dup(dbc_orig, dbcp, flags)
+ DBC *dbc_orig;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBC *dbc_n, *dbc_nopd;
+ int ret;
+
+ dbp = dbc_orig->dbp;
+ dbenv = dbp->dbenv;
+ dbc_n = dbc_nopd = NULL;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /*
+ * We can never have two write cursors open in CDB, so do not
+ * allow duplication of a write cursor.
+ */
+ if (flags != DB_POSITIONI &&
+ F_ISSET(dbc_orig, DBC_WRITER | DBC_WRITECURSOR)) {
+ __db_err(dbenv, "Cannot duplicate writeable cursor");
+ return (EINVAL);
+ }
+
+ /* Allocate a new cursor and initialize it. */
+ if ((ret = __db_c_idup(dbc_orig, &dbc_n, flags)) != 0)
+ goto err;
+ *dbcp = dbc_n;
+
+ /*
+ * If we're in CDB, and this isn't an internal duplication (in which
+ * case we're explicitly overriding CDB locking), the duplicated
+ * cursor needs its own read lock. (We know it's not a write cursor
+ * because we wouldn't have made it this far; you can't dup them.)
+ */
+ if (CDB_LOCKING(dbenv) && flags != DB_POSITIONI) {
+ DB_ASSERT(!F_ISSET(dbc_orig, DBC_WRITER | DBC_WRITECURSOR));
+
+ if ((ret = lock_get(dbenv, dbc_n->locker, 0,
+ &dbc_n->lock_dbt, DB_LOCK_READ, &dbc_n->mylock)) != 0) {
+ (void)__db_c_close(dbc_n);
+ return (ret);
+ }
+ }
+
+ /*
+ * If the cursor references an off-page duplicate tree, allocate a
+ * new cursor for that tree and initialize it.
+ */
+ if (dbc_orig->internal->opd != NULL) {
+ if ((ret =
+ __db_c_idup(dbc_orig->internal->opd, &dbc_nopd, flags)) != 0)
+ goto err;
+ dbc_n->internal->opd = dbc_nopd;
+ }
+
+ return (0);
+
+err: if (dbc_n != NULL)
+ (void)dbc_n->c_close(dbc_n);
+ if (dbc_nopd != NULL)
+ (void)dbc_nopd->c_close(dbc_nopd);
+
+ return (ret);
+}
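+
+/*
+ * The sketch below is guarded by DB_USAGE_SKETCHES, a symbol introduced
+ * here purely for illustration and defined nowhere, so it is never
+ * compiled.  It shows the application-level use of DBC->c_dup, which
+ * __db_c_dup implements: duplicate a positioned cursor, use the copy,
+ * then close it.  The function name is illustrative only.
+ */
+#ifdef DB_USAGE_SKETCHES
+static int
+__db_c_dup_usage_sketch(dbc)
+	DBC *dbc;
+{
+	DBC *dbc_dup;
+	int ret, t_ret;
+
+	/* DB_POSITION: the copy starts at the original cursor's position. */
+	if ((ret = dbc->c_dup(dbc, &dbc_dup, DB_POSITION)) != 0)
+		return (ret);
+
+	/* ... operate on dbc_dup without disturbing dbc ... */
+
+	if ((t_ret = dbc_dup->c_close(dbc_dup)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+#endif /* DB_USAGE_SKETCHES */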
+
+/*
+ * __db_c_idup --
+ * Internal version of __db_c_dup.
+ */
+static int
+__db_c_idup(dbc_orig, dbcp, flags)
+ DBC *dbc_orig, **dbcp;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *dbc_n;
+ DBC_INTERNAL *int_n, *int_orig;
+ int ret;
+
+ dbp = dbc_orig->dbp;
+ dbc_n = *dbcp;
+
+ if ((ret = __db_icursor(dbp, dbc_orig->txn, dbc_orig->dbtype,
+ dbc_orig->internal->root, F_ISSET(dbc_orig, DBC_OPD), &dbc_n)) != 0)
+ return (ret);
+
+ dbc_n->locker = dbc_orig->locker;
+
+ /* If the user wants the cursor positioned, do it here. */
+ if (flags == DB_POSITION || flags == DB_POSITIONI) {
+ int_n = dbc_n->internal;
+ int_orig = dbc_orig->internal;
+
+ dbc_n->flags = dbc_orig->flags;
+
+ int_n->indx = int_orig->indx;
+ int_n->pgno = int_orig->pgno;
+ int_n->root = int_orig->root;
+ int_n->lock_mode = int_orig->lock_mode;
+
+ switch (dbc_orig->dbtype) {
+ case DB_QUEUE:
+ if ((ret = __qam_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv,
+ "__db_c_idup", dbc_orig->dbtype);
+ goto err;
+ }
+ }
+
+ /* Now take care of duping the CDB information. */
+ CDB_LOCKING_COPY(dbp, dbc_orig, dbc_n);
+
+ *dbcp = dbc_n;
+ return (0);
+
+err: (void)dbc_n->c_close(dbc_n);
+ return (ret);
+}
+
+/*
+ * __db_c_newopd --
+ * Create a new off-page duplicate cursor.
+ *
+ * PUBLIC: int __db_c_newopd __P((DBC *, db_pgno_t, DBC **));
+ */
+int
+__db_c_newopd(dbc_parent, root, dbcp)
+ DBC *dbc_parent;
+ db_pgno_t root;
+ DBC **dbcp;
+{
+ DB *dbp;
+ DBC *opd;
+ DBTYPE dbtype;
+ int ret;
+
+ dbp = dbc_parent->dbp;
+ dbtype = (dbp->dup_compare == NULL) ? DB_RECNO : DB_BTREE;
+
+ if ((ret = __db_icursor(dbp,
+ dbc_parent->txn, dbtype, root, 1, &opd)) != 0)
+ return (ret);
+
+ CDB_LOCKING_COPY(dbp, dbc_parent, opd);
+
+ *dbcp = opd;
+
+ return (0);
+}
+
+/*
+ * __db_c_get --
+ * Get using a cursor.
+ *
+ * PUBLIC: int __db_c_get __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_get(dbc_arg, key, data, flags)
+ DBC *dbc_arg;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *dbc, *dbc_n, *opd;
+ DBC_INTERNAL *cp, *cp_n;
+ db_pgno_t pgno;
+ u_int32_t tmp_flags, tmp_rmw;
+ u_int8_t type;
+ int ret, t_ret;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are duplicated cursors. On return, any referenced pages
+ * will be discarded, and, if the cursor is not intended to be used
+ * again, the close function will be called. So, pages/locks that
+ * the cursor references do not need to be resolved by the underlying
+ * functions.
+ */
+ dbp = dbc_arg->dbp;
+ dbc_n = NULL;
+ opd = NULL;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret =
+ __db_cgetchk(dbp, key, data, flags, IS_INITIALIZED(dbc_arg))) != 0)
+ return (ret);
+
+ /* Clear OR'd in additional bits so we can check for flag equality. */
+ tmp_rmw = LF_ISSET(DB_RMW);
+ LF_CLR(DB_RMW);
+
+ DEBUG_LREAD(dbc_arg, dbc_arg->txn, "db_c_get",
+ flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
+
+ /*
+ * Return a cursor's record number. It has nothing to do with the
+ * cursor get code except that it was put into the interface.
+ */
+ if (flags == DB_GET_RECNO)
+ return (__bam_c_rget(dbc_arg, data, flags | tmp_rmw));
+
+ if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ CDB_LOCKING_INIT(dbp, dbc_arg);
+
+ /*
+ * If we have an off-page duplicates cursor, and the operation applies
+ * to it, perform the operation. Duplicate the cursor and call the
+ * underlying function.
+ *
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the DB_RMW flag was specified and the get
+ * operation is done in an off-page duplicate tree, call the primary
+ * cursor's upgrade routine first.
+ */
+ cp = dbc_arg->internal;
+ if (cp->opd != NULL &&
+ (flags == DB_CURRENT || flags == DB_GET_BOTHC ||
+ flags == DB_NEXT || flags == DB_NEXT_DUP || flags == DB_PREV)) {
+ if (tmp_rmw && (ret = dbc_arg->c_am_writelock(dbc_arg)) != 0)
+ return (ret);
+ if ((ret = __db_c_idup(cp->opd, &opd, DB_POSITIONI)) != 0)
+ return (ret);
+
+ switch (ret = opd->c_am_get(
+ opd, key, data, flags, NULL)) {
+ case 0:
+ goto done;
+ case DB_NOTFOUND:
+ /*
+ * Translate DB_NOTFOUND failures for the DB_NEXT and
+ * DB_PREV operations into a subsequent operation on
+ * the parent cursor.
+ */
+ if (flags == DB_NEXT || flags == DB_PREV) {
+ if ((ret = opd->c_close(opd)) != 0)
+ goto err;
+ opd = NULL;
+ break;
+ }
+ goto err;
+ default:
+ goto err;
+ }
+ }
+
+ /*
+ * Perform an operation on the main cursor. Duplicate the cursor,
+ * upgrade the lock as required, and call the underlying function.
+ */
+ switch (flags) {
+ case DB_CURRENT:
+ case DB_GET_BOTHC:
+ case DB_NEXT:
+ case DB_NEXT_DUP:
+ case DB_NEXT_NODUP:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ tmp_flags = DB_POSITIONI;
+ break;
+ default:
+ tmp_flags = 0;
+ break;
+ }
+
+ /*
+ * If this cursor is going to be closed immediately, we don't
+ * need to take precautions to clean it up on error.
+ */
+ if (F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else if ((ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags)) != 0)
+ goto err;
+
+ if (tmp_rmw)
+ F_SET(dbc_n, DBC_RMW);
+ pgno = PGNO_INVALID;
+ ret = dbc_n->c_am_get(dbc_n, key, data, flags, &pgno);
+ if (tmp_rmw)
+ F_CLR(dbc_n, DBC_RMW);
+ if (ret != 0)
+ goto err;
+
+ cp_n = dbc_n->internal;
+
+ /*
+ * We may be referencing a new off-page duplicates tree. Acquire
+ * a new cursor and call the underlying function.
+ */
+ if (pgno != PGNO_INVALID) {
+ if ((ret = __db_c_newopd(dbc_arg, pgno, &cp_n->opd)) != 0)
+ goto err;
+
+ switch (flags) {
+ case DB_FIRST:
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ case DB_SET:
+ case DB_SET_RECNO:
+ case DB_SET_RANGE:
+ tmp_flags = DB_FIRST;
+ break;
+ case DB_LAST:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ tmp_flags = DB_LAST;
+ break;
+ case DB_GET_BOTH:
+ tmp_flags = DB_GET_BOTH;
+ break;
+ case DB_GET_BOTHC:
+ tmp_flags = DB_GET_BOTHC;
+ break;
+ default:
+ ret =
+ __db_unknown_flag(dbp->dbenv, "__db_c_get", flags);
+ goto err;
+ }
+ if ((ret = cp_n->opd->c_am_get(
+ cp_n->opd, key, data, tmp_flags, NULL)) != 0)
+ goto err;
+ }
+
+done: /*
+ * Return a key/data item. The only exception is that we don't return
+ * a key if the user already gave us one, that is, if the DB_SET flag
+ * was set. The DB_SET flag is necessary. In a Btree, the user's key
+	 * doesn't have to be the same as the key stored in the tree, depending
+	 * on the magic performed by the comparison function. As we may not have
+ * done any key-oriented operation here, the page reference may not be
+ * valid. Fill it in as necessary. We don't have to worry about any
+ * locks, the cursor must already be holding appropriate locks.
+ *
+ * XXX
+ * If not a Btree and DB_SET_RANGE is set, we shouldn't return a key
+ * either, should we?
+ */
+ cp_n = dbc_n == NULL ? dbc_arg->internal : dbc_n->internal;
+ if (!F_ISSET(key, DB_DBT_ISSET)) {
+ if (cp_n->page == NULL && (ret =
+ memp_fget(dbp->mpf, &cp_n->pgno, 0, &cp_n->page)) != 0)
+ goto err;
+
+ if ((ret = __db_ret(dbp, cp_n->page, cp_n->indx,
+ key, &dbc_arg->rkey.data, &dbc_arg->rkey.ulen)) != 0)
+ goto err;
+ }
+ dbc = opd != NULL ? opd : cp_n->opd != NULL ? cp_n->opd : dbc_n;
+ if (!F_ISSET(data, DB_DBT_ISSET)) {
+ type = TYPE(dbc->internal->page);
+ ret = __db_ret(dbp, dbc->internal->page, dbc->internal->indx +
+ (type == P_LBTREE || type == P_HASH ? O_INDX : 0),
+ data, &dbc_arg->rdata.data, &dbc_arg->rdata.ulen);
+ }
+
+err: /* Don't pass DB_DBT_ISSET back to application level, error or no. */
+ F_CLR(key, DB_DBT_ISSET);
+ F_CLR(data, DB_DBT_ISSET);
+
+ /* Cleanup and cursor resolution. */
+ if (opd != NULL) {
+ if ((t_ret =
+ __db_c_cleanup(dbc_arg->internal->opd,
+ opd, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ }
+
+ if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ CDB_LOCKING_DONE(dbp, dbc_arg);
+ return (ret);
+}
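+
+/*
+ * The sketch below is guarded by DB_USAGE_SKETCHES, a symbol introduced
+ * here purely for illustration and defined nowhere, so it is never
+ * compiled.  It shows the most common application-level use of the cursor
+ * get path implemented above: walk every key/data pair with DB_NEXT until
+ * DB_NOTFOUND.  The function name is illustrative only.
+ */
+#ifdef DB_USAGE_SKETCHES
+static int
+__db_c_get_usage_sketch(dbp)
+	DB *dbp;
+{
+	DBC *dbc;
+	DBT key, data;
+	int ret, t_ret;
+
+	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+		return (ret);
+
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+		/* key.data/key.size and data.data/data.size are valid here. */
+	}
+	if (ret == DB_NOTFOUND)
+		ret = 0;
+
+	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+#endif /* DB_USAGE_SKETCHES */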
+
+/*
+ * __db_c_put --
+ * Put using a cursor.
+ *
+ * PUBLIC: int __db_c_put __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_put(dbc_arg, key, data, flags)
+ DBC *dbc_arg;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *dbc_n, *opd;
+ db_pgno_t pgno;
+ u_int32_t tmp_flags;
+ int ret, t_ret;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are duplicated cursors. On return, any referenced pages
+ * will be discarded, and, if the cursor is not intended to be used
+ * again, the close function will be called. So, pages/locks that
+ * the cursor references do not need to be resolved by the underlying
+ * functions.
+ */
+ dbp = dbc_arg->dbp;
+ dbc_n = NULL;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_CHECK_TXN(dbp, dbc_arg->txn);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_cputchk(dbp, key, data, flags,
+ F_ISSET(dbp, DB_AM_RDONLY), IS_INITIALIZED(dbc_arg))) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc_arg, dbc_arg->txn, "db_c_put",
+ flags == DB_KEYFIRST || flags == DB_KEYLAST ||
+ flags == DB_NODUPDATA ? key : NULL, data, flags);
+
+ CDB_LOCKING_INIT(dbp, dbc_arg);
+
+ /*
+ * If we have an off-page duplicates cursor, and the operation applies
+ * to it, perform the operation. Duplicate the cursor and call the
+ * underlying function.
+ *
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the put operation is done in an off-page
+ * duplicate tree, call the primary cursor's upgrade routine first.
+ */
+ if (dbc_arg->internal->opd != NULL &&
+ (flags == DB_AFTER || flags == DB_BEFORE || flags == DB_CURRENT)) {
+ /*
+ * A special case for hash off-page duplicates. Hash doesn't
+ * support (and is documented not to support) put operations
+ * relative to a cursor which references an already deleted
+ * item. For consistency, apply the same criteria to off-page
+ * duplicates as well.
+ */
+ if (dbc_arg->dbtype == DB_HASH && F_ISSET(
+ ((BTREE_CURSOR *)(dbc_arg->internal->opd->internal)),
+ C_DELETED)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ if ((ret = dbc_arg->c_am_writelock(dbc_arg)) != 0)
+ return (ret);
+ if ((ret = __db_c_dup(dbc_arg, &dbc_n, DB_POSITIONI)) != 0)
+ goto err;
+ opd = dbc_n->internal->opd;
+ if ((ret = opd->c_am_put(
+ opd, key, data, flags, NULL)) != 0)
+ goto err;
+ goto done;
+ }
+
+ /*
+ * Perform an operation on the main cursor. Duplicate the cursor,
+ * and call the underlying function.
+ *
+ * XXX: MARGO
+ *
+ tmp_flags = flags == DB_AFTER ||
+ flags == DB_BEFORE || flags == DB_CURRENT ? DB_POSITIONI : 0;
+ */
+ tmp_flags = DB_POSITIONI;
+
+ /*
+ * If this cursor is going to be closed immediately, we don't
+ * need to take precautions to clean it up on error.
+ */
+ if (F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else if ((ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags)) != 0)
+ goto err;
+
+ pgno = PGNO_INVALID;
+ if ((ret = dbc_n->c_am_put(dbc_n, key, data, flags, &pgno)) != 0)
+ goto err;
+
+ /*
+ * We may be referencing a new off-page duplicates tree. Acquire
+ * a new cursor and call the underlying function.
+ */
+ if (pgno != PGNO_INVALID) {
+ if ((ret = __db_c_newopd(dbc_arg, pgno, &opd)) != 0)
+ goto err;
+ dbc_n->internal->opd = opd;
+
+ if ((ret = opd->c_am_put(
+ opd, key, data, flags, NULL)) != 0)
+ goto err;
+ }
+
+done:
+err: /* Cleanup and cursor resolution. */
+ if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ CDB_LOCKING_DONE(dbp, dbc_arg);
+
+ return (ret);
+}
+
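+/*
+ * An illustrative sketch, not part of the library source: how an
+ * application reaches this routine through the public cursor interface.
+ * It assumes the Berkeley DB 3.x C API and an already-open DB handle
+ * (dbp is hypothetical here); error handling is omitted.
+ *
+ *	DBC *dbc;
+ *	DBT key, data;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	key.data = "fruit";
+ *	key.size = sizeof("fruit");
+ *	data.data = "apple";
+ *	data.size = sizeof("apple");
+ *
+ *	dbp->cursor(dbp, NULL, &dbc, 0);
+ *	dbc->c_put(dbc, &key, &data, DB_KEYFIRST);	calls __db_c_put
+ *	dbc->c_close(dbc);
+ */
+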
+/*
+ * __db_duperr()
+ * Error message: duplicate duplicates are not supported in sorted duplicate sets.
+ * PUBLIC: int __db_duperr __P((DB *, u_int32_t));
+ */
+int
+__db_duperr(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ if (flags != DB_NODUPDATA)
+ __db_err(dbp->dbenv,
+ "Duplicate data items are not supported with sorted data");
+ return (DB_KEYEXIST);
+}
+
+/*
+ * __db_c_cleanup --
+ * Clean up duplicate cursors.
+ */
+static int
+__db_c_cleanup(dbc, dbc_n, failed)
+ DBC *dbc, *dbc_n;
+ int failed;
+{
+ DB *dbp;
+ DBC *opd;
+ DBC_INTERNAL *internal;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ internal = dbc->internal;
+ ret = 0;
+
+ /* Discard any pages we're holding. */
+ if (internal->page != NULL) {
+ if ((t_ret =
+ memp_fput(dbp->mpf, internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ internal->page = NULL;
+ }
+ opd = internal->opd;
+ if (opd != NULL && opd->internal->page != NULL) {
+ if ((t_ret = memp_fput(dbp->mpf,
+ opd->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ opd->internal->page = NULL;
+ }
+
+ /*
+ * If dbc_n is NULL, there's no internal cursor swapping to be
+ * done and no dbc_n to close--we probably did the entire
+ * operation on an offpage duplicate cursor. Just return.
+ */
+ if (dbc_n == NULL)
+ return (ret);
+
+ /*
+ * If dbc is marked DBC_TRANSIENT, we're inside a DB->{put/get}
+ * operation, and as an optimization we performed the operation on
+ * the main cursor rather than on a duplicated one. Assert
+ * that dbc_n == dbc (i.e., that we really did skip the
+ * duplication). Then just do nothing--even if there was
+ * an error, we're about to close the cursor, and the fact that we
+ * moved it isn't a user-visible violation of our "cursor
+ * stays put on error" rule.
+ */
+ if (F_ISSET(dbc, DBC_TRANSIENT)) {
+ DB_ASSERT(dbc == dbc_n);
+ return (ret);
+ }
+
+ if (dbc_n->internal->page != NULL) {
+ if ((t_ret = memp_fput(dbp->mpf,
+ dbc_n->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ dbc_n->internal->page = NULL;
+ }
+ opd = dbc_n->internal->opd;
+ if (opd != NULL && opd->internal->page != NULL) {
+ if ((t_ret = memp_fput(dbp->mpf,
+ opd->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ opd->internal->page = NULL;
+ }
+
+ /*
+ * If we didn't fail before entering this routine or just now when
+ * freeing pages, swap the interesting contents of the old and new
+ * cursors.
+ */
+ if (!failed && ret == 0) {
+ dbc->internal = dbc_n->internal;
+ dbc_n->internal = internal;
+ }
+
+ /*
+ * Close the cursor we don't care about anymore. The close can fail,
+ * but we only expect DB_LOCK_DEADLOCK failures. This violates our
+ * "the cursor is unchanged on error" semantics, but since all you can
+ * do with a DB_LOCK_DEADLOCK failure is close the cursor, I believe
+ * that's OK.
+ *
+ * XXX
+ * There's no way to recover from failure to close the old cursor.
+ * All we can do is move to the new position and return an error.
+ *
+ * XXX
+ * We might want to consider adding a flag to the cursor, so that any
+ * subsequent operations other than close just return an error?
+ */
+ if ((t_ret = dbc_n->c_close(dbc_n)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_wrlock_err -- do not have a write lock.
+ */
+static int
+__db_wrlock_err(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "Write attempted on read-only cursor");
+ return (EPERM);
+}
diff --git a/bdb/db/db_conv.c b/bdb/db/db_conv.c
new file mode 100644
index 00000000000..df60be06790
--- /dev/null
+++ b/bdb/db/db_conv.c
@@ -0,0 +1,348 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_conv.c,v 11.11 2000/11/30 00:58:31 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "db_am.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+
+/*
+ * __db_pgin --
+ * Primary page-swap routine, called to convert a page read from disk.
+ *
+ * PUBLIC: int __db_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__db_pgin(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+
+ switch (((PAGE *)pp)->type) {
+ case P_HASH:
+ case P_HASHMETA:
+ case P_INVALID:
+ return (__ham_pgin(dbenv, pg, pp, cookie));
+ case P_BTREEMETA:
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ return (__bam_pgin(dbenv, pg, pp, cookie));
+ case P_QAMMETA:
+ case P_QAMDATA:
+ return (__qam_pgin_out(dbenv, pg, pp, cookie));
+ default:
+ break;
+ }
+ return (__db_unknown_type(dbenv, "__db_pgin", ((PAGE *)pp)->type));
+}
+
+/*
+ * __db_pgout --
+ * Primary page-swap routine, called to convert a page being written to disk.
+ *
+ * PUBLIC: int __db_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__db_pgout(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+
+ switch (((PAGE *)pp)->type) {
+ case P_HASH:
+ case P_HASHMETA:
+ case P_INVALID:
+ return (__ham_pgout(dbenv, pg, pp, cookie));
+ case P_BTREEMETA:
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ return (__bam_pgout(dbenv, pg, pp, cookie));
+ case P_QAMMETA:
+ case P_QAMDATA:
+ return (__qam_pgin_out(dbenv, pg, pp, cookie));
+ default:
+ break;
+ }
+ return (__db_unknown_type(dbenv, "__db_pgout", ((PAGE *)pp)->type));
+}
+
+/*
+ * __db_metaswap --
+ * Byteswap the common part of the meta-data page.
+ *
+ * PUBLIC: void __db_metaswap __P((PAGE *));
+ */
+void
+__db_metaswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *p;
+
+ p = (u_int8_t *)pg;
+
+ /* Swap the meta-data information. */
+ SWAP32(p); /* lsn.file */
+ SWAP32(p); /* lsn.offset */
+ SWAP32(p); /* pgno */
+ SWAP32(p); /* magic */
+ SWAP32(p); /* version */
+ SWAP32(p); /* pagesize */
+ p += 4; /* unused, page type, unused, unused */
+ SWAP32(p); /* free */
+ SWAP32(p); /* alloc_lsn part 1 */
+ SWAP32(p); /* alloc_lsn part 2 */
+ SWAP32(p); /* cached key count */
+ SWAP32(p); /* cached record count */
+ SWAP32(p); /* flags */
+}
+
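+/*
+ * An illustrative note, not part of the library source: the swap macros
+ * used above come from db_swap.h. Conceptually, an in-place 32-bit swap
+ * such as M_32_SWAP reverses the byte order of a value, roughly:
+ *
+ *	v = ((v & 0x000000ff) << 24) |
+ *	    ((v & 0x0000ff00) <<  8) |
+ *	    ((v & 0x00ff0000) >>  8) |
+ *	    ((v & 0xff000000) >> 24);
+ *
+ * SWAP32(p) also advances the pointer p past the converted field, which
+ * is why __db_metaswap can walk the meta-page header with a simple
+ * sequence of SWAP32 calls.
+ */
+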
+/*
+ * __db_byteswap --
+ * Byteswap a page.
+ *
+ * PUBLIC: int __db_byteswap __P((DB_ENV *, db_pgno_t, PAGE *, size_t, int));
+ */
+int
+__db_byteswap(dbenv, pg, h, pagesize, pgin)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ PAGE *h;
+ size_t pagesize;
+ int pgin;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ RINTERNAL *ri;
+ db_indx_t i, len, tmp;
+ u_int8_t *p, *end;
+
+ COMPQUIET(pg, 0);
+
+ if (pgin) {
+ M_32_SWAP(h->lsn.file);
+ M_32_SWAP(h->lsn.offset);
+ M_32_SWAP(h->pgno);
+ M_32_SWAP(h->prev_pgno);
+ M_32_SWAP(h->next_pgno);
+ M_16_SWAP(h->entries);
+ M_16_SWAP(h->hf_offset);
+ }
+
+ switch (h->type) {
+ case P_HASH:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(h->inp[i]);
+
+ switch (HPAGE_TYPE(h, i)) {
+ case H_KEYDATA:
+ break;
+ case H_DUPLICATE:
+ len = LEN_HKEYDATA(h, pagesize, i);
+ p = HKEYDATA_DATA(P_ENTRY(h, i));
+ for (end = p + len; p < end;) {
+ if (pgin) {
+ P_16_SWAP(p);
+ memcpy(&tmp,
+ p, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ } else {
+ memcpy(&tmp,
+ p, sizeof(db_indx_t));
+ SWAP16(p);
+ }
+ p += tmp;
+ SWAP16(p);
+ }
+ break;
+ case H_OFFDUP:
+ p = HOFFPAGE_PGNO(P_ENTRY(h, i));
+ SWAP32(p); /* pgno */
+ break;
+ case H_OFFPAGE:
+ p = HOFFPAGE_PGNO(P_ENTRY(h, i));
+ SWAP32(p); /* pgno */
+ SWAP32(p); /* tlen */
+ break;
+ }
+
+ }
+
+ /*
+ * The offsets in the inp array are used to determine
+ * the size of entries on a page; therefore they
+ * cannot be converted until we've done all the
+ * entries.
+ */
+ if (!pgin)
+ for (i = 0; i < NUM_ENT(h); i++)
+ M_16_SWAP(h->inp[i]);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(h->inp[i]);
+
+ /*
+ * In the case of on-page duplicates, key information
+ * should only be swapped once.
+ */
+ if (h->type == P_LBTREE && i > 1) {
+ if (pgin) {
+ if (h->inp[i] == h->inp[i - 2])
+ continue;
+ } else {
+ M_16_SWAP(h->inp[i]);
+ if (h->inp[i] == h->inp[i - 2])
+ continue;
+ M_16_SWAP(h->inp[i]);
+ }
+ }
+
+ bk = GET_BKEYDATA(h, i);
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ M_16_SWAP(bk->len);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bo = (BOVERFLOW *)bk;
+ M_32_SWAP(bo->pgno);
+ M_32_SWAP(bo->tlen);
+ break;
+ }
+
+ if (!pgin)
+ M_16_SWAP(h->inp[i]);
+ }
+ break;
+ case P_IBTREE:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(h->inp[i]);
+
+ bi = GET_BINTERNAL(h, i);
+ M_16_SWAP(bi->len);
+ M_32_SWAP(bi->pgno);
+ M_32_SWAP(bi->nrecs);
+
+ switch (B_TYPE(bi->type)) {
+ case B_KEYDATA:
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bo = (BOVERFLOW *)bi->data;
+ M_32_SWAP(bo->pgno);
+ M_32_SWAP(bo->tlen);
+ break;
+ }
+
+ if (!pgin)
+ M_16_SWAP(h->inp[i]);
+ }
+ break;
+ case P_IRECNO:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(h->inp[i]);
+
+ ri = GET_RINTERNAL(h, i);
+ M_32_SWAP(ri->pgno);
+ M_32_SWAP(ri->nrecs);
+
+ if (!pgin)
+ M_16_SWAP(h->inp[i]);
+ }
+ break;
+ case P_OVERFLOW:
+ case P_INVALID:
+ /* Nothing to do. */
+ break;
+ default:
+ return (__db_unknown_type(dbenv, "__db_byteswap", h->type));
+ }
+
+ if (!pgin) {
+ /* Swap the header information. */
+ M_32_SWAP(h->lsn.file);
+ M_32_SWAP(h->lsn.offset);
+ M_32_SWAP(h->pgno);
+ M_32_SWAP(h->prev_pgno);
+ M_32_SWAP(h->next_pgno);
+ M_16_SWAP(h->entries);
+ M_16_SWAP(h->hf_offset);
+ }
+ return (0);
+}
diff --git a/bdb/db/db_dispatch.c b/bdb/db/db_dispatch.c
new file mode 100644
index 00000000000..c9beac401a7
--- /dev/null
+++ b/bdb/db/db_dispatch.c
@@ -0,0 +1,983 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_dispatch.c,v 11.41 2001/01/11 18:19:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "log_auto.h"
+#include "txn.h"
+#include "txn_auto.h"
+#include "log.h"
+
+static int __db_txnlist_find_internal __P((void *, db_txnlist_type,
+ u_int32_t, u_int8_t [DB_FILE_ID_LEN], DB_TXNLIST **, int));
+
+/*
+ * __db_dispatch --
+ *
+ * This is the transaction dispatch function used by the db access methods.
+ * It is designed to handle the record format used by all the access
+ * methods (the one automatically generated by the db_{h,log,read}.sh
+ * scripts in the tools directory). An application using a different
+ * recovery paradigm will supply a different dispatch function to txn_open.
+ *
+ * PUBLIC: int __db_dispatch __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_dispatch(dbenv, db, lsnp, redo, info)
+ DB_ENV *dbenv; /* The environment. */
+ DBT *db; /* The log record upon which to dispatch. */
+ DB_LSN *lsnp; /* The lsn of the record being dispatched. */
+ db_recops redo; /* Redo this op (or undo it). */
+ void *info;
+{
+ u_int32_t rectype, txnid;
+ int make_call, ret;
+
+ memcpy(&rectype, db->data, sizeof(rectype));
+ memcpy(&txnid, (u_int8_t *)db->data + sizeof(rectype), sizeof(txnid));
+ make_call = ret = 0;
+
+ /*
+ * If we find a record that is in the user's number space and they
+ * have specified a recovery routine, let them handle it. If they
+ * didn't specify a recovery routine, then we expect that they've
+ * followed all our rules and registered new recovery functions.
+ */
+ switch (redo) {
+ case DB_TXN_ABORT:
+ /*
+ * XXX
+ * db_printlog depends on DB_TXN_ABORT not examining the TXN
+ * list. If that ever changes, fix db_printlog too.
+ */
+ make_call = 1;
+ break;
+ case DB_TXN_OPENFILES:
+ if (rectype == DB_log_register)
+ return (dbenv->dtab[rectype](dbenv,
+ db, lsnp, redo, info));
+ break;
+ case DB_TXN_BACKWARD_ROLL:
+ /*
+ * Running full recovery in the backward pass. If we've
+		 * seen this txnid before and added it to our commit list,
+		 * then we do nothing during this pass, unless this is a child
+ * commit record, in which case we need to process it. If
+ * we've never seen it, then we call the appropriate recovery
+ * routine.
+ *
+ * We need to always undo DB_db_noop records, so that we
+ * properly handle any aborts before the file was closed.
+ */
+ if (rectype == DB_log_register ||
+ rectype == DB_txn_ckp || rectype == DB_db_noop
+ || rectype == DB_txn_child || (txnid != 0 &&
+ (ret = __db_txnlist_find(info, txnid)) != 0)) {
+ make_call = 1;
+ if (ret == DB_NOTFOUND && rectype != DB_txn_regop &&
+ rectype != DB_txn_xa_regop && (ret =
+ __db_txnlist_add(dbenv, info, txnid, 1)) != 0)
+ return (ret);
+ }
+ break;
+ case DB_TXN_FORWARD_ROLL:
+ /*
+ * In the forward pass, if we haven't seen the transaction,
+		 * do nothing; otherwise, recover it.
+ *
+ * We need to always redo DB_db_noop records, so that we
+ * properly handle any commits after the file was closed.
+ */
+ if (rectype == DB_log_register ||
+ rectype == DB_txn_ckp ||
+ rectype == DB_db_noop ||
+ __db_txnlist_find(info, txnid) == 0)
+ make_call = 1;
+ break;
+ default:
+ return (__db_unknown_flag(dbenv, "__db_dispatch", redo));
+ }
+
+ if (make_call) {
+ if (rectype >= DB_user_BEGIN && dbenv->tx_recover != NULL)
+ return (dbenv->tx_recover(dbenv, db, lsnp, redo));
+ else
+ return (dbenv->dtab[rectype](dbenv, db, lsnp, redo, info));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_add_recovery --
+ *
+ * PUBLIC: int __db_add_recovery __P((DB_ENV *,
+ * PUBLIC: int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+ */
+int
+__db_add_recovery(dbenv, func, ndx)
+ DB_ENV *dbenv;
+ int (*func) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ u_int32_t ndx;
+{
+ u_int32_t i, nsize;
+ int ret;
+
+ /* Check if we have to grow the table. */
+ if (ndx >= dbenv->dtab_size) {
+ nsize = ndx + 40;
+ if ((ret = __os_realloc(dbenv,
+ nsize * sizeof(dbenv->dtab[0]), NULL, &dbenv->dtab)) != 0)
+ return (ret);
+ for (i = dbenv->dtab_size; i < nsize; ++i)
+ dbenv->dtab[i] = NULL;
+ dbenv->dtab_size = nsize;
+ }
+
+ dbenv->dtab[ndx] = func;
+ return (0);
+}
+
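+/*
+ * A hypothetical sketch for clarity, not part of the library source: a
+ * recovery function registered through __db_add_recovery must match the
+ * dispatch-table signature used by __db_dispatch. For example (my_recover
+ * and MY_RECTYPE are made-up names):
+ *
+ *	int
+ *	my_recover(dbenv, dbtp, lsnp, op, info)
+ *		DB_ENV *dbenv;
+ *		DBT *dbtp;
+ *		DB_LSN *lsnp;
+ *		db_recops op;
+ *		void *info;
+ *	{
+ *		... redo or undo the logged operation based on op ...
+ *		return (0);
+ *	}
+ *
+ *	__db_add_recovery(dbenv, my_recover, MY_RECTYPE);
+ */
+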
+/*
+ * __deprecated_recover --
+ * Stub routine for deprecated recovery functions.
+ *
+ * PUBLIC: int __deprecated_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__deprecated_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(dbtp, NULL);
+ COMPQUIET(lsnp, NULL);
+ COMPQUIET(op, 0);
+ COMPQUIET(info, NULL);
+ return (EINVAL);
+}
+
+/*
+ * __db_txnlist_init --
+ * Initialize transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_init __P((DB_ENV *, void *));
+ */
+int
+__db_txnlist_init(dbenv, retp)
+ DB_ENV *dbenv;
+ void *retp;
+{
+ DB_TXNHEAD *headp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNHEAD), NULL, &headp)) != 0)
+ return (ret);
+
+ LIST_INIT(&headp->head);
+ headp->maxid = 0;
+ headp->generation = 1;
+
+ *(void **)retp = headp;
+ return (0);
+}
+
+/*
+ * __db_txnlist_add --
+ * Add an element to our transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_add __P((DB_ENV *, void *, u_int32_t, int32_t));
+ */
+int
+__db_txnlist_add(dbenv, listp, txnid, aborted)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+ int32_t aborted;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), NULL, &elp)) != 0)
+ return (ret);
+
+ hp = (DB_TXNHEAD *)listp;
+ LIST_INSERT_HEAD(&hp->head, elp, links);
+
+ elp->type = TXNLIST_TXNID;
+ elp->u.t.txnid = txnid;
+ elp->u.t.aborted = aborted;
+ if (txnid > hp->maxid)
+ hp->maxid = txnid;
+ elp->u.t.generation = hp->generation;
+
+ return (0);
+}
+
+/*
+ * __db_txnlist_remove --
+ * Remove an element from our transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_remove __P((void *, u_int32_t));
+ */
+int
+__db_txnlist_remove(listp, txnid)
+ void *listp;
+ u_int32_t txnid;
+{
+ DB_TXNLIST *entry;
+
+ return (__db_txnlist_find_internal(listp,
+ TXNLIST_TXNID, txnid, NULL, &entry, 1));
+}
+
+/*
+ * __db_txnlist_close --
+ *	Call this when we close a file. It allows us to reconcile whether
+ * we have done any operations on this file with whether the file appears
+ * to have been deleted. If you never do any operations on a file, then
+ * we assume it's OK to appear deleted.
+ *
+ * PUBLIC: int __db_txnlist_close __P((void *, int32_t, u_int32_t));
+ */
+int
+__db_txnlist_close(listp, lid, count)
+ void *listp;
+ int32_t lid;
+ u_int32_t count;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+
+ hp = (DB_TXNHEAD *)listp;
+ for (p = LIST_FIRST(&hp->head); p != NULL; p = LIST_NEXT(p, links)) {
+ if (p->type == TXNLIST_DELETE)
+ if (lid == p->u.d.fileid &&
+ !F_ISSET(&p->u.d, TXNLIST_FLAG_CLOSED)) {
+ p->u.d.count += count;
+ return (0);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * __db_txnlist_delete --
+ *
+ * Record that a file was missing or deleted. If the deleted
+ * flag is set, then we've encountered a delete of a file, else we've
+ * just encountered a file that is missing. The lid is the log fileid
+ * and is only meaningful if deleted is not equal to 0.
+ *
+ * PUBLIC: int __db_txnlist_delete __P((DB_ENV *,
+ * PUBLIC: void *, char *, u_int32_t, int));
+ */
+int
+__db_txnlist_delete(dbenv, listp, name, lid, deleted)
+ DB_ENV *dbenv;
+ void *listp;
+ char *name;
+ u_int32_t lid;
+ int deleted;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+ int ret;
+
+ hp = (DB_TXNHEAD *)listp;
+ for (p = LIST_FIRST(&hp->head); p != NULL; p = LIST_NEXT(p, links)) {
+ if (p->type == TXNLIST_DELETE)
+ if (strcmp(name, p->u.d.fname) == 0) {
+ if (deleted)
+ F_SET(&p->u.d, TXNLIST_FLAG_DELETED);
+ else
+ F_CLR(&p->u.d, TXNLIST_FLAG_CLOSED);
+ return (0);
+ }
+ }
+
+ /* Need to add it. */
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), NULL, &p)) != 0)
+ return (ret);
+ LIST_INSERT_HEAD(&hp->head, p, links);
+
+ p->type = TXNLIST_DELETE;
+ p->u.d.flags = 0;
+ if (deleted)
+ F_SET(&p->u.d, TXNLIST_FLAG_DELETED);
+ p->u.d.fileid = lid;
+ p->u.d.count = 0;
+ ret = __os_strdup(dbenv, name, &p->u.d.fname);
+
+ return (ret);
+}
+
+/*
+ * __db_txnlist_end --
+ * Discard transaction linked list. Print out any error messages
+ * for deleted files.
+ *
+ * PUBLIC: void __db_txnlist_end __P((DB_ENV *, void *));
+ */
+void
+__db_txnlist_end(dbenv, listp)
+ DB_ENV *dbenv;
+ void *listp;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+ DB_LOG *lp;
+
+ hp = (DB_TXNHEAD *)listp;
+ lp = (DB_LOG *)dbenv->lg_handle;
+ while (hp != NULL && (p = LIST_FIRST(&hp->head)) != NULL) {
+ LIST_REMOVE(p, links);
+ switch (p->type) {
+ case TXNLIST_DELETE:
+ /*
+ * If we have a file that is not deleted and has
+ * some operations, we flag the warning. Since
+ * the file could still be open, we need to check
+ * the actual log table as well.
+ */
+ if ((!F_ISSET(&p->u.d, TXNLIST_FLAG_DELETED) &&
+ p->u.d.count != 0) ||
+ (!F_ISSET(&p->u.d, TXNLIST_FLAG_CLOSED) &&
+ p->u.d.fileid != (int32_t) TXNLIST_INVALID_ID &&
+ p->u.d.fileid < lp->dbentry_cnt &&
+ lp->dbentry[p->u.d.fileid].count != 0))
+ __db_err(dbenv, "warning: %s: %s",
+ p->u.d.fname, db_strerror(ENOENT));
+ __os_freestr(p->u.d.fname);
+ break;
+ case TXNLIST_LSN:
+ __os_free(p->u.l.lsn_array,
+ p->u.l.maxn * sizeof(DB_LSN));
+ break;
+ default:
+ /* Possibly an incomplete DB_TXNLIST; just free it. */
+ break;
+ }
+ __os_free(p, sizeof(DB_TXNLIST));
+ }
+ __os_free(listp, sizeof(DB_TXNHEAD));
+}
+
+/*
+ * __db_txnlist_find --
+ * Checks to see if a txnid with the current generation is in the
+ * txnid list. This returns DB_NOTFOUND if the item isn't in the
+ * list otherwise it returns (like __db_txnlist_find_internal) a
+ *	list; otherwise it returns (like __db_txnlist_find_internal)
+ *	1 or 0, indicating whether the transaction was aborted. A txnid
+ *
+ * PUBLIC: int __db_txnlist_find __P((void *, u_int32_t));
+ */
+int
+__db_txnlist_find(listp, txnid)
+ void *listp;
+ u_int32_t txnid;
+{
+ DB_TXNLIST *entry;
+
+ if (txnid == 0)
+ return (DB_NOTFOUND);
+ return (__db_txnlist_find_internal(listp,
+ TXNLIST_TXNID, txnid, NULL, &entry, 0));
+}
+
+/*
+ * __db_txnlist_find_internal --
+ * Find an entry on the transaction list.
+ *	If the entry is not there or the list pointer is not initialized,
+ *	we return DB_NOTFOUND. If the item is found, we return the aborted
+ *	status (1 for aborted, 0 for not aborted). Currently we always call
+ *	this with an initialized list pointer, but checking for NULL keeps it
+ *	general.
+static int
+__db_txnlist_find_internal(listp, type, txnid, uid, txnlistp, delete)
+ void *listp;
+ db_txnlist_type type;
+ u_int32_t txnid;
+ u_int8_t uid[DB_FILE_ID_LEN];
+ DB_TXNLIST **txnlistp;
+ int delete;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+ int ret;
+
+ if ((hp = (DB_TXNHEAD *)listp) == NULL)
+ return (DB_NOTFOUND);
+
+ for (p = LIST_FIRST(&hp->head); p != NULL; p = LIST_NEXT(p, links)) {
+ if (p->type != type)
+ continue;
+ switch (type) {
+ case TXNLIST_TXNID:
+ if (p->u.t.txnid != txnid ||
+ hp->generation != p->u.t.generation)
+ continue;
+ ret = p->u.t.aborted;
+ break;
+
+ case TXNLIST_PGNO:
+ if (memcmp(uid, p->u.p.uid, DB_FILE_ID_LEN) != 0)
+ continue;
+
+ ret = 0;
+ break;
+ default:
+ DB_ASSERT(0);
+ ret = EINVAL;
+ }
+ if (delete == 1) {
+ LIST_REMOVE(p, links);
+ __os_free(p, sizeof(DB_TXNLIST));
+ } else if (p != LIST_FIRST(&hp->head)) {
+ /* Move it to head of list. */
+ LIST_REMOVE(p, links);
+ LIST_INSERT_HEAD(&hp->head, p, links);
+ }
+ *txnlistp = p;
+ return (ret);
+ }
+
+ return (DB_NOTFOUND);
+}
+
+/*
+ * __db_txnlist_gen --
+ * Change the current generation number.
+ *
+ * PUBLIC: void __db_txnlist_gen __P((void *, int));
+ */
+void
+__db_txnlist_gen(listp, incr)
+ void *listp;
+ int incr;
+{
+ DB_TXNHEAD *hp;
+
+ /*
+	 * During recovery, generation numbers keep track of how many "restart"
+	 * checkpoints we've seen. Restart checkpoints occur whenever we take
+	 * a checkpoint and there are no outstanding transactions. When that
+	 * happens, we can reset transaction IDs back to 1. It always happens
+	 * at recovery, and it prevents us from exhausting the transaction-ID
+	 * name space.
+ */
+ hp = (DB_TXNHEAD *)listp;
+ hp->generation += incr;
+}
+
+#define TXN_BUBBLE(AP, MAX) { \
+ int __j; \
+ DB_LSN __tmp; \
+ \
+ for (__j = 0; __j < MAX - 1; __j++) \
+ if (log_compare(&AP[__j], &AP[__j + 1]) < 0) { \
+ __tmp = AP[__j]; \
+ AP[__j] = AP[__j + 1]; \
+ AP[__j + 1] = __tmp; \
+ } \
+}
+
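+/*
+ * An explanatory note, illustrative only: TXN_BUBBLE is a single
+ * bubble-sort pass that swaps adjacent LSNs whenever the earlier element
+ * compares smaller, so each pass moves the smallest remaining LSN to the
+ * end of the array. Repeated ntxns times, as __db_txnlist_lsnadd does
+ * below, it yields a descending sort with the largest (most recent) LSN
+ * in lsn_array[0]. For example, with LSNs a < b < c stored as [a, c, b],
+ * a single pass produces [c, b, a].
+ */
+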
+/*
+ * __db_txnlist_lsnadd --
+ * Add to or re-sort the transaction list lsn entry.
+ * Note that since this is used during an abort, the __txn_undo
+ * code calls into the "recovery" subsystem explicitly, and there
+ * is only a single TXNLIST_LSN entry on the list.
+ *
+ * PUBLIC: int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t));
+ */
+int
+__db_txnlist_lsnadd(dbenv, listp, lsnp, flags)
+ DB_ENV *dbenv;
+ void *listp;
+ DB_LSN *lsnp;
+ u_int32_t flags;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int i, ret;
+
+ hp = (DB_TXNHEAD *)listp;
+
+ for (elp = LIST_FIRST(&hp->head);
+ elp != NULL; elp = LIST_NEXT(elp, links))
+ if (elp->type == TXNLIST_LSN)
+ break;
+
+ if (elp == NULL)
+ return (EINVAL);
+
+ if (LF_ISSET(TXNLIST_NEW)) {
+ if (elp->u.l.ntxns >= elp->u.l.maxn) {
+ if ((ret = __os_realloc(dbenv,
+ 2 * elp->u.l.maxn * sizeof(DB_LSN),
+ NULL, &elp->u.l.lsn_array)) != 0)
+ return (ret);
+ elp->u.l.maxn *= 2;
+ }
+ elp->u.l.lsn_array[elp->u.l.ntxns++] = *lsnp;
+ } else
+ /* Simply replace the 0th element. */
+ elp->u.l.lsn_array[0] = *lsnp;
+
+ /*
+	 * If we just added a new entry, there may be NULL
+	 * entries, so we have to do a complete bubble sort,
+ * not just trickle a changed entry around.
+ */
+ for (i = 0; i < (!LF_ISSET(TXNLIST_NEW) ? 1 : elp->u.l.ntxns); i++)
+ TXN_BUBBLE(elp->u.l.lsn_array, elp->u.l.ntxns);
+
+ *lsnp = elp->u.l.lsn_array[0];
+
+ return (0);
+}
+
+/*
+ * __db_txnlist_lsnhead --
+ * Return a pointer to the beginning of the lsn_array.
+ *
+ * PUBLIC: int __db_txnlist_lsnhead __P((void *, DB_LSN **));
+ */
+int
+__db_txnlist_lsnhead(listp, lsnpp)
+ void *listp;
+ DB_LSN **lsnpp;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+
+ hp = (DB_TXNHEAD *)listp;
+
+ for (elp = LIST_FIRST(&hp->head);
+ elp != NULL; elp = LIST_NEXT(elp, links))
+ if (elp->type == TXNLIST_LSN)
+ break;
+
+ if (elp == NULL)
+ return (EINVAL);
+
+ *lsnpp = &elp->u.l.lsn_array[0];
+
+ return (0);
+}
+
+/*
+ * __db_txnlist_lsninit --
+ * Initialize a transaction list with an lsn array entry.
+ *
+ * PUBLIC: int __db_txnlist_lsninit __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *));
+ */
+int
+__db_txnlist_lsninit(dbenv, hp, lsnp)
+ DB_ENV *dbenv;
+ DB_TXNHEAD *hp;
+ DB_LSN *lsnp;
+{
+ DB_TXNLIST *elp;
+ int ret;
+
+ elp = NULL;
+
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), NULL, &elp)) != 0)
+ goto err;
+ LIST_INSERT_HEAD(&hp->head, elp, links);
+
+ if ((ret = __os_malloc(dbenv,
+ 12 * sizeof(DB_LSN), NULL, &elp->u.l.lsn_array)) != 0)
+ goto err;
+ elp->type = TXNLIST_LSN;
+ elp->u.l.maxn = 12;
+ elp->u.l.ntxns = 1;
+ elp->u.l.lsn_array[0] = *lsnp;
+
+ return (0);
+
+err: __db_txnlist_end(dbenv, hp);
+ return (ret);
+}
+
+/*
+ * __db_add_limbo -- add pages to the limbo list.
+ * Get the file information and call pgnoadd
+ * for each page.
+ *
+ * PUBLIC: int __db_add_limbo __P((DB_ENV *,
+ * PUBLIC: void *, int32_t, db_pgno_t, int32_t));
+ */
+int
+__db_add_limbo(dbenv, info, fileid, pgno, count)
+ DB_ENV *dbenv;
+ void *info;
+ int32_t fileid;
+ db_pgno_t pgno;
+ int32_t count;
+{
+ DB_LOG *dblp;
+ FNAME *fnp;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ if ((ret = __log_lid_to_fname(dblp, fileid, &fnp)) != 0)
+ return (ret);
+
+ do {
+ if ((ret =
+ __db_txnlist_pgnoadd(dbenv, info, fileid, fnp->ufid,
+ R_ADDR(&dblp->reginfo, fnp->name_off), pgno)) != 0)
+ return (ret);
+ pgno++;
+ } while (--count != 0);
+
+ return (0);
+}
+
+/*
+ * __db_do_the_limbo -- move pages from limbo to free.
+ *
+ * If we are in recovery, we add things to the free list without
+ * logging because we want to incrementally apply logs that
+ * may be generated on another copy of this environment.
+ * Otherwise we just call __db_free to put the pages on
+ * the free list and log the activity.
+ *
+ * PUBLIC: int __db_do_the_limbo __P((DB_ENV *, DB_TXNHEAD *));
+ */
+int
+__db_do_the_limbo(dbenv, hp)
+ DB_ENV *dbenv;
+ DB_TXNHEAD *hp;
+{
+ DB *dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_TXN *txn;
+ DB_TXNLIST *elp;
+ PAGE *pagep;
+ db_pgno_t last_pgno, pgno;
+ int i, in_recover, put_page, ret, t_ret;
+
+ dbp = NULL;
+ dbc = NULL;
+ txn = NULL;
+ ret = 0;
+
+ /* Are we in recovery? */
+ in_recover = F_ISSET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ for (elp = LIST_FIRST(&hp->head);
+ elp != NULL; elp = LIST_NEXT(elp, links)) {
+ if (elp->type != TXNLIST_PGNO)
+ continue;
+
+ if (in_recover) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ /*
+			 * It is OK if the file is no longer there.
+ */
+ dbp->type = DB_UNKNOWN;
+ ret = __db_dbopen(dbp,
+ elp->u.p.fname, 0, __db_omode("rw----"), 0);
+ } else {
+ /*
+ * If we are in transaction undo, then we know
+ * the fileid is still correct.
+ */
+ if ((ret =
+ __db_fileid_to_db(dbenv, &dbp,
+ elp->u.p.fileid, 0)) != 0 && ret != DB_DELETED)
+ goto err;
+ /* File is being destroyed. */
+ if (F_ISSET(dbp, DB_AM_DISCARD))
+ ret = DB_DELETED;
+ }
+ /*
+ * Verify that we are opening the same file that we were
+ * referring to when we wrote this log record.
+ */
+ if (ret == 0 &&
+ memcmp(elp->u.p.uid, dbp->fileid, DB_FILE_ID_LEN) == 0) {
+ last_pgno = PGNO_INVALID;
+ if (in_recover) {
+ pgno = PGNO_BASE_MD;
+ if ((ret = memp_fget(dbp->mpf,
+ &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ last_pgno = meta->free;
+ /*
+ * Check to see if the head of the free
+ * list is any of the pages we are about
+ * to link in. We could have crashed
+ * after linking them in and before writing
+ * a checkpoint.
+ * It may not be the last one since
+				 * It may not be the last one, since
+				 * any page may get reallocated before we get here.
+ if (last_pgno != PGNO_INVALID)
+ for (i = 0; i < elp->u.p.nentries; i++)
+ if (last_pgno
+ == elp->u.p.pgno_array[i])
+ goto done_it;
+ }
+
+ for (i = 0; i < elp->u.p.nentries; i++) {
+ pgno = elp->u.p.pgno_array[i];
+ if ((ret = memp_fget(dbp->mpf,
+ &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto err;
+
+ put_page = 1;
+ if (IS_ZERO_LSN(LSN(pagep))) {
+ P_INIT(pagep, dbp->pgsize,
+ pgno, PGNO_INVALID,
+ last_pgno, 0, P_INVALID);
+
+ if (in_recover) {
+ LSN(pagep) = LSN(meta);
+ last_pgno = pgno;
+ } else {
+ /*
+ * Starting the transaction
+ * is postponed until we know
+ * we have something to do.
+ */
+ if (txn == NULL &&
+ (ret = txn_begin(dbenv,
+ NULL, &txn, 0)) != 0)
+ goto err;
+
+ if (dbc == NULL &&
+ (ret = dbp->cursor(dbp,
+ txn, &dbc, 0)) != 0)
+ goto err;
+ /* Turn off locking. */
+ F_SET(dbc, DBC_COMPENSATE);
+
+ /* __db_free puts the page. */
+ if ((ret =
+ __db_free(dbc, pagep)) != 0)
+ goto err;
+ put_page = 0;
+ }
+ }
+
+ if (put_page == 1 &&
+ (ret = memp_fput(dbp->mpf,
+ pagep, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ }
+ if (in_recover) {
+ if (last_pgno == meta->free) {
+done_it:
+ if ((ret =
+ memp_fput(dbp->mpf, meta, 0)) != 0)
+ goto err;
+ } else {
+ /*
+ * Flush the new free list then
+ * update the metapage. This is
+ * unlogged so we cannot have the
+ * metapage pointing at pages that
+ * are not on disk.
+ */
+ dbp->sync(dbp, 0);
+ meta->free = last_pgno;
+ if ((ret = memp_fput(dbp->mpf,
+ meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ }
+ }
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ dbc = NULL;
+ }
+ if (in_recover && (t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ dbp = NULL;
+ __os_free(elp->u.p.fname, 0);
+ __os_free(elp->u.p.pgno_array, 0);
+ if (ret == ENOENT)
+ ret = 0;
+ else if (ret != 0)
+ goto err;
+ }
+
+ if (txn != NULL) {
+ ret = txn_commit(txn, 0);
+ txn = NULL;
+ }
+err:
+ if (dbc != NULL)
+ (void)dbc->c_close(dbc);
+ if (in_recover && dbp != NULL)
+ (void)dbp->close(dbp, 0);
+ if (txn != NULL)
+ (void)txn_abort(txn);
+	return (ret);
+}
+
+#define DB_TXNLIST_MAX_PGNO 8 /* A nice even number. */
+
+/*
+ * __db_txnlist_pgnoadd --
+ * Find the txnlist entry for a file and add this pgno,
+ * or add the list entry for the file and then add the pgno.
+ *
+ * PUBLIC: int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *,
+ * PUBLIC: int32_t, u_int8_t [DB_FILE_ID_LEN], char *, db_pgno_t));
+ */
+int
+__db_txnlist_pgnoadd(dbenv, hp, fileid, uid, fname, pgno)
+ DB_ENV *dbenv;
+ DB_TXNHEAD *hp;
+ int32_t fileid;
+ u_int8_t uid[DB_FILE_ID_LEN];
+ char *fname;
+ db_pgno_t pgno;
+{
+ DB_TXNLIST *elp;
+ int len, ret;
+
+ elp = NULL;
+
+ if (__db_txnlist_find_internal(hp, TXNLIST_PGNO, 0, uid, &elp, 0) != 0) {
+ if ((ret =
+ __os_malloc(dbenv, sizeof(DB_TXNLIST), NULL, &elp)) != 0)
+ goto err;
+ LIST_INSERT_HEAD(&hp->head, elp, links);
+ elp->u.p.fileid = fileid;
+ memcpy(elp->u.p.uid, uid, DB_FILE_ID_LEN);
+
+ len = strlen(fname) + 1;
+ if ((ret = __os_malloc(dbenv, len, NULL, &elp->u.p.fname)) != 0)
+ goto err;
+ memcpy(elp->u.p.fname, fname, len);
+
+ elp->u.p.maxentry = 0;
+ elp->type = TXNLIST_PGNO;
+ if ((ret = __os_malloc(dbenv,
+ 8 * sizeof(db_pgno_t), NULL, &elp->u.p.pgno_array)) != 0)
+ goto err;
+ elp->u.p.maxentry = DB_TXNLIST_MAX_PGNO;
+ elp->u.p.nentries = 0;
+ } else if (elp->u.p.nentries == elp->u.p.maxentry) {
+ elp->u.p.maxentry <<= 1;
+ if ((ret = __os_realloc(dbenv, elp->u.p.maxentry *
+ sizeof(db_pgno_t), NULL, &elp->u.p.pgno_array)) != 0)
+ goto err;
+ }
+
+ elp->u.p.pgno_array[elp->u.p.nentries++] = pgno;
+
+ return (0);
+
+err: __db_txnlist_end(dbenv, hp);
+ return (ret);
+}
+
+#ifdef DEBUG
+/*
+ * __db_txnlist_print --
+ * Print out the transaction list.
+ *
+ * PUBLIC: void __db_txnlist_print __P((void *));
+ */
+void
+__db_txnlist_print(listp)
+ void *listp;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+
+ hp = (DB_TXNHEAD *)listp;
+
+ printf("Maxid: %lu Generation: %lu\n",
+ (u_long)hp->maxid, (u_long)hp->generation);
+ for (p = LIST_FIRST(&hp->head); p != NULL; p = LIST_NEXT(p, links)) {
+ switch (p->type) {
+ case TXNLIST_TXNID:
+ printf("TXNID: %lu(%lu)\n",
+ (u_long)p->u.t.txnid, (u_long)p->u.t.generation);
+ break;
+ case TXNLIST_DELETE:
+ printf("FILE: %s id=%d ops=%d %s %s\n",
+ p->u.d.fname, p->u.d.fileid, p->u.d.count,
+ F_ISSET(&p->u.d, TXNLIST_FLAG_DELETED) ?
+ "(deleted)" : "(missing)",
+ F_ISSET(&p->u.d, TXNLIST_FLAG_CLOSED) ?
+ "(closed)" : "(open)");
+
+ break;
+ default:
+ printf("Unrecognized type: %d\n", p->type);
+ break;
+ }
+ }
+}
+#endif
diff --git a/bdb/db/db_dup.c b/bdb/db/db_dup.c
new file mode 100644
index 00000000000..6d8b2df9518
--- /dev/null
+++ b/bdb/db/db_dup.c
@@ -0,0 +1,275 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_dup.c,v 11.18 2000/11/30 00:58:32 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "hash.h"
+#include "lock.h"
+#include "db_am.h"
+
+/*
+ * __db_ditem --
+ * Remove an item from a page.
+ *
+ * PUBLIC: int __db_ditem __P((DBC *, PAGE *, u_int32_t, u_int32_t));
+ */
+int
+__db_ditem(dbc, pagep, indx, nbytes)
+ DBC *dbc;
+ PAGE *pagep;
+ u_int32_t indx, nbytes;
+{
+ DB *dbp;
+ DBT ldbt;
+ db_indx_t cnt, offset;
+ int ret;
+ u_int8_t *from;
+
+ dbp = dbc->dbp;
+ if (DB_LOGGING(dbc)) {
+ ldbt.data = P_ENTRY(pagep, indx);
+ ldbt.size = nbytes;
+ if ((ret = __db_addrem_log(dbp->dbenv, dbc->txn,
+ &LSN(pagep), 0, DB_REM_DUP, dbp->log_fileid, PGNO(pagep),
+ (u_int32_t)indx, nbytes, &ldbt, NULL, &LSN(pagep))) != 0)
+ return (ret);
+ }
+
+ /*
+ * If there's only a single item on the page, we don't have to
+ * work hard.
+ */
+ if (NUM_ENT(pagep) == 1) {
+ NUM_ENT(pagep) = 0;
+ HOFFSET(pagep) = dbp->pgsize;
+ return (0);
+ }
+
+ /*
+ * Pack the remaining key/data items at the end of the page. Use
+	 * memmove(3); the regions may overlap.
+ */
+ from = (u_int8_t *)pagep + HOFFSET(pagep);
+ memmove(from + nbytes, from, pagep->inp[indx] - HOFFSET(pagep));
+ HOFFSET(pagep) += nbytes;
+
+ /* Adjust the indices' offsets. */
+ offset = pagep->inp[indx];
+ for (cnt = 0; cnt < NUM_ENT(pagep); ++cnt)
+ if (pagep->inp[cnt] < offset)
+ pagep->inp[cnt] += nbytes;
+
+ /* Shift the indices down. */
+ --NUM_ENT(pagep);
+ if (indx != NUM_ENT(pagep))
+ memmove(&pagep->inp[indx], &pagep->inp[indx + 1],
+ sizeof(db_indx_t) * (NUM_ENT(pagep) - indx));
+
+ return (0);
+}
+
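+/*
+ * An illustrative note, not part of the original source: a worked example
+ * of the offset arithmetic above, with made-up numbers. Suppose the page
+ * size is 1024, HOFFSET is 900, and three items live at inp[0] = 1000,
+ * inp[1] = 950 and inp[2] = 900. Removing the 50-byte item at inp[1]
+ * memmove()s the 50 bytes starting at offset 900 (the item at inp[2]) up
+ * to offset 950, bumps HOFFSET to 950, adds 50 to every offset smaller
+ * than the old inp[1] (so inp[2] becomes 950), and finally shifts the
+ * index array down, leaving inp[0] = 1000 and inp[1] = 950.
+ */
+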
+/*
+ * __db_pitem --
+ * Put an item on a page.
+ *
+ * PUBLIC: int __db_pitem
+ * PUBLIC: __P((DBC *, PAGE *, u_int32_t, u_int32_t, DBT *, DBT *));
+ */
+int
+__db_pitem(dbc, pagep, indx, nbytes, hdr, data)
+ DBC *dbc;
+ PAGE *pagep;
+ u_int32_t indx;
+ u_int32_t nbytes;
+ DBT *hdr, *data;
+{
+ DB *dbp;
+ BKEYDATA bk;
+ DBT thdr;
+ int ret;
+ u_int8_t *p;
+
+ if (nbytes > P_FREESPACE(pagep)) {
+ DB_ASSERT(nbytes <= P_FREESPACE(pagep));
+ return (EINVAL);
+ }
+ /*
+ * Put a single item onto a page. The logic figuring out where to
+ * insert and whether it fits is handled in the caller. All we do
+ * here is manage the page shuffling. We cheat a little bit in that
+ * we don't want to copy the dbt on a normal put twice. If hdr is
+ * NULL, we create a BKEYDATA structure on the page, otherwise, just
+ * copy the caller's information onto the page.
+ *
+ * This routine is also used to put entries onto the page where the
+ * entry is pre-built, e.g., during recovery. In this case, the hdr
+ * will point to the entry, and the data argument will be NULL.
+ *
+ * !!!
+ * There's a tremendous potential for off-by-one errors here, since
+ * the passed in header sizes must be adjusted for the structure's
+ * placeholder for the trailing variable-length data field.
+ */
+ dbp = dbc->dbp;
+ if (DB_LOGGING(dbc))
+ if ((ret = __db_addrem_log(dbp->dbenv, dbc->txn,
+ &LSN(pagep), 0, DB_ADD_DUP, dbp->log_fileid, PGNO(pagep),
+ (u_int32_t)indx, nbytes, hdr, data, &LSN(pagep))) != 0)
+ return (ret);
+
+ if (hdr == NULL) {
+ B_TSET(bk.type, B_KEYDATA, 0);
+ bk.len = data == NULL ? 0 : data->size;
+
+ thdr.data = &bk;
+ thdr.size = SSZA(BKEYDATA, data);
+ hdr = &thdr;
+ }
+
+ /* Adjust the index table, then put the item on the page. */
+ if (indx != NUM_ENT(pagep))
+ memmove(&pagep->inp[indx + 1], &pagep->inp[indx],
+ sizeof(db_indx_t) * (NUM_ENT(pagep) - indx));
+ HOFFSET(pagep) -= nbytes;
+ pagep->inp[indx] = HOFFSET(pagep);
+ ++NUM_ENT(pagep);
+
+ p = P_ENTRY(pagep, indx);
+ memcpy(p, hdr->data, hdr->size);
+ if (data != NULL)
+ memcpy(p + hdr->size, data->data, data->size);
+
+ return (0);
+}
+
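+/*
+ * An illustrative note, not part of the original source: in the hdr == NULL
+ * case above, the entry stored on the page is a BKEYDATA header followed
+ * by the data bytes, so the nbytes the caller passes in must cover the
+ * header as well as data->size (the access methods typically compute this
+ * with a size macro such as BKEYDATA_SIZE). The new entry's location is
+ * then recorded in inp[indx], which is why HOFFSET moves down by nbytes
+ * before the copy.
+ */
+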
+/*
+ * __db_relink --
+ * Relink around a deleted page.
+ *
+ * PUBLIC: int __db_relink __P((DBC *, u_int32_t, PAGE *, PAGE **, int));
+ */
+int
+__db_relink(dbc, add_rem, pagep, new_next, needlock)
+ DBC *dbc;
+ u_int32_t add_rem;
+ PAGE *pagep, **new_next;
+ int needlock;
+{
+ DB *dbp;
+ PAGE *np, *pp;
+ DB_LOCK npl, ppl;
+ DB_LSN *nlsnp, *plsnp, ret_lsn;
+ int ret;
+
+ ret = 0;
+ np = pp = NULL;
+ npl.off = ppl.off = LOCK_INVALID;
+ nlsnp = plsnp = NULL;
+ dbp = dbc->dbp;
+
+ /*
+ * Retrieve and lock the one/two pages. For a remove, we may need
+ * two pages (the before and after). For an add, we only need one
+	 * because the split took care of the prev.
+ */
+ if (pagep->next_pgno != PGNO_INVALID) {
+ if (needlock && (ret = __db_lget(dbc,
+ 0, pagep->next_pgno, DB_LOCK_WRITE, 0, &npl)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf,
+ &pagep->next_pgno, 0, &np)) != 0) {
+ (void)__db_pgerr(dbp, pagep->next_pgno);
+ goto err;
+ }
+ nlsnp = &np->lsn;
+ }
+ if (add_rem == DB_REM_PAGE && pagep->prev_pgno != PGNO_INVALID) {
+ if (needlock && (ret = __db_lget(dbc,
+ 0, pagep->prev_pgno, DB_LOCK_WRITE, 0, &ppl)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf,
+ &pagep->prev_pgno, 0, &pp)) != 0) {
+ (void)__db_pgerr(dbp, pagep->next_pgno);
+ goto err;
+ }
+ plsnp = &pp->lsn;
+ }
+
+ /* Log the change. */
+ if (DB_LOGGING(dbc)) {
+ if ((ret = __db_relink_log(dbp->dbenv, dbc->txn,
+ &ret_lsn, 0, add_rem, dbp->log_fileid,
+ pagep->pgno, &pagep->lsn,
+ pagep->prev_pgno, plsnp, pagep->next_pgno, nlsnp)) != 0)
+ goto err;
+ if (np != NULL)
+ np->lsn = ret_lsn;
+ if (pp != NULL)
+ pp->lsn = ret_lsn;
+ if (add_rem == DB_REM_PAGE)
+ pagep->lsn = ret_lsn;
+ }
+
+ /*
+ * Modify and release the two pages.
+ *
+ * !!!
+ * The parameter new_next gets set to the page following the page we
+ * are removing. If there is no following page, then new_next gets
+ * set to NULL.
+ */
+ if (np != NULL) {
+ if (add_rem == DB_ADD_PAGE)
+ np->prev_pgno = pagep->pgno;
+ else
+ np->prev_pgno = pagep->prev_pgno;
+ if (new_next == NULL)
+ ret = memp_fput(dbp->mpf, np, DB_MPOOL_DIRTY);
+ else {
+ *new_next = np;
+ ret = memp_fset(dbp->mpf, np, DB_MPOOL_DIRTY);
+ }
+ if (ret != 0)
+ goto err;
+ if (needlock)
+ (void)__TLPUT(dbc, npl);
+ } else if (new_next != NULL)
+ *new_next = NULL;
+
+ if (pp != NULL) {
+ pp->next_pgno = pagep->next_pgno;
+ if ((ret = memp_fput(dbp->mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ if (needlock)
+ (void)__TLPUT(dbc, ppl);
+ }
+ return (0);
+
+err: if (np != NULL)
+ (void)memp_fput(dbp->mpf, np, 0);
+ if (needlock && npl.off != LOCK_INVALID)
+ (void)__TLPUT(dbc, npl);
+ if (pp != NULL)
+ (void)memp_fput(dbp->mpf, pp, 0);
+ if (needlock && ppl.off != LOCK_INVALID)
+ (void)__TLPUT(dbc, ppl);
+ return (ret);
+}
diff --git a/bdb/db/db_iface.c b/bdb/db/db_iface.c
new file mode 100644
index 00000000000..3548a2527bb
--- /dev/null
+++ b/bdb/db/db_iface.c
@@ -0,0 +1,687 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_iface.c,v 11.34 2001/01/11 18:19:51 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <errno.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "btree.h"
+
+static int __db_curinval __P((const DB_ENV *));
+static int __db_rdonly __P((const DB_ENV *, const char *));
+static int __dbt_ferr __P((const DB *, const char *, const DBT *, int));
+
+/*
+ * __db_cursorchk --
+ * Common cursor argument checking routine.
+ *
+ * PUBLIC: int __db_cursorchk __P((const DB *, u_int32_t, int));
+ */
+int
+__db_cursorchk(dbp, flags, isrdonly)
+ const DB *dbp;
+ u_int32_t flags;
+ int isrdonly;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ case DB_WRITECURSOR:
+ if (isrdonly)
+ return (__db_rdonly(dbp->dbenv, "DB->cursor"));
+ if (!CDB_LOCKING(dbp->dbenv))
+ return (__db_ferr(dbp->dbenv, "DB->cursor", 0));
+ break;
+ case DB_WRITELOCK:
+ if (isrdonly)
+ return (__db_rdonly(dbp->dbenv, "DB->cursor"));
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->cursor", 0));
+ }
+
+ return (0);
+}
+
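+/*
+ * A minimal sketch, illustrative only: the DB_WRITECURSOR case above
+ * applies to Concurrent Data Store (CDB) environments, where a cursor
+ * that will be used for writing is created with, for example:
+ *
+ *	DBC *dbc;
+ *
+ *	dbp->cursor(dbp, NULL, &dbc, DB_WRITECURSOR);
+ *
+ * In a non-CDB environment the flag is rejected here, and on a read-only
+ * database it fails with the __db_rdonly error.
+ */
+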
+/*
+ * __db_ccountchk --
+ * Common cursor count argument checking routine.
+ *
+ * PUBLIC: int __db_ccountchk __P((const DB *, u_int32_t, int));
+ */
+int
+__db_ccountchk(dbp, flags, isvalid)
+ const DB *dbp;
+ u_int32_t flags;
+ int isvalid;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DBcursor->c_count", 0));
+ }
+
+ /*
+ * The cursor must be initialized, return EINVAL for an invalid cursor,
+ * otherwise 0.
+ */
+ return (isvalid ? 0 : __db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_cdelchk --
+ * Common cursor delete argument checking routine.
+ *
+ * PUBLIC: int __db_cdelchk __P((const DB *, u_int32_t, int, int));
+ */
+int
+__db_cdelchk(dbp, flags, isrdonly, isvalid)
+ const DB *dbp;
+ u_int32_t flags;
+ int isrdonly, isvalid;
+{
+ /* Check for changes to a read-only tree. */
+ if (isrdonly)
+ return (__db_rdonly(dbp->dbenv, "c_del"));
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DBcursor->c_del", 0));
+ }
+
+ /*
+ * The cursor must be initialized, return EINVAL for an invalid cursor,
+ * otherwise 0.
+ */
+ return (isvalid ? 0 : __db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_cgetchk --
+ * Common cursor get argument checking routine.
+ *
+ * PUBLIC: int __db_cgetchk __P((const DB *, DBT *, DBT *, u_int32_t, int));
+ */
+int
+__db_cgetchk(dbp, key, data, flags, isvalid)
+ const DB *dbp;
+ DBT *key, *data;
+ u_int32_t flags;
+ int isvalid;
+{
+ int ret;
+
+ /*
+ * Check for read-modify-write validity. DB_RMW doesn't make sense
+ * with CDB cursors since if you're going to write the cursor, you
+ * had to create it with DB_WRITECURSOR. Regardless, we check for
+ * LOCKING_ON and not STD_LOCKING, as we don't want to disallow it.
+ * If this changes, confirm that DB does not itself set the DB_RMW
+ * flag in a path where CDB may have been configured.
+ */
+ if (LF_ISSET(DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv)) {
+ __db_err(dbp->dbenv,
+ "the DB_RMW flag requires locking");
+ return (EINVAL);
+ }
+ LF_CLR(DB_RMW);
+ }
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case DB_CONSUME:
+ case DB_CONSUME_WAIT:
+ if (dbp->type != DB_QUEUE)
+ goto err;
+ break;
+ case DB_CURRENT:
+ case DB_FIRST:
+ case DB_GET_BOTH:
+ case DB_LAST:
+ case DB_NEXT:
+ case DB_NEXT_DUP:
+ case DB_NEXT_NODUP:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ case DB_SET:
+ case DB_SET_RANGE:
+ break;
+ case DB_GET_BOTHC:
+ if (dbp->type == DB_QUEUE)
+ goto err;
+ break;
+ case DB_GET_RECNO:
+ if (!F_ISSET(dbp, DB_BT_RECNUM))
+ goto err;
+ break;
+ case DB_SET_RECNO:
+ if (!F_ISSET(dbp, DB_BT_RECNUM))
+ goto err;
+ break;
+ default:
+err: return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0));
+ }
+
+ /* Check for invalid key/data flags. */
+ if ((ret = __dbt_ferr(dbp, "key", key, 0)) != 0)
+ return (ret);
+ if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+ return (ret);
+
+ /*
+ * The cursor must be initialized for DB_CURRENT or DB_NEXT_DUP,
+ * return EINVAL for an invalid cursor, otherwise 0.
+ */
+ if (isvalid || (flags != DB_CURRENT && flags != DB_NEXT_DUP))
+ return (0);
+
+ return (__db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_cputchk --
+ * Common cursor put argument checking routine.
+ *
+ * PUBLIC: int __db_cputchk __P((const DB *,
+ * PUBLIC: const DBT *, DBT *, u_int32_t, int, int));
+ */
+int
+__db_cputchk(dbp, key, data, flags, isrdonly, isvalid)
+ const DB *dbp;
+ const DBT *key;
+ DBT *data;
+ u_int32_t flags;
+ int isrdonly, isvalid;
+{
+ int key_flags, ret;
+
+ key_flags = 0;
+
+ /* Check for changes to a read-only tree. */
+ if (isrdonly)
+ return (__db_rdonly(dbp->dbenv, "c_put"));
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case DB_AFTER:
+ case DB_BEFORE:
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_HASH: /* Only with unsorted duplicates. */
+ if (!F_ISSET(dbp, DB_AM_DUP))
+ goto err;
+ if (dbp->dup_compare != NULL)
+ goto err;
+ break;
+ case DB_QUEUE: /* Not permitted. */
+ goto err;
+ case DB_RECNO: /* Only with mutable record numbers. */
+ if (!F_ISSET(dbp, DB_RE_RENUMBER))
+ goto err;
+ key_flags = 1;
+ break;
+ default:
+ goto err;
+ }
+ break;
+ case DB_CURRENT:
+ /*
+ * If there is a comparison function, doing a DB_CURRENT
+ * must not change the part of the data item that is used
+ * for the comparison.
+ */
+ break;
+ case DB_NODUPDATA:
+ if (!F_ISSET(dbp, DB_AM_DUPSORT))
+ goto err;
+ /* FALLTHROUGH */
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ if (dbp->type == DB_QUEUE || dbp->type == DB_RECNO)
+ goto err;
+ key_flags = 1;
+ break;
+ default:
+err: return (__db_ferr(dbp->dbenv, "DBcursor->c_put", 0));
+ }
+
+ /* Check for invalid key/data flags. */
+ if (key_flags && (ret = __dbt_ferr(dbp, "key", key, 0)) != 0)
+ return (ret);
+ if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+ return (ret);
+
+ /*
+ * The cursor must be initialized for anything other than DB_KEYFIRST
+ * and DB_KEYLAST, return EINVAL for an invalid cursor, otherwise 0.
+ */
+ if (isvalid || flags == DB_KEYFIRST ||
+ flags == DB_KEYLAST || flags == DB_NODUPDATA)
+ return (0);
+
+ return (__db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_closechk --
+ * DB->close flag check.
+ *
+ * PUBLIC: int __db_closechk __P((const DB *, u_int32_t));
+ */
+int
+__db_closechk(dbp, flags)
+ const DB *dbp;
+ u_int32_t flags;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ case DB_NOSYNC:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->close", 0));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_delchk --
+ * Common delete argument checking routine.
+ *
+ * PUBLIC: int __db_delchk __P((const DB *, DBT *, u_int32_t, int));
+ */
+int
+__db_delchk(dbp, key, flags, isrdonly)
+ const DB *dbp;
+ DBT *key;
+ u_int32_t flags;
+ int isrdonly;
+{
+ COMPQUIET(key, NULL);
+
+ /* Check for changes to a read-only tree. */
+ if (isrdonly)
+ return (__db_rdonly(dbp->dbenv, "delete"));
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->del", 0));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_getchk --
+ * Common get argument checking routine.
+ *
+ * PUBLIC: int __db_getchk __P((const DB *, const DBT *, DBT *, u_int32_t));
+ */
+int
+__db_getchk(dbp, key, data, flags)
+ const DB *dbp;
+ const DBT *key;
+ DBT *data;
+ u_int32_t flags;
+{
+ int ret;
+
+ /*
+ * Check for read-modify-write validity. DB_RMW doesn't make sense
+ * with CDB cursors since if you're going to write the cursor, you
+ * had to create it with DB_WRITECURSOR. Regardless, we check for
+ * LOCKING_ON and not STD_LOCKING, as we don't want to disallow it.
+ * If this changes, confirm that DB does not itself set the DB_RMW
+ * flag in a path where CDB may have been configured.
+ */
+ if (LF_ISSET(DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv)) {
+ __db_err(dbp->dbenv,
+ "the DB_RMW flag requires locking");
+ return (EINVAL);
+ }
+ LF_CLR(DB_RMW);
+ }
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ case DB_GET_BOTH:
+ break;
+ case DB_SET_RECNO:
+ if (!F_ISSET(dbp, DB_BT_RECNUM))
+ goto err;
+ break;
+ case DB_CONSUME:
+ case DB_CONSUME_WAIT:
+ if (dbp->type == DB_QUEUE)
+ break;
+ /* Fall through */
+ default:
+err: return (__db_ferr(dbp->dbenv, "DB->get", 0));
+ }
+
+ /* Check for invalid key/data flags. */
+ if ((ret = __dbt_ferr(dbp, "key", key, flags == DB_SET_RECNO)) != 0)
+ return (ret);
+ if ((ret = __dbt_ferr(dbp, "data", data, 1)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __db_joinchk --
+ * Common join argument checking routine.
+ *
+ * PUBLIC: int __db_joinchk __P((const DB *, DBC * const *, u_int32_t));
+ */
+int
+__db_joinchk(dbp, curslist, flags)
+ const DB *dbp;
+ DBC * const *curslist;
+ u_int32_t flags;
+{
+ DB_TXN *txn;
+ int i;
+
+ switch (flags) {
+ case 0:
+ case DB_JOIN_NOSORT:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->join", 0));
+ }
+
+ if (curslist == NULL || curslist[0] == NULL) {
+ __db_err(dbp->dbenv,
+ "At least one secondary cursor must be specified to DB->join");
+ return (EINVAL);
+ }
+
+ txn = curslist[0]->txn;
+ for (i = 1; curslist[i] != NULL; i++)
+ if (curslist[i]->txn != txn) {
+ __db_err(dbp->dbenv,
+ "All secondary cursors must share the same transaction");
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
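+/*
+ * An illustrative sketch, not part of the library source: these checks
+ * guard the public DB->join interface. A caller might use it roughly as
+ * follows (Berkeley DB 3.x API; color_curs and size_curs are hypothetical
+ * cursors already positioned in secondary indices; no error handling):
+ *
+ *	DBC *curslist[3], *join_curs;
+ *	DBT key, data;
+ *
+ *	curslist[0] = color_curs;
+ *	curslist[1] = size_curs;
+ *	curslist[2] = NULL;		the array must be NULL-terminated
+ *
+ *	primary->join(primary, curslist, &join_curs, 0);
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	while (join_curs->c_get(join_curs, &key, &data, 0) == 0)
+ *		... each key/data pair satisfies every secondary cursor ...
+ *	join_curs->c_close(join_curs);
+ */
+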
+/*
+ * __db_joingetchk --
+ * Common join_get argument checking routine.
+ *
+ * PUBLIC: int __db_joingetchk __P((const DB *, DBT *, u_int32_t));
+ */
+int
+__db_joingetchk(dbp, key, flags)
+ const DB *dbp;
+ DBT *key;
+ u_int32_t flags;
+{
+ if (LF_ISSET(DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv)) {
+ __db_err(dbp->dbenv,
+ "the DB_RMW flag requires locking");
+ return (EINVAL);
+ }
+ LF_CLR(DB_RMW);
+ }
+
+ switch (flags) {
+ case 0:
+ case DB_JOIN_ITEM:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0));
+ }
+
+ /*
+	 * A partial get of the key of a join cursor doesn't make much sense;
+ * the entire key is necessary to query the primary database
+ * and find the datum, and so regardless of the size of the key
+ * it would not be a performance improvement. Since it would require
+ * special handling, we simply disallow it.
+ *
+ * A partial get of the data, however, potentially makes sense (if
+ * all possible data are a predictable large structure, for instance)
+ * and causes us no headaches, so we permit it.
+ */
+ if (F_ISSET(key, DB_DBT_PARTIAL)) {
+ __db_err(dbp->dbenv,
+ "DB_DBT_PARTIAL may not be set on key during join_get");
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * __db_putchk --
+ * Common put argument checking routine.
+ *
+ * PUBLIC: int __db_putchk
+ * PUBLIC: __P((const DB *, DBT *, const DBT *, u_int32_t, int, int));
+ */
+int
+__db_putchk(dbp, key, data, flags, isrdonly, isdup)
+ const DB *dbp;
+ DBT *key;
+ const DBT *data;
+ u_int32_t flags;
+ int isrdonly, isdup;
+{
+ int ret;
+
+ /* Check for changes to a read-only tree. */
+ if (isrdonly)
+ return (__db_rdonly(dbp->dbenv, "put"));
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ case DB_NOOVERWRITE:
+ break;
+ case DB_APPEND:
+ if (dbp->type != DB_RECNO && dbp->type != DB_QUEUE)
+ goto err;
+ break;
+ case DB_NODUPDATA:
+ if (F_ISSET(dbp, DB_AM_DUPSORT))
+ break;
+ /* FALLTHROUGH */
+ default:
+err: return (__db_ferr(dbp->dbenv, "DB->put", 0));
+ }
+
+ /* Check for invalid key/data flags. */
+ if ((ret = __dbt_ferr(dbp, "key", key, 0)) != 0)
+ return (ret);
+ if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+ return (ret);
+
+ /* Check for partial puts in the presence of duplicates. */
+ if (isdup && F_ISSET(data, DB_DBT_PARTIAL)) {
+ __db_err(dbp->dbenv,
+"a partial put in the presence of duplicates requires a cursor operation");
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
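+/*
+ * A brief sketch, illustrative only: these checks back the public DB->put
+ * interface (Berkeley DB 3.x API). With key and data DBTs already
+ * initialized, a caller might write:
+ *
+ *	ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE);
+ *	if (ret == DB_KEYEXIST)
+ *		... the key was already present, nothing was stored ...
+ *
+ * As enforced above, DB_APPEND is accepted only for DB_RECNO and DB_QUEUE
+ * databases, and DB_NODUPDATA only when sorted duplicates are configured.
+ */
+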
+/*
+ * __db_removechk --
+ * DB->remove flag check.
+ *
+ * PUBLIC: int __db_removechk __P((const DB *, u_int32_t));
+ */
+int
+__db_removechk(dbp, flags)
+ const DB *dbp;
+ u_int32_t flags;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->remove", 0));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_statchk --
+ * Common stat argument checking routine.
+ *
+ * PUBLIC: int __db_statchk __P((const DB *, u_int32_t));
+ */
+int
+__db_statchk(dbp, flags)
+ const DB *dbp;
+ u_int32_t flags;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ case DB_CACHED_COUNTS:
+ break;
+ case DB_RECORDCOUNT:
+ if (dbp->type == DB_RECNO)
+ break;
+ if (dbp->type == DB_BTREE && F_ISSET(dbp, DB_BT_RECNUM))
+ break;
+ goto err;
+ default:
+err: return (__db_ferr(dbp->dbenv, "DB->stat", 0));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_syncchk --
+ * Common sync argument checking routine.
+ *
+ * PUBLIC: int __db_syncchk __P((const DB *, u_int32_t));
+ */
+int
+__db_syncchk(dbp, flags)
+ const DB *dbp;
+ u_int32_t flags;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->sync", 0));
+ }
+
+ return (0);
+}
+
+/*
+ * __dbt_ferr --
+ * Check a DBT for flag errors.
+ */
+static int
+__dbt_ferr(dbp, name, dbt, check_thread)
+ const DB *dbp;
+ const char *name;
+ const DBT *dbt;
+ int check_thread;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Check for invalid DBT flags. We allow any of the flags to be
+ * specified to any DB or DBcursor call so that applications can
+ * set DB_DBT_MALLOC when retrieving a data item from a secondary
+ * database and then specify that same DBT as a key to a primary
+ * database, without having to clear flags.
+ */
+ if ((ret = __db_fchk(dbenv, name, dbt->flags,
+ DB_DBT_MALLOC | DB_DBT_DUPOK |
+ DB_DBT_REALLOC | DB_DBT_USERMEM | DB_DBT_PARTIAL)) != 0)
+ return (ret);
+ switch (F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM)) {
+ case 0:
+ case DB_DBT_MALLOC:
+ case DB_DBT_REALLOC:
+ case DB_DBT_USERMEM:
+ break;
+ default:
+ return (__db_ferr(dbenv, name, 1));
+ }
+
+ if (check_thread && DB_IS_THREADED(dbp) &&
+ !F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM)) {
+ __db_err(dbenv,
+ "DB_THREAD mandates memory allocation flag on DBT %s",
+ name);
+ return (EINVAL);
+ }
+ return (0);
+}
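
The switch above accepts a DBT only when zero or one of the three memory-management flags is set. A minimal standalone sketch of the same "at most one of these bits" check, using invented flag names rather than the DB_DBT_* constants:

    #include <stdio.h>

    #define MY_MALLOC   0x01    /* invented stand-ins for the DB_DBT_* flags */
    #define MY_REALLOC  0x02
    #define MY_USERMEM  0x04

    /* Return 0 if zero or one of the memory flags is set, nonzero otherwise. */
    static int
    check_mem_flags(unsigned int flags)
    {
        unsigned int mem = flags & (MY_MALLOC | MY_REALLOC | MY_USERMEM);

        /* More than one bit set means the value is neither 0 nor a power of two. */
        return (mem & (mem - 1)) != 0;
    }

    int
    main(void)
    {
        printf("%d\n", check_mem_flags(MY_MALLOC));              /* 0: legal */
        printf("%d\n", check_mem_flags(MY_MALLOC | MY_USERMEM)); /* 1: conflicting */
        return 0;
    }

The switch in __dbt_ferr instead enumerates the legal single-bit values; for single-bit flags the two tests are equivalent.
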
+
+/*
+ * __db_rdonly --
+ * Common readonly message.
+ */
+static int
+__db_rdonly(dbenv, name)
+ const DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv, "%s: attempt to modify a read-only tree", name);
+ return (EACCES);
+}
+
+/*
+ * __db_curinval
+ * Report that a cursor is in an invalid state.
+ */
+static int
+__db_curinval(dbenv)
+ const DB_ENV *dbenv;
+{
+ __db_err(dbenv,
+ "Cursor position must be set before performing this operation");
+ return (EINVAL);
+}
diff --git a/bdb/db/db_join.c b/bdb/db/db_join.c
new file mode 100644
index 00000000000..881dedde0fc
--- /dev/null
+++ b/bdb/db/db_join.c
@@ -0,0 +1,730 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_join.c,v 11.31 2000/12/20 22:41:54 krinsky Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_join.h"
+#include "db_am.h"
+#include "btree.h"
+
+static int __db_join_close __P((DBC *));
+static int __db_join_cmp __P((const void *, const void *));
+static int __db_join_del __P((DBC *, u_int32_t));
+static int __db_join_get __P((DBC *, DBT *, DBT *, u_int32_t));
+static int __db_join_getnext __P((DBC *, DBT *, DBT *, u_int32_t));
+static int __db_join_put __P((DBC *, DBT *, DBT *, u_int32_t));
+
+/*
+ * Check to see if the Nth secondary cursor of join cursor jc is pointing
+ * to a sorted duplicate set.
+ */
+#define SORTED_SET(jc, n) ((jc)->j_curslist[(n)]->dbp->dup_compare != NULL)
+
+/*
+ * This is the duplicate-assisted join functionality. Right now we're
+ * going to write it such that we return one item at a time, although
+ * I think we may need to optimize it to return them all at once.
+ * It should be easier to get it working this way, and I believe that
+ * changing it should be fairly straightforward.
+ *
+ * We optimize the join by sorting cursors from smallest to largest
+ * cardinality. In most cases, this is indeed optimal. However, if
+ * a cursor with large cardinality has very little data in common with the
+ * first cursor, it is possible that the join will be made faster by
+ * putting it earlier in the cursor list. Since we have no way to detect
+ * cases like this, we simply provide a flag, DB_JOIN_NOSORT, which retains
+ * the sort order specified by the caller, who may know more about the
+ * structure of the data.
+ *
+ * The first cursor moves sequentially through the duplicate set while
+ * the others search explicitly for the duplicate in question.
+ *
+ */
+
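
The comment above describes the core of the strategy: walk the smallest duplicate set sequentially and probe the remaining sets for each candidate. A minimal sketch of that idea over plain sorted integer arrays, with none of the cursor machinery; the array contents are made up for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    static int
    cmp_int(const void *a, const void *b)
    {
        int x = *(const int *)a, y = *(const int *)b;

        return (x > y) - (x < y);
    }

    /* Return 1 if target occurs in the sorted array a[0..n-1]. */
    static int
    contains(const int *a, size_t n, int target)
    {
        return bsearch(&target, a, n, sizeof(int), cmp_int) != NULL;
    }

    /*
     * Walk the smallest set sequentially and probe every other set for each
     * candidate; a candidate that appears in all sets is part of the join.
     */
    static void
    join(const int *small, size_t nsmall,
        const int *const *others, const size_t *nothers, size_t nsets)
    {
        size_t i, j;

        for (i = 0; i < nsmall; i++) {
            for (j = 0; j < nsets; j++)
                if (!contains(others[j], nothers[j], small[i]))
                    break;
            if (j == nsets)
                printf("match: %d\n", small[i]);
        }
    }

    int
    main(void)
    {
        int a[] = { 2, 5, 9 };                  /* smallest cardinality first */
        int b[] = { 1, 2, 3, 5, 8, 9, 13 };
        int c[] = { 2, 4, 5, 6, 9, 10, 11, 12 };
        const int *others[] = { b, c };
        size_t nothers[] = { sizeof(b) / sizeof(b[0]), sizeof(c) / sizeof(c[0]) };

        join(a, sizeof(a) / sizeof(a[0]), others, nothers, 2);
        return 0;
    }

Putting the smallest set in the outer loop minimizes the number of probes, which is why the cursors are sorted by cardinality unless DB_JOIN_NOSORT is given.
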
+/*
+ * __db_join --
+ * This is the interface to the duplicate-assisted join functionality.
+ * In the same way that cursors mark a position in a database, a cursor
+ * can mark a position in a join. While most cursors are created by the
+ * cursor method of a DB, join cursors are created through an explicit
+ * call to DB->join.
+ *
+ * The curslist is an array of existing, initialized cursors and primary
+ * is the DB of the primary file. The data item that joins all the
+ * cursors in the curslist is used as the key into the primary and that
+ * key and data are returned. When no more items are left in the join
+ * set, the c_next operation off the join cursor will return DB_NOTFOUND.
+ *
+ * PUBLIC: int __db_join __P((DB *, DBC **, DBC **, u_int32_t));
+ */
+int
+__db_join(primary, curslist, dbcp, flags)
+ DB *primary;
+ DBC **curslist, **dbcp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ JOIN_CURSOR *jc;
+ int ret;
+ u_int32_t i, ncurs, nslots;
+
+ COMPQUIET(nslots, 0);
+
+ PANIC_CHECK(primary->dbenv);
+
+ if ((ret = __db_joinchk(primary, curslist, flags)) != 0)
+ return (ret);
+
+ dbc = NULL;
+ jc = NULL;
+ dbenv = primary->dbenv;
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DBC), &dbc)) != 0)
+ goto err;
+
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(JOIN_CURSOR), &jc)) != 0)
+ goto err;
+
+ if ((ret = __os_malloc(dbenv, 256, NULL, &jc->j_key.data)) != 0)
+ goto err;
+ jc->j_key.ulen = 256;
+ F_SET(&jc->j_key, DB_DBT_USERMEM);
+
+ for (jc->j_curslist = curslist;
+ *jc->j_curslist != NULL; jc->j_curslist++)
+ ;
+
+ /*
+ * The number of cursor slots we allocate is one greater than
+ * the number of cursors involved in the join, because the
+ * list is NULL-terminated.
+ */
+ ncurs = jc->j_curslist - curslist;
+ nslots = ncurs + 1;
+
+ /*
+ * !!! -- A note on the various lists hanging off jc.
+ *
+ * j_curslist is the initial NULL-terminated list of cursors passed
+ * into __db_join. The original cursors are not modified; pristine
+ * copies are required because, in databases with unsorted dups, we
+ * must reset all of the secondary cursors after the first each
+ * time the first one is incremented, or else we will lose data
+ * which happen to be sorted differently in two different cursors.
+ *
+ * j_workcurs is where we put those copies that we're planning to
+ * work with. They're lazily c_dup'ed from j_curslist as we need
+ * them, and closed when the join cursor is closed or when we need
+ * to reset them to their original values (in which case we just
+ * c_dup afresh).
+ *
+ * j_fdupcurs is an array of cursors which point to the first
+ * duplicate in the duplicate set that contains the data value
+ * we're currently interested in. We need this to make
+ * __db_join_get correctly return duplicate duplicates; i.e., if a
+ * given data value occurs twice in the set belonging to cursor #2,
+ * and thrice in the set belonging to cursor #3, and once in all
+ * the other cursors, successive calls to __db_join_get need to
+ * return that data item six times. To make this happen, each time
+ * cursor N is allowed to advance to a new datum, all cursors M
+ * such that M > N have to be reset to the first duplicate with
+ * that datum, so __db_join_get will return all the dup-dups again.
+ * We could just reset them to the original cursor from j_curslist,
+ * but that would be a bit slower in the unsorted case and a LOT
+ * slower in the sorted one.
+ *
+ * j_exhausted is a list of boolean values which represent
+ * whether or not their corresponding cursors are "exhausted",
+ * i.e. whether the datum under the corresponding cursor has
+ * been found not to exist in any unreturned combinations of
+ * later secondary cursors, in which case they are ready to be
+ * incremented.
+ */
+
+ /* We don't want to free regions whose callocs have failed. */
+ jc->j_curslist = NULL;
+ jc->j_workcurs = NULL;
+ jc->j_fdupcurs = NULL;
+ jc->j_exhausted = NULL;
+
+ if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+ &jc->j_curslist)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+ &jc->j_workcurs)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+ &jc->j_fdupcurs)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, nslots, sizeof(u_int8_t),
+ &jc->j_exhausted)) != 0)
+ goto err;
+ for (i = 0; curslist[i] != NULL; i++) {
+ jc->j_curslist[i] = curslist[i];
+ jc->j_workcurs[i] = NULL;
+ jc->j_fdupcurs[i] = NULL;
+ jc->j_exhausted[i] = 0;
+ }
+ jc->j_ncurs = ncurs;
+
+ /*
+ * If DB_JOIN_NOSORT is not set, optimize secondary cursors by
+ * sorting in order of increasing cardinality.
+ */
+ if (!LF_ISSET(DB_JOIN_NOSORT))
+ qsort(jc->j_curslist, ncurs, sizeof(DBC *), __db_join_cmp);
+
+ /*
+ * We never need to reset the 0th cursor, so there's no
+ * solid reason to use workcurs[0] rather than curslist[0] in
+ * join_get. Nonetheless, it feels cleaner to do it for symmetry,
+ * and this is the most logical place to copy it.
+ *
+ * !!!
+ * There's no need to close the new cursor if we goto err only
+ * because this is the last thing that can fail. Modifier of this
+ * function beware!
+ */
+ if ((ret = jc->j_curslist[0]->c_dup(jc->j_curslist[0], jc->j_workcurs,
+ DB_POSITIONI)) != 0)
+ goto err;
+
+ dbc->c_close = __db_join_close;
+ dbc->c_del = __db_join_del;
+ dbc->c_get = __db_join_get;
+ dbc->c_put = __db_join_put;
+ dbc->internal = (DBC_INTERNAL *) jc;
+ dbc->dbp = primary;
+ jc->j_primary = primary;
+
+ *dbcp = dbc;
+
+ MUTEX_THREAD_LOCK(dbenv, primary->mutexp);
+ TAILQ_INSERT_TAIL(&primary->join_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbenv, primary->mutexp);
+
+ return (0);
+
+err: if (jc != NULL) {
+ if (jc->j_curslist != NULL)
+ __os_free(jc->j_curslist, nslots * sizeof(DBC *));
+ if (jc->j_workcurs != NULL) {
+ if (jc->j_workcurs[0] != NULL)
+ __os_free(jc->j_workcurs[0], sizeof(DBC));
+ __os_free(jc->j_workcurs, nslots * sizeof(DBC *));
+ }
+ if (jc->j_fdupcurs != NULL)
+ __os_free(jc->j_fdupcurs, nslots * sizeof(DBC *));
+ if (jc->j_exhausted != NULL)
+ __os_free(jc->j_exhausted, nslots * sizeof(u_int8_t));
+ __os_free(jc, sizeof(JOIN_CURSOR));
+ }
+ if (dbc != NULL)
+ __os_free(dbc, sizeof(DBC));
+ return (ret);
+}
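
__db_join sizes its work arrays by walking the NULL-terminated curslist and then allocating one extra slot for the terminator. A standalone sketch of the same counting-and-copying idiom with strings (the names here are illustrative only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int
    main(void)
    {
        const char *curslist[] = { "cursor0", "cursor1", "cursor2", NULL };
        const char **copy;
        size_t ncurs, nslots, i;

        /* Count entries up to (but not including) the NULL terminator. */
        for (ncurs = 0; curslist[ncurs] != NULL; ncurs++)
            ;
        nslots = ncurs + 1;             /* keep room for the terminating NULL */

        if ((copy = calloc(nslots, sizeof(*copy))) == NULL)
            return 1;
        memcpy(copy, curslist, ncurs * sizeof(*copy)); /* copy[ncurs] stays NULL */

        for (i = 0; copy[i] != NULL; i++)
            printf("%zu: %s\n", i, copy[i]);
        free(copy);
        return 0;
    }
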
+
+static int
+__db_join_put(dbc, key, data, flags)
+ DBC *dbc;
+ DBT *key;
+ DBT *data;
+ u_int32_t flags;
+{
+ PANIC_CHECK(dbc->dbp->dbenv);
+
+ COMPQUIET(key, NULL);
+ COMPQUIET(data, NULL);
+ COMPQUIET(flags, 0);
+ return (EINVAL);
+}
+
+static int
+__db_join_del(dbc, flags)
+ DBC *dbc;
+ u_int32_t flags;
+{
+ PANIC_CHECK(dbc->dbp->dbenv);
+
+ COMPQUIET(flags, 0);
+ return (EINVAL);
+}
+
+static int
+__db_join_get(dbc, key_arg, data_arg, flags)
+ DBC *dbc;
+ DBT *key_arg, *data_arg;
+ u_int32_t flags;
+{
+ DBT *key_n, key_n_mem;
+ DB *dbp;
+ DBC *cp;
+ JOIN_CURSOR *jc;
+ int ret;
+ u_int32_t i, j, operation;
+
+ dbp = dbc->dbp;
+ jc = (JOIN_CURSOR *)dbc->internal;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ operation = LF_ISSET(DB_OPFLAGS_MASK);
+
+ if ((ret = __db_joingetchk(dbp, key_arg, flags)) != 0)
+ return (ret);
+
+ /*
+ * Since we are fetching the key as a datum in the secondary indices,
+ * we must be careful of caller-specified DB_DBT_* memory
+ * management flags. If necessary, use a stack-allocated DBT;
+ * we'll appropriately copy and/or allocate the data later.
+ */
+ if (F_ISSET(key_arg, DB_DBT_USERMEM) ||
+ F_ISSET(key_arg, DB_DBT_MALLOC)) {
+ /* We just use the default buffer; no need to go malloc. */
+ key_n = &key_n_mem;
+ memset(key_n, 0, sizeof(DBT));
+ } else {
+ /*
+ * Either DB_DBT_REALLOC or the default buffer will work
+ * fine if we have to reuse it, as we do.
+ */
+ key_n = key_arg;
+ }
+
+ /*
+ * If our last attempt to do a get on the primary key failed,
+ * short-circuit the join and try again with the same key.
+ */
+ if (F_ISSET(jc, JOIN_RETRY))
+ goto samekey;
+ F_CLR(jc, JOIN_RETRY);
+
+retry: ret = jc->j_workcurs[0]->c_get(jc->j_workcurs[0],
+ &jc->j_key, key_n, jc->j_exhausted[0] ? DB_NEXT_DUP : DB_CURRENT);
+
+ if (ret == ENOMEM) {
+ jc->j_key.ulen <<= 1;
+ if ((ret = __os_realloc(dbp->dbenv,
+ jc->j_key.ulen, NULL, &jc->j_key.data)) != 0)
+ goto mem_err;
+ goto retry;
+ }
+
+ /*
+ * If ret == DB_NOTFOUND, we're out of elements of the first
+ * secondary cursor. This is how we finally finish the join
+ * if all goes well.
+ */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * If jc->j_exhausted[0] == 1, we've just advanced the first cursor,
+ * and we're going to want to advance all the cursors that point to
+ * the first member of a duplicate duplicate set (j_fdupcurs[1..N]).
+ * Close all the cursors in j_fdupcurs; we'll reopen them the
+ * first time through the upcoming loop.
+ */
+ for (i = 1; i < jc->j_ncurs; i++) {
+ if (jc->j_fdupcurs[i] != NULL &&
+ (ret = jc->j_fdupcurs[i]->c_close(jc->j_fdupcurs[i])) != 0)
+ goto err;
+ jc->j_fdupcurs[i] = NULL;
+ }
+
+ /*
+ * If jc->j_curslist[1] == NULL, we have only one cursor in the join.
+ * Thus, we can safely increment that one cursor on each call
+ * to __db_join_get, and we signal this by setting jc->j_exhausted[0]
+ * right away.
+ *
+ * Otherwise, reset jc->j_exhausted[0] to 0, so that we don't
+ * increment it until we know we're ready to.
+ */
+ if (jc->j_curslist[1] == NULL)
+ jc->j_exhausted[0] = 1;
+ else
+ jc->j_exhausted[0] = 0;
+
+ /* We have the first element; now look for it in the other cursors. */
+ for (i = 1; i < jc->j_ncurs; i++) {
+ DB_ASSERT(jc->j_curslist[i] != NULL);
+ if (jc->j_workcurs[i] == NULL)
+ /* If this is NULL, we need to dup curslist into it. */
+ if ((ret = jc->j_curslist[i]->c_dup(
+ jc->j_curslist[i], jc->j_workcurs + i,
+ DB_POSITIONI)) != 0)
+ goto err;
+
+retry2: cp = jc->j_workcurs[i];
+
+ if ((ret = __db_join_getnext(cp, &jc->j_key, key_n,
+ jc->j_exhausted[i])) == DB_NOTFOUND) {
+ /*
+ * jc->j_workcurs[i] has no more of the datum we're
+ * interested in. Go back one cursor and get
+ * a new dup. We can't just move to a new
+ * element of the outer relation, because that way
+ * we might miss duplicate duplicates in cursor i-1.
+ *
+ * If this takes us back to the first cursor,
+ * -then- we can move to a new element of the outer
+ * relation.
+ */
+ --i;
+ jc->j_exhausted[i] = 1;
+
+ if (i == 0) {
+ for (j = 1; jc->j_workcurs[j] != NULL; j++) {
+ /*
+ * We're moving to a new element of
+ * the first secondary cursor. If
+ * that cursor is sorted, then any
+ * other sorted cursors can be safely
+ * reset to the first duplicate
+ * duplicate in the current set if we
+ * have a pointer to it (we can't just
+ * leave them be, or we'll miss
+ * duplicate duplicates in the outer
+ * relation).
+ *
+ * If the first cursor is unsorted, or
+ * if cursor j is unsorted, we can
+ * make no assumptions about what
+ * we're looking for next or where it
+ * will be, so we reset to the very
+ * beginning (setting workcurs NULL
+ * will achieve this next go-round).
+ *
+ * XXX: This is likely to break
+ * horribly if any two cursors are
+ * both sorted, but have different
+ * specified sort functions. For
+ * now, we dismiss this as pathology
+ * and let strange things happen--we
+ * can't make rope childproof.
+ */
+ if ((ret = jc->j_workcurs[j]->c_close(
+ jc->j_workcurs[j])) != 0)
+ goto err;
+ if (!SORTED_SET(jc, 0) ||
+ !SORTED_SET(jc, j) ||
+ jc->j_fdupcurs[j] == NULL)
+ /*
+ * Unsafe conditions;
+ * reset fully.
+ */
+ jc->j_workcurs[j] = NULL;
+ else
+ /* Partial reset suffices. */
+ if ((ret = jc->j_fdupcurs[j]->c_dup(
+ jc->j_fdupcurs[j],
+ &jc->j_workcurs[j],
+ DB_POSITIONI)) != 0)
+ goto err;
+ jc->j_exhausted[j] = 0;
+ }
+ goto retry;
+ /* NOTREACHED */
+ }
+
+ /*
+ * We're about to advance the cursor and need to
+ * reset all of the workcurs[j] where j>i, so that
+ * we don't miss any duplicate duplicates.
+ */
+ for (j = i + 1;
+ jc->j_workcurs[j] != NULL;
+ j++) {
+ if ((ret = jc->j_workcurs[j]->c_close(
+ jc->j_workcurs[j])) != 0)
+ goto err;
+ jc->j_exhausted[j] = 0;
+ if (jc->j_fdupcurs[j] != NULL &&
+ (ret = jc->j_fdupcurs[j]->c_dup(
+ jc->j_fdupcurs[j], &jc->j_workcurs[j],
+ DB_POSITIONI)) != 0)
+ goto err;
+ else
+ jc->j_workcurs[j] = NULL;
+ }
+ goto retry2;
+ /* NOTREACHED */
+ }
+
+ if (ret == ENOMEM) {
+ jc->j_key.ulen <<= 1;
+ if ((ret = __os_realloc(dbp->dbenv, jc->j_key.ulen,
+ NULL, &jc->j_key.data)) != 0) {
+mem_err: __db_err(dbp->dbenv,
+ "Allocation failed for join key, len = %lu",
+ (u_long)jc->j_key.ulen);
+ goto err;
+ }
+ goto retry2;
+ }
+
+ if (ret != 0)
+ goto err;
+
+ /*
+ * If we made it this far, we've found a matching
+ * datum in cursor i. Mark the current cursor
+ * unexhausted, so we don't miss any duplicate
+ * duplicates the next go-round--unless this is the
+ * very last cursor, in which case there are none to
+ * miss, and we'll need that exhausted flag to finally
+ * get a DB_NOTFOUND and move on to the next datum in
+ * the outermost cursor.
+ */
+ if (i + 1 != jc->j_ncurs)
+ jc->j_exhausted[i] = 0;
+ else
+ jc->j_exhausted[i] = 1;
+
+ /*
+ * If jc->j_fdupcurs[i] is NULL and the ith cursor's dups are
+ * sorted, then we're here for the first time since advancing
+ * cursor 0, and we have a new datum of interest.
+ * jc->j_workcurs[i] points to the beginning of a set of
+ * duplicate duplicates; store this into jc->j_fdupcurs[i].
+ */
+ if (SORTED_SET(jc, i) && jc->j_fdupcurs[i] == NULL && (ret =
+ cp->c_dup(cp, &jc->j_fdupcurs[i], DB_POSITIONI)) != 0)
+ goto err;
+
+ }
+
+err: if (ret != 0)
+ return (ret);
+
+ if (0) {
+samekey: /*
+ * Get the key we tried and failed to return last time;
+ * it should be the current datum of all the secondary cursors.
+ */
+ if ((ret = jc->j_workcurs[0]->c_get(jc->j_workcurs[0],
+ &jc->j_key, key_n, DB_CURRENT)) != 0)
+ return (ret);
+ F_CLR(jc, JOIN_RETRY);
+ }
+
+ /*
+ * ret == 0; we have a key to return.
+ *
+ * If DB_DBT_USERMEM or DB_DBT_MALLOC is set, we need to
+ * copy it back into the dbt we were given for the key;
+ * call __db_retcopy.
+ *
+ * Otherwise, assert that we do not in fact need to copy anything
+ * and simply proceed.
+ */
+ if (F_ISSET(key_arg, DB_DBT_USERMEM) ||
+ F_ISSET(key_arg, DB_DBT_MALLOC)) {
+ /*
+ * We need to copy the key back into our original
+ * datum. Do so.
+ */
+ if ((ret = __db_retcopy(dbp,
+ key_arg, key_n->data, key_n->size, NULL, NULL)) != 0) {
+ /*
+ * The retcopy failed, most commonly because we
+ * have a user buffer for the key which is too small.
+ * Set things up to retry next time, and return.
+ */
+ F_SET(jc, JOIN_RETRY);
+ return (ret);
+ }
+ } else
+ DB_ASSERT(key_n == key_arg);
+
+ /*
+ * If DB_JOIN_ITEM is set, we return it; otherwise we do the
+ * lookup in the primary and then return.
+ *
+ * Note that we use key_arg here; it is safe (and appropriate)
+ * to do so.
+ */
+ if (operation == DB_JOIN_ITEM)
+ return (0);
+
+ if ((ret = jc->j_primary->get(jc->j_primary,
+ jc->j_curslist[0]->txn, key_arg, data_arg, 0)) != 0)
+ /*
+ * The get on the primary failed, most commonly because we're
+ * using a user buffer that's not big enough. Flag our
+ * failure so we can return the same key next time.
+ */
+ F_SET(jc, JOIN_RETRY);
+
+ return (ret);
+}
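
When the underlying c_get reports that j_key does not fit, __db_join_get doubles j_key.ulen and retries. A standalone sketch of that doubling retry pattern, here wrapped around snprintf rather than a cursor get:

    #include <stdio.h>
    #include <stdlib.h>

    /* Format a message, growing the buffer by doubling until it fits. */
    static char *
    format_retry(const char *name, int value)
    {
        size_t ulen = 8;                /* deliberately small initial size */
        char *buf, *nbuf;
        int need;

        if ((buf = malloc(ulen)) == NULL)
            return NULL;
        for (;;) {
            need = snprintf(buf, ulen, "%s = %d", name, value);
            if (need >= 0 && (size_t)need < ulen)
                return buf;             /* it fit */
            ulen <<= 1;                 /* double and try again */
            if ((nbuf = realloc(buf, ulen)) == NULL) {
                free(buf);
                return NULL;
            }
            buf = nbuf;
        }
    }

    int
    main(void)
    {
        char *s = format_retry("join_key_length", 123456);

        if (s != NULL) {
            puts(s);
            free(s);
        }
        return 0;
    }
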
+
+static int
+__db_join_close(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ JOIN_CURSOR *jc;
+ int ret, t_ret;
+ u_int32_t i;
+
+ jc = (JOIN_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+ ret = t_ret = 0;
+
+ /*
+ * Remove from active list of join cursors. Note that this
+ * must happen before any action that can fail and return, or else
+ * __db_close may loop indefinitely.
+ */
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ TAILQ_REMOVE(&dbp->join_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ PANIC_CHECK(dbc->dbp->dbenv);
+
+ /*
+ * Close any open scratch cursors. In each case, there may
+ * not be as many outstanding as there are cursors in
+ * curslist, but we want to close whatever's there.
+ *
+ * If any close fails, there's no reason not to close everything else;
+ * we'll just return the error code of the last one to fail. There's
+ * not much the caller can do anyway, since these cursors only exist
+ * hanging off a db-internal data structure that they shouldn't be
+ * mucking with.
+ */
+ for (i = 0; i < jc->j_ncurs; i++) {
+ if (jc->j_workcurs[i] != NULL && (t_ret =
+ jc->j_workcurs[i]->c_close(jc->j_workcurs[i])) != 0)
+ ret = t_ret;
+ if (jc->j_fdupcurs[i] != NULL && (t_ret =
+ jc->j_fdupcurs[i]->c_close(jc->j_fdupcurs[i])) != 0)
+ ret = t_ret;
+ }
+
+ __os_free(jc->j_exhausted, 0);
+ __os_free(jc->j_curslist, 0);
+ __os_free(jc->j_workcurs, 0);
+ __os_free(jc->j_fdupcurs, 0);
+ __os_free(jc->j_key.data, jc->j_key.ulen);
+ __os_free(jc, sizeof(JOIN_CURSOR));
+ __os_free(dbc, sizeof(DBC));
+
+ return (ret);
+}
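
The close loop above keeps going after a failure and hands back an error code at the end. A minimal sketch of that cleanup idiom over stdio streams; note that the ret == 0 guard below keeps the first error, whereas the loop above keeps the last:

    #include <stdio.h>

    /* Close every stream in the list; remember the first close that fails. */
    static int
    close_all(FILE **fps, int n)
    {
        int i, ret, t_ret;

        ret = 0;
        for (i = 0; i < n; i++)
            if (fps[i] != NULL &&
                (t_ret = fclose(fps[i])) != 0 && ret == 0)
                ret = t_ret;            /* keep cleaning up regardless */
        return ret;
    }

    int
    main(void)
    {
        FILE *fps[2];

        fps[0] = tmpfile();
        fps[1] = tmpfile();
        return close_all(fps, 2);
    }
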
+
+/*
+ * __db_join_getnext --
+ * This function replaces the DBC_CONTINUE and DBC_KEYSET
+ * functionality inside the various cursor get routines.
+ *
+ * If exhausted == 0, we're not done with the current datum;
+ * return it if it matches "matching", otherwise search
+ * using DB_GET_BOTHC (which is faster than iteratively doing
+ * DB_NEXT_DUP) forward until we find one that does.
+ *
+ * If exhausted == 1, we are done with the current datum, so just
+ * leap forward to searching NEXT_DUPs.
+ *
+ * If no matching datum exists, returns DB_NOTFOUND, else 0.
+ */
+static int
+__db_join_getnext(dbc, key, data, exhausted)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t exhausted;
+{
+ int ret, cmp;
+ DB *dbp;
+ DBT ldata;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+
+ dbp = dbc->dbp;
+ func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+
+ switch (exhausted) {
+ case 0:
+ memset(&ldata, 0, sizeof(DBT));
+ /* We don't want to step on data->data; malloc. */
+ F_SET(&ldata, DB_DBT_MALLOC);
+ if ((ret = dbc->c_get(dbc, key, &ldata, DB_CURRENT)) != 0)
+ break;
+ cmp = func(dbp, data, &ldata);
+ if (cmp == 0) {
+ /*
+ * We have to return the real data value. Copy
+ * it into data, then free the buffer we malloc'ed
+ * above.
+ */
+ if ((ret = __db_retcopy(dbp, data, ldata.data,
+ ldata.size, &data->data, &data->size)) != 0)
+ return (ret);
+ __os_free(ldata.data, 0);
+ return (0);
+ }
+
+ /*
+ * Didn't match--we want to fall through and search future
+ * dups. We just forget about ldata and free
+ * its buffer--data contains the value we're searching for.
+ */
+ __os_free(ldata.data, 0);
+ /* FALLTHROUGH */
+ case 1:
+ ret = dbc->c_get(dbc, key, data, DB_GET_BOTHC);
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_join_cmp --
+ * Comparison function for sorting DBCs in cardinality order.
+ */
+
+static int
+__db_join_cmp(a, b)
+ const void *a, *b;
+{
+ DBC *dbca, *dbcb;
+ db_recno_t counta, countb;
+
+ /* In case c_count fails, pretend cursors are equal. */
+ counta = countb = 0;
+
+ dbca = *((DBC * const *)a);
+ dbcb = *((DBC * const *)b);
+
+ if (dbca->c_count(dbca, &counta, 0) != 0 ||
+ dbcb->c_count(dbcb, &countb, 0) != 0)
+ return (0);
+
+ return (counta - countb);
+}
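
__db_join_cmp exists only so qsort can order the cursors by record count. A standalone sketch of the same pattern over a small struct; the comparator below avoids subtracting the (unsigned) counts, which is a slightly safer way to get the negative/zero/positive result:

    #include <stdio.h>
    #include <stdlib.h>

    struct curs {
        const char *name;
        unsigned long count;    /* stands in for the c_count result */
    };

    static int
    by_count(const void *a, const void *b)
    {
        const struct curs *ca = a, *cb = b;

        return (ca->count > cb->count) - (ca->count < cb->count);
    }

    int
    main(void)
    {
        struct curs c[] = {
            { "colour=red", 5000 }, { "size=10", 12 }, { "shape=round", 300 }
        };
        size_t i, n = sizeof(c) / sizeof(c[0]);

        qsort(c, n, sizeof(c[0]), by_count);    /* smallest cardinality first */
        for (i = 0; i < n; i++)
            printf("%s (%lu)\n", c[i].name, c[i].count);
        return 0;
    }
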
diff --git a/bdb/db/db_meta.c b/bdb/db/db_meta.c
new file mode 100644
index 00000000000..5b57c369454
--- /dev/null
+++ b/bdb/db/db_meta.c
@@ -0,0 +1,309 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_meta.c,v 11.26 2001/01/16 21:57:19 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "txn.h"
+#include "db_am.h"
+#include "btree.h"
+
+/*
+ * __db_new --
+ * Get a new page, preferably from the freelist.
+ *
+ * PUBLIC: int __db_new __P((DBC *, u_int32_t, PAGE **));
+ */
+int
+__db_new(dbc, type, pagepp)
+ DBC *dbc;
+ u_int32_t type;
+ PAGE **pagepp;
+{
+ DBMETA *meta;
+ DB *dbp;
+ DB_LOCK metalock;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ meta = NULL;
+ h = NULL;
+
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc,
+ LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ if (meta->free == PGNO_INVALID) {
+ if ((ret = memp_fget(dbp->mpf, &pgno, DB_MPOOL_NEW, &h)) != 0)
+ goto err;
+ ZERO_LSN(h->lsn);
+ h->pgno = pgno;
+ } else {
+ pgno = meta->free;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ goto err;
+ meta->free = h->next_pgno;
+ (void)memp_fset(dbp->mpf, (PAGE *)meta, DB_MPOOL_DIRTY);
+ }
+
+ DB_ASSERT(TYPE(h) == P_INVALID);
+
+ if (TYPE(h) != P_INVALID)
+ return (__db_panic(dbp->dbenv, EINVAL));
+
+ /* Log the change. */
+ if (DB_LOGGING(dbc)) {
+ if ((ret = __db_pg_alloc_log(dbp->dbenv,
+ dbc->txn, &LSN(meta), 0, dbp->log_fileid,
+ &LSN(meta), &h->lsn, h->pgno,
+ (u_int32_t)type, meta->free)) != 0)
+ goto err;
+ LSN(h) = LSN(meta);
+ }
+
+ (void)memp_fput(dbp->mpf, (PAGE *)meta, DB_MPOOL_DIRTY);
+ (void)__TLPUT(dbc, metalock);
+
+ P_INIT(h, dbp->pgsize, h->pgno, PGNO_INVALID, PGNO_INVALID, 0, type);
+ *pagepp = h;
+ return (0);
+
+err: if (h != NULL)
+ (void)memp_fput(dbp->mpf, h, 0);
+ if (meta != NULL)
+ (void)memp_fput(dbp->mpf, meta, 0);
+ (void)__TLPUT(dbc, metalock);
+ return (ret);
+}
+
+/*
+ * __db_free --
+ * Add a page to the head of the freelist.
+ *
+ * PUBLIC: int __db_free __P((DBC *, PAGE *));
+ */
+int
+__db_free(dbc, h)
+ DBC *dbc;
+ PAGE *h;
+{
+ DBMETA *meta;
+ DB *dbp;
+ DBT ldbt;
+ DB_LOCK metalock;
+ db_pgno_t pgno;
+ u_int32_t dirty_flag;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+
+ /*
+ * Retrieve the metadata page and insert the page at the head of
+ * the free list. If either the lock get or page get routines
+ * fail, then we need to put the page with which we were called
+ * back because our caller assumes we take care of it.
+ */
+ dirty_flag = 0;
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc,
+ LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, (PAGE **)&meta)) != 0) {
+ (void)__TLPUT(dbc, metalock);
+ goto err;
+ }
+
+ DB_ASSERT(h->pgno != meta->free);
+ /* Log the change. */
+ if (DB_LOGGING(dbc)) {
+ memset(&ldbt, 0, sizeof(ldbt));
+ ldbt.data = h;
+ ldbt.size = P_OVERHEAD;
+ if ((ret = __db_pg_free_log(dbp->dbenv,
+ dbc->txn, &LSN(meta), 0, dbp->log_fileid, h->pgno,
+ &LSN(meta), &ldbt, meta->free)) != 0) {
+ (void)memp_fput(dbp->mpf, (PAGE *)meta, 0);
+ (void)__TLPUT(dbc, metalock);
+ return (ret);
+ }
+ LSN(h) = LSN(meta);
+ }
+
+ P_INIT(h, dbp->pgsize, h->pgno, PGNO_INVALID, meta->free, 0, P_INVALID);
+
+ meta->free = h->pgno;
+
+ /* Discard the metadata page. */
+ if ((t_ret = memp_fput(dbp->mpf,
+ (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the caller's page reference. */
+ dirty_flag = DB_MPOOL_DIRTY;
+err: if ((t_ret = memp_fput(dbp->mpf, h, dirty_flag)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * XXX
+ * We have to unlock the caller's page in the caller!
+ */
+ return (ret);
+}
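
__db_new pops a page off the head of the metadata free list (or extends the file) and __db_free pushes a freed page back onto the head. A minimal in-memory sketch of the same head-of-list discipline, with malloc standing in for file extension; the types are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct page {
        unsigned int pgno;
        struct page *next;      /* plays the role of next_pgno on a free page */
    };

    static struct page *free_head;          /* plays the role of meta->free */
    static unsigned int next_new_pgno = 1;

    /* Prefer the freelist; otherwise "extend the file" by minting a new page. */
    static struct page *
    page_new(void)
    {
        struct page *p;

        if (free_head != NULL) {
            p = free_head;
            free_head = p->next;
        } else {
            if ((p = malloc(sizeof(*p))) == NULL)
                return NULL;
            p->pgno = next_new_pgno++;
        }
        p->next = NULL;
        return p;
    }

    /* Put a page back at the head of the freelist. */
    static void
    page_free(struct page *p)
    {
        p->next = free_head;
        free_head = p;
    }

    int
    main(void)
    {
        struct page *a = page_new(), *b = page_new(), *c;

        if (a == NULL || b == NULL)
            return 1;
        printf("allocated %u and %u\n", a->pgno, b->pgno);
        page_free(a);
        c = page_new();                 /* comes back off the freelist */
        printf("reused %u\n", c->pgno);
        return 0;
    }
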
+
+#ifdef DEBUG
+/*
+ * __db_lprint --
+ * Print out the list of locks currently held by a cursor.
+ *
+ * PUBLIC: int __db_lprint __P((DBC *));
+ */
+int
+__db_lprint(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_LOCKREQ req;
+
+ dbp = dbc->dbp;
+
+ if (LOCKING_ON(dbp->dbenv)) {
+ req.op = DB_LOCK_DUMP;
+ lock_vec(dbp->dbenv, dbc->locker, 0, &req, 1, NULL);
+ }
+ return (0);
+}
+#endif
+
+/*
+ * __db_lget --
+ * The standard lock get call.
+ *
+ * PUBLIC: int __db_lget __P((DBC *,
+ * PUBLIC: int, db_pgno_t, db_lockmode_t, int, DB_LOCK *));
+ */
+int
+__db_lget(dbc, flags, pgno, mode, lkflags, lockp)
+ DBC *dbc;
+ int flags, lkflags;
+ db_pgno_t pgno;
+ db_lockmode_t mode;
+ DB_LOCK *lockp;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_LOCKREQ couple[2], *reqp;
+ int ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ /*
+ * We do not always check if we're configured for locking before
+ * calling __db_lget to acquire the lock.
+ */
+ if (CDB_LOCKING(dbenv)
+ || !LOCKING_ON(dbenv) || F_ISSET(dbc, DBC_COMPENSATE)
+ || (!LF_ISSET(LCK_ROLLBACK) && F_ISSET(dbc, DBC_RECOVER))
+ || (!LF_ISSET(LCK_ALWAYS) && F_ISSET(dbc, DBC_OPD))) {
+ lockp->off = LOCK_INVALID;
+ return (0);
+ }
+
+ dbc->lock.pgno = pgno;
+ if (lkflags & DB_LOCK_RECORD)
+ dbc->lock.type = DB_RECORD_LOCK;
+ else
+ dbc->lock.type = DB_PAGE_LOCK;
+ lkflags &= ~DB_LOCK_RECORD;
+
+ /*
+ * If the transaction enclosing this cursor has DB_LOCK_NOWAIT set,
+ * pass that along to the lock call.
+ */
+ if (DB_NONBLOCK(dbc))
+ lkflags |= DB_LOCK_NOWAIT;
+
+ /*
+ * If the object is not currently locked, acquire the lock and return;
+ * otherwise, lock couple.
+ */
+ if (LF_ISSET(LCK_COUPLE)) {
+ couple[0].op = DB_LOCK_GET;
+ couple[0].obj = &dbc->lock_dbt;
+ couple[0].mode = mode;
+ couple[1].op = DB_LOCK_PUT;
+ couple[1].lock = *lockp;
+
+ ret = lock_vec(dbenv,
+ dbc->locker, lkflags, couple, 2, &reqp);
+ if (ret == 0 || reqp == &couple[1])
+ *lockp = couple[0].lock;
+ } else {
+ ret = lock_get(dbenv,
+ dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp);
+
+ if (ret != 0)
+ lockp->off = LOCK_INVALID;
+ }
+
+ return (ret);
+}
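
The LCK_COUPLE path acquires the lock on the new object before giving up the lock on the old one, so nothing can slip in between. A minimal pthread sketch of the same hand-over-hand ("coupled") discipline while walking a linked list; the list and its per-node mutexes are illustrative, not the Berkeley DB lock manager (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct node {
        int value;
        pthread_mutex_t mtx;
        struct node *next;
    };

    /* Walk the list, always holding the lock on the node being examined. */
    static int
    find(struct node *head, int target)
    {
        struct node *cur, *next;

        pthread_mutex_lock(&head->mtx);
        for (cur = head; cur != NULL; cur = next) {
            if (cur->value == target) {
                pthread_mutex_unlock(&cur->mtx);
                return 1;
            }
            next = cur->next;
            if (next != NULL)
                pthread_mutex_lock(&next->mtx); /* take the new lock first... */
            pthread_mutex_unlock(&cur->mtx);    /* ...then drop the old one */
        }
        return 0;
    }

    int
    main(void)
    {
        static struct node c = { 3, PTHREAD_MUTEX_INITIALIZER, NULL };
        static struct node b = { 2, PTHREAD_MUTEX_INITIALIZER, &c };
        static struct node a = { 1, PTHREAD_MUTEX_INITIALIZER, &b };

        printf("found 3: %d\n", find(&a, 3));
        return 0;
    }
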
diff --git a/bdb/db/db_method.c b/bdb/db/db_method.c
new file mode 100644
index 00000000000..01568a6e144
--- /dev/null
+++ b/bdb/db/db_method.c
@@ -0,0 +1,629 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_method.c,v 11.36 2000/12/21 09:17:04 krinsky Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+#include "xa.h"
+#include "xa_ext.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __db_get_byteswapped __P((DB *));
+static DBTYPE
+ __db_get_type __P((DB *));
+static int __db_init __P((DB *, u_int32_t));
+static int __db_key_range
+ __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+static int __db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+static int __db_set_cachesize __P((DB *, u_int32_t, u_int32_t, int));
+static int __db_set_dup_compare
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+static void __db_set_errcall __P((DB *, void (*)(const char *, char *)));
+static void __db_set_errfile __P((DB *, FILE *));
+static int __db_set_feedback __P((DB *, void (*)(DB *, int, int)));
+static int __db_set_flags __P((DB *, u_int32_t));
+static int __db_set_lorder __P((DB *, int));
+static int __db_set_malloc __P((DB *, void *(*)(size_t)));
+static int __db_set_pagesize __P((DB *, u_int32_t));
+static int __db_set_realloc __P((DB *, void *(*)(void *, size_t)));
+static void __db_set_errpfx __P((DB *, const char *));
+static int __db_set_paniccall __P((DB *, void (*)(DB_ENV *, int)));
+static void __dbh_err __P((DB *, int, const char *, ...));
+static void __dbh_errx __P((DB *, const char *, ...));
+
+/*
+ * db_create --
+ * DB constructor.
+ */
+int
+db_create(dbpp, dbenv, flags)
+ DB **dbpp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ case DB_XA_CREATE:
+ if (dbenv != NULL) {
+ __db_err(dbenv,
+ "XA applications may not specify an environment to db_create");
+ return (EINVAL);
+ }
+
+ /*
+ * If it's an XA database, open it within the XA environment,
+ * taken from the global list of environments. (When the XA
+ * transaction manager called our xa_start() routine the
+ * "current" environment was moved to the start of the list.
+ */
+ dbenv = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+ break;
+ default:
+ return (__db_ferr(dbenv, "db_create", 0));
+ }
+
+ /* Allocate the DB. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(*dbp), &dbp)) != 0)
+ return (ret);
+#ifdef HAVE_RPC
+ if (dbenv != NULL && dbenv->cl_handle != NULL)
+ ret = __dbcl_init(dbp, dbenv, flags);
+ else
+#endif
+ ret = __db_init(dbp, flags);
+ if (ret != 0) {
+ __os_free(dbp, sizeof(*dbp));
+ return (ret);
+ }
+
+ /* If we don't have an environment yet, allocate a local one. */
+ if (dbenv == NULL) {
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ __os_free(dbp, sizeof(*dbp));
+ return (ret);
+ }
+ dbenv->dblocal_ref = 0;
+ F_SET(dbenv, DB_ENV_DBLOCAL);
+ }
+ if (F_ISSET(dbenv, DB_ENV_DBLOCAL))
+ ++dbenv->dblocal_ref;
+
+ dbp->dbenv = dbenv;
+
+ *dbpp = dbp;
+ return (0);
+}
+
+/*
+ * __db_init --
+ * Initialize a DB structure.
+ */
+static int
+__db_init(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret;
+
+ dbp->log_fileid = DB_LOGFILEID_INVALID;
+
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+ TAILQ_INIT(&dbp->join_queue);
+
+ FLD_SET(dbp->am_ok,
+ DB_OK_BTREE | DB_OK_HASH | DB_OK_QUEUE | DB_OK_RECNO);
+
+ dbp->close = __db_close;
+ dbp->cursor = __db_cursor;
+ dbp->del = NULL; /* !!! Must be set by access method. */
+ dbp->err = __dbh_err;
+ dbp->errx = __dbh_errx;
+ dbp->fd = __db_fd;
+ dbp->get = __db_get;
+ dbp->get_byteswapped = __db_get_byteswapped;
+ dbp->get_type = __db_get_type;
+ dbp->join = __db_join;
+ dbp->key_range = __db_key_range;
+ dbp->open = __db_open;
+ dbp->put = __db_put;
+ dbp->remove = __db_remove;
+ dbp->rename = __db_rename;
+ dbp->set_append_recno = __db_set_append_recno;
+ dbp->set_cachesize = __db_set_cachesize;
+ dbp->set_dup_compare = __db_set_dup_compare;
+ dbp->set_errcall = __db_set_errcall;
+ dbp->set_errfile = __db_set_errfile;
+ dbp->set_errpfx = __db_set_errpfx;
+ dbp->set_feedback = __db_set_feedback;
+ dbp->set_flags = __db_set_flags;
+ dbp->set_lorder = __db_set_lorder;
+ dbp->set_malloc = __db_set_malloc;
+ dbp->set_pagesize = __db_set_pagesize;
+ dbp->set_paniccall = __db_set_paniccall;
+ dbp->set_realloc = __db_set_realloc;
+ dbp->stat = NULL; /* !!! Must be set by access method. */
+ dbp->sync = __db_sync;
+ dbp->upgrade = __db_upgrade;
+ dbp->verify = __db_verify;
+ /* Access method specific. */
+ if ((ret = __bam_db_create(dbp)) != 0)
+ return (ret);
+ if ((ret = __ham_db_create(dbp)) != 0)
+ return (ret);
+ if ((ret = __qam_db_create(dbp)) != 0)
+ return (ret);
+
+ /*
+ * XA specific: must be last, as we replace methods set by the
+ * access methods.
+ */
+ if (LF_ISSET(DB_XA_CREATE) && (ret = __db_xa_create(dbp)) != 0)
+ return (ret);
+
+ return (0);
+}
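
db_create and __db_init build a handle by filling in a table of function pointers, which the access methods then override. A minimal sketch of the same constructor-plus-method-table pattern with an invented handle type:

    #include <stdio.h>
    #include <stdlib.h>

    struct handle {
        int (*open)(struct handle *, const char *);
        int (*close)(struct handle *);
    };

    static int
    default_open(struct handle *h, const char *name)
    {
        (void)h;
        printf("open %s\n", name);
        return 0;
    }

    static int
    default_close(struct handle *h)
    {
        free(h);
        return 0;
    }

    /* Constructor: allocate the handle and point every method at a default. */
    static int
    handle_create(struct handle **hp)
    {
        struct handle *h;

        if ((h = calloc(1, sizeof(*h))) == NULL)
            return 1;
        h->open = default_open;
        h->close = default_close;
        *hp = h;
        return 0;
    }

    int
    main(void)
    {
        struct handle *h;

        if (handle_create(&h) != 0)
            return 1;
        h->open(h, "example.db");
        return h->close(h);
    }
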
+
+/*
+ * __dbh_am_chk --
+ * Error if an unreasonable method is called.
+ *
+ * PUBLIC: int __dbh_am_chk __P((DB *, u_int32_t));
+ */
+int
+__dbh_am_chk(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ /*
+ * We start out allowing any access methods to be called, and as the
+ * application calls the methods the options become restricted. The
+ * idea is to quit as soon as an illegal method combination is called.
+ */
+ if ((LF_ISSET(DB_OK_BTREE) && FLD_ISSET(dbp->am_ok, DB_OK_BTREE)) ||
+ (LF_ISSET(DB_OK_HASH) && FLD_ISSET(dbp->am_ok, DB_OK_HASH)) ||
+ (LF_ISSET(DB_OK_QUEUE) && FLD_ISSET(dbp->am_ok, DB_OK_QUEUE)) ||
+ (LF_ISSET(DB_OK_RECNO) && FLD_ISSET(dbp->am_ok, DB_OK_RECNO))) {
+ FLD_CLR(dbp->am_ok, ~flags);
+ return (0);
+ }
+
+ __db_err(dbp->dbenv,
+ "call implies an access method which is inconsistent with previous calls");
+ return (EINVAL);
+}
+
+/*
+ * __dbh_err --
+ * Error message, including the standard error string.
+ */
+static void
+#ifdef __STDC__
+__dbh_err(DB *dbp, int error, const char *fmt, ...)
+#else
+__dbh_err(dbp, error, fmt, va_alist)
+ DB *dbp;
+ int error;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ va_list ap;
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ __db_real_err(dbp->dbenv, error, 1, 1, fmt, ap);
+
+ va_end(ap);
+}
+
+/*
+ * __dbh_errx --
+ * Error message.
+ */
+static void
+#ifdef __STDC__
+__dbh_errx(DB *dbp, const char *fmt, ...)
+#else
+__dbh_errx(dbp, fmt, va_alist)
+ DB *dbp;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ va_list ap;
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ __db_real_err(dbp->dbenv, 0, 0, 1, fmt, ap);
+
+ va_end(ap);
+}
+
+/*
+ * __db_get_byteswapped --
+ * Return if database requires byte swapping.
+ */
+static int
+__db_get_byteswapped(dbp)
+ DB *dbp;
+{
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "get_byteswapped");
+
+ return (F_ISSET(dbp, DB_AM_SWAP) ? 1 : 0);
+}
+
+/*
+ * __db_get_type --
+ * Return type of underlying database.
+ */
+static DBTYPE
+__db_get_type(dbp)
+ DB *dbp;
+{
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "get_type");
+
+ return (dbp->type);
+}
+
+/*
+ * __db_key_range --
+ * Return proportion of keys above and below given key.
+ */
+static int
+__db_key_range(dbp, txn, key, kr, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ DB_KEY_RANGE *kr;
+ u_int32_t flags;
+{
+ COMPQUIET(txn, NULL);
+ COMPQUIET(key, NULL);
+ COMPQUIET(kr, NULL);
+ COMPQUIET(flags, 0);
+
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "key_range");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ return (EINVAL);
+}
+
+/*
+ * __db_set_append_recno --
+ * Set record number append routine.
+ */
+static int
+__db_set_append_recno(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, DBT *, db_recno_t));
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_append_recno");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ dbp->db_append_recno = func;
+
+ return (0);
+}
+
+/*
+ * __db_set_cachesize --
+ * Set underlying cache size.
+ */
+static int
+__db_set_cachesize(dbp, cache_gbytes, cache_bytes, ncache)
+ DB *dbp;
+ u_int32_t cache_gbytes, cache_bytes;
+ int ncache;
+{
+ DB_ILLEGAL_IN_ENV(dbp, "set_cachesize");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_cachesize");
+
+ return (dbp->dbenv->set_cachesize(
+ dbp->dbenv, cache_gbytes, cache_bytes, ncache));
+}
+
+/*
+ * __db_set_dup_compare --
+ * Set duplicate comparison routine.
+ */
+static int
+__db_set_dup_compare(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "dup_compare");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH);
+
+ dbp->dup_compare = func;
+
+ return (0);
+}
+
+static void
+__db_set_errcall(dbp, errcall)
+ DB *dbp;
+ void (*errcall) __P((const char *, char *));
+{
+ dbp->dbenv->set_errcall(dbp->dbenv, errcall);
+}
+
+static void
+__db_set_errfile(dbp, errfile)
+ DB *dbp;
+ FILE *errfile;
+{
+ dbp->dbenv->set_errfile(dbp->dbenv, errfile);
+}
+
+static void
+__db_set_errpfx(dbp, errpfx)
+ DB *dbp;
+ const char *errpfx;
+{
+ dbp->dbenv->set_errpfx(dbp->dbenv, errpfx);
+}
+
+static int
+__db_set_feedback(dbp, feedback)
+ DB *dbp;
+ void (*feedback) __P((DB *, int, int));
+{
+ dbp->db_feedback = feedback;
+ return (0);
+}
+
+static int
+__db_set_flags(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret;
+
+ /*
+ * !!!
+ * The hash access method only takes two flags: DB_DUP and DB_DUPSORT.
+ * The Btree access method uses them for the same purposes, and so we
+ * resolve them there.
+ *
+ * The queue access method takes no flags.
+ */
+ if ((ret = __bam_set_flags(dbp, &flags)) != 0)
+ return (ret);
+ if ((ret = __ram_set_flags(dbp, &flags)) != 0)
+ return (ret);
+
+ return (flags == 0 ? 0 : __db_ferr(dbp->dbenv, "DB->set_flags", 0));
+}
+
+static int
+__db_set_lorder(dbp, db_lorder)
+ DB *dbp;
+ int db_lorder;
+{
+ int ret;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_lorder");
+
+ /* Flag if the specified byte order requires swapping. */
+ switch (ret = __db_byteorder(dbp->dbenv, db_lorder)) {
+ case 0:
+ F_CLR(dbp, DB_AM_SWAP);
+ break;
+ case DB_SWAPBYTES:
+ F_SET(dbp, DB_AM_SWAP);
+ break;
+ default:
+ return (ret);
+ /* NOTREACHED */
+ }
+ return (0);
+}
+
+static int
+__db_set_malloc(dbp, func)
+ DB *dbp;
+ void *(*func) __P((size_t));
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_malloc");
+
+ dbp->db_malloc = func;
+ return (0);
+}
+
+static int
+__db_set_pagesize(dbp, db_pagesize)
+ DB *dbp;
+ u_int32_t db_pagesize;
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_pagesize");
+
+ if (db_pagesize < DB_MIN_PGSIZE) {
+ __db_err(dbp->dbenv, "page sizes may not be smaller than %lu",
+ (u_long)DB_MIN_PGSIZE);
+ return (EINVAL);
+ }
+ if (db_pagesize > DB_MAX_PGSIZE) {
+ __db_err(dbp->dbenv, "page sizes may not be larger than %lu",
+ (u_long)DB_MAX_PGSIZE);
+ return (EINVAL);
+ }
+
+ /*
+ * We don't want anything that's not a power-of-2, as we rely on that
+ * for alignment of various types on the pages.
+ */
+ if ((u_int32_t)1 << __db_log2(db_pagesize) != db_pagesize) {
+ __db_err(dbp->dbenv, "page sizes must be a power-of-2");
+ return (EINVAL);
+ }
+
+ /*
+ * XXX
+ * Should we be checking for a page size that's not a multiple of 512,
+ * so that we never try to write less than a disk sector?
+ */
+ dbp->pgsize = db_pagesize;
+
+ return (0);
+}
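
__db_set_pagesize enforces a lower bound, an upper bound, and a power-of-two shape. A standalone sketch of the same validation using the common x & (x - 1) power-of-two test instead of the __db_log2 round trip; the limits below are placeholders, not necessarily the real DB_MIN_PGSIZE/DB_MAX_PGSIZE values:

    #include <stdio.h>

    #define MIN_PGSIZE      512u            /* placeholder limits */
    #define MAX_PGSIZE      65536u

    /* Return 0 if the page size is acceptable, nonzero otherwise. */
    static int
    check_pagesize(unsigned int pgsize)
    {
        if (pgsize < MIN_PGSIZE || pgsize > MAX_PGSIZE)
            return 1;
        /* A power of two has exactly one bit set. */
        if ((pgsize & (pgsize - 1)) != 0)
            return 1;
        return 0;
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
            check_pagesize(4096),       /* 0: OK */
            check_pagesize(3000),       /* 1: not a power of two */
            check_pagesize(128));       /* 1: too small */
        return 0;
    }
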
+
+static int
+__db_set_realloc(dbp, func)
+ DB *dbp;
+ void *(*func) __P((void *, size_t));
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_realloc");
+
+ dbp->db_realloc = func;
+ return (0);
+}
+
+static int
+__db_set_paniccall(dbp, paniccall)
+ DB *dbp;
+ void (*paniccall) __P((DB_ENV *, int));
+{
+ return (dbp->dbenv->set_paniccall(dbp->dbenv, paniccall));
+}
+
+#ifdef HAVE_RPC
+/*
+ * __dbcl_init --
+ * Initialize a DB structure on the server.
+ *
+ * PUBLIC: #ifdef HAVE_RPC
+ * PUBLIC: int __dbcl_init __P((DB *, DB_ENV *, u_int32_t));
+ * PUBLIC: #endif
+ */
+int
+__dbcl_init(dbp, dbenv, flags)
+ DB *dbp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_create_reply *replyp;
+ __db_create_msg req;
+ int ret;
+
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+ /* !!!
+ * Note that we don't need to initialize the join_queue; it's
+ * not used in RPC clients. See the comment in __dbcl_db_join_ret().
+ */
+
+ dbp->close = __dbcl_db_close;
+ dbp->cursor = __dbcl_db_cursor;
+ dbp->del = __dbcl_db_del;
+ dbp->err = __dbh_err;
+ dbp->errx = __dbh_errx;
+ dbp->fd = __dbcl_db_fd;
+ dbp->get = __dbcl_db_get;
+ dbp->get_byteswapped = __dbcl_db_swapped;
+ dbp->get_type = __db_get_type;
+ dbp->join = __dbcl_db_join;
+ dbp->key_range = __dbcl_db_key_range;
+ dbp->open = __dbcl_db_open;
+ dbp->put = __dbcl_db_put;
+ dbp->remove = __dbcl_db_remove;
+ dbp->rename = __dbcl_db_rename;
+ dbp->set_append_recno = __dbcl_db_set_append_recno;
+ dbp->set_cachesize = __dbcl_db_cachesize;
+ dbp->set_dup_compare = NULL;
+ dbp->set_errcall = __db_set_errcall;
+ dbp->set_errfile = __db_set_errfile;
+ dbp->set_errpfx = __db_set_errpfx;
+ dbp->set_feedback = __dbcl_db_feedback;
+ dbp->set_flags = __dbcl_db_flags;
+ dbp->set_lorder = __dbcl_db_lorder;
+ dbp->set_malloc = __dbcl_db_malloc;
+ dbp->set_pagesize = __dbcl_db_pagesize;
+ dbp->set_paniccall = __dbcl_db_panic;
+ dbp->set_q_extentsize = __dbcl_db_extentsize;
+ dbp->set_realloc = __dbcl_db_realloc;
+ dbp->stat = __dbcl_db_stat;
+ dbp->sync = __dbcl_db_sync;
+ dbp->upgrade = __dbcl_db_upgrade;
+
+ /*
+ * Set all the method specific functions to client funcs as well.
+ */
+ dbp->set_bt_compare = __dbcl_db_bt_compare;
+ dbp->set_bt_maxkey = __dbcl_db_bt_maxkey;
+ dbp->set_bt_minkey = __dbcl_db_bt_minkey;
+ dbp->set_bt_prefix = __dbcl_db_bt_prefix;
+ dbp->set_h_ffactor = __dbcl_db_h_ffactor;
+ dbp->set_h_hash = __dbcl_db_h_hash;
+ dbp->set_h_nelem = __dbcl_db_h_nelem;
+ dbp->set_re_delim = __dbcl_db_re_delim;
+ dbp->set_re_len = __dbcl_db_re_len;
+ dbp->set_re_pad = __dbcl_db_re_pad;
+ dbp->set_re_source = __dbcl_db_re_source;
+/*
+ dbp->set_q_extentsize = __dbcl_db_q_extentsize;
+*/
+
+ cl = (CLIENT *)dbenv->cl_handle;
+ req.flags = flags;
+ req.envpcl_id = dbenv->cl_id;
+
+ /*
+ * CALL THE SERVER
+ */
+ replyp = __db_db_create_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ return (DB_NOSERVER);
+ }
+
+ if ((ret = replyp->status) != 0)
+ return (ret);
+
+ dbp->cl_id = replyp->dbpcl_id;
+ return (0);
+}
+#endif
diff --git a/bdb/db/db_overflow.c b/bdb/db/db_overflow.c
new file mode 100644
index 00000000000..54f0a03aafe
--- /dev/null
+++ b/bdb/db/db_overflow.c
@@ -0,0 +1,681 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_overflow.c,v 11.21 2000/11/30 00:58:32 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "db_verify.h"
+
+/*
+ * Big key/data code.
+ *
+ * Big key and data entries are stored on linked lists of pages. The initial
+ * reference is a structure with the total length of the item and the page
+ * number where it begins. Each entry in the linked list contains a pointer
+ * to the next page of data, and so on.
+ */
+
+/*
+ * __db_goff --
+ * Get an offpage item.
+ *
+ * PUBLIC: int __db_goff __P((DB *, DBT *,
+ * PUBLIC: u_int32_t, db_pgno_t, void **, u_int32_t *));
+ */
+int
+__db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
+ DB *dbp;
+ DBT *dbt;
+ u_int32_t tlen;
+ db_pgno_t pgno;
+ void **bpp;
+ u_int32_t *bpsz;
+{
+ DB_ENV *dbenv;
+ PAGE *h;
+ db_indx_t bytes;
+ u_int32_t curoff, needed, start;
+ u_int8_t *p, *src;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Check if the buffer is big enough; if it is not and we are
+ * allowed to malloc space, then we'll malloc it. If we are
+ * not (DB_DBT_USERMEM), then we'll set the dbt and return
+ * appropriately.
+ */
+ if (F_ISSET(dbt, DB_DBT_PARTIAL)) {
+ start = dbt->doff;
+ needed = dbt->dlen;
+ } else {
+ start = 0;
+ needed = tlen;
+ }
+
+ /* Allocate any necessary memory. */
+ if (F_ISSET(dbt, DB_DBT_USERMEM)) {
+ if (needed > dbt->ulen) {
+ dbt->size = needed;
+ return (ENOMEM);
+ }
+ } else if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ if ((ret = __os_malloc(dbenv,
+ needed, dbp->db_malloc, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
+ if ((ret = __os_realloc(dbenv,
+ needed, dbp->db_realloc, &dbt->data)) != 0)
+ return (ret);
+ } else if (*bpsz == 0 || *bpsz < needed) {
+ if ((ret = __os_realloc(dbenv, needed, NULL, bpp)) != 0)
+ return (ret);
+ *bpsz = needed;
+ dbt->data = *bpp;
+ } else
+ dbt->data = *bpp;
+
+ /*
+ * Step through the linked list of pages, copying the data on each
+ * one into the buffer. Never copy more than the total data length.
+ */
+ dbt->size = needed;
+ for (curoff = 0, p = dbt->data; pgno != PGNO_INVALID && needed > 0;) {
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0) {
+ (void)__db_pgerr(dbp, pgno);
+ return (ret);
+ }
+ /* Check if we need any bytes from this page. */
+ if (curoff + OV_LEN(h) >= start) {
+ src = (u_int8_t *)h + P_OVERHEAD;
+ bytes = OV_LEN(h);
+ if (start > curoff) {
+ src += start - curoff;
+ bytes -= start - curoff;
+ }
+ if (bytes > needed)
+ bytes = needed;
+ memcpy(p, src, bytes);
+ p += bytes;
+ needed -= bytes;
+ }
+ curoff += OV_LEN(h);
+ pgno = h->next_pgno;
+ memp_fput(dbp->mpf, h, 0);
+ }
+ return (0);
+}
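
__db_goff walks the chain of overflow pages, skipping data until it reaches the requested offset and copying no more than the requested length. A standalone sketch of the same offset/length walk over an array of in-memory chunks, with no mpool involved:

    #include <stdio.h>
    #include <string.h>

    /*
     * Copy up to dlen bytes starting at logical offset doff out of a sequence
     * of variable-length chunks. Returns the number of bytes copied.
     */
    static size_t
    chain_read(const char *const *chunks, const size_t *lens, size_t nchunks,
        size_t doff, size_t dlen, char *out)
    {
        size_t curoff = 0, copied = 0, i, skip, take;

        for (i = 0; i < nchunks && copied < dlen; i++) {
            if (curoff + lens[i] > doff) {
                skip = doff > curoff ? doff - curoff : 0;
                take = lens[i] - skip;
                if (take > dlen - copied)
                    take = dlen - copied;
                memcpy(out + copied, chunks[i] + skip, take);
                copied += take;
            }
            curoff += lens[i];
        }
        return copied;
    }

    int
    main(void)
    {
        const char *chunks[] = { "Berkeley ", "DB over", "flow item" };
        size_t lens[] = { 9, 7, 9 };
        char out[64];
        size_t n;

        n = chain_read(chunks, lens, 3, 9, 10, out);    /* 10 bytes at offset 9 */
        printf("%.*s\n", (int)n, out);                  /* prints "DB overflo" */
        return 0;
    }
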
+
+/*
+ * __db_poff --
+ * Put an offpage item.
+ *
+ * PUBLIC: int __db_poff __P((DBC *, const DBT *, db_pgno_t *));
+ */
+int
+__db_poff(dbc, dbt, pgnop)
+ DBC *dbc;
+ const DBT *dbt;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ PAGE *pagep, *lastp;
+ DB_LSN new_lsn, null_lsn;
+ DBT tmp_dbt;
+ db_indx_t pagespace;
+ u_int32_t sz;
+ u_int8_t *p;
+ int ret;
+
+ /*
+ * Allocate pages and copy the key/data item into them. Calculate the
+ * number of bytes we get for pages we fill completely with a single
+ * item.
+ */
+ dbp = dbc->dbp;
+ pagespace = P_MAXSPACE(dbp->pgsize);
+
+ lastp = NULL;
+ for (p = dbt->data,
+ sz = dbt->size; sz > 0; p += pagespace, sz -= pagespace) {
+ /*
+ * Reduce pagespace so we terminate the loop correctly and
+ * don't copy too much data.
+ */
+ if (sz < pagespace)
+ pagespace = sz;
+
+ /*
+ * Allocate and initialize a new page and copy all or part of
+ * the item onto the page. If sz is less than pagespace, we
+ * have a partial record.
+ */
+ if ((ret = __db_new(dbc, P_OVERFLOW, &pagep)) != 0)
+ return (ret);
+ if (DB_LOGGING(dbc)) {
+ tmp_dbt.data = p;
+ tmp_dbt.size = pagespace;
+ ZERO_LSN(null_lsn);
+ if ((ret = __db_big_log(dbp->dbenv, dbc->txn,
+ &new_lsn, 0, DB_ADD_BIG, dbp->log_fileid,
+ PGNO(pagep), lastp ? PGNO(lastp) : PGNO_INVALID,
+ PGNO_INVALID, &tmp_dbt, &LSN(pagep),
+ lastp == NULL ? &null_lsn : &LSN(lastp),
+ &null_lsn)) != 0)
+ return (ret);
+
+ /* Move lsn onto page. */
+ if (lastp)
+ LSN(lastp) = new_lsn;
+ LSN(pagep) = new_lsn;
+ }
+
+ P_INIT(pagep, dbp->pgsize,
+ PGNO(pagep), PGNO_INVALID, PGNO_INVALID, 0, P_OVERFLOW);
+ OV_LEN(pagep) = pagespace;
+ OV_REF(pagep) = 1;
+ memcpy((u_int8_t *)pagep + P_OVERHEAD, p, pagespace);
+
+ /*
+ * If this is the first entry, update the user's info.
+ * Otherwise, update the entry on the last page filled
+ * in and release that page.
+ */
+ if (lastp == NULL)
+ *pgnop = PGNO(pagep);
+ else {
+ lastp->next_pgno = PGNO(pagep);
+ pagep->prev_pgno = PGNO(lastp);
+ (void)memp_fput(dbp->mpf, lastp, DB_MPOOL_DIRTY);
+ }
+ lastp = pagep;
+ }
+ (void)memp_fput(dbp->mpf, lastp, DB_MPOOL_DIRTY);
+ return (0);
+}
+
+/*
+ * __db_ovref --
+ * Increment/decrement the reference count on an overflow page.
+ *
+ * PUBLIC: int __db_ovref __P((DBC *, db_pgno_t, int32_t));
+ */
+int
+__db_ovref(dbc, pgno, adjust)
+ DBC *dbc;
+ db_pgno_t pgno;
+ int32_t adjust;
+{
+ DB *dbp;
+ PAGE *h;
+ int ret;
+
+ dbp = dbc->dbp;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0) {
+ (void)__db_pgerr(dbp, pgno);
+ return (ret);
+ }
+
+ if (DB_LOGGING(dbc))
+ if ((ret = __db_ovref_log(dbp->dbenv, dbc->txn,
+ &LSN(h), 0, dbp->log_fileid, h->pgno, adjust,
+ &LSN(h))) != 0)
+ return (ret);
+ OV_REF(h) += adjust;
+
+ (void)memp_fput(dbp->mpf, h, DB_MPOOL_DIRTY);
+ return (0);
+}
+
+/*
+ * __db_doff --
+ * Delete an offpage chain of overflow pages.
+ *
+ * PUBLIC: int __db_doff __P((DBC *, db_pgno_t));
+ */
+int
+__db_doff(dbc, pgno)
+ DBC *dbc;
+ db_pgno_t pgno;
+{
+ DB *dbp;
+ PAGE *pagep;
+ DB_LSN null_lsn;
+ DBT tmp_dbt;
+ int ret;
+
+ dbp = dbc->dbp;
+ do {
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &pagep)) != 0) {
+ (void)__db_pgerr(dbp, pgno);
+ return (ret);
+ }
+
+ DB_ASSERT(TYPE(pagep) == P_OVERFLOW);
+ /*
+ * If it's referenced by more than one key/data item,
+ * decrement the reference count and return.
+ */
+ if (OV_REF(pagep) > 1) {
+ (void)memp_fput(dbp->mpf, pagep, 0);
+ return (__db_ovref(dbc, pgno, -1));
+ }
+
+ if (DB_LOGGING(dbc)) {
+ tmp_dbt.data = (u_int8_t *)pagep + P_OVERHEAD;
+ tmp_dbt.size = OV_LEN(pagep);
+ ZERO_LSN(null_lsn);
+ if ((ret = __db_big_log(dbp->dbenv, dbc->txn,
+ &LSN(pagep), 0, DB_REM_BIG, dbp->log_fileid,
+ PGNO(pagep), PREV_PGNO(pagep), NEXT_PGNO(pagep),
+ &tmp_dbt, &LSN(pagep), &null_lsn, &null_lsn)) != 0)
+ return (ret);
+ }
+ pgno = pagep->next_pgno;
+ if ((ret = __db_free(dbc, pagep)) != 0)
+ return (ret);
+ } while (pgno != PGNO_INVALID);
+
+ return (0);
+}
+
+/*
+ * __db_moff --
+ * Match on overflow pages.
+ *
+ * Given a starting page number and a key, return <0, 0, >0 to indicate if the
+ * key on the page is less than, equal to or greater than the key specified.
+ * We optimize this by doing a chunk-at-a-time comparison unless the user has
+ * specified a comparison function. In this case, we need to materialize
+ * the entire object and call their comparison routine.
+ *
+ * PUBLIC: int __db_moff __P((DB *, const DBT *, db_pgno_t, u_int32_t,
+ * PUBLIC: int (*)(DB *, const DBT *, const DBT *), int *));
+ */
+int
+__db_moff(dbp, dbt, pgno, tlen, cmpfunc, cmpp)
+ DB *dbp;
+ const DBT *dbt;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ int (*cmpfunc) __P((DB *, const DBT *, const DBT *)), *cmpp;
+{
+ PAGE *pagep;
+ DBT local_dbt;
+ void *buf;
+ u_int32_t bufsize, cmp_bytes, key_left;
+ u_int8_t *p1, *p2;
+ int ret;
+
+ /*
+ * If there is a user-specified comparison function, build a
+ * contiguous copy of the key, and call it.
+ */
+ if (cmpfunc != NULL) {
+ memset(&local_dbt, 0, sizeof(local_dbt));
+ buf = NULL;
+ bufsize = 0;
+
+ if ((ret = __db_goff(dbp,
+ &local_dbt, tlen, pgno, &buf, &bufsize)) != 0)
+ return (ret);
+ /* Pass the key as the first argument */
+ *cmpp = cmpfunc(dbp, dbt, &local_dbt);
+ __os_free(buf, bufsize);
+ return (0);
+ }
+
+ /* While there are both keys to compare. */
+ for (*cmpp = 0, p1 = dbt->data,
+ key_left = dbt->size; key_left > 0 && pgno != PGNO_INVALID;) {
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &pagep)) != 0)
+ return (ret);
+
+ cmp_bytes = OV_LEN(pagep) < key_left ? OV_LEN(pagep) : key_left;
+ tlen -= cmp_bytes;
+ key_left -= cmp_bytes;
+ for (p2 =
+ (u_int8_t *)pagep + P_OVERHEAD; cmp_bytes-- > 0; ++p1, ++p2)
+ if (*p1 != *p2) {
+ *cmpp = (long)*p1 - (long)*p2;
+ break;
+ }
+ pgno = NEXT_PGNO(pagep);
+ if ((ret = memp_fput(dbp->mpf, pagep, 0)) != 0)
+ return (ret);
+ if (*cmpp != 0)
+ return (0);
+ }
+ if (key_left > 0) /* DBT is longer than the page key. */
+ *cmpp = 1;
+ else if (tlen > 0) /* DBT is shorter than the page key. */
+ *cmpp = -1;
+ else
+ *cmpp = 0;
+
+ return (0);
+}
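
As the comment at the top of __db_moff says, the key is compared one page-sized chunk at a time and ties are broken on length. A standalone sketch of the same chunked comparison of a contiguous key against an item stored in pieces:

    #include <stdio.h>
    #include <string.h>

    /* Compare a contiguous key against an item stored as nchunks pieces. */
    static int
    chain_cmp(const char *key, size_t keylen,
        const char *const *chunks, const size_t *lens, size_t nchunks)
    {
        size_t i, n, itemleft = 0;
        int cmp;

        for (i = 0; i < nchunks; i++)
            itemleft += lens[i];        /* total stored length */

        for (i = 0; i < nchunks && keylen > 0; i++) {
            n = lens[i] < keylen ? lens[i] : keylen;
            if ((cmp = memcmp(key, chunks[i], n)) != 0)
                return cmp < 0 ? -1 : 1;
            key += n;
            keylen -= n;
            itemleft -= n;
        }

        /* Equal over the common prefix: the longer value sorts last. */
        if (keylen > 0)
            return 1;                   /* key longer than the stored item */
        if (itemleft > 0)
            return -1;                  /* stored item longer than the key */
        return 0;
    }

    int
    main(void)
    {
        const char *chunks[] = { "over", "flow ", "key" };
        size_t lens[] = { 4, 5, 3 };

        printf("%d\n", chain_cmp("overflow key", 12, chunks, lens, 3));  /*  0 */
        printf("%d\n", chain_cmp("overflow", 8, chunks, lens, 3));       /* -1 */
        printf("%d\n", chain_cmp("overflow keys", 13, chunks, lens, 3)); /*  1 */
        return 0;
    }
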
+
+/*
+ * __db_vrfy_overflow --
+ * Verify overflow page.
+ *
+ * PUBLIC: int __db_vrfy_overflow __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__db_vrfy_overflow(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ pip->refcount = OV_REF(h);
+ if (pip->refcount < 1) {
+ EPRINT((dbp->dbenv,
+ "Overflow page %lu has zero reference count",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ /* Just store for now. */
+ pip->olen = HOFFSET(h);
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_ovfl_structure --
+ * Walk a list of overflow pages, avoiding cycles and marking
+ * pages seen.
+ *
+ * PUBLIC: int __db_vrfy_ovfl_structure
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, u_int32_t));
+ */
+int
+__db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ u_int32_t flags;
+{
+ DB *pgset;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t next, prev;
+ int isbad, p, ret, t_ret;
+ u_int32_t refcount;
+
+ pgset = vdp->pgset;
+ DB_ASSERT(pgset != NULL);
+ isbad = 0;
+
+ /* This shouldn't happen, but just to be sure. */
+ if (!IS_VALID_PGNO(pgno))
+ return (DB_VERIFY_BAD);
+
+ /*
+ * Check the first prev_pgno; it ought to be PGNO_INVALID,
+ * since there's no prev page.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* The refcount is stored on the first overflow page. */
+ refcount = pip->refcount;
+
+ if (pip->type != P_OVERFLOW) {
+ EPRINT((dbp->dbenv,
+ "Overflow page %lu of invalid type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ ret = DB_VERIFY_BAD;
+ goto err; /* Unsafe to continue. */
+ }
+
+ prev = pip->prev_pgno;
+ if (prev != PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "First overflow page %lu has a prev_pgno", (u_long)pgno));
+ isbad = 1;
+ }
+
+ for (;;) {
+ /*
+ * This is slightly gross. Btree leaf pages reference
+ * individual overflow trees multiple times if the overflow page
+ * is the key to a duplicate set. The reference count does not
+ * reflect this multiple referencing. Thus, if this is called
+ * during the structure verification of a btree leaf page, we
+ * check to see whether we've seen it from a leaf page before
+ * and, if we have, adjust our count of how often we've seen it
+ * accordingly.
+ *
+ * (This will screw up if it's actually referenced--and
+ * correctly refcounted--from two different leaf pages, but
+ * that's a very unlikely brokenness that we're not checking for
+ * anyway.)
+ */
+
+ if (LF_ISSET(ST_OVFL_LEAF)) {
+ if (F_ISSET(pip, VRFY_OVFL_LEAFSEEN)) {
+ if ((ret =
+ __db_vrfy_pgset_dec(pgset, pgno)) != 0)
+ goto err;
+ } else
+ F_SET(pip, VRFY_OVFL_LEAFSEEN);
+ }
+
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+
+ /*
+ * We may have seen this elsewhere, if the overflow entry
+ * has been promoted to an internal page.
+ */
+ if ((u_int32_t)p > refcount) {
+ EPRINT((dbp->dbenv,
+ "Page %lu encountered twice in overflow traversal",
+ (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+
+ /* Keep a running tab on how much of the item we've seen. */
+ tlen -= pip->olen;
+
+ /* Send feedback to the application about our progress. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ next = pip->next_pgno;
+
+ /* Are we there yet? */
+ if (next == PGNO_INVALID)
+ break;
+
+ /*
+ * We've already checked this when we saved it, but just
+ * to be sure...
+ */
+ if (!IS_VALID_PGNO(next)) {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Overflow page %lu has bad next_pgno",
+ (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 ||
+ (ret = __db_vrfy_getpageinfo(vdp, next, &pip)) != 0)
+ return (ret);
+ if (pip->prev_pgno != pgno) {
+ EPRINT((dbp->dbenv,
+ "Overflow page %lu has bogus prev_pgno value",
+ (u_long)next));
+ isbad = 1;
+ /*
+ * It's safe to continue because we have separate
+ * cycle detection.
+ */
+ }
+
+ pgno = next;
+ }
+
+ if (tlen > 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Overflow item incomplete on page %lu", (u_long)pgno));
+ }
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_safe_goff --
+ * Get an overflow item, very carefully, from an untrusted database,
+ * in the context of the salvager.
+ *
+ * PUBLIC: int __db_safe_goff __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: DBT *, void **, u_int32_t));
+ */
+int
+__db_safe_goff(dbp, vdp, pgno, dbt, buf, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ DBT *dbt;
+ void **buf;
+ u_int32_t flags;
+{
+ PAGE *h;
+ int ret, err_ret;
+ u_int32_t bytesgot, bytes;
+ u_int8_t *src, *dest;
+
+ ret = DB_VERIFY_BAD;
+ err_ret = 0;
+ bytesgot = bytes = 0;
+
+ while ((pgno != PGNO_INVALID) && (IS_VALID_PGNO(pgno))) {
+ /*
+ * Mark that we're looking at this page; if we've seen it
+ * already, quit.
+ */
+ if ((ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ break;
+
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ break;
+
+ /*
+ * Make sure it's really an overflow page, unless we're
+ * being aggressive, in which case we pretend it is.
+ */
+ if (!LF_ISSET(DB_AGGRESSIVE) && TYPE(h) != P_OVERFLOW) {
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+ src = (u_int8_t *)h + P_OVERHEAD;
+ bytes = OV_LEN(h);
+
+ if (bytes + P_OVERHEAD > dbp->pgsize)
+ bytes = dbp->pgsize - P_OVERHEAD;
+
+ if ((ret = __os_realloc(dbp->dbenv,
+ bytesgot + bytes, 0, buf)) != 0)
+ break;
+
+ dest = (u_int8_t *)*buf + bytesgot;
+ bytesgot += bytes;
+
+ memcpy(dest, src, bytes);
+
+ pgno = NEXT_PGNO(h);
+ /* Not much we can do here--we don't want to quit. */
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ err_ret = ret;
+ }
+
+ if (ret == 0) {
+ dbt->size = bytesgot;
+ dbt->data = *buf;
+ }
+
+ return ((err_ret != 0 && ret == 0) ? err_ret : ret);
+}
diff --git a/bdb/db/db_pr.c b/bdb/db/db_pr.c
new file mode 100644
index 00000000000..cb977cadfda
--- /dev/null
+++ b/bdb/db/db_pr.c
@@ -0,0 +1,1284 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_pr.c,v 11.46 2001/01/22 17:25:06 krinsky Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+#include "db_am.h"
+#include "db_verify.h"
+
+static int __db_bmeta __P((DB *, FILE *, BTMETA *, u_int32_t));
+static int __db_hmeta __P((DB *, FILE *, HMETA *, u_int32_t));
+static void __db_meta __P((DB *, DBMETA *, FILE *, FN const *, u_int32_t));
+static const char *__db_dbtype_to_string __P((DB *));
+static void __db_prdb __P((DB *, FILE *, u_int32_t));
+static FILE *__db_prinit __P((FILE *));
+static void __db_proff __P((void *));
+static int __db_prtree __P((DB *, u_int32_t));
+static void __db_psize __P((DB *));
+static int __db_qmeta __P((DB *, FILE *, QMETA *, u_int32_t));
+
+/*
+ * 64K is the maximum page size, so by default we check for offsets larger
+ * than that, and, where possible, we refine the test.
+ */
+#define PSIZE_BOUNDARY (64 * 1024 + 1)
+static size_t set_psize = PSIZE_BOUNDARY;
+
+static FILE *set_fp; /* Output file descriptor. */
+
+/*
+ * __db_loadme --
+ * A nice place to put a breakpoint.
+ *
+ * PUBLIC: void __db_loadme __P((void));
+ */
+void
+__db_loadme()
+{
+ getpid();
+}
+
+/*
+ * __db_dump --
+ * Dump the tree to a file.
+ *
+ * PUBLIC: int __db_dump __P((DB *, char *, char *));
+ */
+int
+__db_dump(dbp, op, name)
+ DB *dbp;
+ char *op, *name;
+{
+ FILE *fp, *save_fp;
+ u_int32_t flags;
+
+ COMPQUIET(save_fp, NULL);
+
+ if (set_psize == PSIZE_BOUNDARY)
+ __db_psize(dbp);
+
+ if (name != NULL) {
+ if ((fp = fopen(name, "w")) == NULL)
+ return (__os_get_errno());
+ save_fp = set_fp;
+ set_fp = fp;
+ } else
+ fp = __db_prinit(NULL);
+
+ for (flags = 0; *op != '\0'; ++op)
+ switch (*op) {
+ case 'a':
+ LF_SET(DB_PR_PAGE);
+ break;
+ case 'h':
+ break;
+ case 'r':
+ LF_SET(DB_PR_RECOVERYTEST);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ __db_prdb(dbp, fp, flags);
+
+ fprintf(fp, "%s\n", DB_LINE);
+
+ (void)__db_prtree(dbp, flags);
+
+ fflush(fp);
+
+ if (name != NULL) {
+ fclose(fp);
+ set_fp = save_fp;
+ }
+ return (0);
+}
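+
+/*
+ * A minimal sketch of calling the dumper from debugging code; the output
+ * path below is only an example.  "a" dumps every page in full, "h" leaves
+ * the default header-only output, and "r" selects the recovery-test form,
+ * which omits the free list and free-listed pages so that dumps taken at
+ * different times can be compared.
+ */
+static int
+dump_all_pages(dbp)
+ DB *dbp;
+{
+ return (__db_dump(dbp, "a", "/tmp/db.dump"));
+}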
+
+/*
+ * __db_prdb --
+ * Print out the DB structure information.
+ */
+static void
+__db_prdb(dbp, fp, flags)
+ DB *dbp;
+ FILE *fp;
+ u_int32_t flags;
+{
+ static const FN fn[] = {
+ { DB_AM_DISCARD, "discard cached pages" },
+ { DB_AM_DUP, "duplicates" },
+ { DB_AM_INMEM, "in-memory" },
+ { DB_AM_PGDEF, "default page size" },
+ { DB_AM_RDONLY, "read-only" },
+ { DB_AM_SUBDB, "multiple-databases" },
+ { DB_AM_SWAP, "needswap" },
+ { DB_BT_RECNUM, "btree:recnum" },
+ { DB_BT_REVSPLIT, "btree:no reverse split" },
+ { DB_DBM_ERROR, "dbm/ndbm error" },
+ { DB_OPEN_CALLED, "DB->open called" },
+ { DB_RE_DELIMITER, "recno:delimiter" },
+ { DB_RE_FIXEDLEN, "recno:fixed-length" },
+ { DB_RE_PAD, "recno:pad" },
+ { DB_RE_RENUMBER, "recno:renumber" },
+ { DB_RE_SNAPSHOT, "recno:snapshot" },
+ { 0, NULL }
+ };
+ BTREE *bt;
+ HASH *h;
+ QUEUE *q;
+
+ COMPQUIET(flags, 0);
+
+ fprintf(fp,
+ "In-memory DB structure:\n%s: %#lx",
+ __db_dbtype_to_string(dbp), (u_long)dbp->flags);
+ __db_prflags(dbp->flags, fn, fp);
+ fprintf(fp, "\n");
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ bt = dbp->bt_internal;
+ fprintf(fp, "bt_meta: %lu bt_root: %lu\n",
+ (u_long)bt->bt_meta, (u_long)bt->bt_root);
+ fprintf(fp, "bt_maxkey: %lu bt_minkey: %lu\n",
+ (u_long)bt->bt_maxkey, (u_long)bt->bt_minkey);
+ fprintf(fp, "bt_compare: %#lx bt_prefix: %#lx\n",
+ (u_long)bt->bt_compare, (u_long)bt->bt_prefix);
+ fprintf(fp, "bt_lpgno: %lu\n", (u_long)bt->bt_lpgno);
+ if (dbp->type == DB_RECNO) {
+ fprintf(fp,
+ "re_pad: %#lx re_delim: %#lx re_len: %lu re_source: %s\n",
+ (u_long)bt->re_pad, (u_long)bt->re_delim,
+ (u_long)bt->re_len,
+ bt->re_source == NULL ? "" : bt->re_source);
+ fprintf(fp, "re_modified: %d re_eof: %d re_last: %lu\n",
+ bt->re_modified, bt->re_eof, (u_long)bt->re_last);
+ }
+ break;
+ case DB_HASH:
+ h = dbp->h_internal;
+ fprintf(fp, "meta_pgno: %lu\n", (u_long)h->meta_pgno);
+ fprintf(fp, "h_ffactor: %lu\n", (u_long)h->h_ffactor);
+ fprintf(fp, "h_nelem: %lu\n", (u_long)h->h_nelem);
+ fprintf(fp, "h_hash: %#lx\n", (u_long)h->h_hash);
+ break;
+ case DB_QUEUE:
+ q = dbp->q_internal;
+ fprintf(fp, "q_meta: %lu\n", (u_long)q->q_meta);
+ fprintf(fp, "q_root: %lu\n", (u_long)q->q_root);
+ fprintf(fp, "re_pad: %#lx re_len: %lu\n",
+ (u_long)q->re_pad, (u_long)q->re_len);
+ fprintf(fp, "rec_page: %lu\n", (u_long)q->rec_page);
+ fprintf(fp, "page_ext: %lu\n", (u_long)q->page_ext);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * __db_prtree --
+ * Print out the entire tree.
+ */
+static int
+__db_prtree(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ PAGE *h;
+ db_pgno_t i, last;
+ int ret;
+
+ if (set_psize == PSIZE_BOUNDARY)
+ __db_psize(dbp);
+
+ if (dbp->type == DB_QUEUE) {
+ ret = __db_prqueue(dbp, flags);
+ goto done;
+ }
+
+ /* Find out the page number of the last page in the database. */
+ if ((ret = memp_fget(dbp->mpf, &last, DB_MPOOL_LAST, &h)) != 0)
+ return (ret);
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ return (ret);
+
+ /* Dump each page. */
+ for (i = 0; i <= last; ++i) {
+ if ((ret = memp_fget(dbp->mpf, &i, 0, &h)) != 0)
+ return (ret);
+ (void)__db_prpage(dbp, h, flags);
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ return (ret);
+ }
+
+done:
+ (void)fflush(__db_prinit(NULL));
+ return (0);
+}
+
+/*
+ * __db_meta --
+ * Print out common metadata information.
+ */
+static void
+__db_meta(dbp, dbmeta, fp, fn, flags)
+ DB *dbp;
+ DBMETA *dbmeta;
+ FILE *fp;
+ FN const *fn;
+ u_int32_t flags;
+{
+ PAGE *h;
+ int cnt;
+ db_pgno_t pgno;
+ u_int8_t *p;
+ int ret;
+ const char *sep;
+
+ fprintf(fp, "\tmagic: %#lx\n", (u_long)dbmeta->magic);
+ fprintf(fp, "\tversion: %lu\n", (u_long)dbmeta->version);
+ fprintf(fp, "\tpagesize: %lu\n", (u_long)dbmeta->pagesize);
+ fprintf(fp, "\ttype: %lu\n", (u_long)dbmeta->type);
+ fprintf(fp, "\tkeys: %lu\trecords: %lu\n",
+ (u_long)dbmeta->key_count, (u_long)dbmeta->record_count);
+
+ if (!LF_ISSET(DB_PR_RECOVERYTEST)) {
+ /*
+ * If we're doing recovery testing, don't display the free
+ * list; it may have changed, which would break the dump
+ * comparison.
+ */
+ fprintf(fp, "\tfree list: %lu", (u_long)dbmeta->free);
+ for (pgno = dbmeta->free,
+ cnt = 0, sep = ", "; pgno != PGNO_INVALID;) {
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0) {
+ fprintf(fp,
+ "Unable to retrieve free-list page: %lu: %s\n",
+ (u_long)pgno, db_strerror(ret));
+ break;
+ }
+ pgno = h->next_pgno;
+ (void)memp_fput(dbp->mpf, h, 0);
+ fprintf(fp, "%s%lu", sep, (u_long)pgno);
+ if (++cnt % 10 == 0) {
+ fprintf(fp, "\n");
+ cnt = 0;
+ sep = "\t";
+ } else
+ sep = ", ";
+ }
+ fprintf(fp, "\n");
+ }
+
+ if (fn != NULL) {
+ fprintf(fp, "\tflags: %#lx", (u_long)dbmeta->flags);
+ __db_prflags(dbmeta->flags, fn, fp);
+ fprintf(fp, "\n");
+ }
+
+ fprintf(fp, "\tuid: ");
+ for (p = (u_int8_t *)dbmeta->uid,
+ cnt = 0; cnt < DB_FILE_ID_LEN; ++cnt) {
+ fprintf(fp, "%x", *p++);
+ if (cnt < DB_FILE_ID_LEN - 1)
+ fprintf(fp, " ");
+ }
+ fprintf(fp, "\n");
+}
+
+/*
+ * __db_bmeta --
+ * Print out the btree meta-data page.
+ */
+static int
+__db_bmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ BTMETA *h;
+ u_int32_t flags;
+{
+ static const FN mfn[] = {
+ { BTM_DUP, "duplicates" },
+ { BTM_RECNO, "recno" },
+ { BTM_RECNUM, "btree:recnum" },
+ { BTM_FIXEDLEN, "recno:fixed-length" },
+ { BTM_RENUMBER, "recno:renumber" },
+ { BTM_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+
+ __db_meta(dbp, (DBMETA *)h, fp, mfn, flags);
+
+ fprintf(fp, "\tmaxkey: %lu minkey: %lu\n",
+ (u_long)h->maxkey, (u_long)h->minkey);
+ if (dbp->type == DB_RECNO)
+ fprintf(fp, "\tre_len: %#lx re_pad: %lu\n",
+ (u_long)h->re_len, (u_long)h->re_pad);
+ fprintf(fp, "\troot: %lu\n", (u_long)h->root);
+
+ return (0);
+}
+
+/*
+ * __db_hmeta --
+ * Print out the hash meta-data page.
+ */
+static int
+__db_hmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ HMETA *h;
+ u_int32_t flags;
+{
+ static const FN mfn[] = {
+ { DB_HASH_DUP, "duplicates" },
+ { DB_HASH_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+ int i;
+
+ __db_meta(dbp, (DBMETA *)h, fp, mfn, flags);
+
+ fprintf(fp, "\tmax_bucket: %lu\n", (u_long)h->max_bucket);
+ fprintf(fp, "\thigh_mask: %#lx\n", (u_long)h->high_mask);
+ fprintf(fp, "\tlow_mask: %#lx\n", (u_long)h->low_mask);
+ fprintf(fp, "\tffactor: %lu\n", (u_long)h->ffactor);
+ fprintf(fp, "\tnelem: %lu\n", (u_long)h->nelem);
+ fprintf(fp, "\th_charkey: %#lx\n", (u_long)h->h_charkey);
+ fprintf(fp, "\tspare points: ");
+ for (i = 0; i < NCACHED; i++)
+ fprintf(fp, "%lu ", (u_long)h->spares[i]);
+ fprintf(fp, "\n");
+
+ return (0);
+}
+
+/*
+ * __db_qmeta --
+ * Print out the queue meta-data page.
+ */
+static int
+__db_qmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ QMETA *h;
+ u_int32_t flags;
+{
+ __db_meta(dbp, (DBMETA *)h, fp, NULL, flags);
+
+ fprintf(fp, "\tfirst_recno: %lu\n", (u_long)h->first_recno);
+ fprintf(fp, "\tcur_recno: %lu\n", (u_long)h->cur_recno);
+ fprintf(fp, "\tre_len: %#lx re_pad: %lu\n",
+ (u_long)h->re_len, (u_long)h->re_pad);
+ fprintf(fp, "\trec_page: %lu\n", (u_long)h->rec_page);
+ fprintf(fp, "\tpage_ext: %lu\n", (u_long)h->page_ext);
+
+ return (0);
+}
+
+/*
+ * __db_prnpage --
+ * Print out a specific page.
+ *
+ * PUBLIC: int __db_prnpage __P((DB *, db_pgno_t));
+ */
+int
+__db_prnpage(dbp, pgno)
+ DB *dbp;
+ db_pgno_t pgno;
+{
+ PAGE *h;
+ int ret;
+
+ if (set_psize == PSIZE_BOUNDARY)
+ __db_psize(dbp);
+
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ ret = __db_prpage(dbp, h, DB_PR_PAGE);
+ (void)fflush(__db_prinit(NULL));
+
+ (void)memp_fput(dbp->mpf, h, 0);
+ return (ret);
+}
+
+/*
+ * __db_prpage --
+ * Print out a page.
+ *
+ * PUBLIC: int __db_prpage __P((DB *, PAGE *, u_int32_t));
+ */
+int
+__db_prpage(dbp, h, flags)
+ DB *dbp;
+ PAGE *h;
+ u_int32_t flags;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BTREE *t;
+ FILE *fp;
+ HOFFPAGE a_hkd;
+ QAMDATA *qp, *qep;
+ RINTERNAL *ri;
+ db_indx_t dlen, len, i;
+ db_pgno_t pgno;
+ db_recno_t recno;
+ int deleted, ret;
+ const char *s;
+ u_int32_t qlen;
+ u_int8_t *ep, *hk, *p;
+ void *sp;
+
+ fp = __db_prinit(NULL);
+
+ /*
+ * If we're doing recovery testing and this page is P_INVALID,
+ * assume it's a page that's on the free list, and don't display it.
+ */
+ if (LF_ISSET(DB_PR_RECOVERYTEST) && TYPE(h) == P_INVALID)
+ return (0);
+
+ s = __db_pagetype_to_string(TYPE(h));
+ if (s == NULL) {
+ fprintf(fp, "ILLEGAL PAGE TYPE: page: %lu type: %lu\n",
+ (u_long)h->pgno, (u_long)TYPE(h));
+ return (1);
+ }
+
+ /* Page number, page type. */
+ fprintf(fp, "page %lu: %s level: %lu",
+ (u_long)h->pgno, s, (u_long)h->level);
+
+ /* Record count. */
+ if (TYPE(h) == P_IBTREE ||
+ TYPE(h) == P_IRECNO || (TYPE(h) == P_LRECNO &&
+ h->pgno == ((BTREE *)dbp->bt_internal)->bt_root))
+ fprintf(fp, " records: %lu", (u_long)RE_NREC(h));
+
+ /* LSN. */
+ if (!LF_ISSET(DB_PR_RECOVERYTEST))
+ fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n",
+ (u_long)LSN(h).file, (u_long)LSN(h).offset);
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ return (__db_bmeta(dbp, fp, (BTMETA *)h, flags));
+ case P_HASHMETA:
+ return (__db_hmeta(dbp, fp, (HMETA *)h, flags));
+ case P_QAMMETA:
+ return (__db_qmeta(dbp, fp, (QMETA *)h, flags));
+ case P_QAMDATA: /* Should be meta->start. */
+ if (!LF_ISSET(DB_PR_PAGE))
+ return (0);
+
+ qlen = ((QUEUE *)dbp->q_internal)->re_len;
+ recno = (h->pgno - 1) * QAM_RECNO_PER_PAGE(dbp) + 1;
+ i = 0;
+ qep = (QAMDATA *)((u_int8_t *)h + set_psize - qlen);
+ for (qp = QAM_GET_RECORD(dbp, h, i); qp < qep;
+ recno++, i++, qp = QAM_GET_RECORD(dbp, h, i)) {
+ if (!F_ISSET(qp, QAM_SET))
+ continue;
+
+ fprintf(fp, "%s",
+ F_ISSET(qp, QAM_VALID) ? "\t" : " D");
+ fprintf(fp, "[%03lu] %4lu ",
+ (u_long)recno, (u_long)qp - (u_long)h);
+ __db_pr(qp->data, qlen);
+ }
+ return (0);
+ }
+
+ /* LSN. */
+ if (LF_ISSET(DB_PR_RECOVERYTEST))
+ fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n",
+ (u_long)LSN(h).file, (u_long)LSN(h).offset);
+
+ t = dbp->bt_internal;
+
+ s = "\t";
+ if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
+ fprintf(fp, "%sprev: %4lu next: %4lu",
+ s, (u_long)PREV_PGNO(h), (u_long)NEXT_PGNO(h));
+ s = " ";
+ }
+ if (TYPE(h) == P_OVERFLOW) {
+ fprintf(fp, "%sref cnt: %4lu ", s, (u_long)OV_REF(h));
+ __db_pr((u_int8_t *)h + P_OVERHEAD, OV_LEN(h));
+ return (0);
+ }
+ fprintf(fp, "%sentries: %4lu", s, (u_long)NUM_ENT(h));
+ fprintf(fp, " offset: %4lu\n", (u_long)HOFFSET(h));
+
+ if (TYPE(h) == P_INVALID || !LF_ISSET(DB_PR_PAGE))
+ return (0);
+
+ ret = 0;
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (P_ENTRY(h, i) - (u_int8_t *)h < P_OVERHEAD ||
+ (size_t)(P_ENTRY(h, i) - (u_int8_t *)h) >= set_psize) {
+ fprintf(fp,
+ "ILLEGAL PAGE OFFSET: indx: %lu of %lu\n",
+ (u_long)i, (u_long)h->inp[i]);
+ ret = EINVAL;
+ continue;
+ }
+ deleted = 0;
+ switch (TYPE(h)) {
+ case P_HASH:
+ case P_IBTREE:
+ case P_IRECNO:
+ sp = P_ENTRY(h, i);
+ break;
+ case P_LBTREE:
+ sp = P_ENTRY(h, i);
+ deleted = i % 2 == 0 &&
+ B_DISSET(GET_BKEYDATA(h, i + O_INDX)->type);
+ break;
+ case P_LDUP:
+ case P_LRECNO:
+ sp = P_ENTRY(h, i);
+ deleted = B_DISSET(GET_BKEYDATA(h, i)->type);
+ break;
+ default:
+ fprintf(fp,
+ "ILLEGAL PAGE ITEM: %lu\n", (u_long)TYPE(h));
+ ret = EINVAL;
+ continue;
+ }
+ fprintf(fp, "%s", deleted ? " D" : "\t");
+ fprintf(fp, "[%03lu] %4lu ", (u_long)i, (u_long)h->inp[i]);
+ switch (TYPE(h)) {
+ case P_HASH:
+ hk = sp;
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFDUP:
+ memcpy(&pgno,
+ HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
+ fprintf(fp,
+ "%4lu [offpage dups]\n", (u_long)pgno);
+ break;
+ case H_DUPLICATE:
+ /*
+ * If this is the first item on a page, then
+ * we cannot figure out how long it is, so
+ * we only print the first one in the duplicate
+ * set.
+ */
+ if (i != 0)
+ len = LEN_HKEYDATA(h, 0, i);
+ else
+ len = 1;
+
+ fprintf(fp, "Duplicates:\n");
+ for (p = HKEYDATA_DATA(hk),
+ ep = p + len; p < ep;) {
+ memcpy(&dlen, p, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ fprintf(fp, "\t\t");
+ __db_pr(p, dlen);
+ p += sizeof(db_indx_t) + dlen;
+ }
+ break;
+ case H_KEYDATA:
+ __db_pr(HKEYDATA_DATA(hk),
+ LEN_HKEYDATA(h, i == 0 ? set_psize : 0, i));
+ break;
+ case H_OFFPAGE:
+ memcpy(&a_hkd, hk, HOFFPAGE_SIZE);
+ fprintf(fp,
+ "overflow: total len: %4lu page: %4lu\n",
+ (u_long)a_hkd.tlen, (u_long)a_hkd.pgno);
+ break;
+ }
+ break;
+ case P_IBTREE:
+ bi = sp;
+ fprintf(fp, "count: %4lu pgno: %4lu type: %4lu",
+ (u_long)bi->nrecs, (u_long)bi->pgno,
+ (u_long)bi->type);
+ switch (B_TYPE(bi->type)) {
+ case B_KEYDATA:
+ __db_pr(bi->data, bi->len);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ __db_proff(bi->data);
+ break;
+ default:
+ fprintf(fp, "ILLEGAL BINTERNAL TYPE: %lu\n",
+ (u_long)B_TYPE(bi->type));
+ ret = EINVAL;
+ break;
+ }
+ break;
+ case P_IRECNO:
+ ri = sp;
+ fprintf(fp, "entries %4lu pgno %4lu\n",
+ (u_long)ri->nrecs, (u_long)ri->pgno);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = sp;
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ __db_pr(bk->data, bk->len);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ __db_proff(bk);
+ break;
+ default:
+ fprintf(fp,
+ "ILLEGAL DUPLICATE/LBTREE/LRECNO TYPE: %lu\n",
+ (u_long)B_TYPE(bk->type));
+ ret = EINVAL;
+ break;
+ }
+ break;
+ }
+ }
+ (void)fflush(fp);
+ return (ret);
+}
+
+/*
+ * __db_pr --
+ * Print out a data element.
+ *
+ * PUBLIC: void __db_pr __P((u_int8_t *, u_int32_t));
+ */
+void
+__db_pr(p, len)
+ u_int8_t *p;
+ u_int32_t len;
+{
+ FILE *fp;
+ u_int lastch;
+ int i;
+
+ fp = __db_prinit(NULL);
+
+ fprintf(fp, "len: %3lu", (u_long)len);
+ lastch = '.';
+ if (len != 0) {
+ fprintf(fp, " data: ");
+ for (i = len <= 20 ? len : 20; i > 0; --i, ++p) {
+ lastch = *p;
+ if (isprint((int)*p) || *p == '\n')
+ fprintf(fp, "%c", *p);
+ else
+ fprintf(fp, "0x%.2x", (u_int)*p);
+ }
+ if (len > 20) {
+ fprintf(fp, "...");
+ lastch = '.';
+ }
+ }
+ if (lastch != '\n')
+ fprintf(fp, "\n");
+}
+
+/*
+ * __db_prdbt --
+ * Print out a DBT data element.
+ *
+ * PUBLIC: int __db_prdbt __P((DBT *, int, const char *, void *,
+ * PUBLIC: int (*)(void *, const void *), int, VRFY_DBINFO *));
+ */
+int
+__db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp)
+ DBT *dbtp;
+ int checkprint;
+ const char *prefix;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ int is_recno;
+ VRFY_DBINFO *vdp;
+{
+ static const char hex[] = "0123456789abcdef";
+ db_recno_t recno;
+ u_int32_t len;
+ int ret;
+#define DBTBUFLEN 100
+ char *p, *hp, buf[DBTBUFLEN], hbuf[DBTBUFLEN];
+
+ if (vdp != NULL) {
+ /*
+ * If vdp is non-NULL, we might be the first key in the
+ * "fake" subdatabase used for key/data pairs we can't
+ * associate with a known subdb.
+ *
+ * Check and clear the SALVAGE_PRINTHEADER flag; if
+ * it was set, print a subdatabase header.
+ */
+ if (F_ISSET(vdp, SALVAGE_PRINTHEADER))
+ (void)__db_prheader(NULL, "__OTHER__", 0, 0,
+ handle, callback, vdp, 0);
+ F_CLR(vdp, SALVAGE_PRINTHEADER);
+ F_SET(vdp, SALVAGE_PRINTFOOTER);
+ }
+
+ /*
+ * !!!
+ * This routine is the routine that dumps out items in the format
+ * used by db_dump(1) and db_load(1). This means that the format
+ * cannot change.
+ */
+ if (prefix != NULL && (ret = callback(handle, prefix)) != 0)
+ return (ret);
+ if (is_recno) {
+ /*
+ * We're printing a record number, and this has to be done
+ * in a platform-independent way. So we use the numeral in
+ * straight ASCII.
+ */
+ __ua_memcpy(&recno, dbtp->data, sizeof(recno));
+ snprintf(buf, DBTBUFLEN, "%lu", (u_long)recno);
+
+ /* If we're printing data as hex, print keys as hex too. */
+ if (!checkprint) {
+ for (len = strlen(buf), p = buf, hp = hbuf;
+ len-- > 0; ++p) {
+ *hp++ = hex[(u_int8_t)(*p & 0xf0) >> 4];
+ *hp++ = hex[*p & 0x0f];
+ }
+ *hp = '\0';
+ ret = callback(handle, hbuf);
+ } else
+ ret = callback(handle, buf);
+
+ if (ret != 0)
+ return (ret);
+ } else if (checkprint) {
+ for (len = dbtp->size, p = dbtp->data; len--; ++p)
+ if (isprint((int)*p)) {
+ if (*p == '\\' &&
+ (ret = callback(handle, "\\")) != 0)
+ return (ret);
+ snprintf(buf, DBTBUFLEN, "%c", *p);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ } else {
+ snprintf(buf, DBTBUFLEN, "\\%c%c",
+ hex[(u_int8_t)(*p & 0xf0) >> 4],
+ hex[*p & 0x0f]);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ }
+ } else
+ for (len = dbtp->size, p = dbtp->data; len--; ++p) {
+ snprintf(buf, DBTBUFLEN, "%c%c",
+ hex[(u_int8_t)(*p & 0xf0) >> 4],
+ hex[*p & 0x0f]);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ }
+
+ return (callback(handle, "\n"));
+}
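+
+/*
+ * A minimal sketch, not part of the library, of driving the routine above
+ * directly: the callback appends each string to a stdio stream, and the DBT
+ * is built on the stack.  checkprint = 0 selects the hex "bytevalue" form;
+ * the " " prefix is the leading space db_dump(1) writes before each item.
+ * The names write_cb and print_one_item are illustrative only.
+ */
+static int
+write_cb(handle, str)
+ void *handle;
+ const void *str;
+{
+ /* A nonzero return aborts the dump. */
+ return (fputs((const char *)str, (FILE *)handle) >= 0 ? 0 : 1);
+}
+
+static int
+print_one_item(data, size)
+ void *data;
+ u_int32_t size;
+{
+ DBT dbt;
+
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.data = data;
+ dbt.size = size;
+
+ /* Not a record number, no verifier info. */
+ return (__db_prdbt(&dbt, 0, " ", stdout, write_cb, 0, NULL));
+}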
+
+/*
+ * __db_proff --
+ * Print out an off-page element.
+ */
+static void
+__db_proff(vp)
+ void *vp;
+{
+ FILE *fp;
+ BOVERFLOW *bo;
+
+ fp = __db_prinit(NULL);
+
+ bo = vp;
+ switch (B_TYPE(bo->type)) {
+ case B_OVERFLOW:
+ fprintf(fp, "overflow: total len: %4lu page: %4lu\n",
+ (u_long)bo->tlen, (u_long)bo->pgno);
+ break;
+ case B_DUPLICATE:
+ fprintf(fp, "duplicate: page: %4lu\n", (u_long)bo->pgno);
+ break;
+ }
+}
+
+/*
+ * __db_prflags --
+ * Print out flags values.
+ *
+ * PUBLIC: void __db_prflags __P((u_int32_t, const FN *, FILE *));
+ */
+void
+__db_prflags(flags, fn, fp)
+ u_int32_t flags;
+ FN const *fn;
+ FILE *fp;
+{
+ const FN *fnp;
+ int found;
+ const char *sep;
+
+ sep = " (";
+ for (found = 0, fnp = fn; fnp->mask != 0; ++fnp)
+ if (LF_ISSET(fnp->mask)) {
+ fprintf(fp, "%s%s", sep, fnp->name);
+ sep = ", ";
+ found = 1;
+ }
+ if (found)
+ fprintf(fp, ")");
+}
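+
+/*
+ * A minimal sketch of using the routine above against a small, local flag
+ * table; the helper and its table are illustrative, not used elsewhere in
+ * this file.  A dbp->flags value with DB_AM_DUP and DB_AM_RDONLY set would
+ * append " (duplicates, read-only)" to the current output line.
+ */
+static void
+print_open_flags(flags, fp)
+ u_int32_t flags;
+ FILE *fp;
+{
+ static const FN fn[] = {
+ { DB_AM_DUP, "duplicates" },
+ { DB_AM_RDONLY, "read-only" },
+ { 0, NULL }
+ };
+
+ fprintf(fp, "flags: %#lx", (u_long)flags);
+ __db_prflags(flags, fn, fp);
+ fprintf(fp, "\n");
+}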
+
+/*
+ * __db_prinit --
+ * Initialize tree printing routines.
+ */
+static FILE *
+__db_prinit(fp)
+ FILE *fp;
+{
+ if (set_fp == NULL)
+ set_fp = fp == NULL ? stdout : fp;
+ return (set_fp);
+}
+
+/*
+ * __db_psize --
+ * Get the page size.
+ */
+static void
+__db_psize(dbp)
+ DB *dbp;
+{
+ DBMETA *mp;
+ db_pgno_t pgno;
+
+ set_psize = PSIZE_BOUNDARY - 1;
+
+ pgno = PGNO_BASE_MD;
+ if (memp_fget(dbp->mpf, &pgno, 0, &mp) != 0)
+ return;
+
+ switch (mp->magic) {
+ case DB_BTREEMAGIC:
+ case DB_HASHMAGIC:
+ case DB_QAMMAGIC:
+ set_psize = mp->pagesize;
+ break;
+ }
+ (void)memp_fput(dbp->mpf, mp, 0);
+}
+
+/*
+ * __db_dbtype_to_string --
+ * Return the name of the database type.
+ */
+static const char *
+__db_dbtype_to_string(dbp)
+ DB *dbp;
+{
+ switch (dbp->type) {
+ case DB_BTREE:
+ return ("btree");
+ case DB_HASH:
+ return ("hash");
+ case DB_RECNO:
+ return ("recno");
+ case DB_QUEUE:
+ return ("queue");
+ default:
+ return ("UNKNOWN TYPE");
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __db_pagetype_to_string --
+ * Return the name of the specified page type.
+ *
+ * PUBLIC: const char *__db_pagetype_to_string __P((u_int32_t));
+ */
+const char *
+__db_pagetype_to_string(type)
+ u_int32_t type;
+{
+ char *s;
+
+ s = NULL;
+ switch (type) {
+ case P_BTREEMETA:
+ s = "btree metadata";
+ break;
+ case P_LDUP:
+ s = "duplicate";
+ break;
+ case P_HASH:
+ s = "hash";
+ break;
+ case P_HASHMETA:
+ s = "hash metadata";
+ break;
+ case P_IBTREE:
+ s = "btree internal";
+ break;
+ case P_INVALID:
+ s = "invalid";
+ break;
+ case P_IRECNO:
+ s = "recno internal";
+ break;
+ case P_LBTREE:
+ s = "btree leaf";
+ break;
+ case P_LRECNO:
+ s = "recno leaf";
+ break;
+ case P_OVERFLOW:
+ s = "overflow";
+ break;
+ case P_QAMMETA:
+ s = "queue metadata";
+ break;
+ case P_QAMDATA:
+ s = "queue";
+ break;
+ default:
+ /* Just return a NULL. */
+ break;
+ }
+ return (s);
+}
+
+/*
+ * __db_prheader --
+ * Write out header information in the format expected by db_load.
+ *
+ * PUBLIC: int __db_prheader __P((DB *, char *, int, int, void *,
+ * PUBLIC: int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
+ DB *dbp;
+ char *subname;
+ int pflag, keyflag;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+{
+ DB_BTREE_STAT *btsp;
+ DB_ENV *dbenv;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ VRFY_PAGEINFO *pip;
+ char *buf;
+ int buflen, ret, t_ret;
+ u_int32_t dbtype;
+
+ btsp = NULL;
+ hsp = NULL;
+ qsp = NULL;
+ ret = 0;
+ buf = NULL;
+ COMPQUIET(buflen, 0);
+
+ if (dbp == NULL)
+ dbenv = NULL;
+ else
+ dbenv = dbp->dbenv;
+
+ /*
+ * If we've been passed a verifier statistics object, use
+ * that; we're being called in a context where dbp->stat
+ * is unsafe.
+ */
+ if (vdp != NULL) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0)
+ return (ret);
+ } else
+ pip = NULL;
+
+ /*
+ * If dbp is NULL, we're being called from inside __db_prdbt,
+ * and this is a special subdatabase for "lost" items. Make it a btree.
+ * Otherwise, set dbtype to the appropriate type for the specified
+ * meta page, or the type of the dbp.
+ */
+ if (dbp == NULL)
+ dbtype = DB_BTREE;
+ else if (pip != NULL)
+ switch (pip->type) {
+ case P_BTREEMETA:
+ if (F_ISSET(pip, VRFY_IS_RECNO))
+ dbtype = DB_RECNO;
+ else
+ dbtype = DB_BTREE;
+ break;
+ case P_HASHMETA:
+ dbtype = DB_HASH;
+ break;
+ default:
+ /*
+ * If the meta page is of a bogus type, it's
+ * because we have a badly corrupt database.
+ * (We must be in the verifier for pip to be non-NULL.)
+ * Pretend we're a Btree and salvage what we can.
+ */
+ DB_ASSERT(F_ISSET(dbp, DB_AM_VERIFYING));
+ dbtype = DB_BTREE;
+ break;
+ }
+ else
+ dbtype = dbp->type;
+
+ if ((ret = callback(handle, "VERSION=3\n")) != 0)
+ goto err;
+ if (pflag) {
+ if ((ret = callback(handle, "format=print\n")) != 0)
+ goto err;
+ } else if ((ret = callback(handle, "format=bytevalue\n")) != 0)
+ goto err;
+
+ /*
+ * 64 bytes is long enough, as a minimum bound, for any of the
+ * fields besides subname.  Subname can be anything, so 64 bytes
+ * plus the length of subname is big enough for everything we
+ * need to print here.
+ */
+ buflen = 64 + ((subname != NULL) ? strlen(subname) : 0);
+ if ((ret = __os_malloc(dbenv, buflen, NULL, &buf)) != 0)
+ goto err;
+ if (subname != NULL) {
+ snprintf(buf, buflen, "database=%s\n", subname);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ switch (dbtype) {
+ case DB_BTREE:
+ if ((ret = callback(handle, "type=btree\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_HAS_RECNUMS))
+ if ((ret =
+ callback(handle, "recnum=1\n")) != 0)
+ goto err;
+ if (pip->bt_maxkey != 0) {
+ snprintf(buf, buflen,
+ "bt_maxkey=%lu\n", (u_long)pip->bt_maxkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (pip->bt_minkey != 0 &&
+ pip->bt_minkey != DEFMINKEYPAGE) {
+ snprintf(buf, buflen,
+ "bt_minkey=%lu\n", (u_long)pip->bt_minkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &btsp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (F_ISSET(dbp, DB_BT_RECNUM))
+ if ((ret = callback(handle, "recnum=1\n")) != 0)
+ goto err;
+ if (btsp->bt_maxkey != 0) {
+ snprintf(buf, buflen,
+ "bt_maxkey=%lu\n", (u_long)btsp->bt_maxkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (btsp->bt_minkey != 0 && btsp->bt_minkey != DEFMINKEYPAGE) {
+ snprintf(buf, buflen,
+ "bt_minkey=%lu\n", (u_long)btsp->bt_minkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_HASH:
+ if ((ret = callback(handle, "type=hash\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (pip->h_ffactor != 0) {
+ snprintf(buf, buflen,
+ "h_ffactor=%lu\n", (u_long)pip->h_ffactor);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (pip->h_nelem != 0) {
+ snprintf(buf, buflen,
+ "h_nelem=%lu\n", (u_long)pip->h_nelem);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &hsp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (hsp->hash_ffactor != 0) {
+ snprintf(buf, buflen,
+ "h_ffactor=%lu\n", (u_long)hsp->hash_ffactor);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (hsp->hash_nelem != 0 || hsp->hash_nkeys != 0) {
+ snprintf(buf, buflen, "h_nelem=%lu\n",
+ hsp->hash_nelem > hsp->hash_nkeys ?
+ (u_long)hsp->hash_nelem : (u_long)hsp->hash_nkeys);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_QUEUE:
+ if ((ret = callback(handle, "type=queue\n")) != 0)
+ goto err;
+ if (vdp != NULL) {
+ snprintf(buf,
+ buflen, "re_len=%lu\n", (u_long)vdp->re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &qsp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ snprintf(buf, buflen, "re_len=%lu\n", (u_long)qsp->qs_re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ /* Emit re_pad separately so it doesn't overwrite the re_len line. */
+ if (qsp->qs_re_pad != 0 && qsp->qs_re_pad != ' ') {
+ snprintf(buf, buflen, "re_pad=%#x\n", qsp->qs_re_pad);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_RECNO:
+ if ((ret = callback(handle, "type=recno\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_IS_RRECNO))
+ if ((ret =
+ callback(handle, "renumber=1\n")) != 0)
+ goto err;
+ if (pip->re_len > 0) {
+ snprintf(buf, buflen,
+ "re_len=%lu\n", (u_long)pip->re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &btsp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (F_ISSET(dbp, DB_RE_RENUMBER))
+ if ((ret = callback(handle, "renumber=1\n")) != 0)
+ goto err;
+ if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
+ snprintf(buf, buflen,
+ "re_len=%lu\n", (u_long)btsp->bt_re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (btsp->bt_re_pad != 0 && btsp->bt_re_pad != ' ') {
+ snprintf(buf, buflen, "re_pad=%#x\n", btsp->bt_re_pad);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_UNKNOWN:
+ DB_ASSERT(0); /* Impossible. */
+ __db_err(dbp->dbenv, "Impossible DB type in __db_prheader");
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_HAS_DUPS))
+ if ((ret = callback(handle, "duplicates=1\n")) != 0)
+ goto err;
+ if (F_ISSET(pip, VRFY_HAS_DUPSORT))
+ if ((ret = callback(handle, "dupsort=1\n")) != 0)
+ goto err;
+ /* We should handle page size. XXX */
+ } else {
+ if (F_ISSET(dbp, DB_AM_DUP))
+ if ((ret = callback(handle, "duplicates=1\n")) != 0)
+ goto err;
+ if (F_ISSET(dbp, DB_AM_DUPSORT))
+ if ((ret = callback(handle, "dupsort=1\n")) != 0)
+ goto err;
+ if (!F_ISSET(dbp, DB_AM_PGDEF)) {
+ snprintf(buf, buflen,
+ "db_pagesize=%lu\n", (u_long)dbp->pgsize);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ }
+
+ if (keyflag && (ret = callback(handle, "keys=1\n")) != 0)
+ goto err;
+
+ ret = callback(handle, "HEADER=END\n");
+
+err: if (pip != NULL &&
+ (t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (btsp != NULL)
+ __os_free(btsp, 0);
+ if (hsp != NULL)
+ __os_free(hsp, 0);
+ if (qsp != NULL)
+ __os_free(qsp, 0);
+ if (buf != NULL)
+ __os_free(buf, buflen);
+
+ return (ret);
+}
+
+/*
+ * __db_prfooter --
+ * Print the footer that marks the end of a DB dump. This is trivial,
+ * but for consistency's sake we don't want to put its literal contents
+ * in multiple places.
+ *
+ * PUBLIC: int __db_prfooter __P((void *, int (*)(void *, const void *)));
+ */
+int
+__db_prfooter(handle, callback)
+ void *handle;
+ int (*callback) __P((void *, const void *));
+{
+ return (callback(handle, "DATA=END\n"));
+}
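+
+/*
+ * A minimal sketch, with illustrative names, of the framing these routines
+ * provide: a dump is a header, zero or more key/data items emitted with
+ * __db_prdbt, then the footer.  It assumes an open database handle, since
+ * __db_prheader calls DB->stat when no verifier info is supplied; the
+ * pflag/keyflag values are examples only.
+ */
+static int
+dump_frame(dbp, handle, callback)
+ DB *dbp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+{
+ int ret;
+
+ /* pflag = 0 (bytevalue format), keyflag = 1, no verifier info. */
+ if ((ret = __db_prheader(
+ dbp, NULL, 0, 1, handle, callback, NULL, PGNO_INVALID)) != 0)
+ return (ret);
+
+ /* ... emit key/data pairs with __db_prdbt here ... */
+
+ return (__db_prfooter(handle, callback));
+}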
diff --git a/bdb/db/db_rec.c b/bdb/db/db_rec.c
new file mode 100644
index 00000000000..998d074290d
--- /dev/null
+++ b/bdb/db/db_rec.c
@@ -0,0 +1,529 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_rec.c,v 11.10 2000/08/03 15:32:19 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "log.h"
+#include "hash.h"
+
+/*
+ * PUBLIC: int __db_addrem_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * This log record is written whenever we add a duplicate to, or remove
+ * one from, a duplicate page.  On recovery, we just do the opposite.
+ */
+int
+__db_addrem_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_addrem_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_addrem_print);
+ REC_INTRO(__db_addrem_read, 1);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
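+ /*
+ * cmp_p == 0 means the page's LSN still matches the LSN logged
+ * before this update, so on redo the update is missing and must
+ * be reapplied; cmp_n == 0 means the page's LSN matches this log
+ * record's LSN, so on undo the update is present and must be
+ * reversed.  Otherwise the page is already in the correct state.
+ */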
+ change = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_DUP) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_DUP)) {
+
+ /* Need to redo an add, or undo a delete. */
+ if ((ret = __db_pitem(dbc, pagep, argp->indx, argp->nbytes,
+ argp->hdr.size == 0 ? NULL : &argp->hdr,
+ argp->dbt.size == 0 ? NULL : &argp->dbt)) != 0)
+ goto out;
+
+ change = DB_MPOOL_DIRTY;
+
+ } else if ((cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_ADD_DUP) ||
+ (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_DUP)) {
+ /* Need to undo an add, or redo a delete. */
+ if ((ret = __db_ditem(dbc,
+ pagep, argp->indx, argp->nbytes)) != 0)
+ goto out;
+ change = DB_MPOOL_DIRTY;
+ }
+
+ if (change) {
+ if (DB_REDO(op))
+ LSN(pagep) = *lsnp;
+ else
+ LSN(pagep) = argp->pagelsn;
+ }
+
+ if ((ret = memp_fput(mpf, pagep, change)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * PUBLIC: int __db_big_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_big_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_big_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_big_print);
+ REC_INTRO(__db_big_read, 1);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto ppage;
+ } else
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /*
+ * There are three pages we need to check. The one on which we are
+ * adding data, the previous one whose next_pointer may have
+ * been updated, and the next one whose prev_pointer may have
+ * been updated.
+ */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+ change = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_BIG)) {
+ /* We are either redo-ing an add, or undoing a delete. */
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno, argp->prev_pgno,
+ argp->next_pgno, 0, P_OVERFLOW);
+ OV_LEN(pagep) = argp->dbt.size;
+ OV_REF(pagep) = 1;
+ memcpy((u_int8_t *)pagep + P_OVERHEAD, argp->dbt.data,
+ argp->dbt.size);
+ PREV_PGNO(pagep) = argp->prev_pgno;
+ change = DB_MPOOL_DIRTY;
+ } else if ((cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_ADD_BIG) ||
+ (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_BIG)) {
+ /*
+ * We are either undo-ing an add or redo-ing a delete.
+ * The page is about to be reclaimed in either case, so
+ * there really isn't anything to do here.
+ */
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+
+ if ((ret = memp_fput(mpf, pagep, change)) != 0)
+ goto out;
+
+ /* Now check the previous page. */
+ppage: if (argp->prev_pgno != PGNO_INVALID) {
+ change = 0;
+ if ((ret = memp_fget(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto npage;
+ } else
+ if ((ret = memp_fget(mpf, &argp->prev_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_BIG)) {
+ /* Redo add, undo delete. */
+ NEXT_PGNO(pagep) = argp->pgno;
+ change = DB_MPOOL_DIRTY;
+ } else if ((cmp_n == 0 &&
+ DB_UNDO(op) && argp->opcode == DB_ADD_BIG) ||
+ (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_BIG)) {
+ /* Redo delete, undo add. */
+ NEXT_PGNO(pagep) = argp->next_pgno;
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn;
+ if ((ret = memp_fput(mpf, pagep, change)) != 0)
+ goto out;
+ }
+
+ /* Now check the next page. Can only be set on a delete. */
+npage: if (argp->next_pgno != PGNO_INVALID) {
+ change = 0;
+ if ((ret = memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ goto done;
+ } else
+ if ((ret = memp_fget(mpf, &argp->next_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ PREV_PGNO(pagep) = PGNO_INVALID;
+ change = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ PREV_PGNO(pagep) = argp->pgno;
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn;
+ if ((ret = memp_fput(mpf, pagep, change)) != 0)
+ goto out;
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_ovref_recover --
+ * Recovery function for __db_ovref().
+ *
+ * PUBLIC: int __db_ovref_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_ovref_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_ovref_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_ovref_print);
+ REC_INTRO(__db_ovref_read, 1);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ (void)__db_pgerr(file_dbp, argp->pgno);
+ goto out;
+ }
+
+ modified = 0;
+ cmp = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp, &LSN(pagep), &argp->lsn);
+ if (cmp == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ OV_REF(pagep) += argp->adjust;
+
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ OV_REF(pagep) -= argp->adjust;
+
+ pagep->lsn = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_relink_recover --
+ * Recovery function for relink.
+ *
+ * PUBLIC: int __db_relink_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_relink_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_relink_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_relink_print);
+ REC_INTRO(__db_relink_read, 1);
+
+ /*
+ * There are up to three pages we need to check -- the page, and the
+ * previous and next pages, if they existed. For a page add operation,
+ * the current page is the result of a split and is being recovered
+ * elsewhere, so all we need do is recover the next page.
+ */
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ (void)__db_pgerr(file_dbp, argp->pgno);
+ goto out;
+ }
+ goto next2;
+ }
+ modified = 0;
+ if (argp->opcode == DB_ADD_PAGE)
+ goto next1;
+
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the relink. */
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Undo the relink. */
+ pagep->next_pgno = argp->next;
+ pagep->prev_pgno = argp->prev;
+
+ pagep->lsn = argp->lsn;
+ modified = 1;
+ }
+next1: if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+next2: if ((ret = memp_fget(mpf, &argp->next, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ (void)__db_pgerr(file_dbp, argp->next);
+ goto out;
+ }
+ goto prev;
+ }
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn_next);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_next);
+ if ((argp->opcode == DB_REM_PAGE && cmp_p == 0 && DB_REDO(op)) ||
+ (argp->opcode == DB_ADD_PAGE && cmp_n == 0 && DB_UNDO(op))) {
+ /* Redo the remove or undo the add. */
+ pagep->prev_pgno = argp->prev;
+
+ modified = 1;
+ } else if ((argp->opcode == DB_REM_PAGE && cmp_n == 0 && DB_UNDO(op)) ||
+ (argp->opcode == DB_ADD_PAGE && cmp_p == 0 && DB_REDO(op))) {
+ /* Undo the remove or redo the add. */
+ pagep->prev_pgno = argp->pgno;
+
+ modified = 1;
+ }
+ if (modified == 1) {
+ if (DB_UNDO(op))
+ pagep->lsn = argp->lsn_next;
+ else
+ pagep->lsn = *lsnp;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ if (argp->opcode == DB_ADD_PAGE)
+ goto done;
+
+prev: if ((ret = memp_fget(mpf, &argp->prev, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ (void)__db_pgerr(file_dbp, argp->prev);
+ goto out;
+ }
+ goto done;
+ }
+ modified = 0;
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn_prev);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_prev);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the relink. */
+ pagep->next_pgno = argp->next;
+
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Undo the relink. */
+ pagep->next_pgno = argp->pgno;
+
+ modified = 1;
+ }
+ if (modified == 1) {
+ if (DB_UNDO(op))
+ pagep->lsn = argp->lsn_prev;
+ else
+ pagep->lsn = *lsnp;
+ }
+ if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_debug_recover --
+ * Recovery function for debug.
+ *
+ * PUBLIC: int __db_debug_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_debug_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_debug_args *argp;
+ int ret;
+
+ COMPQUIET(op, 0);
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__db_debug_print);
+ REC_NOOP_INTRO(__db_debug_read);
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __db_noop_recover --
+ * Recovery function for noop.
+ *
+ * PUBLIC: int __db_noop_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_noop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_noop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_noop_print);
+ REC_INTRO(__db_noop_read, 0);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ goto out;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+ change = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ LSN(pagep) = *lsnp;
+ change = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ LSN(pagep) = argp->prevlsn;
+ change = DB_MPOOL_DIRTY;
+ }
+ ret = memp_fput(mpf, pagep, change);
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
diff --git a/bdb/db/db_reclaim.c b/bdb/db/db_reclaim.c
new file mode 100644
index 00000000000..739f348407d
--- /dev/null
+++ b/bdb/db/db_reclaim.c
@@ -0,0 +1,134 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_reclaim.c,v 11.5 2000/04/07 14:26:58 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_am.h"
+
+/*
+ * Assume that we enter with a valid pgno. We traverse a set of
+ * duplicate pages. The format of the callback routine is:
+ * callback(dbp, page, cookie, did_put). did_put is an output
+ * value that will be set to 1 by the callback routine if it
+ * already put the page back. Otherwise, this routine must
+ * put the page.
+ *
+ * PUBLIC: int __db_traverse_dup __P((DB *,
+ * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+ */
+int
+__db_traverse_dup(dbp, pgno, callback, cookie)
+ DB *dbp;
+ db_pgno_t pgno;
+ int (*callback) __P((DB *, PAGE *, void *, int *));
+ void *cookie;
+{
+ PAGE *p;
+ int did_put, i, opgno, ret;
+
+ do {
+ did_put = 0;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &p)) != 0)
+ return (ret);
+ pgno = NEXT_PGNO(p);
+
+ for (i = 0; i < NUM_ENT(p); i++) {
+ if (B_TYPE(GET_BKEYDATA(p, i)->type) == B_OVERFLOW) {
+ opgno = GET_BOVERFLOW(p, i)->pgno;
+ if ((ret = __db_traverse_big(dbp,
+ opgno, callback, cookie)) != 0)
+ goto err;
+ }
+ }
+
+ if ((ret = callback(dbp, p, cookie, &did_put)) != 0)
+ goto err;
+
+ if (!did_put)
+ if ((ret = memp_fput(dbp->mpf, p, 0)) != 0)
+ return (ret);
+ } while (pgno != PGNO_INVALID);
+
+ if (0) {
+err: if (did_put == 0)
+ (void)memp_fput(dbp->mpf, p, 0);
+ }
+ return (ret);
+}
+
+/*
+ * __db_traverse_big --
+ * Traverse a chain of overflow pages and call the callback routine
+ * on each one. The calling convention for the callback is:
+ * callback(dbp, page, cookie, did_put),
+ * where did_put is a return value indicating if the page in question has
+ * already been returned to the mpool.
+ *
+ * PUBLIC: int __db_traverse_big __P((DB *,
+ * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+ */
+int
+__db_traverse_big(dbp, pgno, callback, cookie)
+ DB *dbp;
+ db_pgno_t pgno;
+ int (*callback) __P((DB *, PAGE *, void *, int *));
+ void *cookie;
+{
+ PAGE *p;
+ int did_put, ret;
+
+ do {
+ did_put = 0;
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &p)) != 0)
+ return (ret);
+ pgno = NEXT_PGNO(p);
+ if ((ret = callback(dbp, p, cookie, &did_put)) == 0 &&
+ !did_put)
+ ret = memp_fput(dbp->mpf, p, 0);
+ } while (ret == 0 && pgno != PGNO_INVALID);
+
+ return (ret);
+}
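+
+/*
+ * A minimal sketch of a callback fitting the convention described above; it
+ * counts overflow pages and leaves the put to the traversal code.  It would
+ * be driven as __db_traverse_big(dbp, first_pgno, count_ovfl_pages, &count);
+ * the names here are illustrative only.
+ */
+static int
+count_ovfl_pages(dbp, p, cookie, putp)
+ DB *dbp;
+ PAGE *p;
+ void *cookie;
+ int *putp;
+{
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(p, NULL);
+
+ ++*(u_int32_t *)cookie; /* The cookie points at the caller's counter. */
+ *putp = 0; /* We did not put the page; the caller must. */
+
+ return (0);
+}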
+
+/*
+ * __db_reclaim_callback --
+ * This is the callback routine used during a delete of a subdatabase.
+ * We are traversing a btree or hash table and trying to free all the
+ * pages.  Since btree and hash share common code for duplicates and
+ * overflow items, we traverse them identically and use this routine to
+ * do the actual free.  The reason this is a callback is that hash uses
+ * the same traversal code for statistics gathering.
+ *
+ * PUBLIC: int __db_reclaim_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__db_reclaim_callback(dbp, p, cookie, putp)
+ DB *dbp;
+ PAGE *p;
+ void *cookie;
+ int *putp;
+{
+ int ret;
+
+ COMPQUIET(dbp, NULL);
+
+ if ((ret = __db_free(cookie, p)) != 0)
+ return (ret);
+ *putp = 1;
+
+ return (0);
+}
diff --git a/bdb/db/db_ret.c b/bdb/db/db_ret.c
new file mode 100644
index 00000000000..0782de3e450
--- /dev/null
+++ b/bdb/db/db_ret.c
@@ -0,0 +1,160 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_ret.c,v 11.12 2000/11/30 00:58:33 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "db_am.h"
+
+/*
+ * __db_ret --
+ * Build return DBT.
+ *
+ * PUBLIC: int __db_ret __P((DB *,
+ * PUBLIC: PAGE *, u_int32_t, DBT *, void **, u_int32_t *));
+ */
+int
+__db_ret(dbp, h, indx, dbt, memp, memsize)
+ DB *dbp;
+ PAGE *h;
+ u_int32_t indx;
+ DBT *dbt;
+ void **memp;
+ u_int32_t *memsize;
+{
+ BKEYDATA *bk;
+ HOFFPAGE ho;
+ BOVERFLOW *bo;
+ u_int32_t len;
+ u_int8_t *hk;
+ void *data;
+
+ switch (TYPE(h)) {
+ case P_HASH:
+ hk = P_ENTRY(h, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ memcpy(&ho, hk, sizeof(HOFFPAGE));
+ return (__db_goff(dbp, dbt,
+ ho.tlen, ho.pgno, memp, memsize));
+ }
+ len = LEN_HKEYDATA(h, dbp->pgsize, indx);
+ data = HKEYDATA_DATA(hk);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = GET_BKEYDATA(h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ return (__db_goff(dbp, dbt,
+ bo->tlen, bo->pgno, memp, memsize));
+ }
+ len = bk->len;
+ data = bk->data;
+ break;
+ default:
+ return (__db_pgfmt(dbp, h->pgno));
+ }
+
+ return (__db_retcopy(dbp, dbt, data, len, memp, memsize));
+}
+
+/*
+ * __db_retcopy --
+ * Copy the returned data into the user's DBT, handling special flags.
+ *
+ * PUBLIC: int __db_retcopy __P((DB *, DBT *,
+ * PUBLIC: void *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__db_retcopy(dbp, dbt, data, len, memp, memsize)
+ DB *dbp;
+ DBT *dbt;
+ void *data;
+ u_int32_t len;
+ void **memp;
+ u_int32_t *memsize;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbp == NULL ? NULL : dbp->dbenv;
+
+ /* If returning a partial record, reset the length. */
+ if (F_ISSET(dbt, DB_DBT_PARTIAL)) {
+ data = (u_int8_t *)data + dbt->doff;
+ if (len > dbt->doff) {
+ len -= dbt->doff;
+ if (len > dbt->dlen)
+ len = dbt->dlen;
+ } else
+ len = 0;
+ }
+
+ /*
+ * Return the length of the returned record in the DBT size field.
+ * This satisfies the requirement that, if we're using user memory
+ * and insufficient memory was provided, we return the amount
+ * necessary in the size field.
+ */
+ dbt->size = len;
+
+ /*
+ * Allocate memory to be owned by the application: DB_DBT_MALLOC,
+ * DB_DBT_REALLOC.
+ *
+ * !!!
+ * We always allocate memory, even if we're copying out 0 bytes. This
+ * guarantees consistency, i.e., the application can always free memory
+ * without concern as to how many bytes of the record were requested.
+ *
+ * Use the memory specified by the application: DB_DBT_USERMEM.
+ *
+ * !!!
+ * If the length we're going to copy is 0, the application-supplied
+ * memory pointer is allowed to be NULL.
+ */
+ if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ if ((ret = __os_malloc(dbenv, len,
+ dbp == NULL ? NULL : dbp->db_malloc, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
+ if ((ret = __os_realloc(dbenv, len,
+ dbp == NULL ? NULL : dbp->db_realloc, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_USERMEM)) {
+ if (len != 0 && (dbt->data == NULL || dbt->ulen < len))
+ return (ENOMEM);
+ } else if (memp == NULL || memsize == NULL) {
+ return (EINVAL);
+ } else {
+ if (len != 0 && (*memsize == 0 || *memsize < len)) {
+ if ((ret = __os_realloc(dbenv, len, NULL, memp)) != 0) {
+ *memsize = 0;
+ return (ret);
+ }
+ *memsize = len;
+ }
+ dbt->data = *memp;
+ }
+
+ if (len != 0)
+ memcpy(dbt->data, data, len);
+ return (0);
+}
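+
+/*
+ * A minimal sketch, with illustrative names, of the DB_DBT_USERMEM case
+ * handled above: when the caller's buffer is too small the copy fails with
+ * ENOMEM, but dbt->size has already been set to the full record length, so
+ * the caller knows how much space a retry needs.
+ */
+static int
+copy_to_user_buffer(dbp, dbt, data, len)
+ DB *dbp;
+ DBT *dbt; /* Preset with DB_DBT_USERMEM, data and ulen. */
+ void *data;
+ u_int32_t len;
+{
+ /* The memp/memsize arguments are not used for DB_DBT_USERMEM. */
+ return (__db_retcopy(dbp, dbt, data, len, NULL, NULL));
+}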
diff --git a/bdb/db/db_upg.c b/bdb/db/db_upg.c
new file mode 100644
index 00000000000..d8573146ad6
--- /dev/null
+++ b/bdb/db/db_upg.c
@@ -0,0 +1,338 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_upg.c,v 11.20 2000/12/12 17:35:30 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+
+static int (* const func_31_list[P_PAGETYPE_MAX])
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)) = {
+ NULL, /* P_INVALID */
+ NULL, /* __P_DUPLICATE */
+ __ham_31_hash, /* P_HASH */
+ NULL, /* P_IBTREE */
+ NULL, /* P_IRECNO */
+ __bam_31_lbtree, /* P_LBTREE */
+ NULL, /* P_LRECNO */
+ NULL, /* P_OVERFLOW */
+ __ham_31_hashmeta, /* P_HASHMETA */
+ __bam_31_btreemeta, /* P_BTREEMETA */
+};
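+
+/*
+ * The table above is indexed by page type; __db_page_pass (below) calls
+ * fl[TYPE(page)] for every page in the file, and a NULL slot means pages
+ * of that type need no conversion in this upgrade step. Sketch of the
+ * dispatch, with error handling elided:
+ *
+ * if (fl[TYPE(page)] != NULL)
+ * (void)fl[TYPE(page)](dbp, real_name, flags, fhp, page, &dirty);
+ */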
+
+static int __db_page_pass __P((DB *, char *, u_int32_t, int (* const [])
+ (DB *, char *, u_int32_t, DB_FH *, PAGE *, int *), DB_FH *));
+
+/*
+ * __db_upgrade --
+ * Upgrade an existing database.
+ *
+ * PUBLIC: int __db_upgrade __P((DB *, const char *, u_int32_t));
+ */
+int
+__db_upgrade(dbp, fname, flags)
+ DB *dbp;
+ const char *fname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_FH fh;
+ size_t n;
+ int ret, t_ret;
+ u_int8_t mbuf[256];
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->upgrade", flags, DB_DUPSORT)) != 0)
+ return (ret);
+
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, fname, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ /* Open the file. */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0) {
+ __db_err(dbenv, "%s: %s", real_name, db_strerror(ret));
+ return (ret);
+ }
+
+ /* Initialize the feedback. */
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, 0);
+
+ /*
+ * Read the metadata page. We read 256 bytes, which is larger than
+ * any access method's metadata page and smaller than any disk sector.
+ */
+ if ((ret = __os_read(dbenv, &fh, mbuf, sizeof(mbuf), &n)) != 0)
+ goto err;
+
+ switch (((DBMETA *)mbuf)->magic) {
+ case DB_BTREEMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 6:
+ /*
+ * Before V7 not all pages had page types, so we do the
+ * single meta-data page by hand.
+ */
+ if ((ret =
+ __bam_30_btreemeta(dbp, real_name, mbuf)) != 0)
+ goto err;
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 7:
+ /*
+ * We need the page size to do more. Rip it out of
+ * the meta-data page.
+ */
+ memcpy(&dbp->pgsize, mbuf + 20, sizeof(u_int32_t));
+
+ if ((ret = __db_page_pass(
+ dbp, real_name, flags, func_31_list, &fh)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 8:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported btree version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ case DB_HASHMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 4:
+ case 5:
+ /*
+ * Before V6 not all pages had page types, so we do the
+ * single meta-data page by hand.
+ */
+ if ((ret =
+ __ham_30_hashmeta(dbp, real_name, mbuf)) != 0)
+ goto err;
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+
+ /*
+ * Before V6, we created hash pages one by one as they
+ * were needed, using hashhdr.ovfl_point to reserve
+ * a block of page numbers for them. A consequence
+ * of this was that, if no overflow pages had been
+ * created, the current doubling might extend past
+ * the end of the database file.
+ *
+ * In DB 3.X, we now create all the hash pages
+ * belonging to a doubling atomically; it's not
+ * safe to just save them for later, because when
+ * we create an overflow page we'll just create
+ * a new last page (whatever that may be). Grow
+ * the database to the end of the current doubling.
+ */
+ if ((ret =
+ __ham_30_sizefix(dbp, &fh, real_name, mbuf)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 6:
+ /*
+ * We need the page size to do more. Rip it out of
+ * the meta-data page.
+ */
+ memcpy(&dbp->pgsize, mbuf + 20, sizeof(u_int32_t));
+
+ if ((ret = __db_page_pass(
+ dbp, real_name, flags, func_31_list, &fh)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 7:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported hash version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ case DB_QAMMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 1:
+ /*
+ * If we're in a Queue database, the only page that
+ * needs upgrading is the metadata page, so don't
+ * bother with a full pass.
+ */
+ if ((ret = __qam_31_qammeta(dbp, real_name, mbuf)) != 0)
+ return (ret);
+ /* FALLTHROUGH */
+ case 2:
+ if ((ret = __qam_32_qammeta(dbp, real_name, mbuf)) != 0)
+ return (ret);
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 3:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported queue version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ default:
+ M_32_SWAP(((DBMETA *)mbuf)->magic);
+ switch (((DBMETA *)mbuf)->magic) {
+ case DB_BTREEMAGIC:
+ case DB_HASHMAGIC:
+ case DB_QAMMAGIC:
+ __db_err(dbenv,
+ "%s: DB->upgrade only supported on native byte-order systems",
+ real_name);
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unrecognized file type", real_name);
+ break;
+ }
+ ret = EINVAL;
+ goto err;
+ }
+
+ ret = __os_fsync(dbenv, &fh);
+
+err: if ((t_ret = __os_closehandle(&fh)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_freestr(real_name);
+
+ /* We're done. */
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, 100);
+
+ return (ret);
+}
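+
+/*
+ * Rough sketch of the application-level call that reaches this function
+ * (the file name and feedback routine are hypothetical; the method names
+ * assume the standard C API mapping of DB->upgrade and DB->set_feedback):
+ *
+ * void show_progress(DB *dbp, int opcode, int pct)
+ * {
+ * if (opcode == DB_UPGRADE)
+ * printf("upgrade: %d%%\n", pct);
+ * }
+ *
+ * dbp->set_feedback(dbp, show_progress);
+ * if ((ret = dbp->upgrade(dbp, "mydb.db", 0)) != 0)
+ * fprintf(stderr, "DB->upgrade: %s\n", db_strerror(ret));
+ */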
+
+/*
+ * __db_page_pass --
+ * Walk the pages of the database, upgrading whatever needs it.
+ */
+static int
+__db_page_pass(dbp, real_name, flags, fl, fhp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ int (* const fl[P_PAGETYPE_MAX])
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ DB_FH *fhp;
+{
+ DB_ENV *dbenv;
+ PAGE *page;
+ db_pgno_t i, pgno_last;
+ size_t n;
+ int dirty, ret;
+
+ dbenv = dbp->dbenv;
+
+ /* Determine the last page of the file. */
+ if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0)
+ return (ret);
+
+ /* Allocate memory for a single page. */
+ if ((ret = __os_malloc(dbenv, dbp->pgsize, NULL, &page)) != 0)
+ return (ret);
+
+ /* Walk the file, calling the underlying conversion functions. */
+ for (i = 0; i < pgno_last; ++i) {
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, (i * 100)/pgno_last);
+ if ((ret = __os_seek(dbenv,
+ fhp, dbp->pgsize, i, 0, 0, DB_OS_SEEK_SET)) != 0)
+ break;
+ if ((ret = __os_read(dbenv, fhp, page, dbp->pgsize, &n)) != 0)
+ break;
+ dirty = 0;
+ if (fl[TYPE(page)] != NULL && (ret = fl[TYPE(page)]
+ (dbp, real_name, flags, fhp, page, &dirty)) != 0)
+ break;
+ if (dirty) {
+ if ((ret = __os_seek(dbenv,
+ fhp, dbp->pgsize, i, 0, 0, DB_OS_SEEK_SET)) != 0)
+ break;
+ if ((ret = __os_write(dbenv,
+ fhp, page, dbp->pgsize, &n)) != 0)
+ break;
+ }
+ }
+
+ __os_free(page, dbp->pgsize);
+ return (ret);
+}
+
+/*
+ * __db_lastpgno --
+ * Return the current last page number of the file.
+ *
+ * PUBLIC: int __db_lastpgno __P((DB *, char *, DB_FH *, db_pgno_t *));
+ */
+int
+__db_lastpgno(dbp, real_name, fhp, pgno_lastp)
+ DB *dbp;
+ char *real_name;
+ DB_FH *fhp;
+ db_pgno_t *pgno_lastp;
+{
+ DB_ENV *dbenv;
+ db_pgno_t pgno_last;
+ u_int32_t mbytes, bytes;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __os_ioinfo(dbenv,
+ real_name, fhp, &mbytes, &bytes, NULL)) != 0) {
+ __db_err(dbenv, "%s: %s", real_name, db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Page sizes are powers of two, so the file size should be
+ * an exact multiple of the page size.
+ */
+ if (bytes % dbp->pgsize != 0) {
+ __db_err(dbenv,
+ "%s: file size not a multiple of the pagesize", real_name);
+ return (EINVAL);
+ }
+ pgno_last = mbytes * (MEGABYTE / dbp->pgsize);
+ pgno_last += bytes / dbp->pgsize;
+
+ *pgno_lastp = pgno_last;
+ return (0);
+}
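+
+/*
+ * Worked example of the arithmetic above, assuming MEGABYTE is 2^20:
+ * with 4096-byte pages and a file of 2 megabytes plus 8192 bytes,
+ * pgno_last = 2 * (1048576 / 4096) + 8192 / 4096 = 512 + 2 = 514.
+ */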
diff --git a/bdb/db/db_upg_opd.c b/bdb/db/db_upg_opd.c
new file mode 100644
index 00000000000..a7be784afb8
--- /dev/null
+++ b/bdb/db/db_upg_opd.c
@@ -0,0 +1,353 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_upg_opd.c,v 11.9 2000/11/30 00:58:33 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+
+static int __db_build_bi __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *));
+static int __db_build_ri __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *));
+static int __db_up_ovref __P((DB *, DB_FH *, db_pgno_t));
+
+#define GET_PAGE(dbp, fhp, pgno, page) { \
+ if ((ret = __os_seek(dbp->dbenv, \
+ fhp, (dbp)->pgsize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0) \
+ goto err; \
+ if ((ret = __os_read(dbp->dbenv, \
+ fhp, page, (dbp)->pgsize, &n)) != 0) \
+ goto err; \
+}
+#define PUT_PAGE(dbp, fhp, pgno, page) { \
+ if ((ret = __os_seek(dbp->dbenv, \
+ fhp, (dbp)->pgsize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0) \
+ goto err; \
+ if ((ret = __os_write(dbp->dbenv, \
+ fhp, page, (dbp)->pgsize, &n)) != 0) \
+ goto err; \
+}
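+
+/*
+ * GET_PAGE and PUT_PAGE are deliberately unhygienic: they expect an int
+ * "ret", a size_t "n" and an "err:" label in the enclosing scope. Sketch
+ * of a caller (the declarations belong to the caller, not the macros):
+ *
+ * size_t n;
+ * int ret;
+ *
+ * GET_PAGE(dbp, fhp, pgno, page);
+ * TYPE(page) = P_LRECNO;
+ * PUT_PAGE(dbp, fhp, pgno, page);
+ * err: ...
+ */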
+
+/*
+ * __db_31_offdup --
+ * Convert 3.0 off-page duplicates to 3.1 off-page duplicates.
+ *
+ * PUBLIC: int __db_31_offdup __P((DB *, char *, DB_FH *, int, db_pgno_t *));
+ */
+int
+__db_31_offdup(dbp, real_name, fhp, sorted, pgnop)
+ DB *dbp;
+ char *real_name;
+ DB_FH *fhp;
+ int sorted;
+ db_pgno_t *pgnop;
+{
+ PAGE *ipage, *page;
+ db_indx_t indx;
+ db_pgno_t cur_cnt, i, next_cnt, pgno, *pgno_cur, pgno_last;
+ db_pgno_t *pgno_next, pgno_max, *tmp;
+ db_recno_t nrecs;
+ size_t n;
+ int level, nomem, ret;
+
+ ipage = page = NULL;
+ pgno_cur = pgno_next = NULL;
+
+ /* Allocate room to hold a page. */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &page)) != 0)
+ goto err;
+
+ /*
+ * Walk the chain of 3.0 off-page duplicates. Each one is converted
+ * in place to a 3.1 off-page duplicate page. If the duplicates are
+ * sorted, they are converted to a Btree leaf page, otherwise to a
+ * Recno leaf page.
+ */
+ for (nrecs = 0, cur_cnt = pgno_max = 0,
+ pgno = *pgnop; pgno != PGNO_INVALID;) {
+ if (pgno_max == cur_cnt) {
+ pgno_max += 20;
+ if ((ret = __os_realloc(dbp->dbenv, pgno_max *
+ sizeof(db_pgno_t), NULL, &pgno_cur)) != 0)
+ goto err;
+ }
+ pgno_cur[cur_cnt++] = pgno;
+
+ GET_PAGE(dbp, fhp, pgno, page);
+ nrecs += NUM_ENT(page);
+ LEVEL(page) = LEAFLEVEL;
+ TYPE(page) = sorted ? P_LDUP : P_LRECNO;
+ /*
+ * !!!
+ * DB didn't zero the LSNs on off-page duplicates pages.
+ */
+ ZERO_LSN(LSN(page));
+ PUT_PAGE(dbp, fhp, pgno, page);
+
+ pgno = NEXT_PGNO(page);
+ }
+
+ /*
+ * If we only have a single page, it's easy: no internal pages are
+ * needed. Otherwise, set up to build a tree of internal pages.
+ */
+ if (cur_cnt > 1) {
+ /*
+ * pgno_cur is the list of pages we just converted. We're
+ * going to walk that list, but we'll need to create a new
+ * list while we do so.
+ */
+ if ((ret = __os_malloc(dbp->dbenv,
+ cur_cnt * sizeof(db_pgno_t), NULL, &pgno_next)) != 0)
+ goto err;
+
+ /* Figure out where we can start allocating new pages. */
+ if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0)
+ goto err;
+
+ /* Allocate room for an internal page. */
+ if ((ret = __os_malloc(dbp->dbenv,
+ dbp->pgsize, NULL, &ipage)) != 0)
+ goto err;
+ PGNO(ipage) = PGNO_INVALID;
+ }
+
+ /*
+ * Repeatedly walk the list of pages, building internal pages, until
+ * there's only one page at a level.
+ */
+ for (level = LEAFLEVEL + 1; cur_cnt > 1; ++level) {
+ for (indx = 0, i = next_cnt = 0; i < cur_cnt;) {
+ if (indx == 0) {
+ P_INIT(ipage, dbp->pgsize, pgno_last,
+ PGNO_INVALID, PGNO_INVALID,
+ level, sorted ? P_IBTREE : P_IRECNO);
+ ZERO_LSN(LSN(ipage));
+
+ pgno_next[next_cnt++] = pgno_last++;
+ }
+
+ GET_PAGE(dbp, fhp, pgno_cur[i], page);
+
+ /*
+ * If the duplicates are sorted, put the first item on
+ * the lower-level page onto a Btree internal page. If
+ * the duplicates are not sorted, create an internal
+ * Recno structure on the page. If either case doesn't
+ * fit, push out the current page and start a new one.
+ */
+ nomem = 0;
+ if (sorted) {
+ if ((ret = __db_build_bi(
+ dbp, fhp, ipage, page, indx, &nomem)) != 0)
+ goto err;
+ } else
+ if ((ret = __db_build_ri(
+ dbp, fhp, ipage, page, indx, &nomem)) != 0)
+ goto err;
+ if (nomem) {
+ indx = 0;
+ PUT_PAGE(dbp, fhp, PGNO(ipage), ipage);
+ } else {
+ ++indx;
+ ++NUM_ENT(ipage);
+ ++i;
+ }
+ }
+
+ /*
+ * Push out the last internal page. Set the top-level record
+ * count if we've reached the top.
+ */
+ if (next_cnt == 1)
+ RE_NREC_SET(ipage, nrecs);
+ PUT_PAGE(dbp, fhp, PGNO(ipage), ipage);
+
+ /* Swap the current and next page number arrays. */
+ cur_cnt = next_cnt;
+ tmp = pgno_cur;
+ pgno_cur = pgno_next;
+ pgno_next = tmp;
+ }
+
+ *pgnop = pgno_cur[0];
+
+err: if (pgno_cur != NULL)
+ __os_free(pgno_cur, 0);
+ if (pgno_next != NULL)
+ __os_free(pgno_next, 0);
+ if (ipage != NULL)
+ __os_free(ipage, dbp->pgsize);
+ if (page != NULL)
+ __os_free(page, dbp->pgsize);
+
+ return (ret);
+}
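+
+/*
+ * Shape of the conversion above, for a hypothetical chain of five 3.0
+ * off-page duplicate pages: each page is rewritten in place as a leaf
+ * (P_LDUP or P_LRECNO), then internal pages are built bottom-up (for
+ * example, 5 leaves collapse to 1 internal page if all five entries fit,
+ * or to 2 + 1 internal pages across two levels if they do not) until a
+ * single page remains, whose number is handed back through *pgnop as the
+ * root of the new off-page duplicate tree.
+ */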
+
+/*
+ * __db_build_bi --
+ * Build a BINTERNAL entry for a parent page.
+ */
+static int
+__db_build_bi(dbp, fhp, ipage, page, indx, nomemp)
+ DB *dbp;
+ DB_FH *fhp;
+ PAGE *ipage, *page;
+ u_int32_t indx;
+ int *nomemp;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk;
+ u_int8_t *p;
+ int ret;
+
+ switch (TYPE(page)) {
+ case P_IBTREE:
+ child_bi = GET_BINTERNAL(page, 0);
+ if (P_FREESPACE(ipage) < BINTERNAL_PSIZE(child_bi->len)) {
+ *nomemp = 1;
+ return (0);
+ }
+ ipage->inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(child_bi->len);
+ p = P_ENTRY(ipage, indx);
+
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bi->data, child_bi->len);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_up_ovref(dbp, fhp,
+ ((BOVERFLOW *)(child_bi->data))->pgno)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ child_bk = GET_BKEYDATA(page, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ if (P_FREESPACE(ipage) <
+ BINTERNAL_PSIZE(child_bk->len)) {
+ *nomemp = 1;
+ return (0);
+ }
+ ipage->inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(child_bk->len);
+ p = P_ENTRY(ipage, indx);
+
+ bi.len = child_bk->len;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bk->data, child_bk->len);
+ break;
+ case B_OVERFLOW:
+ if (P_FREESPACE(ipage) <
+ BINTERNAL_PSIZE(BOVERFLOW_SIZE)) {
+ *nomemp = 1;
+ return (0);
+ }
+ ipage->inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ p = P_ENTRY(ipage, indx);
+
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bk, BOVERFLOW_SIZE);
+
+ /* Increment the overflow ref count. */
+ if ((ret = __db_up_ovref(dbp, fhp,
+ ((BOVERFLOW *)child_bk)->pgno)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp, PGNO(page)));
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp, PGNO(page)));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_build_ri --
+ * Build a RINTERNAL entry for an internal parent page.
+ */
+static int
+__db_build_ri(dbp, fhp, ipage, page, indx, nomemp)
+ DB *dbp;
+ DB_FH *fhp;
+ PAGE *ipage, *page;
+ u_int32_t indx;
+ int *nomemp;
+{
+ RINTERNAL ri;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(fhp, NULL);
+
+ if (P_FREESPACE(ipage) < RINTERNAL_PSIZE) {
+ *nomemp = 1;
+ return (0);
+ }
+
+ ri.pgno = PGNO(page);
+ ri.nrecs = __bam_total(page);
+ ipage->inp[indx] = HOFFSET(ipage) -= RINTERNAL_SIZE;
+ memcpy(P_ENTRY(ipage, indx), &ri, RINTERNAL_SIZE);
+
+ return (0);
+}
+
+/*
+ * __db_up_ovref --
+ * Increment the reference count on an overflow page.
+ */
+static int
+__db_up_ovref(dbp, fhp, pgno)
+ DB *dbp;
+ DB_FH *fhp;
+ db_pgno_t pgno;
+{
+ PAGE *page;
+ size_t n;
+ int ret;
+
+ /* Allocate room to hold a page. */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &page)) != 0)
+ return (ret);
+
+ GET_PAGE(dbp, fhp, pgno, page);
+ ++OV_REF(page);
+ PUT_PAGE(dbp, fhp, pgno, page);
+
+err: __os_free(page, dbp->pgsize);
+
+ return (ret);
+}
diff --git a/bdb/db/db_vrfy.c b/bdb/db/db_vrfy.c
new file mode 100644
index 00000000000..3509e05e91f
--- /dev/null
+++ b/bdb/db/db_vrfy.c
@@ -0,0 +1,2340 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_vrfy.c,v 1.53 2001/01/11 18:19:51 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_vrfy.c,v 1.53 2001/01/11 18:19:51 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "db_verify.h"
+#include "db_ext.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+
+static int __db_guesspgsize __P((DB_ENV *, DB_FH *));
+static int __db_is_valid_magicno __P((u_int32_t, DBTYPE *));
+static int __db_is_valid_pagetype __P((u_int32_t));
+static int __db_meta2pgset
+ __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, DB *));
+static int __db_salvage_subdbs
+ __P((DB *, VRFY_DBINFO *, void *,
+ int(*)(void *, const void *), u_int32_t, int *));
+static int __db_salvage_unknowns
+ __P((DB *, VRFY_DBINFO *, void *,
+ int (*)(void *, const void *), u_int32_t));
+static int __db_vrfy_common
+ __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+static int __db_vrfy_freelist __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+static int __db_vrfy_invalid
+ __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+static int __db_vrfy_orderchkonly __P((DB *,
+ VRFY_DBINFO *, const char *, const char *, u_int32_t));
+static int __db_vrfy_pagezero __P((DB *, VRFY_DBINFO *, DB_FH *, u_int32_t));
+static int __db_vrfy_subdbs
+ __P((DB *, VRFY_DBINFO *, const char *, u_int32_t));
+static int __db_vrfy_structure
+ __P((DB *, VRFY_DBINFO *, const char *, db_pgno_t, u_int32_t));
+static int __db_vrfy_walkpages
+ __P((DB *, VRFY_DBINFO *, void *, int (*)(void *, const void *),
+ u_int32_t));
+
+/*
+ * This is the code for DB->verify, the DB database consistency checker.
+ * For now, it checks all subdatabases in a database, and verifies
+ * everything it knows how to (i.e. it's all-or-nothing, and one can't
+ * check only for a subset of possible problems).
+ */
+
+/*
+ * __db_verify --
+ * Walk the entire file page-by-page, either verifying with or without
+ * dumping in db_dump -d format, or DB_SALVAGE-ing whatever key/data
+ * pairs can be found and dumping them in standard (db_load-ready)
+ * dump format.
+ *
+ * (Salvaging isn't really a verification operation, but we put it
+ * here anyway because it requires essentially identical top-level
+ * code.)
+ *
+ * flags may be 0, DB_NOORDERCHK, DB_ORDERCHKONLY, or DB_SALVAGE
+ * (and optionally DB_AGGRESSIVE).
+ *
+ * __db_verify itself is simply a wrapper to __db_verify_internal,
+ * which lets us pass appropriate equivalents to FILE * in from the
+ * non-C APIs.
+ *
+ * PUBLIC: int __db_verify
+ * PUBLIC: __P((DB *, const char *, const char *, FILE *, u_int32_t));
+ */
+int
+__db_verify(dbp, file, database, outfile, flags)
+ DB *dbp;
+ const char *file, *database;
+ FILE *outfile;
+ u_int32_t flags;
+{
+
+ return (__db_verify_internal(dbp,
+ file, database, outfile, __db_verify_callback, flags));
+}
+
+/*
+ * __db_verify_callback --
+ * Callback function for using pr_* functions from C.
+ *
+ * PUBLIC: int __db_verify_callback __P((void *, const void *));
+ */
+int
+__db_verify_callback(handle, str_arg)
+ void *handle;
+ const void *str_arg;
+{
+ char *str;
+ FILE *f;
+
+ str = (char *)str_arg;
+ f = (FILE *)handle;
+
+ if (fprintf(f, "%s", str) != (int)strlen(str))
+ return (EIO);
+
+ return (0);
+}
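+
+/*
+ * Rough sketch of how this callback is used from the C API, assuming the
+ * handle's verify method maps to __db_verify above (the file name is
+ * hypothetical): DB->verify passes its FILE * straight through as the
+ * opaque handle.
+ *
+ * if ((ret = dbp->verify(dbp, "mydb.db", NULL, stdout, DB_SALVAGE)) != 0)
+ * fprintf(stderr, "verify: %s\n", db_strerror(ret));
+ */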
+
+/*
+ * __db_verify_internal --
+ * Inner meat of __db_verify.
+ *
+ * PUBLIC: int __db_verify_internal __P((DB *, const char *,
+ * PUBLIC: const char *, void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__db_verify_internal(dbp_orig, name, subdb, handle, callback, flags)
+ DB *dbp_orig;
+ const char *name, *subdb;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_FH fh, *fhp;
+ PAGE *h;
+ VRFY_DBINFO *vdp;
+ db_pgno_t last;
+ int has, ret, isbad;
+ char *real_name;
+
+ dbenv = dbp_orig->dbenv;
+ vdp = NULL;
+ real_name = NULL;
+ has = ret = isbad = 0;
+
+ memset(&fh, 0, sizeof(fh));
+ fhp = &fh;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_AFTER_OPEN(dbp_orig, "verify");
+
+#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_ORDERCHKONLY | DB_SALVAGE)
+ if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ /*
+ * DB_SALVAGE is mutually exclusive with the other flags except
+ * DB_AGGRESSIVE.
+ */
+ if (LF_ISSET(DB_SALVAGE) &&
+ (flags & ~DB_AGGRESSIVE) != DB_SALVAGE)
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ if (LF_ISSET(DB_ORDERCHKONLY) && flags != DB_ORDERCHKONLY)
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ if (LF_ISSET(DB_ORDERCHKONLY) && subdb == NULL) {
+ __db_err(dbenv, "DB_ORDERCHKONLY requires a database name");
+ return (EINVAL);
+ }
+
+ /*
+ * Forbid working in an environment that uses transactions or
+ * locking; we're going to be looking at the file freely,
+ * and while we're not going to modify it, we aren't obeying
+ * locking conventions either.
+ */
+ if (TXN_ON(dbenv) || LOCKING_ON(dbenv) || LOGGING_ON(dbenv)) {
+ dbp_orig->errx(dbp_orig,
+ "verify may not be used with transactions, logging, or locking");
+ return (EINVAL);
+ /* NOTREACHED */
+ }
+
+ /* Create a dbp to use internally, which we can close at our leisure. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ F_SET(dbp, DB_AM_VERIFYING);
+
+ /* Copy the supplied pagesize, which we use if the file one is bogus. */
+ if (dbp_orig->pgsize >= DB_MIN_PGSIZE &&
+ dbp_orig->pgsize <= DB_MAX_PGSIZE)
+ dbp->set_pagesize(dbp, dbp_orig->pgsize);
+
+ /* Copy the feedback function, if present, and initialize it. */
+ if (!LF_ISSET(DB_SALVAGE) && dbp_orig->db_feedback != NULL) {
+ dbp->set_feedback(dbp, dbp_orig->db_feedback);
+ dbp->db_feedback(dbp, DB_VERIFY, 0);
+ }
+
+ /*
+ * Copy the comparison and hashing functions. Note that
+ * even if the database is not a hash or btree, the respective
+ * internal structures will have been initialized.
+ */
+ if (dbp_orig->dup_compare != NULL &&
+ (ret = dbp->set_dup_compare(dbp, dbp_orig->dup_compare)) != 0)
+ goto err;
+ if (((BTREE *)dbp_orig->bt_internal)->bt_compare != NULL &&
+ (ret = dbp->set_bt_compare(dbp,
+ ((BTREE *)dbp_orig->bt_internal)->bt_compare)) != 0)
+ goto err;
+ if (((HASH *)dbp_orig->h_internal)->h_hash != NULL &&
+ (ret = dbp->set_h_hash(dbp,
+ ((HASH *)dbp_orig->h_internal)->h_hash)) != 0)
+ goto err;
+
+ /*
+ * We don't know how large the cache is, and if the database
+ * in question uses a small page size--which we don't know
+ * yet!--it may be uncomfortably small for the default page
+ * size [#2143]. However, the things we need temporary
+ * databases for in dbinfo are largely tiny, so using a
+ * 1024-byte pagesize is probably not going to be a big hit,
+ * and will make us fit better into small spaces.
+ */
+ if ((ret = __db_vrfy_dbinfo_create(dbenv, 1024, &vdp)) != 0)
+ goto err;
+
+ /* Find the real name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /*
+ * Our first order of business is to verify page 0, which is
+ * the metadata page for the master database of subdatabases
+ * or of the only database in the file. We want to do this by hand
+ * rather than just calling __db_open in case it's corrupt--various
+ * things in __db_open might act funny.
+ *
+ * Once we know the metadata page is healthy, I believe that it's
+ * safe to open the database normally and then use the page swapping
+ * code, which makes life easier.
+ */
+ if ((ret = __os_open(dbenv, real_name, DB_OSO_RDONLY, 0444, fhp)) != 0)
+ goto err;
+
+ /* Verify the metadata page 0; set pagesize and type. */
+ if ((ret = __db_vrfy_pagezero(dbp, vdp, fhp, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * We can assume at this point that dbp->pagesize and dbp->type are
+ * set correctly, or at least as well as they can be, and that
+ * locking, logging, and txns are not in use. Thus we can trust
+ * the memp code not to look at the page, and thus to be safe
+ * enough to use.
+ *
+ * The dbp is not open, but the file is open in the fhp, and we
+ * cannot assume that __db_open is safe. Call __db_dbenv_setup,
+ * the [safe] part of __db_open that initializes the environment--
+ * and the mpool--manually.
+ */
+ if ((ret = __db_dbenv_setup(dbp,
+ name, DB_ODDFILESIZE | DB_RDONLY)) != 0)
+ return (ret);
+
+ /* Mark the dbp as opened, so that we correctly handle its close. */
+ F_SET(dbp, DB_OPEN_CALLED);
+
+ /*
+ * Find out the page number of the last page in the database.
+ *
+ * XXX: This currently fails if the last page is of bad type,
+ * because it calls __db_pgin and that pukes. This is bad.
+ */
+ if ((ret = memp_fget(dbp->mpf, &last, DB_MPOOL_LAST, &h)) != 0)
+ goto err;
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ goto err;
+
+ vdp->last_pgno = last;
+
+ /*
+ * DB_ORDERCHKONLY is a special case; our file consists of
+ * several subdatabases, which use different hash, bt_compare,
+ * and/or dup_compare functions. Consequently, we couldn't verify
+ * sorting and hashing simply by calling DB->verify() on the file.
+ * DB_ORDERCHKONLY allows us to come back and check those things; it
+ * requires a subdatabase, and assumes that everything but that
+ * database's sorting/hashing is correct.
+ */
+ if (LF_ISSET(DB_ORDERCHKONLY)) {
+ ret = __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags);
+ goto done;
+ }
+
+ /*
+ * When salvaging, we use a db to keep track of whether we've seen a
+ * given overflow or dup page in the course of traversing normal data.
+ * If in the end we have not, we assume its key got lost and print it
+ * with key "UNKNOWN".
+ */
+ if (LF_ISSET(DB_SALVAGE)) {
+ if ((ret = __db_salvage_init(vdp)) != 0)
+ return (ret);
+
+ /*
+ * If we're not being aggressive, attempt to crack subdbs.
+ * "has" will indicate whether the attempt has succeeded
+ * (even in part), meaning that we have some semblance of
+ * subdbs; on the walkpages pass, we print out
+ * whichever data pages we have not seen.
+ */
+ has = 0;
+ if (!LF_ISSET(DB_AGGRESSIVE) && (__db_salvage_subdbs(dbp,
+ vdp, handle, callback, flags, &has)) != 0)
+ isbad = 1;
+
+ /*
+ * If we have subdatabases, we need to signal that if
+ * any keys are found that don't belong to a subdatabase,
+ * they'll need to have an "__OTHER__" subdatabase header
+ * printed first. Flag this. Else, print a header for
+ * the normal, non-subdb database.
+ */
+ if (has == 1)
+ F_SET(vdp, SALVAGE_PRINTHEADER);
+ else if ((ret = __db_prheader(dbp,
+ NULL, 0, 0, handle, callback, vdp, PGNO_BASE_MD)) != 0)
+ goto err;
+ }
+
+ if ((ret =
+ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
+
+ /* If we're verifying, verify inter-page structure. */
+ if (!LF_ISSET(DB_SALVAGE) && isbad == 0)
+ if ((ret =
+ __db_vrfy_structure(dbp, vdp, name, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * If we're salvaging, output with key UNKNOWN any overflow or dup pages
+ * we haven't been able to put in context. Then destroy the salvager's
+ * state-saving database.
+ */
+ if (LF_ISSET(DB_SALVAGE)) {
+ if ((ret = __db_salvage_unknowns(dbp,
+ vdp, handle, callback, flags)) != 0)
+ isbad = 1;
+ /* No return value, since there's little we can do. */
+ __db_salvage_destroy(vdp);
+ }
+
+ if (0) {
+err: (void)__db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ }
+
+ if (LF_ISSET(DB_SALVAGE) &&
+ (has == 0 || F_ISSET(vdp, SALVAGE_PRINTFOOTER)))
+ (void)__db_prfooter(handle, callback);
+
+ /* Send feedback that we're done. */
+done: if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_VERIFY, 100);
+
+ if (F_ISSET(fhp, DB_FH_VALID))
+ (void)__os_closehandle(fhp);
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ if (vdp)
+ (void)__db_vrfy_dbinfo_destroy(vdp);
+ if (real_name)
+ __os_freestr(real_name);
+
+ if ((ret == 0 && isbad == 1) || ret == DB_VERIFY_FATAL)
+ ret = DB_VERIFY_BAD;
+
+ return (ret);
+}
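+
+/*
+ * Rough sketch of a DB_ORDERCHKONLY caller (the file, subdatabase, and
+ * comparison routine names are hypothetical): the application registers
+ * its own ordering function on a fresh handle, then asks for the order
+ * check that a plain verify of the whole file had to skip.
+ *
+ * db_create(&dbp, NULL, 0);
+ * dbp->set_bt_compare(dbp, my_compare);
+ * ret = dbp->verify(dbp, "mydb.db", "inventory", NULL, DB_ORDERCHKONLY);
+ */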
+
+/*
+ * __db_vrfy_pagezero --
+ * Verify the master metadata page. Use seek, read, and a local buffer
+ * rather than the DB paging code, for safety.
+ *
+ * Must correctly (or best-guess) set dbp->type and dbp->pagesize.
+ */
+static int
+__db_vrfy_pagezero(dbp, vdp, fhp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ DB_FH *fhp;
+ u_int32_t flags;
+{
+ DBMETA *meta;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t freelist;
+ size_t nr;
+ int t_ret, ret, swapped;
+ u_int8_t mbuf[DBMETASIZE];
+
+ swapped = ret = t_ret = 0;
+ freelist = 0;
+ dbenv = dbp->dbenv;
+ meta = (DBMETA *)mbuf;
+ dbp->type = DB_UNKNOWN;
+
+ /*
+ * Seek to the metadata page.
+ * Note that if we're just starting a verification, dbp->pgsize
+ * may be zero; this is okay, as we want page zero anyway and
+ * 0*0 == 0.
+ */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ if ((ret = __os_read(dbenv, fhp, mbuf, DBMETASIZE, &nr)) != 0)
+ goto err;
+
+ if (nr != DBMETASIZE) {
+ EPRINT((dbp->dbenv,
+ "Incomplete metadata page %lu", (u_long)PGNO_BASE_MD));
+ t_ret = DB_VERIFY_FATAL;
+ goto err;
+ }
+
+ /*
+ * Check all of the fields that we can.
+ */
+
+ /* 08-11: Current page number. Must == pgno. */
+ /* Note that endianness doesn't matter--it's zero. */
+ if (meta->pgno != PGNO_BASE_MD) {
+ EPRINT((dbp->dbenv, "Bad pgno: was %lu, should be %lu",
+ (u_long)meta->pgno, (u_long)PGNO_BASE_MD));
+ ret = DB_VERIFY_BAD;
+ }
+
+ /* 12-15: Magic number. Must be one of valid set. */
+ if (__db_is_valid_magicno(meta->magic, &dbp->type))
+ swapped = 0;
+ else {
+ M_32_SWAP(meta->magic);
+ if (__db_is_valid_magicno(meta->magic,
+ &dbp->type))
+ swapped = 1;
+ else {
+ EPRINT((dbp->dbenv,
+ "Bad magic number: %lu", (u_long)meta->magic));
+ ret = DB_VERIFY_BAD;
+ }
+ }
+
+ /*
+ * 16-19: Version. Must be current; for now, we
+ * don't support verification of old versions.
+ */
+ if (swapped)
+ M_32_SWAP(meta->version);
+ if ((dbp->type == DB_BTREE && meta->version != DB_BTREEVERSION) ||
+ (dbp->type == DB_HASH && meta->version != DB_HASHVERSION) ||
+ (dbp->type == DB_QUEUE && meta->version != DB_QAMVERSION)) {
+ ret = DB_VERIFY_BAD;
+ EPRINT((dbp->dbenv, "%s%s", "Old or incorrect DB ",
+ "version; extraneous errors may result"));
+ }
+
+ /*
+ * 20-23: Pagesize. Must be a power of two,
+ * at least 512, and no larger than 64K.
+ */
+ if (swapped)
+ M_32_SWAP(meta->pagesize);
+ if (IS_VALID_PAGESIZE(meta->pagesize))
+ dbp->pgsize = meta->pagesize;
+ else {
+ EPRINT((dbp->dbenv,
+ "Bad page size: %lu", (u_long)meta->pagesize));
+ ret = DB_VERIFY_BAD;
+
+ /*
+ * Now try to settle on a pagesize to use.
+ * If the user-supplied one is reasonable,
+ * use it; else, guess.
+ */
+ if (!IS_VALID_PAGESIZE(dbp->pgsize))
+ dbp->pgsize = __db_guesspgsize(dbenv, fhp);
+ }
+
+ /*
+ * 25: Page type. Must be correct for dbp->type,
+ * which is by now set as well as it can be.
+ */
+ /* Needs no swapping--only one byte! */
+ if ((dbp->type == DB_BTREE && meta->type != P_BTREEMETA) ||
+ (dbp->type == DB_HASH && meta->type != P_HASHMETA) ||
+ (dbp->type == DB_QUEUE && meta->type != P_QAMMETA)) {
+ ret = DB_VERIFY_BAD;
+ EPRINT((dbp->dbenv, "Bad page type: %lu", (u_long)meta->type));
+ }
+
+ /*
+ * 28-31: Free list page number.
+ * We'll verify its sensibility when we do inter-page
+ * verification later; for now, just store it.
+ */
+ if (swapped)
+ M_32_SWAP(meta->free);
+ freelist = meta->free;
+
+ /*
+ * Initialize vdp->pages to fit a single pageinfo structure for
+ * this one page. We'll realloc later when we know how many
+ * pages there are.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0)
+ return (ret);
+ pip->pgno = PGNO_BASE_MD;
+ pip->type = meta->type;
+
+ /*
+ * Signal that we still have to check the info specific to
+ * a given type of meta page.
+ */
+ F_SET(pip, VRFY_INCOMPLETE);
+
+ pip->free = freelist;
+
+ if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ return (ret);
+
+ /* Set up the dbp's fileid. We don't use the regular open path. */
+ memcpy(dbp->fileid, meta->uid, DB_FILE_ID_LEN);
+
+ if (0) {
+err: __db_err(dbenv, "%s", db_strerror(ret));
+ }
+
+ if (swapped == 1)
+ F_SET(dbp, DB_AM_SWAP);
+ if (t_ret != 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_vrfy_walkpages --
+ * Main loop of the verifier/salvager. Walks through,
+ * page by page, and verifies all pages and/or prints all data pages.
+ */
+static int
+__db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ PAGE *h;
+ db_pgno_t i;
+ int ret, t_ret, isbad;
+
+ ret = isbad = t_ret = 0;
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_fchk(dbenv,
+ "__db_vrfy_walkpages", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ for (i = 0; i <= vdp->last_pgno; i++) {
+ /*
+ * If DB_SALVAGE is set, we inspect our database of
+ * completed pages, and skip any we've already printed in
+ * the subdb pass.
+ */
+ if (LF_ISSET(DB_SALVAGE) && (__db_salvage_isdone(vdp, i) != 0))
+ continue;
+
+ /* If an individual page get fails, keep going. */
+ if ((t_ret = memp_fget(dbp->mpf, &i, 0, &h)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ continue;
+ }
+
+ if (LF_ISSET(DB_SALVAGE)) {
+ /*
+ * We pretty much don't want to quit unless a
+ * bomb hits. May as well return that something
+ * was screwy, however.
+ */
+ if ((t_ret = __db_salvage(dbp,
+ vdp, i, h, handle, callback, flags)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ isbad = 1;
+ }
+ } else {
+ /*
+ * Verify info common to all page
+ * types.
+ */
+ if (i != PGNO_BASE_MD)
+ if ((t_ret = __db_vrfy_common(dbp,
+ vdp, h, i, flags)) == DB_VERIFY_BAD)
+ isbad = 1;
+
+ switch (TYPE(h)) {
+ case P_INVALID:
+ t_ret = __db_vrfy_invalid(dbp,
+ vdp, h, i, flags);
+ break;
+ case __P_DUPLICATE:
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Old-style duplicate page: %lu",
+ (u_long)i));
+ break;
+ case P_HASH:
+ t_ret = __ham_vrfy(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ t_ret = __bam_vrfy(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_LRECNO:
+ t_ret = __ram_vrfy_leaf(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_OVERFLOW:
+ t_ret = __db_vrfy_overflow(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_HASHMETA:
+ t_ret = __ham_vrfy_meta(dbp,
+ vdp, (HMETA *)h, i, flags);
+ break;
+ case P_BTREEMETA:
+ t_ret = __bam_vrfy_meta(dbp,
+ vdp, (BTMETA *)h, i, flags);
+ break;
+ case P_QAMMETA:
+ t_ret = __qam_vrfy_meta(dbp,
+ vdp, (QMETA *)h, i, flags);
+ break;
+ case P_QAMDATA:
+ t_ret = __qam_vrfy_data(dbp,
+ vdp, (QPAGE *)h, i, flags);
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+ "Unknown page type: %lu", (u_long)TYPE(h)));
+ isbad = 1;
+ break;
+ }
+
+ /*
+ * Set up error return.
+ */
+ if (t_ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (t_ret == DB_VERIFY_FATAL)
+ goto err;
+ else if (ret == 0)
+ ret = t_ret;
+
+ /*
+ * Provide feedback to the application about our
+ * progress. The range 0-50% comes from the fact
+ * that this is the first of two passes through the
+ * database (front-to-back, then top-to-bottom).
+ */
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_VERIFY,
+ (i + 1) * 50 / (vdp->last_pgno + 1));
+ }
+
+ if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ if (0) {
+err: if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ return (ret == 0 ? t_ret : ret);
+ return (DB_VERIFY_BAD);
+ }
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_structure--
+ * After a beginning-to-end walk through the database has been
+ * completed, put together the information that has been collected
+ * to verify the overall database structure.
+ *
+ * Should only be called if we want to do a database verification,
+ * i.e. if DB_SALVAGE is not set.
+ */
+static int
+__db_vrfy_structure(dbp, vdp, dbname, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *dbname;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t i;
+ int ret, isbad, hassubs, p;
+
+ isbad = 0;
+ pip = NULL;
+ dbenv = dbp->dbenv;
+ pgset = vdp->pgset;
+
+ if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_SALVAGE)) {
+ __db_err(dbenv, "__db_vrfy_structure called with DB_SALVAGE");
+ return (EINVAL);
+ }
+
+ /*
+ * Providing feedback here is tricky; in most situations,
+ * we fetch each page one more time, but we do so in a top-down
+ * order that depends on the access method. Worse, we do this
+ * recursively in btree, such that on any call where we're traversing
+ * a subtree we don't know where that subtree is in the whole database;
+ * worse still, any given database may be one of several subdbs.
+ *
+ * The solution is to decrement a counter vdp->pgs_remaining each time
+ * we verify (and call feedback on) a page. We may over- or
+ * under-count, but the structure feedback function will ensure that we
+ * never give a percentage under 50 or over 100. (The first pass
+ * covered the range 0-50%.)
+ */
+ if (dbp->db_feedback != NULL)
+ vdp->pgs_remaining = vdp->last_pgno + 1;
+
+ /*
+ * Call the appropriate function to downwards-traverse the db type.
+ */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_vrfy_structure(dbp, vdp, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * If we have subdatabases and we know that the database is,
+ * thus far, sound, it's safe to walk the tree of subdatabases.
+ * Do so, and verify the structure of the databases within.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, 0, &pip)) != 0)
+ goto err;
+ hassubs = F_ISSET(pip, VRFY_HAS_SUBDBS);
+ if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ goto err;
+
+ if (isbad == 0 && hassubs)
+ if ((ret =
+ __db_vrfy_subdbs(dbp, vdp, dbname, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case DB_HASH:
+ if ((ret = __ham_vrfy_structure(dbp, vdp, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case DB_QUEUE:
+ if ((ret = __qam_vrfy_structure(dbp, vdp, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ }
+
+ /*
+ * Queue pages may be unreferenced and totally zeroed, if
+ * they're empty; queue doesn't have much structure, so
+ * this is unlikely to be wrong in any troublesome sense.
+ * Skip to "err".
+ */
+ goto err;
+ /* NOTREACHED */
+ default:
+ /* This should only happen if the verifier is somehow broken. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ /* NOTREACHED */
+ }
+
+ /* Walk free list. */
+ if ((ret =
+ __db_vrfy_freelist(dbp, vdp, meta_pgno, flags)) == DB_VERIFY_BAD)
+ isbad = 1;
+
+ /*
+ * If structure checks up until now have failed, it's likely that
+ * checking what pages have been missed will result in oodles of
+ * extraneous error messages being EPRINTed. Skip to the end
+ * if this is the case; we're going to be printing at least one
+ * error anyway, and probably all the more salient ones.
+ */
+ if (ret != 0 || isbad == 1)
+ goto err;
+
+ /*
+ * Make sure no page has been missed and that no page is still marked
+ * "all zeroes" (only certain hash pages can be, and they're unmarked
+ * in __ham_vrfy_structure).
+ */
+ for (i = 0; i < vdp->last_pgno + 1; i++) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, i, &pip)) != 0)
+ goto err;
+ if ((ret = __db_vrfy_pgset_get(pgset, i, &p)) != 0)
+ goto err;
+ if (p == 0) {
+ EPRINT((dbp->dbenv,
+ "Unreferenced page %lu", (u_long)i));
+ isbad = 1;
+ }
+
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES)) {
+ EPRINT((dbp->dbenv,
+ "Totally zeroed page %lu", (u_long)i));
+ isbad = 1;
+ }
+ if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ goto err;
+ pip = NULL;
+ }
+
+err: if (pip != NULL)
+ (void)__db_vrfy_putpageinfo(vdp, pip);
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_is_valid_pagetype
+ */
+static int
+__db_is_valid_pagetype(type)
+ u_int32_t type;
+{
+ switch (type) {
+ case P_INVALID: /* Order matches ordinal value. */
+ case P_HASH:
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ case P_QAMDATA:
+ case P_LDUP:
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * __db_is_valid_magicno
+ */
+static int
+__db_is_valid_magicno(magic, typep)
+ u_int32_t magic;
+ DBTYPE *typep;
+{
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ *typep = DB_BTREE;
+ return (1);
+ case DB_HASHMAGIC:
+ *typep = DB_HASH;
+ return (1);
+ case DB_QAMMAGIC:
+ *typep = DB_QUEUE;
+ return (1);
+ }
+ *typep = DB_UNKNOWN;
+ return (0);
+}
+
+/*
+ * __db_vrfy_common --
+ * Verify info common to all page types.
+ */
+static int
+__db_vrfy_common(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret;
+ u_int8_t *p;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ pip->pgno = pgno;
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ /*
+ * Hash expands the table by leaving some pages between the
+ * old last and the new last totally zeroed. Its pgin function
+ * should fix things, but we might not be using that (e.g. if
+ * we're a subdatabase).
+ *
+ * Queue will create sparse files if sparse record numbers are used.
+ */
+ if (pgno != 0 && PGNO(h) == 0) {
+ for (p = (u_int8_t *)h; p < (u_int8_t *)h + dbp->pgsize; p++)
+ if (*p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu should be zeroed and is not",
+ (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ /*
+ * It's totally zeroed; mark it as a hash, and we'll
+ * check that that makes sense structurally later.
+ * (The queue verification doesn't care, since queues
+ * don't really have much in the way of structure.)
+ */
+ pip->type = P_HASH;
+ F_SET(pip, VRFY_IS_ALLZEROES);
+ ret = 0;
+ goto err; /* well, not really an err. */
+ }
+
+ if (PGNO(h) != pgno) {
+ EPRINT((dbp->dbenv,
+ "Bad page number: %lu should be %lu",
+ (u_long)h->pgno, (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ }
+
+ if (!__db_is_valid_pagetype(h->type)) {
+ EPRINT((dbp->dbenv, "Bad page type: %lu", (u_long)h->type));
+ ret = DB_VERIFY_BAD;
+ }
+ pip->type = h->type;
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_invalid --
+ * Verify P_INVALID page.
+ * (Yes, there's not much to do here.)
+ */
+static int
+__db_vrfy_invalid(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ pip->next_pgno = pip->prev_pgno = 0;
+
+ if (!IS_VALID_PGNO(NEXT_PGNO(h))) {
+ EPRINT((dbp->dbenv,
+ "Invalid next_pgno %lu on page %lu",
+ (u_long)NEXT_PGNO(h), (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ } else
+ pip->next_pgno = NEXT_PGNO(h);
+
+ if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_vrfy_datapage --
+ * Verify elements common to data pages (P_HASH, P_LBTREE,
+ * P_IBTREE, P_IRECNO, P_LRECNO, P_OVERFLOW, P_DUPLICATE)--i.e.,
+ * those defined in the PAGE structure.
+ *
+ * Called from each of the per-page routines, after the
+ * all-page-type-common elements of pip have been verified and filled
+ * in.
+ *
+ * PUBLIC: int __db_vrfy_datapage
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_datapage(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ /*
+ * prev_pgno and next_pgno: store for inter-page checks,
+ * verify that they point to actual pages and not to self.
+ *
+ * !!!
+ * Internal btree pages do not maintain these fields (indeed,
+ * they overload them). Skip.
+ */
+ if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
+ if (!IS_VALID_PGNO(PREV_PGNO(h)) || PREV_PGNO(h) == pip->pgno) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "Page %lu: Invalid prev_pgno %lu",
+ (u_long)pip->pgno, (u_long)PREV_PGNO(h)));
+ }
+ if (!IS_VALID_PGNO(NEXT_PGNO(h)) || NEXT_PGNO(h) == pip->pgno) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "Page %lu: Invalid next_pgno %lu",
+ (u_long)pip->pgno, (u_long)NEXT_PGNO(h)));
+ }
+ pip->prev_pgno = PREV_PGNO(h);
+ pip->next_pgno = NEXT_PGNO(h);
+ }
+
+ /*
+ * Verify the number of entries on the page.
+ * There is no good way to determine if this is accurate; the
+ * best we can do is verify that it's not more than can, in theory,
+ * fit on the page. Then, we make sure there are at least
+ * this many valid elements in inp[], and hope that this catches
+ * most cases.
+ */
+ if (TYPE(h) != P_OVERFLOW) {
+ if (BKEYDATA_PSIZE(0) * NUM_ENT(h) > dbp->pgsize) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: Too many entries: %lu",
+ (u_long)pgno, (u_long)NUM_ENT(h)));
+ }
+ pip->entries = NUM_ENT(h);
+ }
+
+ /*
+ * btree level. Should be zero unless we're a btree;
+ * if we are a btree, should be between LEAFLEVEL and MAXBTREELEVEL,
+ * and we need to save it off.
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ if (LEVEL(h) < LEAFLEVEL + 1 || LEVEL(h) > MAXBTREELEVEL) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "Bad btree level %lu on page %lu",
+ (u_long)LEVEL(h), (u_long)pgno));
+ }
+ pip->bt_level = LEVEL(h);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ if (LEVEL(h) != LEAFLEVEL) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Btree leaf page %lu has incorrect level %lu",
+ (u_long)pgno, (u_long)LEVEL(h)));
+ }
+ break;
+ default:
+ if (LEVEL(h) != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Nonzero level %lu in non-btree database page %lu",
+ (u_long)LEVEL(h), (u_long)pgno));
+ }
+ break;
+ }
+
+ /*
+ * Even though inp[] occurs in all PAGEs, we look at it in the
+ * access-method-specific code, since btree and hash treat
+ * item lengths very differently, and one of the most important
+ * things we want to verify is that the data--as specified
+ * by offset and length--cover the right part of the page
+ * without overlaps, gaps, or violations of the page boundary.
+ */
+ if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_meta--
+ * Verify the access-method common parts of a meta page, using
+ * normal mpool routines.
+ *
+ * PUBLIC: int __db_vrfy_meta
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, DBMETA *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ DBMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DBTYPE dbtype, magtype;
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* type plausible for a meta page */
+ switch (meta->type) {
+ case P_BTREEMETA:
+ dbtype = DB_BTREE;
+ break;
+ case P_HASHMETA:
+ dbtype = DB_HASH;
+ break;
+ case P_QAMMETA:
+ dbtype = DB_QUEUE;
+ break;
+ default:
+ /* The verifier should never let us get here. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* magic number valid */
+ if (!__db_is_valid_magicno(meta->magic, &magtype)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Magic number invalid on page %lu", (u_long)pgno));
+ }
+ if (magtype != dbtype) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Magic number does not match type of page %lu",
+ (u_long)pgno));
+ }
+
+ /* version */
+ if ((dbtype == DB_BTREE && meta->version != DB_BTREEVERSION) ||
+ (dbtype == DB_HASH && meta->version != DB_HASHVERSION) ||
+ (dbtype == DB_QUEUE && meta->version != DB_QAMVERSION)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "%s%s", "Old or incorrect DB ",
+ "version; extraneous errors may result"));
+ }
+
+ /* pagesize */
+ if (meta->pagesize != dbp->pgsize) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Invalid pagesize %lu on page %lu",
+ (u_long)meta->pagesize, (u_long)pgno));
+ }
+
+ /* free list */
+ /*
+ * If this is not the main, master-database meta page, it
+ * should not have a free list.
+ */
+ if (pgno != PGNO_BASE_MD && meta->free != PGNO_INVALID) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Nonempty free list on subdatabase metadata page %lu",
+ pgno));
+ }
+
+ /* Can correctly be PGNO_INVALID--that's just the end of the list. */
+ if (meta->free != PGNO_INVALID && IS_VALID_PGNO(meta->free))
+ pip->free = meta->free;
+ else if (!IS_VALID_PGNO(meta->free)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Nonsensical free list pgno %lu on page %lu",
+ (u_long)meta->free, (u_long)pgno));
+ }
+
+ /*
+ * We have now verified the common fields of the metadata page.
+ * Clear the flag that told us they had been incompletely checked.
+ */
+ F_CLR(pip, VRFY_INCOMPLETE);
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_freelist --
+ * Walk free list, checking off pages and verifying absence of
+ * loops.
+ */
+static int
+__db_vrfy_freelist(dbp, vdp, meta, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta;
+ u_int32_t flags;
+{
+ DB *pgset;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t pgno;
+ int p, ret, t_ret;
+
+ pgset = vdp->pgset;
+ DB_ASSERT(pgset != NULL);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta, &pip)) != 0)
+ return (ret);
+ for (pgno = pip->free; pgno != PGNO_INVALID; pgno = pip->next_pgno) {
+ if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ return (ret);
+
+ /* This shouldn't happen, but just in case. */
+ if (!IS_VALID_PGNO(pgno)) {
+ EPRINT((dbp->dbenv,
+ "Invalid next_pgno on free list page %lu",
+ (u_long)pgno));
+ return (DB_VERIFY_BAD);
+ }
+
+ /* Detect cycles. */
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ return (ret);
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu encountered a second time on free list",
+ (u_long)pgno));
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ return (ret);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if (pip->type != P_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Non-invalid page %lu on free list", (u_long)pgno));
+ ret = DB_VERIFY_BAD; /* unsafe to continue */
+ break;
+ }
+ }
+
+ if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ ret = t_ret;
+ return (ret);
+}
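+
+/*
+ * Example of the cycle check above (the page numbers are made up): if the
+ * free list reads 7 -> 9 -> 7, the second visit to page 7 finds its pgset
+ * count already nonzero, so the walk stops with DB_VERIFY_BAD instead of
+ * looping forever.
+ */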
+
+/*
+ * __db_vrfy_subdbs --
+ * Walk the known-safe master database of subdbs with a cursor,
+ * verifying the structure of each subdatabase we encounter.
+ */
+static int
+__db_vrfy_subdbs(dbp, vdp, dbname, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *dbname;
+ u_int32_t flags;
+{
+ DB *mdbp;
+ DBC *dbc;
+ DBT key, data;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t meta_pgno;
+ int ret, t_ret, isbad;
+ u_int8_t type;
+
+ isbad = 0;
+ dbc = NULL;
+
+ if ((ret = __db_master_open(dbp, dbname, DB_RDONLY, 0, &mdbp)) != 0)
+ return (ret);
+
+ if ((ret =
+ __db_icursor(mdbp, NULL, DB_BTREE, PGNO_INVALID, 0, &dbc)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+ if (data.size != sizeof(db_pgno_t)) {
+ EPRINT((dbp->dbenv, "Database entry of invalid size"));
+ isbad = 1;
+ goto err;
+ }
+ memcpy(&meta_pgno, data.data, data.size);
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
+ if (meta_pgno == PGNO_INVALID || meta_pgno > vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Database entry references invalid page %lu",
+ (u_long)meta_pgno));
+ isbad = 1;
+ goto err;
+ }
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0)
+ goto err;
+ type = pip->type;
+ if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ goto err;
+ switch (type) {
+ case P_BTREEMETA:
+ if ((ret = __bam_vrfy_structure(
+ dbp, vdp, meta_pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case P_HASHMETA:
+ if ((ret = __ham_vrfy_structure(
+ dbp, vdp, meta_pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case P_QAMMETA:
+ default:
+ EPRINT((dbp->dbenv,
+ "Database entry references page %lu of invalid type %lu",
+ (u_long)meta_pgno, (u_long)type));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ /* NOTREACHED */
+ }
+ }
+
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+err: if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_struct_feedback --
+ * Provide feedback during top-down database structure traversal.
+ * (See comment at the beginning of __db_vrfy_structure.)
+ *
+ * PUBLIC: int __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *));
+ */
+int
+__db_vrfy_struct_feedback(dbp, vdp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+{
+ int progress;
+
+ if (dbp->db_feedback == NULL)
+ return (0);
+
+ if (vdp->pgs_remaining > 0)
+ vdp->pgs_remaining--;
+
+ /* Don't allow a feedback call of 100 until we're really done. */
+ progress = 100 - (vdp->pgs_remaining * 50 / (vdp->last_pgno + 1));
+ dbp->db_feedback(dbp, DB_VERIFY, progress == 100 ? 99 : progress);
+
+ return (0);
+}
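+
+/*
+ * Worked example of the progress computation above: with last_pgno + 1 ==
+ * 200 pages and pgs_remaining == 50, progress = 100 - (50 * 50) / 200 =
+ * 100 - 12 = 88, which falls in the 50-100% range reserved for this
+ * second, structural pass.
+ */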
+
+/*
+ * __db_vrfy_orderchkonly --
+ * Do a sort-order/hashing check on a known-otherwise-good subdb.
+ */
+static int
+__db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ BTMETA *btmeta;
+ DB *mdbp, *pgset;
+ DBC *pgsc;
+ DBT key, data;
+ HASH *h_internal;
+ HMETA *hmeta;
+ PAGE *h, *currpg;
+ db_pgno_t meta_pgno, p, pgno;
+ u_int32_t bucket;
+ int t_ret, ret;
+
+ currpg = h = NULL;
+ pgsc = NULL;
+ pgset = NULL;
+
+ LF_CLR(DB_NOORDERCHK);
+
+ /* Open the master database and get the meta_pgno for the subdb. */
+ if ((ret = db_create(&mdbp, NULL, 0)) != 0)
+ return (ret);
+ if ((ret = __db_master_open(dbp, name, DB_RDONLY, 0, &mdbp)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(key));
+ key.data = (void *)subdb;
+ memset(&data, 0, sizeof(data));
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) != 0)
+ goto err;
+
+ if (data.size != sizeof(db_pgno_t)) {
+ EPRINT((dbp->dbenv, "Database entry of invalid size"));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ memcpy(&meta_pgno, data.data, data.size);
+
+ if ((ret = memp_fget(dbp->mpf, &meta_pgno, 0, &h)) != 0)
+ goto err;
+
+ if ((ret = __db_vrfy_pgset(dbp->dbenv, dbp->pgsize, &pgset)) != 0)
+ goto err;
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ btmeta = (BTMETA *)h;
+ if (F_ISSET(&btmeta->dbmeta, BTM_RECNO)) {
+ /* Recnos have no order to check. */
+ ret = 0;
+ goto err;
+ }
+ if ((ret =
+ __db_meta2pgset(dbp, vdp, meta_pgno, flags, pgset)) != 0)
+ goto err;
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = memp_fget(dbp->mpf, &p, 0, &currpg)) != 0)
+ goto err;
+ if ((ret = __bam_vrfy_itemorder(dbp,
+ NULL, currpg, p, NUM_ENT(currpg), 1,
+ F_ISSET(&btmeta->dbmeta, BTM_DUP), flags)) != 0)
+ goto err;
+ if ((ret = memp_fput(dbp->mpf, currpg, 0)) != 0)
+ goto err;
+ currpg = NULL;
+ }
+ if ((ret = pgsc->c_close(pgsc)) != 0)
+ goto err;
+ break;
+ case P_HASHMETA:
+ hmeta = (HMETA *)h;
+ h_internal = (HASH *)dbp->h_internal;
+ /*
+ * Make sure h_charkey is right.
+ */
+ if (h_internal == NULL || h_internal->h_hash == NULL) {
+ EPRINT((dbp->dbenv,
+ "DB_ORDERCHKONLY requires that a hash function be set"));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if (hmeta->h_charkey !=
+ h_internal->h_hash(dbp, CHARKEY, sizeof(CHARKEY))) {
+ EPRINT((dbp->dbenv,
+ "Incorrect hash function for database"));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Foreach bucket, verify hashing on each page in the
+ * corresponding chain of pages.
+ */
+ for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
+ pgno = BS_TO_PAGE(bucket, hmeta->spares);
+ while (pgno != PGNO_INVALID) {
+ if ((ret = memp_fget(dbp->mpf,
+ &pgno, 0, &currpg)) != 0)
+ goto err;
+ if ((ret = __ham_vrfy_hashing(dbp,
+ NUM_ENT(currpg), hmeta, bucket, pgno,
+ flags, h_internal->h_hash)) != 0)
+ goto err;
+ pgno = NEXT_PGNO(currpg);
+ if ((ret = memp_fput(dbp->mpf, currpg, 0)) != 0)
+ goto err;
+ currpg = NULL;
+ }
+ }
+ break;
+ default:
+ EPRINT((dbp->dbenv, "Database meta page %lu of bad type %lu",
+ (u_long)meta_pgno, (u_long)TYPE(h)));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err: if (pgsc != NULL)
+ (void)pgsc->c_close(pgsc);
+ if (pgset != NULL)
+ (void)pgset->close(pgset, 0);
+ if (h != NULL && (t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ ret = t_ret;
+ if (currpg != NULL && (t_ret = memp_fput(dbp->mpf, currpg, 0)) != 0)
+ ret = t_ret;
+ if ((t_ret = mdbp->close(mdbp, 0)) != 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_salvage --
+ * Walk through a page, salvaging all likely or plausible (w/
+ * DB_AGGRESSIVE) key/data pairs.
+ *
+ * PUBLIC: int __db_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
+ * PUBLIC: void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__db_salvage(dbp, vdp, pgno, h, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB_ASSERT(LF_ISSET(DB_SALVAGE));
+
+ /* If we got this page in the subdb pass, we can safely skip it. */
+ if (__db_salvage_isdone(vdp, pgno))
+ return (0);
+
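+ /*
+ * Btree and hash leaf pages can be salvaged right away; other page
+ * types are only marked as needed, so a later pass can print them,
+ * ideally together with their keys.
+ */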
+ switch (TYPE(h)) {
+ case P_HASH:
+ return (__ham_salvage(dbp,
+ vdp, pgno, h, handle, callback, flags));
+ /* NOTREACHED */
+ case P_LBTREE:
+ return (__bam_salvage(dbp,
+ vdp, pgno, P_LBTREE, h, handle, callback, NULL, flags));
+ /* NOTREACHED */
+ case P_LDUP:
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_LDUP));
+ /* NOTREACHED */
+ case P_OVERFLOW:
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_OVERFLOW));
+ /* NOTREACHED */
+ case P_LRECNO:
+ /*
+ * Recnos are tricky -- they may represent dup pages, or
+ * they may be subdatabase/regular database pages in their
+ * own right. If the former, they need to be printed with a
+ * key, preferably when we hit the corresponding datum in
+ * a btree/hash page. If the latter, there is no key.
+ *
+ * If a database is sufficiently frotzed, we're not going
+ * to be able to get this right, so we best-guess: just
+ * mark it needed now, and if we're really a normal recno
+ * database page, the "unknowns" pass will pick us up.
+ */
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_LRECNO));
+ /* NOTREACHED */
+ case P_IBTREE:
+ case P_INVALID:
+ case P_IRECNO:
+ case __P_DUPLICATE:
+ default:
+ /* XXX: Should we be more aggressive here? */
+ break;
+ }
+ return (0);
+}
+
+/*
+ * __db_salvage_unknowns --
+ * Walk through the salvager database, printing with key "UNKNOWN"
+ * any pages we haven't dealt with.
+ */
+static int
+__db_salvage_unknowns(dbp, vdp, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DBT unkdbt, key, *dbt;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+ int ret, err_ret;
+ void *ovflbuf;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = strlen("UNKNOWN") + 1;
+ unkdbt.data = "UNKNOWN";
+
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, 0, &ovflbuf)) != 0)
+ return (ret);
+
+ err_ret = 0;
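+ /*
+ * Walk the pages the salvager flagged but never printed, handling
+ * each according to the type it was marked with.
+ */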
+ while ((ret = __db_salvage_getnext(vdp, &pgno, &pgtype)) == 0) {
+ dbt = NULL;
+
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ switch (pgtype) {
+ case SALVAGE_LDUP:
+ case SALVAGE_LRECNODUP:
+ dbt = &unkdbt;
+ /* FALLTHROUGH */
+ case SALVAGE_LBTREE:
+ case SALVAGE_LRECNO:
+ if ((ret = __bam_salvage(dbp, vdp, pgno, pgtype,
+ h, handle, callback, dbt, flags)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_OVERFLOW:
+ /*
+ * XXX:
+ * This may generate multiple "UNKNOWN" keys in
+ * a database with no dups. What to do?
+ */
+ /*
+ * Break, not continue, on error, so that the
+ * page still gets put back below.
+ */
+ if ((ret = __db_safe_goff(dbp,
+ vdp, pgno, &key, &ovflbuf, flags)) != 0) {
+ err_ret = ret;
+ break;
+ }
+ if ((ret = __db_prdbt(&key,
+ 0, " ", handle, callback, 0, NULL)) != 0) {
+ err_ret = ret;
+ break;
+ }
+ if ((ret = __db_prdbt(&unkdbt,
+ 0, " ", handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_HASH:
+ if ((ret = __ham_salvage(
+ dbp, vdp, pgno, h, handle, callback, flags)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_INVALID:
+ case SALVAGE_IGNORE:
+ default:
+ /*
+ * Shouldn't happen, but if it does, just do what the
+ * nice man says.
+ */
+ DB_ASSERT(0);
+ break;
+ }
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ err_ret = ret;
+ }
+
+ __os_free(ovflbuf, 0);
+
+ if (err_ret != 0 && ret == 0)
+ ret = err_ret;
+
+ return (ret == DB_NOTFOUND ? 0 : ret);
+}
+
+/*
+ * Offset of the ith inp array entry, which we can compare to the offset
+ * the entry stores.
+ */
+#define INP_OFFSET(h, i) \
+ ((db_indx_t)((u_int8_t *)(h)->inp + (i) - (u_int8_t *)(h)))
+
+/*
+ * __db_vrfy_inpitem --
+ * Verify that a single entry in the inp array is sane, and update
+ * the high water mark and current item offset. (The former of these is
+ * used for state information between calls, and is required; it must
+ * be initialized to the pagesize before the first call.)
+ *
+ * Returns DB_VERIFY_FATAL if inp has collided with the data,
+ * since verification can't continue from there; returns DB_VERIFY_BAD
+ * if anything else is wrong.
+ *
+ * PUBLIC: int __db_vrfy_inpitem __P((DB *, PAGE *,
+ * PUBLIC: db_pgno_t, u_int32_t, int, u_int32_t, u_int32_t *, u_int32_t *));
+ */
+int
+__db_vrfy_inpitem(dbp, h, pgno, i, is_btree, flags, himarkp, offsetp)
+ DB *dbp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t i;
+ int is_btree;
+ u_int32_t flags, *himarkp, *offsetp;
+{
+ BKEYDATA *bk;
+ db_indx_t offset, len;
+
+ DB_ASSERT(himarkp != NULL);
+
+ /*
+ * Check that the inp array, which grows from the beginning of the
+ * page forward, has not collided with the data, which grow from the
+ * end of the page backward.
+ */
+ if (h->inp + i >= (db_indx_t *)((u_int8_t *)h + *himarkp)) {
+ /* We've collided with the data. We need to bail. */
+ EPRINT((dbp->dbenv,
+ "Page %lu entries listing %lu overlaps data",
+ (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_FATAL);
+ }
+
+ offset = h->inp[i];
+
+ /*
+ * Check that the item offset is reasonable: it points somewhere
+ * after the inp array and before the end of the page.
+ */
+ if (offset <= INP_OFFSET(h, i) || offset > dbp->pgsize) {
+ EPRINT((dbp->dbenv,
+ "Bad offset %lu at page %lu index %lu",
+ (u_long)offset, (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_BAD);
+ }
+
+ /* Update the high-water mark (what HOFFSET should be) */
+ if (offset < *himarkp)
+ *himarkp = offset;
+
+ if (is_btree) {
+ /*
+ * Check that the item length remains on-page.
+ */
+ bk = GET_BKEYDATA(h, i);
+
+ /*
+ * We need to verify the type of the item here;
+ * we can't simply assume that it will be one of the
+ * expected three. If it's not a recognizable type,
+ * it can't be considered to have a verifiable
+ * length, so it's not possible to certify it as safe.
+ */
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ len = bk->len;
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ len = BOVERFLOW_SIZE;
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+ "Item %lu on page %lu of unrecognizable type",
+ (u_long)i, (u_long)pgno));
+ return (DB_VERIFY_BAD);
+ }
+
+ if ((size_t)(offset + len) > dbp->pgsize) {
+ EPRINT((dbp->dbenv,
+ "Item %lu on page %lu extends past page boundary",
+ (u_long)i, (u_long)pgno));
+ return (DB_VERIFY_BAD);
+ }
+ }
+
+ if (offsetp != NULL)
+ *offsetp = offset;
+ return (0);
+}
+
+/*
+ * __db_vrfy_duptype --
+ * Given a page number and a set of flags to __bam_vrfy_subtree,
+ * verify that the dup tree type is correct--i.e., it's a recno
+ * if DUPSORT is not set and a btree if it is.
+ *
+ * PUBLIC: int __db_vrfy_duptype
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_duptype(dbp, vdp, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int ret, isbad;
+
+ isbad = 0;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (pip->type) {
+ case P_IBTREE:
+ case P_LDUP:
+ if (!LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbp->dbenv,
+ "Sorted duplicate set at page %lu in unsorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ if (LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbp->dbenv,
+ "Unsorted duplicate set at page %lu in sorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+ "Duplicate page %lu of inappropriate type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ isbad = 1;
+ break;
+ }
+
+ if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ return (ret);
+ return (isbad == 1 ? DB_VERIFY_BAD : 0);
+}
+
+/*
+ * __db_salvage_duptree --
+ * Attempt to salvage a given duplicate tree, given its alleged root.
+ *
+ * The key that corresponds to this dup set has been passed to us
+ * in DBT *key. Because data items follow keys, though, it has been
+ * printed once already.
+ *
+ * The basic idea here is that pgno ought to be a P_LDUP, a P_LRECNO, a
+ * P_IBTREE, or a P_IRECNO. If it's an internal page, use the verifier
+ * functions to make sure it's safe; if it's not, we simply bail and the
+ * data will have to be printed with no key later on. If it is safe,
+ * recurse on each of its children.
+ *
+ * Whether or not it's safe, if it's a leaf page, __bam_salvage it.
+ *
+ * At all times, use the DB hanging off vdp to mark and check what we've
+ * done, so each page gets printed exactly once and we don't get caught
+ * in any cycles.
+ *
+ * PUBLIC: int __db_salvage_duptree __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: DBT *, void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__db_salvage_duptree(dbp, vdp, pgno, key, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ DBT *key;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ PAGE *h;
+ int ret, t_ret;
+
+ if (pgno == PGNO_INVALID || !IS_VALID_PGNO(pgno))
+ return (DB_VERIFY_BAD);
+
+ /* We have a plausible page. Try it. */
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ if ((ret = __db_vrfy_common(dbp, vdp, h, pgno, flags)) != 0)
+ goto err;
+ if ((ret = __bam_vrfy(dbp,
+ vdp, h, pgno, flags | DB_NOORDERCHK)) != 0 ||
+ (ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ goto err;
+ /*
+ * We have a known-healthy internal page. Walk it.
+ */
+ if ((ret = __bam_salvage_walkdupint(dbp, vdp, h, key,
+ handle, callback, flags)) != 0)
+ goto err;
+ break;
+ case P_LRECNO:
+ case P_LDUP:
+ if ((ret = __bam_salvage(dbp,
+ vdp, pgno, TYPE(h), h, handle, callback, key, flags)) != 0)
+ goto err;
+ break;
+ default:
+ ret = DB_VERIFY_BAD;
+ goto err;
+ /* NOTREACHED */
+ }
+
+err: if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_salvage_subdbs --
+ * Check and see if this database has subdbs; if so, try to salvage
+ * them independently.
+ */
+static int
+__db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+ int *hassubsp;
+{
+ BTMETA *btmeta;
+ DB *pgset;
+ DBC *pgsc;
+ PAGE *h;
+ db_pgno_t p, meta_pgno;
+ int ret, err_ret;
+
+ err_ret = 0;
+ pgsc = NULL;
+ pgset = NULL;
+
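+ /*
+ * Look at the master metadata page to decide whether this database
+ * has any subdatabases at all.
+ */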
+ meta_pgno = PGNO_BASE_MD;
+ if ((ret = memp_fget(dbp->mpf, &meta_pgno, 0, &h)) != 0)
+ return (ret);
+
+ if (TYPE(h) == P_BTREEMETA)
+ btmeta = (BTMETA *)h;
+ else {
+ /* Not a btree metadata, ergo no subdbs, so just return. */
+ ret = 0;
+ goto err;
+ }
+
+ /* If it's not a safe page, bail on the attempt. */
+ if ((ret = __db_vrfy_common(dbp, vdp, h, PGNO_BASE_MD, flags)) != 0 ||
+ (ret = __bam_vrfy_meta(dbp, vdp, btmeta, PGNO_BASE_MD, flags)) != 0)
+ goto err;
+
+ if (!F_ISSET(&btmeta->dbmeta, BTM_SUBDB)) {
+ /* No subdbs, just return. */
+ ret = 0;
+ goto err;
+ }
+
+ /* We think we've got subdbs. Mark it so. */
+ *hassubsp = 1;
+
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ return (ret);
+ h = NULL;
+
+ /*
+ * We have subdbs. Try to crack them.
+ *
+ * To do so, get a set of leaf pages in the master
+ * database, and then walk each of the valid ones, salvaging
+ * subdbs as we go. If any prove invalid, just drop them; we'll
+ * pick them up on a later pass.
+ */
+ if ((ret = __db_vrfy_pgset(dbp->dbenv, dbp->pgsize, &pgset)) != 0)
+ return (ret);
+ if ((ret =
+ __db_meta2pgset(dbp, vdp, PGNO_BASE_MD, flags, pgset)) != 0)
+ goto err;
+
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = memp_fget(dbp->mpf, &p, 0, &h)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+ if ((ret = __db_vrfy_common(dbp, vdp, h, p, flags)) != 0 ||
+ (ret = __bam_vrfy(dbp,
+ vdp, h, p, flags | DB_NOORDERCHK)) != 0)
+ goto nextpg;
+ if (TYPE(h) != P_LBTREE)
+ goto nextpg;
+ else if ((ret = __db_salvage_subdbpg(
+ dbp, vdp, h, handle, callback, flags)) != 0)
+ err_ret = ret;
+nextpg: if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ err_ret = ret;
+ h = NULL;
+ }
+
+ if (ret != DB_NOTFOUND)
+ goto err;
+ if ((ret = pgsc->c_close(pgsc)) != 0)
+ goto err;
+
+ ret = pgset->close(pgset, 0);
+ return ((ret == 0 && err_ret != 0) ? err_ret : ret);
+
+ /* NOTREACHED */
+
+err: if (pgsc != NULL)
+ (void)pgsc->c_close(pgsc);
+ if (pgset != NULL)
+ (void)pgset->close(pgset, 0);
+ if (h != NULL)
+ (void)memp_fput(dbp->mpf, h, 0);
+ return (ret);
+}
+
+/*
+ * __db_salvage_subdbpg --
+ * Given a known-good leaf page in the master database, salvage all
+ * leaf pages corresponding to each subdb.
+ *
+ * PUBLIC: int __db_salvage_subdbpg
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, PAGE *, void *,
+ * PUBLIC: int (*)(void *, const void *), u_int32_t));
+ */
+int
+__db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *master;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ BKEYDATA *bkkey, *bkdata;
+ BOVERFLOW *bo;
+ DB *pgset;
+ DBC *pgsc;
+ DBT key;
+ PAGE *subpg;
+ db_indx_t i;
+ db_pgno_t meta_pgno, p;
+ int ret, err_ret, t_ret;
+ char *subdbname;
+
+ ret = err_ret = 0;
+ subdbname = NULL;
+
+ if ((ret = __db_vrfy_pgset(dbp->dbenv, dbp->pgsize, &pgset)) != 0)
+ return (ret);
+
+ /*
+ * For each entry, get and salvage the set of pages
+ * corresponding to that entry.
+ */
+ for (i = 0; i < NUM_ENT(master); i += P_INDX) {
+ bkkey = GET_BKEYDATA(master, i);
+ bkdata = GET_BKEYDATA(master, i + O_INDX);
+
+ /* Get the subdatabase name. */
+ if (B_TYPE(bkkey->type) == B_OVERFLOW) {
+ /*
+ * We can, in principle anyway, have a subdb
+ * name so long it overflows. Ick.
+ */
+ bo = (BOVERFLOW *)bkkey;
+ if ((ret = __db_safe_goff(dbp, vdp, bo->pgno, &key,
+ (void **)&subdbname, flags)) != 0) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+
+ /* Nul-terminate it. */
+ if ((ret = __os_realloc(dbp->dbenv,
+ key.size + 1, NULL, &subdbname)) != 0)
+ goto err;
+ subdbname[key.size] = '\0';
+ } else if (B_TYPE(bkkey->type) == B_KEYDATA) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ bkkey->len + 1, NULL, &subdbname)) != 0)
+ goto err;
+ memcpy(subdbname, bkkey->data, bkkey->len);
+ subdbname[bkkey->len] = '\0';
+ }
+
+ /* Get the corresponding pgno. */
+ if (bkdata->len != sizeof(db_pgno_t)) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&meta_pgno, bkdata->data, sizeof(db_pgno_t));
+
+ /* If we can't get the subdb meta page, just skip the subdb. */
+ if (!IS_VALID_PGNO(meta_pgno) ||
+ (ret = memp_fget(dbp->mpf, &meta_pgno, 0, &subpg)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ /*
+ * Verify the subdatabase meta page. This has two functions.
+ * First, if it's bad, we have no choice but to skip the subdb
+ * and let the pages just get printed on a later pass. Second,
+ * the access-method-specific meta verification routines record
+ * the various state info (such as the presence of dups)
+ * that we need for __db_prheader().
+ */
+ if ((ret =
+ __db_vrfy_common(dbp, vdp, subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)memp_fput(dbp->mpf, subpg, 0);
+ continue;
+ }
+ switch (TYPE(subpg)) {
+ case P_BTREEMETA:
+ if ((ret = __bam_vrfy_meta(dbp,
+ vdp, (BTMETA *)subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)memp_fput(dbp->mpf, subpg, 0);
+ continue;
+ }
+ break;
+ case P_HASHMETA:
+ if ((ret = __ham_vrfy_meta(dbp,
+ vdp, (HMETA *)subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)memp_fput(dbp->mpf, subpg, 0);
+ continue;
+ }
+ break;
+ default:
+ /* This isn't an appropriate page; skip this subdb. */
+ err_ret = DB_VERIFY_BAD;
+ (void)memp_fput(dbp->mpf, subpg, 0);
+ continue;
+ /* NOTREACHED */
+ }
+
+ if ((ret = memp_fput(dbp->mpf, subpg, 0)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ /* Print a subdatabase header. */
+ if ((ret = __db_prheader(dbp,
+ subdbname, 0, 0, handle, callback, vdp, meta_pgno)) != 0)
+ goto err;
+
+ if ((ret = __db_meta2pgset(dbp, vdp, meta_pgno,
+ flags, pgset)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
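+ /* Walk and salvage each page belonging to this subdatabase. */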
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = memp_fget(dbp->mpf, &p, 0, &subpg)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+ if ((ret = __db_salvage(dbp, vdp, p, subpg,
+ handle, callback, flags)) != 0)
+ err_ret = ret;
+ if ((ret = memp_fput(dbp->mpf, subpg, 0)) != 0)
+ err_ret = ret;
+ }
+
+ if (ret != DB_NOTFOUND)
+ goto err;
+
+ if ((ret = pgsc->c_close(pgsc)) != 0)
+ goto err;
+ if ((ret = __db_prfooter(handle, callback)) != 0)
+ goto err;
+ }
+err: if (subdbname)
+ __os_free(subdbname, 0);
+
+ if ((t_ret = pgset->close(pgset, 0)) != 0)
+ ret = t_ret;
+
+ if ((t_ret = __db_salvage_markdone(vdp, PGNO(master))) != 0)
+ return (t_ret);
+
+ return ((err_ret != 0) ? err_ret : ret);
+}
+
+/*
+ * __db_meta2pgset --
+ * Given a known-safe meta page number, return the set of pages
+ * corresponding to the database it represents. Return DB_VERIFY_BAD if
+ * it's not a suitable meta page or is invalid.
+ */
+static int
+__db_meta2pgset(dbp, vdp, pgno, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t flags;
+ DB *pgset;
+{
+ PAGE *h;
+ int ret, t_ret;
+
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ ret = __bam_meta2pgset(dbp, vdp, (BTMETA *)h, flags, pgset);
+ break;
+ case P_HASHMETA:
+ ret = __ham_meta2pgset(dbp, vdp, (HMETA *)h, flags, pgset);
+ break;
+ default:
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+ if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ return (t_ret);
+ return (ret);
+}
+
+/*
+ * __db_guesspgsize --
+ * Try to guess what the pagesize is if the one on the meta page
+ * and the one in the db are invalid.
+ */
+static int
+__db_guesspgsize(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ db_pgno_t i;
+ size_t nr;
+ u_int32_t guess;
+ u_int8_t type;
+ int ret;
+
+ for (guess = DB_MAX_PGSIZE; guess >= DB_MIN_PGSIZE; guess >>= 1) {
+ /*
+ * We try to read three pages ahead after the first one
+ * and make sure we have plausible types for all of them.
+ * If the seeks fail, continue with a smaller size;
+ * we're probably just looking past the end of the database.
+ * If they succeed and the types are reasonable, also continue
+ * with a smaller size; we may be looking at pages N,
+ * 2N, and 3N for some N > 1.
+ *
+ * As soon as we hit an invalid type, we stop and return
+ * our previous guess; that last one was probably the page size.
+ */
+ for (i = 1; i <= 3; i++) {
+ if ((ret = __os_seek(dbenv, fhp, guess,
+ i, SSZ(DBMETA, type), 0, DB_OS_SEEK_SET)) != 0)
+ break;
+ if ((ret = __os_read(dbenv,
+ fhp, &type, 1, &nr)) != 0 || nr == 0)
+ break;
+ if (type == P_INVALID || type >= P_PAGETYPE_MAX)
+ return (guess << 1);
+ }
+ }
+
+ /*
+ * If we're just totally confused--the corruption takes up most of the
+ * beginning pages of the database--go with the default size.
+ */
+ return (DB_DEF_IOSIZE);
+}
diff --git a/bdb/db/db_vrfyutil.c b/bdb/db/db_vrfyutil.c
new file mode 100644
index 00000000000..89dccdcc760
--- /dev/null
+++ b/bdb/db/db_vrfyutil.c
@@ -0,0 +1,830 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_vrfyutil.c,v 11.11 2000/11/28 21:36:04 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_vrfyutil.c,v 11.11 2000/11/28 21:36:04 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_verify.h"
+#include "db_ext.h"
+
+static int __db_vrfy_pgset_iinc __P((DB *, db_pgno_t, int));
+
+/*
+ * __db_vrfy_dbinfo_create --
+ * Allocate and initialize a VRFY_DBINFO structure.
+ *
+ * PUBLIC: int __db_vrfy_dbinfo_create
+ * PUBLIC: __P((DB_ENV *, u_int32_t, VRFY_DBINFO **));
+ */
+int
+__db_vrfy_dbinfo_create(dbenv, pgsize, vdpp)
+ DB_ENV *dbenv;
+ u_int32_t pgsize;
+ VRFY_DBINFO **vdpp;
+{
+ DB *cdbp, *pgdbp, *pgset;
+ VRFY_DBINFO *vdp;
+ int ret;
+
+ vdp = NULL;
+ cdbp = pgdbp = pgset = NULL;
+
+ if ((ret = __os_calloc(NULL,
+ 1, sizeof(VRFY_DBINFO), (void **)&vdp)) != 0)
+ goto err;
+
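+ /*
+ * The child database maps a page number to the VRFY_CHILDINFO
+ * structures for its children; sorted duplicates let one page
+ * carry many children.
+ */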
+ if ((ret = db_create(&cdbp, dbenv, 0)) != 0)
+ goto err;
+
+ if ((ret = cdbp->set_flags(cdbp, DB_DUP | DB_DUPSORT)) != 0)
+ goto err;
+
+ if ((ret = cdbp->set_pagesize(cdbp, pgsize)) != 0)
+ goto err;
+
+ if ((ret =
+ cdbp->open(cdbp, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+ goto err;
+
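+ /* The page database maps each page number to its VRFY_PAGEINFO. */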
+ if ((ret = db_create(&pgdbp, dbenv, 0)) != 0)
+ goto err;
+
+ if ((ret = pgdbp->set_pagesize(pgdbp, pgsize)) != 0)
+ goto err;
+
+ if ((ret =
+ pgdbp->open(pgdbp, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+ goto err;
+
+ if ((ret = __db_vrfy_pgset(dbenv, pgsize, &pgset)) != 0)
+ goto err;
+
+ LIST_INIT(&vdp->subdbs);
+ LIST_INIT(&vdp->activepips);
+
+ vdp->cdbp = cdbp;
+ vdp->pgdbp = pgdbp;
+ vdp->pgset = pgset;
+ *vdpp = vdp;
+ return (0);
+
+err: if (cdbp != NULL)
+ (void)cdbp->close(cdbp, 0);
+ if (pgdbp != NULL)
+ (void)pgdbp->close(pgdbp, 0);
+ if (vdp != NULL)
+ __os_free(vdp, sizeof(VRFY_DBINFO));
+ return (ret);
+}
+
+/*
+ * __db_vrfy_dbinfo_destroy --
+ * Destructor for VRFY_DBINFO. Destroys VRFY_PAGEINFOs and deallocates
+ * structure.
+ *
+ * PUBLIC: int __db_vrfy_dbinfo_destroy __P((VRFY_DBINFO *));
+ */
+int
+__db_vrfy_dbinfo_destroy(vdp)
+ VRFY_DBINFO *vdp;
+{
+ VRFY_CHILDINFO *c, *d;
+ int t_ret, ret;
+
+ ret = 0;
+
+ for (c = LIST_FIRST(&vdp->subdbs); c != NULL; c = d) {
+ d = LIST_NEXT(c, links);
+ __os_free(c, 0);
+ }
+
+ if ((t_ret = vdp->pgdbp->close(vdp->pgdbp, 0)) != 0)
+ ret = t_ret;
+
+ if ((t_ret = vdp->cdbp->close(vdp->cdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = vdp->pgset->close(vdp->pgset, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ DB_ASSERT(LIST_FIRST(&vdp->activepips) == NULL);
+
+ __os_free(vdp, sizeof(VRFY_DBINFO));
+ return (ret);
+}
+
+/*
+ * __db_vrfy_getpageinfo --
+ * Get a PAGEINFO structure for a given page, creating it if necessary.
+ *
+ * PUBLIC: int __db_vrfy_getpageinfo
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, VRFY_PAGEINFO **));
+ */
+int
+__db_vrfy_getpageinfo(vdp, pgno, pipp)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ VRFY_PAGEINFO **pipp;
+{
+ DBT key, data;
+ DB *pgdbp;
+ VRFY_PAGEINFO *pip;
+ int ret;
+
+ /*
+ * We want a page info struct. There are three places to get it from,
+ * in decreasing order of preference:
+ *
+ * 1. vdp->activepips. If it's already "checked out", we're
+ * already using it, we return the same exact structure with a
+ * bumped refcount. This is necessary because this code is
+ * replacing array accesses, and it's common for f() to make some
+ * changes to a pip, and then call g() and h() which each make
+ * changes to the same pip. vdps are never shared between threads
+ * (they're never returned to the application), so this is safe.
+ * 2. The pgdbp. It's not in memory, but it's in the database, so
+ * get it, give it a refcount of 1, and stick it on activepips.
+ * 3. malloc. It doesn't exist yet; create it, then stick it on
+ * activepips. We'll put it in the database when we putpageinfo
+ * later.
+ */
+
+ /* Case 1. */
+ for (pip = LIST_FIRST(&vdp->activepips); pip != NULL;
+ pip = LIST_NEXT(pip, links))
+ if (pip->pgno == pgno)
+ /* Found it. */
+ goto found;
+
+ /* Case 2. */
+ pgdbp = vdp->pgdbp;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ F_SET(&data, DB_DBT_MALLOC);
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ if ((ret = pgdbp->get(pgdbp, NULL, &key, &data, 0)) == 0) {
+ /* Found it. */
+ DB_ASSERT(data.size == sizeof(VRFY_PAGEINFO));
+ pip = data.data;
+ DB_ASSERT(pip->pi_refcount == 0);
+ LIST_INSERT_HEAD(&vdp->activepips, pip, links);
+ goto found;
+ } else if (ret != DB_NOTFOUND) /* Something nasty happened. */
+ return (ret);
+
+ /* Case 3 */
+ if ((ret = __db_vrfy_pageinfo_create(&pip)) != 0)
+ return (ret);
+
+ LIST_INSERT_HEAD(&vdp->activepips, pip, links);
+found: pip->pi_refcount++;
+
+ *pipp = pip;
+
+ DB_ASSERT(pip->pi_refcount > 0);
+ return (0);
+}
+
+/*
+ * __db_vrfy_putpageinfo --
+ * Put back a VRFY_PAGEINFO that we're done with.
+ *
+ * PUBLIC: int __db_vrfy_putpageinfo __P((VRFY_DBINFO *, VRFY_PAGEINFO *));
+ */
+int
+__db_vrfy_putpageinfo(vdp, pip)
+ VRFY_DBINFO *vdp;
+ VRFY_PAGEINFO *pip;
+{
+ DBT key, data;
+ DB *pgdbp;
+ VRFY_PAGEINFO *p;
+ int ret;
+#ifdef DIAGNOSTIC
+ int found;
+
+ found = 0;
+#endif
+
+ if (--pip->pi_refcount > 0)
+ return (0);
+
+ pgdbp = vdp->pgdbp;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pip->pgno;
+ key.size = sizeof(db_pgno_t);
+ data.data = pip;
+ data.size = sizeof(VRFY_PAGEINFO);
+
+ if ((ret = pgdbp->put(pgdbp, NULL, &key, &data, 0)) != 0)
+ return (ret);
+
+ for (p = LIST_FIRST(&vdp->activepips); p != NULL;
+ p = LIST_NEXT(p, links))
+ if (p == pip) {
+#ifdef DIAGNOSTIC
+ found++;
+#endif
+ DB_ASSERT(p->pi_refcount == 0);
+ LIST_REMOVE(p, links);
+ break;
+ }
+#ifdef DIAGNOSTIC
+ DB_ASSERT(found == 1);
+#endif
+
+ DB_ASSERT(pip->pi_refcount == 0);
+ __os_free(pip, 0);
+ return (0);
+}
+
+/*
+ * __db_vrfy_pgset --
+ * Create a temporary database for the storing of sets of page numbers.
+ * (A mapping from page number to int, used by the *_meta2pgset functions,
+ * as well as for keeping track of which pages the verifier has seen.)
+ *
+ * PUBLIC: int __db_vrfy_pgset __P((DB_ENV *, u_int32_t, DB **));
+ */
+int
+__db_vrfy_pgset(dbenv, pgsize, dbpp)
+ DB_ENV *dbenv;
+ u_int32_t pgsize;
+ DB **dbpp;
+{
+ DB *dbp;
+ int ret;
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->set_pagesize(dbp, pgsize)) != 0)
+ goto err;
+ if ((ret = dbp->open(dbp, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) == 0)
+ *dbpp = dbp;
+ else
+err: (void)dbp->close(dbp, 0);
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_pgset_get --
+ * Get the value associated in a page set with a given pgno. Return
+ * a 0 value (and succeed) if we've never heard of this page.
+ *
+ * PUBLIC: int __db_vrfy_pgset_get __P((DB *, db_pgno_t, int *));
+ */
+int
+__db_vrfy_pgset_get(dbp, pgno, valp)
+ DB *dbp;
+ db_pgno_t pgno;
+ int *valp;
+{
+ DBT key, data;
+ int ret, val;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+ data.data = &val;
+ data.ulen = sizeof(int);
+ F_SET(&data, DB_DBT_USERMEM);
+
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) {
+ DB_ASSERT(data.size == sizeof(int));
+ memcpy(&val, data.data, sizeof(int));
+ } else if (ret == DB_NOTFOUND)
+ val = 0;
+ else
+ return (ret);
+
+ *valp = val;
+ return (0);
+}
+
+/*
+ * __db_vrfy_pgset_inc --
+ * Increment the value associated with a pgno by 1.
+ *
+ * PUBLIC: int __db_vrfy_pgset_inc __P((DB *, db_pgno_t));
+ */
+int
+__db_vrfy_pgset_inc(dbp, pgno)
+ DB *dbp;
+ db_pgno_t pgno;
+{
+
+ return (__db_vrfy_pgset_iinc(dbp, pgno, 1));
+}
+
+/*
+ * __db_vrfy_pgset_dec --
+ * Decrement the value associated with a pgno by 1.
+ *
+ * PUBLIC: int __db_vrfy_pgset_dec __P((DB *, db_pgno_t));
+ */
+int
+__db_vrfy_pgset_dec(dbp, pgno)
+ DB *dbp;
+ db_pgno_t pgno;
+{
+
+ return (__db_vrfy_pgset_iinc(dbp, pgno, -1));
+}
+
+/*
+ * __db_vrfy_pgset_iinc --
+ * Increment the value associated with a pgno by i.
+ *
+ */
+static int
+__db_vrfy_pgset_iinc(dbp, pgno, i)
+ DB *dbp;
+ db_pgno_t pgno;
+ int i;
+{
+ DBT key, data;
+ int ret;
+ int val;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ val = 0;
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+ data.data = &val;
+ data.ulen = sizeof(int);
+ F_SET(&data, DB_DBT_USERMEM);
+
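+ /* Fetch the current count, if any; a missing entry counts as zero. */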
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) {
+ DB_ASSERT(data.size == sizeof(int));
+ memcpy(&val, data.data, sizeof(int));
+ } else if (ret != DB_NOTFOUND)
+ return (ret);
+
+ data.size = sizeof(int);
+ val += i;
+
+ return (dbp->put(dbp, NULL, &key, &data, 0));
+}
+
+/*
+ * __db_vrfy_pgset_next --
+ * Given a cursor open in a pgset database, get the next page in the
+ * set.
+ *
+ * PUBLIC: int __db_vrfy_pgset_next __P((DBC *, db_pgno_t *));
+ */
+int
+__db_vrfy_pgset_next(dbc, pgnop)
+ DBC *dbc;
+ db_pgno_t *pgnop;
+{
+ DBT key, data;
+ db_pgno_t pgno;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ /* We don't care about the data, just the keys. */
+ F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ F_SET(&key, DB_DBT_USERMEM);
+ key.data = &pgno;
+ key.ulen = sizeof(db_pgno_t);
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) != 0)
+ return (ret);
+
+ DB_ASSERT(key.size == sizeof(db_pgno_t));
+ *pgnop = pgno;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_childcursor --
+ * Create a cursor on the child database with which to walk the
+ * child list of a given page.
+ *
+ * PUBLIC: int __db_vrfy_childcursor __P((VRFY_DBINFO *, DBC **));
+ */
+int
+__db_vrfy_childcursor(vdp, dbcp)
+ VRFY_DBINFO *vdp;
+ DBC **dbcp;
+{
+ DB *cdbp;
+ DBC *dbc;
+ int ret;
+
+ cdbp = vdp->cdbp;
+
+ if ((ret = cdbp->cursor(cdbp, NULL, &dbc, 0)) == 0)
+ *dbcp = dbc;
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_childput --
+ * Add a child structure to the set for a given page.
+ *
+ * PUBLIC: int __db_vrfy_childput
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, VRFY_CHILDINFO *));
+ */
+int
+__db_vrfy_childput(vdp, pgno, cip)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ VRFY_CHILDINFO *cip;
+{
+ DBT key, data;
+ DB *cdbp;
+ int ret;
+
+ cdbp = vdp->cdbp;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ data.data = cip;
+ data.size = sizeof(VRFY_CHILDINFO);
+
+ /*
+ * Don't add duplicate (data) entries for a given child, and accept
+ * DB_KEYEXIST as a successful return; we only need to verify
+ * each child once, even if a child (such as an overflow key) is
+ * multiply referenced.
+ */
+ ret = cdbp->put(cdbp, NULL, &key, &data, DB_NODUPDATA);
+ return (ret == DB_KEYEXIST ? 0 : ret);
+}
+
+/*
+ * __db_vrfy_ccset --
+ * Sets a cursor created with __db_vrfy_childcursor to the first
+ * child of the given pgno, and returns it in the third arg.
+ *
+ * PUBLIC: int __db_vrfy_ccset __P((DBC *, db_pgno_t, VRFY_CHILDINFO **));
+ */
+int
+__db_vrfy_ccset(dbc, pgno, cipp)
+ DBC *dbc;
+ db_pgno_t pgno;
+ VRFY_CHILDINFO **cipp;
+{
+ DBT key, data;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_SET)) != 0)
+ return (ret);
+
+ DB_ASSERT(data.size == sizeof(VRFY_CHILDINFO));
+ *cipp = (VRFY_CHILDINFO *)data.data;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_ccnext --
+ * Gets the next child of the given cursor created with
+ * __db_vrfy_childcursor, and returns it in the memory provided in the
+ * second arg.
+ *
+ * PUBLIC: int __db_vrfy_ccnext __P((DBC *, VRFY_CHILDINFO **));
+ */
+int
+__db_vrfy_ccnext(dbc, cipp)
+ DBC *dbc;
+ VRFY_CHILDINFO **cipp;
+{
+ DBT key, data;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT_DUP)) != 0)
+ return (ret);
+
+ DB_ASSERT(data.size == sizeof(VRFY_CHILDINFO));
+ *cipp = (VRFY_CHILDINFO *)data.data;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_ccclose --
+ * Closes the cursor created with __db_vrfy_childcursor.
+ *
+ * This doesn't actually do anything interesting now, but it's
+ * not inconceivable that we might change the internal database usage
+ * and keep the interfaces the same, and a function call here or there
+ * seldom hurts anyone.
+ *
+ * PUBLIC: int __db_vrfy_ccclose __P((DBC *));
+ */
+int
+__db_vrfy_ccclose(dbc)
+ DBC *dbc;
+{
+
+ return (dbc->c_close(dbc));
+}
+
+/*
+ * __db_vrfy_pageinfo_create --
+ * Constructor for VRFY_PAGEINFO; allocates and initializes.
+ *
+ * PUBLIC: int __db_vrfy_pageinfo_create __P((VRFY_PAGEINFO **));
+ */
+int
+__db_vrfy_pageinfo_create(pgipp)
+ VRFY_PAGEINFO **pgipp;
+{
+ VRFY_PAGEINFO *pgip;
+ int ret;
+
+ if ((ret = __os_calloc(NULL,
+ 1, sizeof(VRFY_PAGEINFO), (void **)&pgip)) != 0)
+ return (ret);
+
+ DB_ASSERT(pgip->pi_refcount == 0);
+
+ *pgipp = pgip;
+ return (0);
+}
+
+/*
+ * __db_salvage_init --
+ * Set up salvager database.
+ *
+ * PUBLIC: int __db_salvage_init __P((VRFY_DBINFO *));
+ */
+int
+__db_salvage_init(vdp)
+ VRFY_DBINFO *vdp;
+{
+ DB *dbp;
+ int ret;
+
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ return (ret);
+
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0)
+ goto err;
+
+ if ((ret = dbp->open(dbp, NULL, NULL, DB_BTREE, DB_CREATE, 0)) != 0)
+ goto err;
+
+ vdp->salvage_pages = dbp;
+ return (0);
+
+err: (void)dbp->close(dbp, 0);
+ return (ret);
+}
+
+/*
+ * __db_salvage_destroy --
+ * Close salvager database.
+ * PUBLIC: void __db_salvage_destroy __P((VRFY_DBINFO *));
+ */
+void
+__db_salvage_destroy(vdp)
+ VRFY_DBINFO *vdp;
+{
+ (void)vdp->salvage_pages->close(vdp->salvage_pages, 0);
+}
+
+/*
+ * __db_salvage_getnext --
+ * Get the next (first) unprinted page in the database of pages we still
+ * need to print. Delete entries for any already-printed pages we encounter
+ * in this search, as well as the page we're returning.
+ *
+ * PUBLIC: int __db_salvage_getnext
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t *, u_int32_t *));
+ */
+int
+__db_salvage_getnext(vdp, pgnop, pgtypep)
+ VRFY_DBINFO *vdp;
+ db_pgno_t *pgnop;
+ u_int32_t *pgtypep;
+{
+ DB *dbp;
+ DBC *dbc;
+ DBT key, data;
+ int ret;
+ u_int32_t pgtype;
+
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
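+ /*
+ * Walk the salvage database, deleting entries as we go; the first
+ * one not marked SALVAGE_IGNORE is the page to hand back.
+ */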
+ while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+ DB_ASSERT(data.size == sizeof(u_int32_t));
+ memcpy(&pgtype, data.data, sizeof(pgtype));
+
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+ if (pgtype != SALVAGE_IGNORE)
+ goto found;
+ }
+
+ /* No more entries--ret probably equals DB_NOTFOUND. */
+
+ if (0) {
+found: DB_ASSERT(key.size == sizeof(db_pgno_t));
+ DB_ASSERT(data.size == sizeof(u_int32_t));
+
+ *pgnop = *(db_pgno_t *)key.data;
+ *pgtypep = *(u_int32_t *)data.data;
+ }
+
+err: (void)dbc->c_close(dbc);
+ return (ret);
+}
+
+/*
+ * __db_salvage_isdone --
+ * Return whether or not the given pgno is already marked
+ * SALVAGE_IGNORE (meaning that we don't need to print it again).
+ *
+ * Returns DB_KEYEXIST if it is marked, 0 if not, or another error on
+ * error.
+ *
+ * PUBLIC: int __db_salvage_isdone __P((VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_salvage_isdone(vdp, pgno)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+{
+ DBT key, data;
+ DB *dbp;
+ int ret;
+ u_int32_t currtype;
+
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ currtype = SALVAGE_INVALID;
+ data.data = &currtype;
+ data.ulen = sizeof(u_int32_t);
+ data.flags = DB_DBT_USERMEM;
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ /*
+ * Look up any existing entry for this page; all we need to know
+ * is whether it has already been marked SALVAGE_IGNORE.
+ */
+ ret = dbp->get(dbp, NULL, &key, &data, 0);
+ if (ret == 0) {
+ /*
+ * The key's already here. Check and see if it's already
+ * marked done. If it is, return DB_KEYEXIST. If it's not,
+ * return 0.
+ */
+ if (currtype == SALVAGE_IGNORE)
+ return (DB_KEYEXIST);
+ else
+ return (0);
+ } else if (ret != DB_NOTFOUND)
+ return (ret);
+
+ /* The pgno is not yet marked anything; return 0. */
+ return (0);
+}
+
+/*
+ * __db_salvage_markdone --
+ * Mark as done a given page.
+ *
+ * PUBLIC: int __db_salvage_markdone __P((VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_salvage_markdone(vdp, pgno)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+{
+ DBT key, data;
+ DB *dbp;
+ int pgtype, ret;
+ u_int32_t currtype;
+
+ pgtype = SALVAGE_IGNORE;
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ currtype = SALVAGE_INVALID;
+ data.data = &currtype;
+ data.ulen = sizeof(u_int32_t);
+ data.flags = DB_DBT_USERMEM;
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ /*
+ * Put an entry for this page, with pgno as key and type as data,
+ * unless it's already there and is marked done.
+ * If it's there and is marked anything else, that's fine--we
+ * want to mark it done, but db_salvage_isdone only lets
+ * us know if it's marked IGNORE.
+ *
+ * We don't want to return DB_KEYEXIST, though; this will
+ * likely get passed up all the way and make no sense to the
+ * application. Instead, use DB_VERIFY_BAD to indicate that
+ * we've seen this page already--it probably indicates a
+ * multiply-linked page.
+ */
+ if ((ret = __db_salvage_isdone(vdp, pgno)) != 0)
+ return (ret == DB_KEYEXIST ? DB_VERIFY_BAD : ret);
+
+ data.size = sizeof(u_int32_t);
+ data.data = &pgtype;
+
+ return (dbp->put(dbp, NULL, &key, &data, 0));
+}
+
+/*
+ * __db_salvage_markneeded --
+ * If it has not yet been printed, make note of the fact that a page
+ * must be dealt with later.
+ *
+ * PUBLIC: int __db_salvage_markneeded
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, u_int32_t));
+ */
+int
+__db_salvage_markneeded(vdp, pgno, pgtype)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ data.data = &pgtype;
+ data.size = sizeof(u_int32_t);
+
+ /*
+ * Put an entry for this page, with pgno as key and type as data,
+ * unless it's already there, in which case it's presumably
+ * already been marked done.
+ */
+ ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE);
+ return (ret == DB_KEYEXIST ? 0 : ret);
+}
diff --git a/bdb/db185/db185.c b/bdb/db185/db185.c
new file mode 100644
index 00000000000..84327542485
--- /dev/null
+++ b/bdb/db185/db185.c
@@ -0,0 +1,593 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db185.c,v 11.15 2001/01/23 21:27:03 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db185_int.h"
+
+static int db185_close __P((DB185 *));
+static int db185_compare __P((DB *, const DBT *, const DBT *));
+static int db185_del __P((const DB185 *, const DBT185 *, u_int));
+static int db185_fd __P((const DB185 *));
+static int db185_get __P((const DB185 *, const DBT185 *, DBT185 *, u_int));
+static u_int32_t
+ db185_hash __P((DB *, const void *, u_int32_t));
+static void db185_openstderr __P((DB_FH *));
+static size_t db185_prefix __P((DB *, const DBT *, const DBT *));
+static int db185_put __P((const DB185 *, DBT185 *, const DBT185 *, u_int));
+static int db185_seq __P((const DB185 *, DBT185 *, DBT185 *, u_int));
+static int db185_sync __P((const DB185 *, u_int));
+
+DB185 *
+__db185_open(file, oflags, mode, type, openinfo)
+ const char *file;
+ int oflags, mode;
+ DBTYPE type;
+ const void *openinfo;
+{
+ const BTREEINFO *bi;
+ const HASHINFO *hi;
+ const RECNOINFO *ri;
+ DB *dbp;
+ DB185 *db185p;
+ DB_FH fh;
+ size_t nw;
+ int ret;
+
+ dbp = NULL;
+ db185p = NULL;
+
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = __os_calloc(NULL, 1, sizeof(DB185), &db185p)) != 0)
+ goto err;
+
+ /*
+ * !!!
+ * The DBTYPE enum wasn't initialized in DB 185, so it's off-by-one
+ * from DB 2.0.
+ */
+ switch (type) {
+ case 0: /* DB_BTREE */
+ type = DB_BTREE;
+ if ((bi = openinfo) != NULL) {
+ if (bi->flags & ~R_DUP)
+ goto einval;
+ if (bi->flags & R_DUP)
+ (void)dbp->set_flags(dbp, DB_DUP);
+ if (bi->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, bi->cachesize, 0);
+ if (bi->minkeypage != 0)
+ (void)dbp->set_bt_minkey(dbp, bi->minkeypage);
+ if (bi->psize != 0)
+ (void)dbp->set_pagesize(dbp, bi->psize);
+ /*
+ * !!!
+ * Comparisons and prefix calls work because the DBT
+ * structures in 1.85 and 2.0 have the same initial
+ * fields.
+ */
+ if (bi->prefix != NULL) {
+ db185p->prefix = bi->prefix;
+ dbp->set_bt_prefix(dbp, db185_prefix);
+ }
+ if (bi->compare != NULL) {
+ db185p->compare = bi->compare;
+ dbp->set_bt_compare(dbp, db185_compare);
+ }
+ if (bi->lorder != 0)
+ dbp->set_lorder(dbp, bi->lorder);
+ }
+ break;
+ case 1: /* DB_HASH */
+ type = DB_HASH;
+ if ((hi = openinfo) != NULL) {
+ if (hi->bsize != 0)
+ (void)dbp->set_pagesize(dbp, hi->bsize);
+ if (hi->ffactor != 0)
+ (void)dbp->set_h_ffactor(dbp, hi->ffactor);
+ if (hi->nelem != 0)
+ (void)dbp->set_h_nelem(dbp, hi->nelem);
+ if (hi->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, hi->cachesize, 0);
+ if (hi->hash != NULL) {
+ db185p->hash = hi->hash;
+ (void)dbp->set_h_hash(dbp, db185_hash);
+ }
+ if (hi->lorder != 0)
+ dbp->set_lorder(dbp, hi->lorder);
+ }
+
+ break;
+ case 2: /* DB_RECNO */
+ type = DB_RECNO;
+
+ /* DB 1.85 did renumbering by default. */
+ (void)dbp->set_flags(dbp, DB_RENUMBER);
+
+ /*
+ * !!!
+ * The file name given to DB 1.85 recno is the name of the DB
+ * 2.0 backing file. If the file doesn't exist, create it when
+ * the user has the O_CREAT flag set: DB 1.85 did that for you,
+ * and DB 2.0 doesn't.
+ *
+ * !!!
+ * Setting the file name to NULL specifies that we're creating
+ * a temporary backing file, in DB 2.X. If we're opening the
+ * DB file read-only, change the flags to read-write, because
+ * temporary backing files cannot be opened read-only, and DB
+ * 2.X will return an error. We are cheating here -- if the
+ * application does a put on the database, it will succeed --
+ * although that would be a stupid thing for the application
+ * to do.
+ *
+ * !!!
+ * Note, the file name in DB 1.85 was a const -- we don't do
+ * that in DB 2.0, so do that cast.
+ */
+ if (file != NULL) {
+ if (oflags & O_CREAT && __os_exists(file, NULL) != 0)
+ if (__os_openhandle(NULL, file,
+ oflags, mode, &fh) == 0)
+ (void)__os_closehandle(&fh);
+ (void)dbp->set_re_source(dbp, file);
+
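+ /*
+ * O_RDONLY is usually defined as 0, so the test only
+ * matters on systems where it's a distinct bit; either
+ * way the flags end up read-write.
+ */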
+ if (O_RDONLY)
+ oflags &= ~O_RDONLY;
+ oflags |= O_RDWR;
+ file = NULL;
+ }
+
+ if ((ri = openinfo) != NULL) {
+ /*
+ * !!!
+ * We can't support the bfname field.
+ */
+#define BFMSG "DB: DB 1.85's recno bfname field is not supported.\n"
+ if (ri->bfname != NULL) {
+ db185_openstderr(&fh);
+ (void)__os_write(NULL, &fh,
+ BFMSG, sizeof(BFMSG) - 1, &nw);
+ goto einval;
+ }
+
+ if (ri->flags & ~(R_FIXEDLEN | R_NOKEY | R_SNAPSHOT))
+ goto einval;
+ if (ri->flags & R_FIXEDLEN) {
+ if (ri->bval != 0)
+ (void)dbp->set_re_pad(dbp, ri->bval);
+ if (ri->reclen != 0)
+ (void)dbp->set_re_len(dbp, ri->reclen);
+ } else
+ if (ri->bval != 0)
+ (void)dbp->set_re_delim(dbp, ri->bval);
+
+ /*
+ * !!!
+ * We ignore the R_NOKEY flag, but that's okay, it was
+ * only an optimization that was never implemented.
+ */
+ if (ri->flags & R_SNAPSHOT)
+ (void)dbp->set_flags(dbp, DB_SNAPSHOT);
+
+ if (ri->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, ri->cachesize, 0);
+ if (ri->psize != 0)
+ (void)dbp->set_pagesize(dbp, ri->psize);
+ if (ri->lorder != 0)
+ dbp->set_lorder(dbp, ri->lorder);
+ }
+ break;
+ default:
+ goto einval;
+ }
+
+ db185p->close = db185_close;
+ db185p->del = db185_del;
+ db185p->fd = db185_fd;
+ db185p->get = db185_get;
+ db185p->put = db185_put;
+ db185p->seq = db185_seq;
+ db185p->sync = db185_sync;
+
+ /*
+ * Store a reference so we can indirect from the DB 1.85 structure
+ * to the underlying DB structure, and vice-versa. This has to be
+ * done BEFORE the DB::open method call because the hash callback
+ * is exercised as part of hash database initialization.
+ *
+ * XXX
+ * Overload the cj_internal field for this purpose.
+ */
+ db185p->dbp = dbp;
+ dbp->cj_internal = db185p;
+
+ /* Open the database. */
+ if ((ret = dbp->open(dbp,
+ file, NULL, type, __db_oflags(oflags), mode)) != 0)
+ goto err;
+
+ /* Create the cursor used for sequential ops. */
+ if ((ret = dbp->cursor(dbp, NULL, &((DB185 *)db185p)->dbc, 0)) != 0)
+ goto err;
+
+ return (db185p);
+
+einval: ret = EINVAL;
+
+err: if (db185p != NULL)
+ __os_free(db185p, sizeof(DB185));
+ if (dbp != NULL)
+ (void)dbp->close(dbp, 0);
+
+ __os_set_errno(ret);
+ return (NULL);
+}
+
+static int
+db185_close(db185p)
+ DB185 *db185p;
+{
+ DB *dbp;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ ret = dbp->close(dbp, 0);
+
+ __os_free(db185p, sizeof(DB185));
+
+ if (ret == 0)
+ return (0);
+
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_del(db185p, key185, flags)
+ const DB185 *db185p;
+ const DBT185 *key185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+
+ if (flags & ~R_CURSOR)
+ goto einval;
+ if (flags & R_CURSOR)
+ ret = db185p->dbc->c_del(db185p->dbc, 0);
+ else
+ ret = dbp->del(dbp, NULL, &key, 0);
+
+ switch (ret) {
+ case 0:
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ __os_set_errno(ret);
+ return (-1);
+
+einval: __os_set_errno(EINVAL);
+ return (-1);
+}
+
+static int
+db185_fd(db185p)
+ const DB185 *db185p;
+{
+ DB *dbp;
+ int fd, ret;
+
+ dbp = db185p->dbp;
+
+ if ((ret = dbp->fd(dbp, &fd)) == 0)
+ return (fd);
+
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_get(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ const DBT185 *key185;
+ DBT185 *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ if (flags)
+ goto einval;
+
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ data185->data = data.data;
+ data185->size = data.size;
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ __os_set_errno(ret);
+ return (-1);
+
+einval: __os_set_errno(EINVAL);
+ return (-1);
+}
+
+static int
+db185_put(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ DBT185 *key185;
+ const DBT185 *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBC *dbcp_put;
+ DBT key, data;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ switch (flags) {
+ case 0:
+ ret = dbp->put(dbp, NULL, &key, &data, 0);
+ break;
+ case R_CURSOR:
+ ret = db185p->dbc->c_put(db185p->dbc, &key, &data, DB_CURRENT);
+ break;
+ case R_IAFTER:
+ case R_IBEFORE:
+ if (dbp->type != DB_RECNO)
+ goto einval;
+
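+ /*
+ * Position a temporary cursor on the given key, then
+ * insert relative to it with DB_AFTER/DB_BEFORE.
+ */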
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp_put, 0)) != 0) {
+ __os_set_errno(ret);
+ return (-1);
+ }
+ if ((ret =
+ dbcp_put->c_get(dbcp_put, &key, &data, DB_SET)) != 0) {
+ (void)dbcp_put->c_close(dbcp_put);
+ __os_set_errno(ret);
+ return (-1);
+ }
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+ ret = dbcp_put->c_put(dbcp_put,
+ &key, &data, flags == R_IAFTER ? DB_AFTER : DB_BEFORE);
+ (void)dbcp_put->c_close(dbcp_put);
+ __os_set_errno(ret);
+ break;
+ case R_NOOVERWRITE:
+ ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE);
+ break;
+ case R_SETCURSOR:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+
+ if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
+ break;
+ ret =
+ db185p->dbc->c_get(db185p->dbc, &key, &data, DB_SET_RANGE);
+ break;
+ default:
+ goto einval;
+ }
+
+ switch (ret) {
+ case 0:
+ key185->data = key.data;
+ key185->size = key.size;
+ return (0);
+ case DB_KEYEXIST:
+ return (1);
+ }
+ __os_set_errno(ret);
+ return (-1);
+
+einval: __os_set_errno(EINVAL);
+ return (-1);
+}
+
+static int
+db185_seq(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ DBT185 *key185, *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ switch (flags) {
+ case R_CURSOR:
+ flags = DB_SET_RANGE;
+ break;
+ case R_FIRST:
+ flags = DB_FIRST;
+ break;
+ case R_LAST:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+ flags = DB_LAST;
+ break;
+ case R_NEXT:
+ flags = DB_NEXT;
+ break;
+ case R_PREV:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+ flags = DB_PREV;
+ break;
+ default:
+ goto einval;
+ }
+ switch (ret = db185p->dbc->c_get(db185p->dbc, &key, &data, flags)) {
+ case 0:
+ key185->data = key.data;
+ key185->size = key.size;
+ data185->data = data.data;
+ data185->size = data.size;
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ __os_set_errno(ret);
+ return (-1);
+
+einval: __os_set_errno(EINVAL);
+ return (-1);
+}
+
+static int
+db185_sync(db185p, flags)
+ const DB185 *db185p;
+ u_int flags;
+{
+ DB *dbp;
+ DB_FH fh;
+ size_t nw;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ switch (flags) {
+ case 0:
+ break;
+ case R_RECNOSYNC:
+ /*
+ * !!!
+ * We can't support the R_RECNOSYNC flag.
+ */
+#define RSMSG "DB: DB 1.85's R_RECNOSYNC sync flag is not supported.\n"
+ db185_openstderr(&fh);
+ (void)__os_write(NULL, &fh, RSMSG, sizeof(RSMSG) - 1, &nw);
+ goto einval;
+ default:
+ goto einval;
+ }
+
+ if ((ret = dbp->sync(dbp, 0)) == 0)
+ return (0);
+
+ __os_set_errno(ret);
+ return (-1);
+
+einval: __os_set_errno(EINVAL);
+ return (-1);
+}
+
+static void
+db185_openstderr(fhp)
+ DB_FH *fhp;
+{
+ /* Dummy up the results of an __os_openhandle() on stderr. */
+ memset(fhp, 0, sizeof(*fhp));
+ F_SET(fhp, DB_FH_VALID);
+
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+ fhp->fd = STDERR_FILENO;
+}
+
+/*
+ * db185_compare --
+ * Cutout routine to call the user's Btree comparison function.
+ */
+static int
+db185_compare(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ return (((DB185 *)dbp->cj_internal)->compare(a, b));
+}
+
+/*
+ * db185_prefix --
+ * Cutout routine to call the user's Btree prefix function.
+ */
+static size_t
+db185_prefix(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ return (((DB185 *)dbp->cj_internal)->prefix(a, b));
+}
+
+/*
+ * db185_hash --
+ * Cutout routine to call the user's hash function.
+ */
+static u_int32_t
+db185_hash(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ return (((DB185 *)dbp->cj_internal)->hash(key, (size_t)len));
+}
diff --git a/bdb/db185/db185_int.h b/bdb/db185/db185_int.h
new file mode 100644
index 00000000000..172019d3f00
--- /dev/null
+++ b/bdb/db185/db185_int.h
@@ -0,0 +1,129 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: db185_int.h,v 11.7 2001/01/22 22:22:46 krinsky Exp $
+ */
+
+#ifndef _DB185_H_
+#define _DB185_H_
+
+/* Routine flags. */
+#define R_CURSOR 1 /* del, put, seq */
+#define __R_UNUSED 2 /* UNUSED */
+#define R_FIRST 3 /* seq */
+#define R_IAFTER 4 /* put (RECNO) */
+#define R_IBEFORE 5 /* put (RECNO) */
+#define R_LAST 6 /* seq (BTREE, RECNO) */
+#define R_NEXT 7 /* seq */
+#define R_NOOVERWRITE 8 /* put */
+#define R_PREV 9 /* seq (BTREE, RECNO) */
+#define R_SETCURSOR 10 /* put (RECNO) */
+#define R_RECNOSYNC 11 /* sync (RECNO) */
+
+typedef struct {
+ void *data; /* data */
+ size_t size; /* data length */
+} DBT185;
+
+/* Access method description structure. */
+typedef struct __db185 {
+ DBTYPE type; /* Underlying db type. */
+ int (*close) __P((struct __db185 *));
+ int (*del) __P((const struct __db185 *, const DBT185 *, u_int));
+ int (*get)
+ __P((const struct __db185 *, const DBT185 *, DBT185 *, u_int));
+ int (*put)
+ __P((const struct __db185 *, DBT185 *, const DBT185 *, u_int));
+ int (*seq)
+ __P((const struct __db185 *, DBT185 *, DBT185 *, u_int));
+ int (*sync) __P((const struct __db185 *, u_int));
+ DB *dbp; /* DB structure. Was void *internal. */
+ int (*fd) __P((const struct __db185 *));
+
+ /*
+ * !!!
+ * The following elements added to the end of the DB 1.85 DB
+ * structure.
+ */
+ DBC *dbc; /* DB cursor. */
+ /* Various callback functions. */
+ int (*compare) __P((const DBT *, const DBT *));
+ size_t (*prefix) __P((const DBT *, const DBT *));
+ u_int32_t (*hash) __P((const void *, size_t));
+} DB185;
+
+/* Structure used to pass parameters to the btree routines. */
+typedef struct {
+#define R_DUP 0x01 /* duplicate keys */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t maxkeypage; /* maximum keys per page */
+ u_int32_t minkeypage; /* minimum keys per page */
+ u_int32_t psize; /* page size */
+ int (*compare) /* comparison function */
+ __P((const DBT *, const DBT *));
+ size_t (*prefix) /* prefix function */
+ __P((const DBT *, const DBT *));
+ int lorder; /* byte order */
+} BTREEINFO;
+
+/* Structure used to pass parameters to the hashing routines. */
+typedef struct {
+ u_int32_t bsize; /* bucket size */
+ u_int32_t ffactor; /* fill factor */
+ u_int32_t nelem; /* number of elements */
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t /* hash function */
+ (*hash) __P((const void *, size_t));
+ int lorder; /* byte order */
+} HASHINFO;
+
+/* Structure used to pass parameters to the record routines. */
+typedef struct {
+#define R_FIXEDLEN 0x01 /* fixed-length records */
+#define R_NOKEY 0x02 /* key not required */
+#define R_SNAPSHOT 0x04 /* snapshot the input */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t psize; /* page size */
+ int lorder; /* byte order */
+ size_t reclen; /* record length (fixed-length records) */
+ u_char bval; /* delimiting byte (variable-length records) */
+ char *bfname; /* btree file name */
+} RECNOINFO;
+#endif /* !_DB185_H_ */
diff --git a/bdb/db_archive/db_archive.c b/bdb/db_archive/db_archive.c
new file mode 100644
index 00000000000..7c91e42f390
--- /dev/null
+++ b/bdb/db_archive/db_archive.c
@@ -0,0 +1,164 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_archive.c,v 11.18 2001/01/18 18:36:56 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+int main __P((int, char *[]));
+void usage __P((void));
+void version_check __P((void));
+
+DB_ENV *dbenv;
+const char
+ *progname = "db_archive"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ u_int32_t flags;
+ int ch, e_close, exitval, ret, verbose;
+ char **file, *home, **list;
+
+ version_check();
+
+ flags = 0;
+ e_close = exitval = verbose = 0;
+ home = NULL;
+ while ((ch = getopt(argc, argv, "ah:lsVv")) != EOF)
+ switch (ch) {
+ case 'a':
+ LF_SET(DB_ARCH_ABS);
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ LF_SET(DB_ARCH_LOG);
+ break;
+ case 's':
+ LF_SET(DB_ARCH_DATA);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ usage();
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose)
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home, DB_CREATE |
+ DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Get the list of names. */
+ if ((ret = log_archive(dbenv, &list, flags, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "log_archive");
+ goto shutdown;
+ }
+
+ /* Print the list of names. */
+ if (list != NULL) {
+ for (file = list; *file != NULL; ++file)
+ printf("%s\n", *file);
+ __os_free(list, 0);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: db_archive [-alsVv] [-h home]\n");
+ exit (1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_checkpoint/db_checkpoint.c b/bdb/db_checkpoint/db_checkpoint.c
new file mode 100644
index 00000000000..c7d16e02334
--- /dev/null
+++ b/bdb/db_checkpoint/db_checkpoint.c
@@ -0,0 +1,237 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_checkpoint.c,v 11.25 2001/01/18 18:36:57 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+#include "common_ext.h"
+#include "clib_ext.h"
+
+char *check __P((DB_ENV *, long, long));
+int main __P((int, char *[]));
+void usage __P((void));
+void version_check __P((void));
+
+DB_ENV *dbenv;
+const char
+ *progname = "db_checkpoint"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ time_t now;
+ long argval;
+ u_int32_t flags, kbytes, minutes, seconds;
+ int ch, e_close, exitval, once, ret, verbose;
+ char *home, *logfile;
+
+ version_check();
+
+ /*
+ * !!!
+ * Don't allow a fully unsigned 32-bit number; some compilers get
+ * upset and require it to be specified in hexadecimal and so on.
+ */
+#define MAX_UINT32_T 2147483647
+
+ kbytes = minutes = 0;
+ e_close = exitval = once = verbose = 0;
+ flags = 0;
+ home = logfile = NULL;
+ while ((ch = getopt(argc, argv, "1h:k:L:p:Vv")) != EOF)
+ switch (ch) {
+ case '1':
+ once = 1;
+ flags = DB_FORCE;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ (void)__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval);
+ kbytes = argval;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'p':
+ (void)__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval);
+ minutes = argval;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ goto shutdown;
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ usage();
+
+ if (once == 0 && kbytes == 0 && minutes == 0) {
+ (void)fprintf(stderr,
+ "%s: at least one of -1, -k and -p must be specified\n",
+ progname);
+ exit (1);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ /* Initialize the environment. */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Register the standard pgin/pgout functions, in case we do I/O. */
+ if ((ret =
+ memp_register(dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
+ dbenv->err(dbenv, ret,
+ "failed to register access method functions");
+ goto shutdown;
+ }
+
+ /*
+ * If we have only a time delay, then we'll sleep the right amount
+ * to wake up when a checkpoint is necessary. If we have a "kbytes"
+ * field set, then we'll check every 30 seconds.
+ */
+ seconds = kbytes != 0 ? 30 : minutes * 60;
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "checkpoint: %s", ctime(&now));
+ }
+
+ ret = txn_checkpoint(dbenv, kbytes, minutes, flags);
+ while (ret == DB_INCOMPLETE) {
+ if (verbose)
+ dbenv->errx(dbenv,
+ "checkpoint did not finish, retrying\n");
+ (void)__os_sleep(dbenv, 2, 0);
+ ret = txn_checkpoint(dbenv, 0, 0, flags);
+ }
+ if (ret != 0) {
+ dbenv->err(dbenv, ret, "txn_checkpoint");
+ goto shutdown;
+ }
+
+ if (once)
+ break;
+
+ (void)__os_sleep(dbenv, seconds, 0);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: db_checkpoint [-1Vv] [-h home] [-k kbytes] [-L file] [-p min]\n");
+ exit(1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_deadlock/db_deadlock.c b/bdb/db_deadlock/db_deadlock.c
new file mode 100644
index 00000000000..ac151db127a
--- /dev/null
+++ b/bdb/db_deadlock/db_deadlock.c
@@ -0,0 +1,222 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_deadlock.c,v 11.19 2001/01/18 18:36:57 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "clib_ext.h"
+
+int main __P((int, char *[]));
+void usage __P((void));
+void version_check __P((void));
+
+DB_ENV *dbenv;
+const char
+ *progname = "db_deadlock"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ u_int32_t atype;
+ time_t now;
+ long usecs;
+ u_int32_t flags;
+ int ch, e_close, exitval, ret, verbose;
+ char *home, *logfile;
+
+ version_check();
+
+ atype = DB_LOCK_DEFAULT;
+ home = logfile = NULL;
+ usecs = 0;
+ flags = 0;
+ e_close = exitval = verbose = 0;
+ while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF)
+ switch (ch) {
+ case 'a':
+ switch (optarg[0]) {
+ case 'o':
+ atype = DB_LOCK_OLDEST;
+ break;
+ case 'y':
+ atype = DB_LOCK_YOUNGEST;
+ break;
+ default:
+ usage();
+ /* NOTREACHED */
+ }
+ if (optarg[1] != '\0')
+ usage();
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 't':
+ (void)__db_getlong(NULL,
+ progname, optarg, 1, LONG_MAX, &usecs);
+ usecs *= 1000000;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case 'v':
+ verbose = 1;
+ break;
+ case 'w':
+ LF_SET(DB_LOCK_CONFLICT);
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ usage();
+
+ if (usecs == 0 && !LF_ISSET(DB_LOCK_CONFLICT)) {
+ fprintf(stderr,
+ "%s: at least one of -t and -w must be specified\n",
+ progname);
+ exit(1);
+ }
+
+ /*
+ * We detect every 100ms (100000 us) when we're running in
+ * DB_LOCK_CONFLICT mode.
+ */
+ if (usecs == 0)
+ usecs = 100000;
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1);
+ }
+
+ /* An environment is required. */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "running at %.24s", ctime(&now));
+ }
+
+ if ((ret = lock_detect(dbenv, flags, atype, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "lock_detect");
+ goto shutdown;
+ }
+
+ /* Make a pass every "usecs" usecs. */
+ (void)__os_sleep(dbenv, 0, usecs);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: db_deadlock [-Vvw] [-a o | y] [-h home] [-L file] [-t sec]\n");
+ exit(1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_dump/db_dump.c b/bdb/db_dump/db_dump.c
new file mode 100644
index 00000000000..ba24dd6cc09
--- /dev/null
+++ b/bdb/db_dump/db_dump.c
@@ -0,0 +1,517 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_dump.c,v 11.41 2001/01/18 18:36:57 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "hash.h"
+#include "lock.h"
+
+void configure __P((char *));
+int db_init __P((char *));
+int dump __P((DB *, int, int));
+int dump_sub __P((DB *, char *, int, int));
+int is_sub __P((DB *, int *));
+int main __P((int, char *[]));
+int show_subs __P((DB *));
+void usage __P((void));
+void version_check __P((void));
+
+DB_ENV *dbenv;
+const char
+ *progname = "db_dump"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ int ch, d_close;
+ int e_close, exitval;
+ int lflag, nflag, pflag, ret, rflag, Rflag, subs, keyflag;
+ char *dopt, *home, *subname;
+
+ version_check();
+
+ dbp = NULL;
+ d_close = e_close = exitval = lflag = nflag = pflag = rflag = Rflag = 0;
+ keyflag = 0;
+ dopt = home = subname = NULL;
+ while ((ch = getopt(argc, argv, "d:f:h:klNprRs:V")) != EOF)
+ switch (ch) {
+ case 'd':
+ dopt = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ progname, optarg, strerror(errno));
+ exit (1);
+ }
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ keyflag = 1;
+ break;
+ case 'l':
+ lflag = 1;
+ break;
+ case 'N':
+ nflag = 1;
+ if ((ret = db_env_set_panicstate(0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_set_panicstate: %s\n",
+ progname, db_strerror(ret));
+ return (1);
+ }
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case 's':
+ subname = optarg;
+ break;
+ case 'R':
+ Rflag = 1;
+ /* DB_AGGRESSIVE requires DB_SALVAGE */
+ /* FALLTHROUGH */
+ case 'r':
+ rflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ usage();
+
+ if (dopt != NULL && pflag) {
+ fprintf(stderr,
+ "%s: the -d and -p options may not both be specified\n",
+ progname);
+ exit (1);
+ }
+ if (lflag && subname != NULL) {
+ fprintf(stderr,
+ "%s: the -l and -s options may not both be specified\n",
+ progname);
+ exit (1);
+ }
+
+ if (keyflag && rflag) {
+ fprintf(stderr,
+ "%s: the -k and -r or -R options may not both be specified\n",
+ progname);
+ exit(1);
+ }
+
+ if (subname != NULL && rflag) {
+ fprintf(stderr,
+ "%s: the -s and -r or -R options may not both be specified\n",
+ progname);
+ exit(1);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto err;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_mutexlocks");
+ goto err;
+ }
+
+ /* Initialize the environment. */
+ if (db_init(home) != 0)
+ goto err;
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ d_close = 1;
+
+ /*
+ * If we're salvaging, don't do an open; it might not be safe.
+ * Dispatch now into the salvager.
+ */
+ if (rflag) {
+ if ((ret = dbp->verify(dbp, argv[0], NULL, stdout,
+ DB_SALVAGE | (Rflag ? DB_AGGRESSIVE : 0))) != 0)
+ goto err;
+ exitval = 0;
+ goto done;
+ }
+
+ if ((ret = dbp->open(dbp,
+ argv[0], subname, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "open: %s", argv[0]);
+ goto err;
+ }
+
+ if (dopt != NULL) {
+ if (__db_dump(dbp, dopt, NULL)) {
+ dbp->err(dbp, ret, "__db_dump: %s", argv[0]);
+ goto err;
+ }
+ } else if (lflag) {
+ if (is_sub(dbp, &subs))
+ goto err;
+ if (subs == 0) {
+ dbp->errx(dbp,
+ "%s: does not contain multiple databases", argv[0]);
+ goto err;
+ }
+ if (show_subs(dbp))
+ goto err;
+ } else {
+ subs = 0;
+ if (subname == NULL && is_sub(dbp, &subs))
+ goto err;
+ if (subs) {
+ if (dump_sub(dbp, argv[0], pflag, keyflag))
+ goto err;
+ } else
+ if (__db_prheader(dbp, NULL, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ dump(dbp, pflag, keyflag))
+ goto err;
+ }
+
+ if (0) {
+err: exitval = 1;
+ }
+done: if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbp->err(dbp, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(home)
+ char *home;
+{
+ int ret;
+
+ /*
+ * Try and use the underlying environment when opening a database. We
+ * wish to use the buffer pool so our information is as up-to-date as
+ * possible, even if the mpool cache hasn't been flushed; we wish to
+ * use the locking system, if present, so that we are safe to use with
+ * transactions. (We don't need to use transactions explicitly, as
+ * we're read-only.)
+ *
+ * Note that in CDB, too, this will configure our environment
+ * appropriately, and our cursors will (correctly) do locking as CDB
+ * read cursors.
+ */
+ if (dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0) == 0)
+ return (0);
+
+ /*
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
+/*
+ * is_sub --
+ * Return if the database contains subdatabases.
+ */
+int
+is_sub(dbp, yesno)
+ DB *dbp;
+ int *yesno;
+{
+ DB_BTREE_STAT *btsp;
+ DB_HASH_STAT *hsp;
+ int ret;
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = dbp->stat(dbp, &btsp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = btsp->bt_metaflags & BTM_SUBDB ? 1 : 0;
+ break;
+ case DB_HASH:
+ if ((ret = dbp->stat(dbp, &hsp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = hsp->hash_metaflags & DB_HASH_SUBDB ? 1 : 0;
+ break;
+ case DB_QUEUE:
+ break;
+ default:
+ dbp->errx(dbp, "unknown database type");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump_sub --
+ * Dump out the records for a DB containing subdatabases.
+ */
+int
+dump_sub(parent_dbp, parent_name, pflag, keyflag)
+ DB *parent_dbp;
+ char *parent_name;
+ int pflag, keyflag;
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+ char *subdb;
+
+ /*
+ * Get a cursor and step through the database, dumping out each
+ * subdatabase.
+ */
+ if ((ret = parent_dbp->cursor(parent_dbp, NULL, &dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ /* Nul terminate the subdatabase name. */
+ if ((subdb = malloc(key.size + 1)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ memcpy(subdb, key.data, key.size);
+ subdb[key.size] = '\0';
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ free(subdb);
+ return (1);
+ }
+ if ((ret = dbp->open(dbp,
+ parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
+ dbp->err(dbp, ret,
+ "DB->open: %s:%s", parent_name, subdb);
+ if (ret == 0 &&
+ (__db_prheader(dbp, subdb, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ dump(dbp, pflag, keyflag)))
+ ret = 1;
+ (void)dbp->close(dbp, 0);
+ free(subdb);
+ if (ret != 0)
+ return (1);
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * show_subs --
+ * Display the subdatabases for a database.
+ */
+int
+show_subs(dbp)
+ DB *dbp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+
+ /*
+ * Get a cursor and step through the database, printing out the key
+ * of each key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ if ((ret = __db_prdbt(&key, 1, NULL, stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ return (1);
+ }
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump --
+ * Dump out the records for a DB.
+ */
+int
+dump(dbp, pflag, keyflag)
+ DB *dbp;
+ int pflag, keyflag;
+{
+ DBC *dbcp;
+ DBT key, data;
+ int ret, is_recno;
+
+ /*
+ * Get a cursor and step through the database, printing out each
+ * key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE);
+ keyflag = is_recno ? keyflag : 1;
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+ if ((keyflag && (ret = __db_prdbt(&key,
+ pflag, " ", stdout, __db_verify_callback,
+ is_recno, NULL)) != 0) || (ret =
+ __db_prdbt(&data, pflag, " ", stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ return (1);
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+
+ (void)__db_prfooter(stdout, __db_verify_callback);
+ return (0);
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: %s\n",
+"db_dump [-klNprRV] [-d ahr] [-f output] [-h home] [-s database] db_file");
+ exit(1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_dump185/db_dump185.c b/bdb/db_dump185/db_dump185.c
new file mode 100644
index 00000000000..4b57fffd2dc
--- /dev/null
+++ b/bdb/db_dump185/db_dump185.c
@@ -0,0 +1,353 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#ifndef lint
+static char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static char revid[] =
+ "$Id: db_dump185.c,v 11.8 2001/01/10 17:26:21 bostic Exp $";
+#endif
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+
+/* Hash Table Information */
+typedef struct hashhdr185 { /* Disk resident portion */
+ int magic; /* Magic NO for hash tables */
+ int version; /* Version ID */
+ u_int32_t lorder; /* Byte Order */
+ int bsize; /* Bucket/Page Size */
+ int bshift; /* Bucket shift */
+ int dsize; /* Directory Size */
+ int ssize; /* Segment Size */
+ int sshift; /* Segment shift */
+ int ovfl_point; /* Where overflow pages are being
+ * allocated */
+ int last_freed; /* Last overflow page freed */
+ int max_bucket; /* ID of Maximum bucket in use */
+ int high_mask; /* Mask to modulo into entire table */
+ int low_mask; /* Mask to modulo into lower half of
+ * table */
+ int ffactor; /* Fill factor */
+ int nkeys; /* Number of keys in hash table */
+} HASHHDR185;
+typedef struct htab185 { /* Memory resident data structure */
+ HASHHDR185 hdr; /* Header */
+} HTAB185;
+
+/* Hash Table Information */
+typedef struct hashhdr186 { /* Disk resident portion */
+ int32_t magic; /* Magic NO for hash tables */
+ int32_t version; /* Version ID */
+ int32_t lorder; /* Byte Order */
+ int32_t bsize; /* Bucket/Page Size */
+ int32_t bshift; /* Bucket shift */
+ int32_t ovfl_point; /* Where overflow pages are being allocated */
+ int32_t last_freed; /* Last overflow page freed */
+ int32_t max_bucket; /* ID of Maximum bucket in use */
+ int32_t high_mask; /* Mask to modulo into entire table */
+ int32_t low_mask; /* Mask to modulo into lower half of table */
+ int32_t ffactor; /* Fill factor */
+ int32_t nkeys; /* Number of keys in hash table */
+ int32_t hdrpages; /* Size of table header */
+ int32_t h_charkey; /* value of hash(CHARKEY) */
+#define NCACHED 32 /* number of bit maps and spare points */
+ int32_t spares[NCACHED];/* spare pages for overflow */
+ u_int16_t bitmaps[NCACHED]; /* address of overflow page bitmaps */
+} HASHHDR186;
+typedef struct htab186 { /* Memory resident data structure */
+ HASHHDR186 hdr; /* Header */
+} HTAB186;
+
+typedef struct _epgno {
+ u_int32_t pgno; /* the page number */
+ u_int16_t index; /* the index on the page */
+} EPGNO;
+
+typedef struct _epg {
+ void *page; /* the (pinned) page */
+ u_int16_t index; /* the index on the page */
+} EPG;
+
+typedef struct _cursor {
+ EPGNO pg; /* B: Saved tree reference. */
+ DBT key; /* B: Saved key, or key.data == NULL. */
+ u_int32_t rcursor; /* R: recno cursor (1-based) */
+
+#define CURS_ACQUIRE 0x01 /* B: Cursor needs to be reacquired. */
+#define CURS_AFTER 0x02 /* B: Unreturned cursor after key. */
+#define CURS_BEFORE 0x04 /* B: Unreturned cursor before key. */
+#define CURS_INIT 0x08 /* RB: Cursor initialized. */
+ u_int8_t flags;
+} CURSOR;
+
+/* The in-memory btree/recno data structure. */
+typedef struct _btree {
+ void *bt_mp; /* memory pool cookie */
+
+ void *bt_dbp; /* pointer to enclosing DB */
+
+ EPG bt_cur; /* current (pinned) page */
+ void *bt_pinned; /* page pinned across calls */
+
+ CURSOR bt_cursor; /* cursor */
+
+ EPGNO bt_stack[50]; /* stack of parent pages */
+ EPGNO *bt_sp; /* current stack pointer */
+
+ DBT bt_rkey; /* returned key */
+ DBT bt_rdata; /* returned data */
+
+ int bt_fd; /* tree file descriptor */
+
+ u_int32_t bt_free; /* next free page */
+ u_int32_t bt_psize; /* page size */
+ u_int16_t bt_ovflsize; /* cut-off for key/data overflow */
+ int bt_lorder; /* byte order */
+ /* sorted order */
+ enum { NOT, BACK, FORWARD } bt_order;
+ EPGNO bt_last; /* last insert */
+
+ /* B: key comparison function */
+ int (*bt_cmp) __P((DBT *, DBT *));
+ /* B: prefix comparison function */
+ size_t (*bt_pfx) __P((DBT *, DBT *));
+ /* R: recno input function */
+ int (*bt_irec) __P((struct _btree *, u_int32_t));
+
+ FILE *bt_rfp; /* R: record FILE pointer */
+ int bt_rfd; /* R: record file descriptor */
+
+ void *bt_cmap; /* R: current point in mapped space */
+ void *bt_smap; /* R: start of mapped space */
+ void *bt_emap; /* R: end of mapped space */
+ size_t bt_msize; /* R: size of mapped region. */
+
+ u_int32_t bt_nrecs; /* R: number of records */
+ size_t bt_reclen; /* R: fixed record length */
+ u_char bt_bval; /* R: delimiting byte/pad character */
+
+/*
+ * NB:
+ * B_NODUPS and R_RECNO are stored on disk, and may not be changed.
+ */
+#define B_INMEM 0x00001 /* in-memory tree */
+#define B_METADIRTY 0x00002 /* need to write metadata */
+#define B_MODIFIED 0x00004 /* tree modified */
+#define B_NEEDSWAP 0x00008 /* if byte order requires swapping */
+#define B_RDONLY 0x00010 /* read-only tree */
+
+#define B_NODUPS 0x00020 /* no duplicate keys permitted */
+#define R_RECNO 0x00080 /* record oriented tree */
+
+#define R_CLOSEFP 0x00040 /* opened a file pointer */
+#define R_EOF 0x00100 /* end of input file reached. */
+#define R_FIXLEN 0x00200 /* fixed length records */
+#define R_MEMMAPPED 0x00400 /* memory mapped file. */
+#define R_INMEM 0x00800 /* in-memory file */
+#define R_MODIFIED 0x01000 /* modified file */
+#define R_RDONLY 0x02000 /* read-only file */
+
+#define B_DB_LOCK 0x04000 /* DB_LOCK specified. */
+#define B_DB_SHMEM 0x08000 /* DB_SHMEM specified. */
+#define B_DB_TXN 0x10000 /* DB_TXN specified. */
+ u_int32_t flags;
+} BTREE;
+
+void db_btree __P((DB *, int));
+void db_hash __P((DB *, int));
+void dbt_dump __P((DBT *));
+void dbt_print __P((DBT *));
+int main __P((int, char *[]));
+void usage __P((void));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ DBT key, data;
+ int ch, pflag, rval;
+
+ pflag = 0;
+ while ((ch = getopt(argc, argv, "f:p")) != EOF)
+ switch (ch) {
+ case 'f':
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr, "db_dump185: %s: %s\n",
+ optarg, strerror(errno));
+ exit (1);
+ }
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ usage();
+
+ if ((dbp = dbopen(argv[0], O_RDONLY, 0, DB_BTREE, NULL)) == NULL) {
+ if ((dbp =
+ dbopen(argv[0], O_RDONLY, 0, DB_HASH, NULL)) == NULL) {
+ fprintf(stderr,
+ "db_dump185: %s: %s\n", argv[0], strerror(errno));
+ exit (1);
+ }
+ db_hash(dbp, pflag);
+ } else
+ db_btree(dbp, pflag);
+
+ /*
+ * !!!
+ * DB 1.85 DBTs are a subset of DB 2.0 DBTs, so we just use the
+ * new dump/print routines.
+ */
+ if (pflag)
+ while (!(rval = dbp->seq(dbp, &key, &data, R_NEXT))) {
+ dbt_print(&key);
+ dbt_print(&data);
+ }
+ else
+ while (!(rval = dbp->seq(dbp, &key, &data, R_NEXT))) {
+ dbt_dump(&key);
+ dbt_dump(&data);
+ }
+
+ if (rval == -1) {
+ fprintf(stderr, "db_dump185: seq: %s\n", strerror(errno));
+ exit (1);
+ }
+ return (0);
+}
+
+/*
+ * db_hash --
+ * Dump out hash header information.
+ */
+void
+db_hash(dbp, pflag)
+ DB *dbp;
+ int pflag;
+{
+ HTAB185 *hash185p;
+ HTAB186 *hash186p;
+
+ printf("format=%s\n", pflag ? "print" : "bytevalue");
+ printf("type=hash\n");
+
+ /* DB 1.85 was version 2, DB 1.86 was version 3. */
+ hash185p = dbp->internal;
+ if (hash185p->hdr.version > 2) {
+ hash186p = dbp->internal;
+ printf("h_ffactor=%lu\n", (u_long)hash186p->hdr.ffactor);
+ if (hash186p->hdr.lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)hash186p->hdr.lorder);
+ printf("db_pagesize=%lu\n", (u_long)hash186p->hdr.bsize);
+ } else {
+ printf("h_ffactor=%lu\n", (u_long)hash185p->hdr.ffactor);
+ if (hash185p->hdr.lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)hash185p->hdr.lorder);
+ printf("db_pagesize=%lu\n", (u_long)hash185p->hdr.bsize);
+ }
+ printf("HEADER=END\n");
+}
+
+/*
+ * db_btree --
+ * Dump out btree header information.
+ */
+void
+db_btree(dbp, pflag)
+ DB *dbp;
+ int pflag;
+{
+ BTREE *btp;
+
+ btp = dbp->internal;
+
+ printf("format=%s\n", pflag ? "print" : "bytevalue");
+ printf("type=btree\n");
+#ifdef NOT_AVAILABLE_IN_185
+ printf("bt_minkey=%lu\n", (u_long)XXX);
+ printf("bt_maxkey=%lu\n", (u_long)XXX);
+#endif
+ if (btp->bt_lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)btp->bt_lorder);
+ printf("db_pagesize=%lu\n", (u_long)btp->bt_psize);
+ if (!(btp->flags & B_NODUPS))
+ printf("duplicates=1\n");
+ printf("HEADER=END\n");
+}
+
+static char hex[] = "0123456789abcdef";
+
+/*
+ * dbt_dump --
+ * Write out a key or data item using byte values.
+ */
+void
+dbt_dump(dbtp)
+ DBT *dbtp;
+{
+ size_t len;
+ u_int8_t *p;
+
+ for (len = dbtp->size, p = dbtp->data; len--; ++p)
+ (void)printf("%c%c",
+ hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]);
+ printf("\n");
+}
+
+/*
+ * dbt_print --
+ * Write out a key or data item using printable characters.
+ */
+void
+dbt_print(dbtp)
+ DBT *dbtp;
+{
+ size_t len;
+ u_int8_t *p;
+
+ for (len = dbtp->size, p = dbtp->data; len--; ++p)
+ if (isprint((int)*p)) {
+ if (*p == '\\')
+ (void)printf("\\");
+ (void)printf("%c", *p);
+ } else
+ (void)printf("\\%c%c",
+ hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]);
+ printf("\n");
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: db_dump185 [-p] [-f file] db_file\n");
+ exit(1);
+}
diff --git a/bdb/db_load/db_load.c b/bdb/db_load/db_load.c
new file mode 100644
index 00000000000..33e2eb5e02b
--- /dev/null
+++ b/bdb/db_load/db_load.c
@@ -0,0 +1,998 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_load.c,v 11.33 2001/01/22 17:25:07 krinsky Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "clib_ext.h"
+
+void badend __P((void));
+void badnum __P((void));
+int configure __P((DB *, char **, char **, int *));
+int db_init __P((char *));
+int dbt_rdump __P((DBT *));
+int dbt_rprint __P((DBT *));
+int dbt_rrecno __P((DBT *, int));
+int digitize __P((int, int *));
+int load __P((char *, DBTYPE, char **, int, u_int32_t));
+int main __P((int, char *[]));
+int rheader __P((DB *, DBTYPE *, char **, int *, int *));
+void usage __P((void));
+void version_check __P((void));
+
+int endodata; /* Reached the end of a database. */
+int endofile; /* Reached the end of the input. */
+int existed; /* Tried to load existing key. */
+u_long lineno; /* Input file line number. */
+int version = 1; /* Input version. */
+
+DB_ENV *dbenv;
+const char
+ *progname = "db_load"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DBTYPE dbtype;
+ u_int32_t db_nooverwrite;
+ int ch, exitval, no_header, ret;
+ char **clist, **clp, *home;
+
+ version_check();
+
+ home = NULL;
+ db_nooverwrite = 0;
+ exitval = no_header = 0;
+ dbtype = DB_UNKNOWN;
+
+ /* Allocate enough room for configuration arguments. */
+ if ((clp = clist = (char **)calloc(argc + 1, sizeof(char *))) == NULL) {
+ fprintf(stderr, "%s: %s\n", progname, strerror(ENOMEM));
+ exit(1);
+ }
+
+ while ((ch = getopt(argc, argv, "c:f:h:nTt:V")) != EOF)
+ switch (ch) {
+ case 'c':
+ *clp++ = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ progname, optarg, strerror(errno));
+ exit(1);
+ }
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'n':
+ db_nooverwrite = DB_NOOVERWRITE;
+ break;
+ case 'T':
+ no_header = 1;
+ break;
+ case 't':
+ if (strcmp(optarg, "btree") == 0) {
+ dbtype = DB_BTREE;
+ break;
+ }
+ if (strcmp(optarg, "hash") == 0) {
+ dbtype = DB_HASH;
+ break;
+ }
+ if (strcmp(optarg, "recno") == 0) {
+ dbtype = DB_RECNO;
+ break;
+ }
+ if (strcmp(optarg, "queue") == 0) {
+ dbtype = DB_QUEUE;
+ break;
+ }
+ usage();
+ /* NOTREACHED */
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case '?':
+ default:
+ usage();
+ /* NOTREACHED */
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ usage();
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object initialized for error reporting, and
+ * then open it.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (db_init(home) != 0)
+ goto shutdown;
+
+ while (!endofile)
+ if (load(argv[0],
+ dbtype, clist, no_header, db_nooverwrite) != 0)
+ goto shutdown;
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ /* Return 0 on success, 1 if keys existed already, and 2 on failure. */
+ return (exitval == 0 ? (existed == 0 ? 0 : 1) : 2);
+}
+
+/*
+ * load --
+ * Load a database.
+ */
+int
+load(name, argtype, clist, no_header, db_nooverwrite)
+ char *name, **clist;
+ DBTYPE argtype;
+ int no_header;
+ u_int32_t db_nooverwrite;
+{
+ DB *dbp;
+ DBT key, rkey, data, *readp, *writep;
+ DBTYPE dbtype;
+ DB_TXN *ctxn, *txn;
+ db_recno_t recno, datarecno;
+ int checkprint, hexkeys, keys, ret, rval;
+ int keyflag, ascii_recno;
+ char *subdb;
+
+ endodata = 0;
+ subdb = NULL;
+ ctxn = txn = NULL;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&rkey, 0, sizeof(DBT));
+
+ /* Create the DB object. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (1);
+ }
+
+ dbtype = DB_UNKNOWN;
+ keys = -1;
+ hexkeys = -1;
+ keyflag = -1;
+ /* Read the header -- if there's no header, we expect flat text. */
+ if (no_header) {
+ checkprint = 1;
+ dbtype = argtype;
+ } else {
+ if (rheader(dbp, &dbtype, &subdb, &checkprint, &keys) != 0)
+ goto err;
+ if (endofile)
+ goto done;
+ }
+
+ /*
+ * Apply command-line configuration changes. (We apply command-line
+ * configuration changes to all databases that are loaded, e.g., all
+ * subdatabases.)
+ */
+ if (configure(dbp, clist, &subdb, &keyflag))
+ goto err;
+
+ if (keys != 1) {
+ if (keyflag == 1) {
+ dbp->err(dbp, EINVAL, "No keys specified in file");
+ goto err;
+ }
+ } else if (keyflag == 0) {
+ dbp->err(dbp, EINVAL, "Keys specified in file");
+ goto err;
+ } else
+ keyflag = 1;
+
+ if (dbtype == DB_BTREE || dbtype == DB_HASH) {
+ if (keyflag == 0)
+ dbp->err(dbp,
+ EINVAL, "Btree and Hash must specify keys");
+ else
+ keyflag = 1;
+ }
+
+ if (argtype != DB_UNKNOWN) {
+
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE)
+ if (keyflag != 1 && argtype != DB_RECNO
+ && argtype != DB_QUEUE) {
+ dbenv->errx(dbenv,
+ "improper database type conversion specified");
+ goto err;
+ }
+ dbtype = argtype;
+ }
+
+ if (dbtype == DB_UNKNOWN) {
+ dbenv->errx(dbenv, "no database type specified");
+ goto err;
+ }
+
+ if (keyflag == -1)
+ keyflag = 0;
+
+ /*
+ * Recno keys have only been printed in hexadecimal starting
+ * with db_dump format version 3 (DB 3.2).
+ *
+ * !!!
+ * Note that version is set in rheader(), which must be called before
+ * this assignment.
+ */
+ hexkeys = (version >= 3 && keyflag == 1 && checkprint == 0);
+
+ if (keyflag == 1 && (dbtype == DB_RECNO || dbtype == DB_QUEUE))
+ ascii_recno = 1;
+ else
+ ascii_recno = 0;
+
+ /* Open the DB file. */
+ if ((ret = dbp->open(dbp,
+ name, subdb, dbtype, DB_CREATE, __db_omode("rwrwrw"))) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", name);
+ goto err;
+ }
+
+ /* Initialize the key/data pair. */
+ readp = &key;
+ writep = &key;
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE) {
+ key.size = sizeof(recno);
+ if (keyflag) {
+ key.data = &datarecno;
+ if (checkprint) {
+ readp = &rkey;
+ goto key_data;
+ }
+ }
+ else
+ key.data = &recno;
+ } else
+key_data: if ((readp->data =
+ (void *)malloc(readp->ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+ if ((data.data = (void *)malloc(data.ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+
+ if (TXN_ON(dbenv) && (ret = txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+
+ /* Get each key/data pair and add them to the database. */
+ for (recno = 1; !__db_util_interrupted(); ++recno) {
+ if (!keyflag)
+ if (checkprint) {
+ if (dbt_rprint(&data))
+ goto err;
+ } else {
+ if (dbt_rdump(&data))
+ goto err;
+ }
+ else
+ if (checkprint) {
+ if (dbt_rprint(readp))
+ goto err;
+ if (!endodata && dbt_rprint(&data))
+ goto fmt;
+ } else {
+ if (ascii_recno) {
+ if (dbt_rrecno(readp, hexkeys))
+ goto err;
+ } else
+ if (dbt_rdump(readp))
+ goto err;
+ if (!endodata && dbt_rdump(&data)) {
+fmt: dbenv->errx(dbenv,
+ "odd number of key/data pairs");
+ goto err;
+ }
+ }
+ if (endodata)
+ break;
+ if (readp != writep) {
+ if (sscanf(readp->data, "%u", &datarecno) != 1)
+ dbenv->errx(dbenv,
+ "%s: non-integer key at line: %d",
+ name, !keyflag ? recno : recno * 2 - 1);
+ if (datarecno == 0)
+ dbenv->errx(dbenv, "%s: zero key at line: %d",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+ }
+retry: if (txn != NULL)
+ if ((ret = txn_begin(dbenv, txn, &ctxn, 0)) != 0)
+ goto err;
+ switch (ret =
+ dbp->put(dbp, txn, writep, &data, db_nooverwrite)) {
+ case 0:
+ if (ctxn != NULL) {
+ if ((ret =
+ txn_commit(ctxn, DB_TXN_NOSYNC)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ break;
+ case DB_KEYEXIST:
+ existed = 1;
+ dbenv->errx(dbenv,
+ "%s: line %d: key already exists, not loaded:",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+
+ (void)__db_prdbt(&key, checkprint, 0, stderr,
+ __db_verify_callback, 0, NULL);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* If we have a child txn, retry--else it's fatal. */
+ if (ctxn != NULL) {
+ if ((ret = txn_abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ goto retry;
+ }
+ /* FALLTHROUGH */
+ default:
+ dbenv->err(dbenv, ret, NULL);
+ if (ctxn != NULL) {
+ (void)txn_abort(ctxn);
+ ctxn = NULL;
+ }
+ goto err;
+ }
+ if (ctxn != NULL) {
+ if ((ret = txn_abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ }
+done: rval = 0;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL && (ret = txn_commit(txn, 0)) != 0) {
+ txn = NULL;
+ goto err;
+ }
+
+ if (0) {
+err: rval = 1;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL)
+ (void)txn_abort(txn);
+ }
+
+ /* Close the database. */
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->close");
+ rval = 1;
+ }
+
+ /* Free allocated memory. */
+ if (subdb != NULL)
+ free(subdb);
+ if (dbtype != DB_RECNO && dbtype != DB_QUEUE)
+ free(key.data);
+ if (rkey.data != NULL)
+ free(rkey.data);
+ free(data.data);
+
+ return (rval);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(home)
+ char *home;
+{
+ u_int32_t flags;
+ int ret;
+
+ /* We may be loading into a live environment. Try and join. */
+ flags = DB_USE_ENVIRON |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+ if (dbenv->open(dbenv, home, flags, 0) == 0)
+ return (0);
+
+ /*
+ * We're trying to load a database.
+ *
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ LF_CLR(DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN);
+ LF_SET(DB_CREATE | DB_PRIVATE);
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "DBENV->open");
+ return (1);
+}
+
+#define FLAG(name, value, keyword, flag) \
+ if (strcmp(name, keyword) == 0) { \
+ switch (*value) { \
+ case '1': \
+ if ((ret = dbp->set_flags(dbp, flag)) != 0) { \
+ dbp->err(dbp, ret, "%s: set_flags: %s", \
+ progname, name); \
+ return (1); \
+ } \
+ break; \
+ case '0': \
+ break; \
+ default: \
+ badnum(); \
+ return (1); \
+ } \
+ continue; \
+ }
+#define NUMBER(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if (__db_getlong(dbp, \
+ NULL, value, 1, LONG_MAX, &val) != 0) \
+ return (1); \
+ if ((ret = dbp->func(dbp, val)) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+#define STRING(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if ((ret = dbp->func(dbp, value[0])) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+
+/*
+ * configure --
+ * Handle command-line configuration options.
+ */
+int
+configure(dbp, clp, subdbp, keysp)
+ DB *dbp;
+ char **clp, **subdbp;
+ int *keysp;
+{
+ long val;
+ int ret, savech;
+ char *name, *value;
+
+ for (; (name = *clp) != NULL; *--value = savech, ++clp) {
+ if ((value = strchr(name, '=')) == NULL) {
+ dbp->errx(dbp,
+ "command-line configuration uses name=value format");
+ return (1);
+ }
+ savech = *value;
+ *value++ = '\0';
+
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if ((*subdbp = strdup(value)) == NULL) {
+ dbp->err(dbp, ENOMEM, NULL);
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ badnum();
+ return (1);
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown command-line configuration keyword");
+ return (1);
+ }
+ return (0);
+
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", progname, name, value);
+ return (1);
+}
+
+/*
+ * rheader --
+ * Read the header message.
+ */
+int
+rheader(dbp, dbtypep, subdbp, checkprintp, keysp)
+ DB *dbp;
+ DBTYPE *dbtypep;
+ char **subdbp;
+ int *checkprintp, *keysp;
+{
+ long val;
+ int first, ret;
+ char *name, *value, *p, buf[128];
+
+ *dbtypep = DB_UNKNOWN;
+ *checkprintp = 0;
+
+ for (first = 1;; first = 0) {
+ ++lineno;
+
+ /* If we don't see the expected information, it's an error. */
+ if (fgets(buf, sizeof(buf), stdin) == NULL) {
+ if (!first || ferror(stdin))
+ goto badfmt;
+ endofile = 1;
+ break;
+ }
+ if ((p = strchr(name = buf, '=')) == NULL)
+ goto badfmt;
+ *p++ = '\0';
+ if ((p = strchr(value = p, '\n')) == NULL)
+ goto badfmt;
+ *p = '\0';
+ if (name[0] == '\0' || value[0] == '\0')
+ goto badfmt;
+
+ if (strcmp(name, "HEADER") == 0)
+ break;
+ if (strcmp(name, "VERSION") == 0) {
+ /*
+ * Version 1 didn't have a "VERSION" header line. We
+ * only support versions 1, 2, and 3 of the dump format.
+ */
+ version = atoi(value);
+
+ if (version > 3) {
+ dbp->errx(dbp,
+ "line %lu: VERSION %d is unsupported",
+ lineno, version);
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "format") == 0) {
+ if (strcmp(value, "bytevalue") == 0) {
+ *checkprintp = 0;
+ continue;
+ }
+ if (strcmp(value, "print") == 0) {
+ *checkprintp = 1;
+ continue;
+ }
+ goto badfmt;
+ }
+ if (strcmp(name, "type") == 0) {
+ if (strcmp(value, "btree") == 0) {
+ *dbtypep = DB_BTREE;
+ continue;
+ }
+ if (strcmp(value, "hash") == 0) {
+ *dbtypep = DB_HASH;
+ continue;
+ }
+ if (strcmp(value, "recno") == 0) {
+ *dbtypep = DB_RECNO;
+ continue;
+ }
+ if (strcmp(value, "queue") == 0) {
+ *dbtypep = DB_QUEUE;
+ continue;
+ }
+ dbp->errx(dbp, "line %lu: unknown type", lineno);
+ return (1);
+ }
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if ((*subdbp = strdup(value)) == NULL) {
+ dbp->err(dbp, ENOMEM, NULL);
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ badnum();
+ return (1);
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown input-file header configuration keyword");
+ return (1);
+ }
+ return (0);
+
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", progname, name, value);
+ return (1);
+
+badfmt:
+ dbp->errx(dbp, "line %lu: unexpected format", lineno);
+ return (1);
+}
+
+/*
+ * dbt_rprint --
+ * Read a printable line into a DBT structure.
+ */
+int
+dbt_rprint(dbtp)
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, escape, first;
+ char buf[32];
+
+ ++lineno;
+
+ first = 1;
+ e = escape = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ endofile = endodata = 1;
+ return (0);
+ }
+ badend();
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (version > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ badend();
+ return (1);
+ }
+ endodata = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if (escape) {
+ if (c1 != '\\') {
+ if ((c2 = getchar()) == EOF) {
+ badend();
+ return (1);
+ }
+ c1 = digitize(c1, &e) << 4 | digitize(c2, &e);
+ if (e)
+ return (1);
+ }
+ escape = 0;
+ } else
+ if (c1 == '\\') {
+ escape = 1;
+ continue;
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = c1;
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rdump --
+ * Read a byte dump line into a DBT structure.
+ */
+int
+dbt_rdump(dbtp)
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, first;
+ char buf[32];
+
+ ++lineno;
+
+ first = 1;
+ e = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ endofile = endodata = 1;
+ return (0);
+ }
+ badend();
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (version > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ badend();
+ return (1);
+ }
+ endodata = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if ((c2 = getchar()) == EOF) {
+ badend();
+ return (1);
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = digitize(c1, &e) << 4 | digitize(c2, &e);
+ if (e)
+ return (1);
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rrecno --
+ * Read a record number dump line into a DBT structure.
+ */
+int
+dbt_rrecno(dbtp, ishex)
+ DBT *dbtp;
+ int ishex;
+{
+ char buf[32], *p, *q;
+
+ ++lineno;
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL) {
+ endofile = endodata = 1;
+ return (0);
+ }
+
+ if (strcmp(buf, "DATA=END\n") == 0) {
+ endodata = 1;
+ return (0);
+ }
+
+ if (buf[0] != ' ')
+ goto bad;
+
+ /*
+ * If we're expecting a hex key, do an in-place conversion
+ * of hex to straight ASCII before calling __db_getulong().
+ */
+ if (ishex) {
+ for (p = q = buf + 1; *q != '\0' && *q != '\n';) {
+ /*
+ * 0-9 in hex are 0x30-0x39, so this is easy.
+ * We should alternate between 3's and [0-9], and
+ * if the [0-9] are something unexpected,
+ * __db_getulong will fail, so we only need to catch
+ * end-of-string conditions.
+ */
+ if (*q++ != '3')
+ goto bad;
+ if (*q == '\n' || *q == '\0')
+ goto bad;
+ *p++ = *q++;
+ }
+ *p = '\0';
+ }
+
+ if (__db_getulong(NULL,
+ progname, buf + 1, 0, 0, (u_long *)dbtp->data)) {
+bad: badend();
+ return (1);
+ }
+
+ dbtp->size = sizeof(db_recno_t);
+ return (0);
+}
+
+/*
+ * digitize --
+ * Convert a character to an integer.
+ */
+int
+digitize(c, errorp)
+ int c, *errorp;
+{
+ switch (c) { /* Don't depend on ASCII ordering. */
+ case '0': return (0);
+ case '1': return (1);
+ case '2': return (2);
+ case '3': return (3);
+ case '4': return (4);
+ case '5': return (5);
+ case '6': return (6);
+ case '7': return (7);
+ case '8': return (8);
+ case '9': return (9);
+ case 'a': return (10);
+ case 'b': return (11);
+ case 'c': return (12);
+ case 'd': return (13);
+ case 'e': return (14);
+ case 'f': return (15);
+ }
+
+ dbenv->errx(dbenv, "unexpected hexadecimal value");
+ *errorp = 1;
+
+ return (0);
+}
+
+/*
+ * badnum --
+ * Display the bad number message.
+ */
+void
+badnum()
+{
+ dbenv->errx(dbenv,
+ "boolean name=value pairs require a value of 0 or 1");
+}
+
+/*
+ * badend --
+ * Display the bad end to input message.
+ */
+void
+badend()
+{
+ dbenv->errx(dbenv, "unexpected end of input data or key/data pair");
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+void
+usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_load [-nTV]",
+ "[-c name=value] [-f file] [-h home] [-t btree | hash | recno] db_file");
+ exit(1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_printlog/README b/bdb/db_printlog/README
new file mode 100644
index 00000000000..7d8da505e49
--- /dev/null
+++ b/bdb/db_printlog/README
@@ -0,0 +1,25 @@
+# $Id: README,v 10.5 1999/11/21 23:08:01 bostic Exp $
+
+Berkeley DB log dump utility. This utility dumps out a DB log in human
+readable form, a record at a time, to assist in recovery and transaction
+abort debugging.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+commit.awk Output transaction ID of committed transactions.
+
+count.awk Print out the number of log records for transactions
+ that we encountered.
+
+fileid.awk Take a comma-separated list of file numbers and spit out
+ all the log records that affect those file numbers.
+
+pgno.awk Take a comma-separated list of page numbers and spit
+ out all the log records that affect those page numbers.
+
+range.awk Print out a range of the log.
+
+status.awk Read through db_printlog output and list the transactions
+ encountered, and whether they committed or aborted.
+
+txn.awk Print out all the records for a comma-separated list of
+ transaction IDs.
diff --git a/bdb/db_printlog/commit.awk b/bdb/db_printlog/commit.awk
new file mode 100644
index 00000000000..66391d3fb63
--- /dev/null
+++ b/bdb/db_printlog/commit.awk
@@ -0,0 +1,7 @@
+# $Id: commit.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+#
+# Output tid of committed transactions.
+
+/txn_regop/ {
+ print $5
+}
diff --git a/bdb/db_printlog/count.awk b/bdb/db_printlog/count.awk
new file mode 100644
index 00000000000..1d5a291950f
--- /dev/null
+++ b/bdb/db_printlog/count.awk
@@ -0,0 +1,9 @@
+# $Id: count.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+#
+# Print out the number of log records for transactions that we
+# encountered.
+
+/^\[/{
+ if ($5 != 0)
+ print $5
+}
diff --git a/bdb/db_printlog/db_printlog.c b/bdb/db_printlog/db_printlog.c
new file mode 100644
index 00000000000..8b9fb74a6a9
--- /dev/null
+++ b/bdb/db_printlog/db_printlog.c
@@ -0,0 +1,200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_printlog.c,v 11.23 2001/01/18 18:36:58 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "db_am.h"
+#include "hash.h"
+#include "log.h"
+#include "qam.h"
+#include "txn.h"
+
+int main __P((int, char *[]));
+void usage __P((void));
+void version_check __P((void));
+
+DB_ENV *dbenv;
+const char
+ *progname = "db_printlog"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DBT data;
+ DB_LSN key;
+ int ch, e_close, exitval, nflag, ret;
+ char *home;
+
+ version_check();
+
+ e_close = exitval = 0;
+ nflag = 0;
+ home = NULL;
+ while ((ch = getopt(argc, argv, "h:NV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ if ((ret = db_env_set_panicstate(0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_set_panicstate: %s\n",
+ progname, db_strerror(ret));
+ return (1);
+ }
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc > 0)
+ usage();
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_mutexlocks");
+ goto shutdown;
+ }
+
+ /*
+ * An environment is required, but as all we're doing is reading log
+ * files, we create one if it doesn't already exist. If we create
+ * it, create it private so it automatically goes away when we're done.
+ */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Initialize print callbacks. */
+ if ((ret = __bam_init_print(dbenv)) != 0 ||
+ (ret = __crdel_init_print(dbenv)) != 0 ||
+ (ret = __db_init_print(dbenv)) != 0 ||
+ (ret = __qam_init_print(dbenv)) != 0 ||
+ (ret = __ham_init_print(dbenv)) != 0 ||
+ (ret = __log_init_print(dbenv)) != 0 ||
+ (ret = __txn_init_print(dbenv)) != 0) {
+ dbenv->err(dbenv, ret, "callback: initialization");
+ goto shutdown;
+ }
+
+ memset(&data, 0, sizeof(data));
+ while (!__db_util_interrupted()) {
+ if ((ret = log_get(dbenv, &key, &data, DB_NEXT)) != 0) {
+ if (ret == DB_NOTFOUND)
+ break;
+ dbenv->err(dbenv, ret, "log_get");
+ goto shutdown;
+ }
+
+ /*
+ * XXX
+ * We use DB_TXN_ABORT as our op because that's the only op
+ * that calls the underlying recovery function without any
+ * consideration as to the contents of the transaction list.
+ */
+ ret = __db_dispatch(dbenv, &data, &key, DB_TXN_ABORT, NULL);
+
+ /*
+ * XXX
+ * Just in case the underlying routines don't flush.
+ */
+ (void)fflush(stdout);
+
+ if (ret != 0) {
+ dbenv->err(dbenv, ret, "tx: dispatch");
+ goto shutdown;
+ }
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+void
+usage()
+{
+ fprintf(stderr, "usage: db_printlog [-NV] [-h home]\n");
+ exit (1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_printlog/dbname.awk b/bdb/db_printlog/dbname.awk
new file mode 100644
index 00000000000..d070335127c
--- /dev/null
+++ b/bdb/db_printlog/dbname.awk
@@ -0,0 +1,79 @@
+# $Id: dbname.awk,v 1.2 2000/08/03 15:06:39 ubell Exp $
+#
+# Take a comma-separated list of database names and spit out all the
+# log records that affect those databases.
+
+NR == 1 {
+ nfiles = 0
+ while ((ndx = index(DBNAME, ",")) != 0) {
+ filenames[nfiles] = substr(DBNAME, 1, ndx - 1) 0;
+ DBNAME = substr(DBNAME, ndx + 1, length(DBNAME) - ndx);
+ files[nfiles] = -1
+ nfiles++
+ }
+ filenames[nfiles] = DBNAME 0;
+ files[nfiles] = -1
+ myfile = -1;
+}
+
+/^\[.*log_register/ {
+ register = 1;
+}
+/opcode:/ {
+ if (register == 1) {
+ if ($2 == 1)
+ register = 3;
+ else
+ register = $2;
+ }
+}
+/name:/ {
+ if (register >= 2) {
+ for (i = 0; i <= nfiles; i++) {
+ if ($2 == filenames[i]) {
+ if (register == 2) {
+ printme = 0;
+ myfile = -1;
+ files[i] = -1;
+ } else {
+ myfile = i;
+ }
+ break;
+ }
+ }
+ }
+ register = 0;
+}
+/fileid:/{
+ if (myfile != -1) {
+ files[myfile] = $2;
+ printme = 1;
+ register = 0;
+ myfile = -1;
+ } else
+ for (i = 0; i <= nfiles; i++)
+ if ($2 == files[i]) {
+ printme = 1
+ break;
+ }
+}
+
+
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/bdb/db_printlog/fileid.awk b/bdb/db_printlog/fileid.awk
new file mode 100644
index 00000000000..020644039ab
--- /dev/null
+++ b/bdb/db_printlog/fileid.awk
@@ -0,0 +1,37 @@
+# $Id: fileid.awk,v 10.4 2000/07/17 22:07:17 ubell Exp $
+#
+# Take a comma-separated list of file numbers and spit out all the
+# log records that affect those file numbers.
+
+NR == 1 {
+ nfiles = 0
+ while ((ndx = index(FILEID, ",")) != 0) {
+ files[nfiles] = substr(FILEID, 1, ndx - 1);
+ FILEID = substr(FILEID, ndx + 1, length(FILEID) - ndx);
+ nfiles++
+ }
+ files[nfiles] = FILEID;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+/fileid/{
+ for (i = 0; i <= nfiles; i++)
+ if ($2 == files[i])
+ printme = 1
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/bdb/db_printlog/pgno.awk b/bdb/db_printlog/pgno.awk
new file mode 100644
index 00000000000..289fa853bc4
--- /dev/null
+++ b/bdb/db_printlog/pgno.awk
@@ -0,0 +1,47 @@
+# $Id: pgno.awk,v 10.3 2000/07/17 22:07:17 ubell Exp $
+#
+# Take a comma-separated list of page numbers and spit out all the
+# log records that affect those page numbers.
+
+NR == 1 {
+ npages = 0
+ while ((ndx = index(PGNO, ",")) != 0) {
+ pgno[npages] = substr(PGNO, 1, ndx - 1);
+ PGNO = substr(PGNO, ndx + 1, length(PGNO) - ndx);
+ npages++
+ }
+ pgno[npages] = PGNO;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+/pgno/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+/right/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+/left/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/bdb/db_printlog/range.awk b/bdb/db_printlog/range.awk
new file mode 100644
index 00000000000..7abb410b40f
--- /dev/null
+++ b/bdb/db_printlog/range.awk
@@ -0,0 +1,27 @@
+# $Id: range.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+#
+# Print out a range of the log
+
+/^\[/{
+ l = length($1) - 1;
+ i = index($1, "]");
+ file = substr($1, 2, i - 2);
+ file += 0;
+ start = i + 2;
+ offset = substr($1, start, l - start + 1);
+ i = index(offset, "]");
+ offset = substr($1, start, i - 1);
+ offset += 0;
+
+ if ((file == START_FILE && offset >= START_OFFSET || file > START_FILE)\
+ && (file < END_FILE || (file == END_FILE && offset < END_OFFSET)))
+ printme = 1
+ else if (file == END_FILE && offset > END_OFFSET || file > END_FILE)
+ exit
+ else
+ printme = 0
+}
+{
+ if (printme == 1)
+ print $0
+}
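The string slicing above only extracts the two numbers from the leading "[file][offset]" LSN field that db_printlog prints at the start of every record. For comparison, a small standalone C sketch does the same extraction with sscanf(3); the sample field "[1][28]" is made up.

	#include <stdio.h>

	int
	main()
	{
		unsigned long file, offset;

		/* db_printlog prefixes each record with an LSN of the form "[file][offset]". */
		if (sscanf("[1][28]", "[%lu][%lu]", &file, &offset) == 2)
			printf("file %lu, offset %lu\n", file, offset);	/* file 1, offset 28 */
		return (0);
	}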
diff --git a/bdb/db_printlog/rectype.awk b/bdb/db_printlog/rectype.awk
new file mode 100644
index 00000000000..7f7b2f5ee15
--- /dev/null
+++ b/bdb/db_printlog/rectype.awk
@@ -0,0 +1,27 @@
+# $Id: rectype.awk,v 11.3 2000/07/17 22:00:49 ubell Exp $
+#
+# Print out log records of selected record types.
+# The command line should set RECTYPE to a comma-separated list
+# of the rectypes (or partial strings of rectypes) sought.
+NR == 1 {
+ ntypes = 0
+ while ((ndx = index(RECTYPE, ",")) != 0) {
+ types[ntypes] = substr(RECTYPE, 1, ndx - 1);
+ RECTYPE = substr(RECTYPE, ndx + 1, length(RECTYPE) - ndx);
+ ntypes++
+ }
+ types[ntypes] = RECTYPE;
+}
+
+/^\[/{
+ printme = 0
+ for (i = 0; i <= ntypes; i++)
+ if (index($1, types[i]) != 0) {
+ printme = 1
+ break;
+ }
+}
+{
+ if (printme == 1)
+ print $0
+}
diff --git a/bdb/db_printlog/status.awk b/bdb/db_printlog/status.awk
new file mode 100644
index 00000000000..42e24b078b9
--- /dev/null
+++ b/bdb/db_printlog/status.awk
@@ -0,0 +1,26 @@
+# $Id: status.awk,v 10.2 1999/11/21 18:01:43 bostic Exp $
+#
+# Read through db_printlog output and list all the transactions encountered
+# and whether they committed or aborted.
+#
+# 1 = started
+# 2 = committed
+BEGIN {
+ cur_txn = 0
+}
+/^\[/{
+ if (status[$5] == 0) {
+ status[$5] = 1;
+ txns[cur_txn] = $5;
+ cur_txn++;
+ }
+}
+/txn_regop/ {
+ status[$5] = 2
+}
+END {
+ for (i = 0; i < cur_txn; i++) {
+ printf("%s\t%s\n",
+ txns[i], status[txns[i]] == 1 ? "ABORT" : "COMMIT");
+ }
+}
diff --git a/bdb/db_printlog/txn.awk b/bdb/db_printlog/txn.awk
new file mode 100644
index 00000000000..be8c44e1092
--- /dev/null
+++ b/bdb/db_printlog/txn.awk
@@ -0,0 +1,34 @@
+# $Id: txn.awk,v 10.3 2000/07/17 22:07:17 ubell Exp $
+#
+# Print out all the records for a comma-separated list of transaction ids.
+NR == 1 {
+ ntxns = 0
+ while ((ndx = index(TXN, ",")) != 0) {
+ txn[ntxns] = substr(TXN, 1, ndx - 1);
+ TXN = substr(TXN, ndx + 1, length(TXN) - ndx);
+ ntxns++
+ }
+ txn[ntxns] = TXN;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ for (i = 0; i <= ntxns; i++)
+ if (txn[i] == $5) {
+ rec = $0
+ printme = 1
+ }
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/bdb/db_recover/db_recover.c b/bdb/db_recover/db_recover.c
new file mode 100644
index 00000000000..59ab8bcef15
--- /dev/null
+++ b/bdb/db_recover/db_recover.c
@@ -0,0 +1,288 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_recover.c,v 11.17 2001/01/18 18:36:58 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+#include "txn.h"
+
+int main __P((int, char *[]));
+void read_timestamp __P((char *, time_t *));
+void usage __P((void));
+void version_check __P((void));
+
+DB_ENV *dbenv;
+const char
+ *progname = "db_recover"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_TXNREGION *region;
+ time_t now, timestamp;
+ u_int32_t flags;
+ int ch, exitval, fatal_recover, ret, verbose;
+ char *home;
+
+ version_check();
+
+ home = NULL;
+ timestamp = 0;
+ exitval = fatal_recover = verbose = 0;
+ while ((ch = getopt(argc, argv, "ch:t:Vv")) != EOF)
+ switch (ch) {
+ case 'c':
+ fatal_recover = 1;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 't':
+ read_timestamp(optarg, &timestamp);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ usage();
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ exit (1);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ if (timestamp &&
+ (ret = dbenv->set_tx_timestamp(dbenv, &timestamp)) != 0) {
+ dbenv->err(dbenv, ret, "DBENV->set_timestamp");
+ goto shutdown;
+ }
+
+ /*
+ * Initialize the environment -- we don't actually do anything
+ * else, that all that's needed to run recovery.
+ *
+ * Note that we specify a private environment, as we're about to
+ * create a region, and we don't want to leave it around. If
+ * we leave the region around, the application that should create
+ * it will simply join it instead, and will then be running with
+ * incorrectly sized (and probably terribly small) caches.
+ */
+ flags = 0;
+ LF_SET(DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON);
+ LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER);
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DBENV->open");
+ goto shutdown;
+ }
+
+ if (verbose) {
+ (void)time(&now);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ dbenv->errx(dbenv, "Recovery complete at %.24s", ctime(&now));
+ dbenv->errx(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction id", (u_long)region->last_txnid,
+ "Recovery checkpoint", (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the environment. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+#define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2;
+
+/*
+ * read_timestamp --
+ * Convert a time argument to Epoch seconds.
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+void
+read_timestamp(arg, timep)
+ char *arg;
+ time_t *timep;
+{
+ struct tm *t;
+ time_t now;
+ int yearset;
+ char *p;
+ /* Start with the current time. */
+ (void)time(&now);
+ if ((t = localtime(&now)) == NULL) {
+ fprintf(stderr,
+ "%s: localtime: %s\n", progname, strerror(errno));
+ exit (1);
+ }
+ /* [[CC]YY]MMDDhhmm[.SS] */
+ if ((p = strchr(arg, '.')) == NULL)
+ t->tm_sec = 0; /* Seconds defaults to 0. */
+ else {
+ if (strlen(p + 1) != 2)
+ goto terr;
+ *p++ = '\0';
+ t->tm_sec = ATOI2(p);
+ }
+
+ yearset = 0;
+ switch(strlen(arg)) {
+ case 12: /* CCYYMMDDhhmm */
+ t->tm_year = ATOI2(arg);
+ t->tm_year *= 100;
+ yearset = 1;
+ /* FALLTHROUGH */
+ case 10: /* YYMMDDhhmm */
+ if (yearset) {
+ yearset = ATOI2(arg);
+ t->tm_year += yearset;
+ } else {
+ yearset = ATOI2(arg);
+ if (yearset < 69)
+ t->tm_year = yearset + 2000;
+ else
+ t->tm_year = yearset + 1900;
+ }
+ t->tm_year -= 1900; /* Convert to UNIX time. */
+ /* FALLTHROUGH */
+ case 8: /* MMDDhhmm */
+ t->tm_mon = ATOI2(arg);
+ --t->tm_mon; /* Convert from 01-12 to 00-11 */
+ t->tm_mday = ATOI2(arg);
+ t->tm_hour = ATOI2(arg);
+ t->tm_min = ATOI2(arg);
+ break;
+ default:
+ goto terr;
+ }
+
+ t->tm_isdst = -1; /* Figure out DST. */
+
+ *timep = mktime(t);
+ if (*timep == -1) {
+terr: fprintf(stderr,
+ "%s: out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]",
+ progname);
+ exit (1);
+ }
+}
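As a worked example of the ATOI2 idiom above -- two ASCII digits consumed at a time, with the pointer advanced as a side effect -- this standalone sketch parses the MMDDhhmm case; the date is arbitrary.

	#include <stdio.h>

	/* Same two-digit trick as read_timestamp() uses. */
	#define	ATOI2(ar)	((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2;

	int
	main()
	{
		char buf[] = "01181830", *p = buf;	/* MMDDhhmm */
		int mon, day, hour, min;

		mon = ATOI2(p);		/* 1 (January) */
		day = ATOI2(p);		/* 18 */
		hour = ATOI2(p);	/* 18 */
		min = ATOI2(p);		/* 30 */
		printf("%02d/%02d %02d:%02d\n", mon, day, hour, min);	/* 01/18 18:30 */
		return (0);
	}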
+
+void
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: db_recover [-cVv] [-h home] [-t [[CC]YY]MMDDhhmm[.SS]]\n");
+ exit(1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_stat/db_stat.c b/bdb/db_stat/db_stat.c
new file mode 100644
index 00000000000..9d80caa4889
--- /dev/null
+++ b/bdb/db_stat/db_stat.c
@@ -0,0 +1,989 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_stat.c,v 11.42 2001/01/18 18:36:59 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "mp.h"
+
+#define PCT(f, t, pgsize) \
+ ((t) == 0 ? 0 : \
+ (((double)(((t) * (pgsize)) - (f)) / ((t) * (pgsize))) * 100))
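To make the fill-factor arithmetic concrete, a standalone example that reuses the same macro with made-up numbers: 100 pages of 4096 bytes with 40960 bytes free are 90% full.

	#include <stdio.h>

	#define	PCT(f, t, pgsize)						\
	    ((t) == 0 ? 0 :							\
	    (((double)(((t) * (pgsize)) - (f)) / ((t) * (pgsize))) * 100))

	int
	main()
	{
		/* 100 * 4096 = 409600 bytes total, 40960 free: prints "90% ff". */
		printf("%.0f%% ff\n", PCT(40960, 100, 4096));
		return (0);
	}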
+
+typedef enum { T_NOTSET, T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_TXN } test_t;
+
+int argcheck __P((char *, const char *));
+int btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *));
+int db_init __P((char *, test_t));
+void dl __P((const char *, u_long));
+void dl_bytes __P((const char *, u_long, u_long, u_long));
+int env_stats __P((DB_ENV *));
+int hash_stats __P((DB_ENV *, DB *));
+int lock_ok __P((char *));
+int lock_stats __P((DB_ENV *));
+int log_stats __P((DB_ENV *));
+int main __P((int, char *[]));
+int mpool_ok __P((char *));
+int mpool_stats __P((DB_ENV *));
+void prflags __P((u_int32_t, const FN *));
+int queue_stats __P((DB_ENV *, DB *));
+int txn_compare __P((const void *, const void *));
+int txn_stats __P((DB_ENV *));
+void usage __P((void));
+void version_check __P((void));
+
+DB_ENV *dbenv;
+char *internal;
+const char
+ *progname = "db_stat"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_BTREE_STAT *sp;
+ DB *alt_dbp, *dbp;
+ test_t ttype;
+ int ch, checked, d_close, e_close, exitval, nflag, ret;
+ char *db, *home, *subdb;
+
+ version_check();
+
+ dbp = NULL;
+ ttype = T_NOTSET;
+ nflag = 0;
+ d_close = e_close = exitval = 0;
+ db = home = subdb = NULL;
+ while ((ch = getopt(argc, argv, "C:cd:eh:lM:mNs:tV")) != EOF)
+ switch (ch) {
+ case 'C':
+ ttype = T_LOCK;
+ if (!argcheck(internal = optarg, "Acflmo"))
+ usage();
+ break;
+ case 'c':
+ ttype = T_LOCK;
+ break;
+ case 'd':
+ db = optarg;
+ ttype = T_DB;
+ break;
+ case 'e':
+ ttype = T_ENV;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ ttype = T_LOG;
+ break;
+ case 'M':
+ ttype = T_MPOOL;
+ if (!argcheck(internal = optarg, "Ahlm"))
+ usage();
+ break;
+ case 'm':
+ ttype = T_MPOOL;
+ break;
+ case 'N':
+ nflag = 1;
+ if ((ret = db_env_set_panicstate(0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_set_panicstate: %s\n",
+ progname, db_strerror(ret));
+ return (1);
+ }
+ break;
+ case 's':
+ subdb = optarg;
+ ttype = T_DB;
+ break;
+ case 't':
+ ttype = T_TXN;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ switch (ttype) {
+ case T_DB:
+ if (db == NULL)
+ usage();
+ break;
+ case T_NOTSET:
+ usage();
+ /* NOTREACHED */
+ default:
+ break;
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_mutexlocks");
+ goto shutdown;
+ }
+
+ /* Initialize the environment. */
+ if (db_init(home, ttype) != 0)
+ goto shutdown;
+
+ switch (ttype) {
+ case T_DB:
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+
+ if ((ret =
+ dbp->open(dbp, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "open: %s", db);
+ goto shutdown;
+ }
+
+ /*
+ * See if we can open this db read/write to update counts.
+ * If it's a master database holding subdatabases, we cannot; only
+ * a btree can be a master, so if this is a btree, check its metadata.
+ */
+ checked = 0;
+ if (subdb == NULL && dbp->type == DB_BTREE) {
+ if ((ret = dbp->stat(dbp, &sp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "dbp->stat");
+ return (1);
+ }
+ checked = 1;
+ }
+
+ if (subdb != NULL ||
+ dbp->type != DB_BTREE ||
+ (sp->bt_metaflags & BTM_SUBDB) == 0) {
+ if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ if ((ret = dbp->open(alt_dbp,
+ db, subdb, DB_UNKNOWN, 0, 0)) == 0) {
+ (void)dbp->close(dbp, 0);
+ dbp = alt_dbp;
+ }
+ /* Need to run again to update counts */
+ checked = 0;
+ }
+
+ d_close = 1;
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if (btree_stats(dbenv, dbp, checked == 1 ? sp : NULL))
+ goto shutdown;
+ break;
+ case DB_HASH:
+ if (hash_stats(dbenv, dbp))
+ goto shutdown;
+ break;
+ case DB_QUEUE:
+ if (queue_stats(dbenv, dbp))
+ goto shutdown;
+ break;
+ case DB_UNKNOWN:
+ abort(); /* Impossible. */
+ /* NOTREACHED */
+ }
+ break;
+ case T_ENV:
+ if (env_stats(dbenv))
+ exitval = 1;
+ break;
+ case T_LOCK:
+ if (lock_stats(dbenv))
+ exitval = 1;
+ break;
+ case T_LOG:
+ if (log_stats(dbenv))
+ exitval = 1;
+ break;
+ case T_MPOOL:
+ if (mpool_stats(dbenv))
+ exitval = 1;
+ break;
+ case T_TXN:
+ if (txn_stats(dbenv))
+ exitval = 1;
+ break;
+ case T_NOTSET:
+ abort(); /* Impossible. */
+ /* NOTREACHED */
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbp->err(dbp, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+/*
+ * env_stats --
+ * Display environment statistics.
+ */
+int
+env_stats(dbenvp)
+ DB_ENV *dbenvp;
+{
+ REGENV renv;
+ REGION *rp, regs[1024];
+ int n, ret;
+ const char *lable;
+
+ n = sizeof(regs) / sizeof(regs[0]);
+ if ((ret = __db_e_stat(dbenvp, &renv, regs, &n)) != 0) {
+ dbenvp->err(dbenvp, ret, "__db_e_stat");
+ return (1);
+ }
+
+ printf("%d.%d.%d\tEnvironment version.\n",
+ renv.majver, renv.minver, renv.patch);
+ printf("%lx\tMagic number.\n", (u_long)renv.magic);
+ printf("%d\tPanic value.\n", renv.panic);
+
+ /* Adjust the reference count for us... */
+ printf("%d\tReferences.\n", renv.refcnt - 1);
+
+ dl("Locks granted without waiting.\n",
+ (u_long)renv.mutex.mutex_set_nowait);
+ dl("Locks granted after waiting.\n",
+ (u_long)renv.mutex.mutex_set_wait);
+
+ while (n > 0) {
+ printf("%s\n", DB_LINE);
+ rp = &regs[--n];
+ switch (rp->type) {
+ case REGION_TYPE_ENV:
+ lable = "Environment";
+ break;
+ case REGION_TYPE_LOCK:
+ lable = "Lock";
+ break;
+ case REGION_TYPE_LOG:
+ lable = "Log";
+ break;
+ case REGION_TYPE_MPOOL:
+ lable = "Mpool";
+ break;
+ case REGION_TYPE_MUTEX:
+ lable = "Mutex";
+ break;
+ case REGION_TYPE_TXN:
+ lable = "Txn";
+ break;
+ case INVALID_REGION_TYPE:
+ default:
+ lable = "Invalid";
+ break;
+ }
+ printf("%s Region: %d.\n", lable, rp->id);
+ dl_bytes("Size", (u_long)0, (u_long)0, (u_long)rp->size);
+ printf("%ld\tSegment ID.\n", rp->segid);
+ dl("Locks granted without waiting.\n",
+ (u_long)rp->mutex.mutex_set_nowait);
+ dl("Locks granted after waiting.\n",
+ (u_long)rp->mutex.mutex_set_wait);
+ }
+
+ return (0);
+}
+
+/*
+ * btree_stats --
+ * Display btree/recno statistics.
+ */
+int
+btree_stats(dbenvp, dbp, msp)
+ DB_ENV *dbenvp;
+ DB *dbp;
+ DB_BTREE_STAT *msp;
+{
+ static const FN fn[] = {
+ { BTM_DUP, "duplicates" },
+ { BTM_FIXEDLEN, "fixed-length" },
+ { BTM_RECNO, "recno" },
+ { BTM_RECNUM, "record-numbers" },
+ { BTM_RENUMBER, "renumber" },
+ { BTM_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+ DB_BTREE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenvp, NULL);
+
+ if (msp != NULL)
+ sp = msp;
+ else if ((ret = dbp->stat(dbp, &sp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "dbp->stat");
+ return (1);
+ }
+
+ printf("%lx\tBtree magic number.\n", (u_long)sp->bt_magic);
+ printf("%lu\tBtree version number.\n", (u_long)sp->bt_version);
+ prflags(sp->bt_metaflags, fn);
+ if (dbp->type == DB_BTREE) {
+#ifdef NOT_IMPLEMENTED
+ dl("Maximum keys per-page.\n", (u_long)sp->bt_maxkey);
+#endif
+ dl("Minimum keys per-page.\n", (u_long)sp->bt_minkey);
+ }
+ if (dbp->type == DB_RECNO) {
+ dl("Fixed-length record size.\n", (u_long)sp->bt_re_len);
+ if (isprint(sp->bt_re_pad) && !isspace(sp->bt_re_pad))
+ printf("%c\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ }
+ dl("Underlying database page size.\n", (u_long)sp->bt_pagesize);
+ dl("Number of levels in the tree.\n", (u_long)sp->bt_levels);
+ dl(dbp->type == DB_BTREE ?
+ "Number of unique keys in the tree.\n" :
+ "Number of records in the tree.\n", (u_long)sp->bt_nkeys);
+ dl("Number of data items in the tree.\n", (u_long)sp->bt_ndata);
+
+ dl("Number of tree internal pages.\n", (u_long)sp->bt_int_pg);
+ dl("Number of bytes free in tree internal pages",
+ (u_long)sp->bt_int_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize));
+
+ dl("Number of tree leaf pages.\n", (u_long)sp->bt_leaf_pg);
+ dl("Number of bytes free in tree leaf pages",
+ (u_long)sp->bt_leaf_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize));
+
+ dl("Number of tree duplicate pages.\n", (u_long)sp->bt_dup_pg);
+ dl("Number of bytes free in tree duplicate pages",
+ (u_long)sp->bt_dup_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize));
+
+ dl("Number of tree overflow pages.\n", (u_long)sp->bt_over_pg);
+ dl("Number of bytes free in tree overflow pages",
+ (u_long)sp->bt_over_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize));
+
+ dl("Number of pages on the free list.\n", (u_long)sp->bt_free);
+
+ return (0);
+}
+
+/*
+ * hash_stats --
+ * Display hash statistics.
+ */
+int
+hash_stats(dbenvp, dbp)
+ DB_ENV *dbenvp;
+ DB *dbp;
+{
+ static const FN fn[] = {
+ { DB_HASH_DUP, "duplicates" },
+ { DB_HASH_SUBDB,"multiple-databases" },
+ { 0, NULL }
+ };
+ DB_HASH_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenvp, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "dbp->stat");
+ return (1);
+ }
+
+ printf("%lx\tHash magic number.\n", (u_long)sp->hash_magic);
+ printf("%lu\tHash version number.\n", (u_long)sp->hash_version);
+ prflags(sp->hash_metaflags, fn);
+ dl("Underlying database page size.\n", (u_long)sp->hash_pagesize);
+ dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys);
+ dl("Number of data items in the database.\n", (u_long)sp->hash_ndata);
+
+ dl("Number of hash buckets.\n", (u_long)sp->hash_buckets);
+ dl("Number of bytes free on bucket pages", (u_long)sp->hash_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize));
+
+ dl("Number of overflow pages.\n", (u_long)sp->hash_bigpages);
+ dl("Number of bytes free in overflow pages",
+ (u_long)sp->hash_big_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize));
+
+ dl("Number of bucket overflow pages.\n", (u_long)sp->hash_overflows);
+ dl("Number of bytes free in bucket overflow pages",
+ (u_long)sp->hash_ovfl_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize));
+
+ dl("Number of duplicate pages.\n", (u_long)sp->hash_dup);
+ dl("Number of bytes free in duplicate pages",
+ (u_long)sp->hash_dup_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize));
+
+ dl("Number of pages on the free list.\n", (u_long)sp->hash_free);
+
+ return (0);
+}
+
+/*
+ * queue_stats --
+ * Display queue statistics.
+ */
+int
+queue_stats(dbenvp, dbp)
+ DB_ENV *dbenvp;
+ DB *dbp;
+{
+ DB_QUEUE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenvp, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "dbp->stat");
+ return (1);
+ }
+
+ printf("%lx\tQueue magic number.\n", (u_long)sp->qs_magic);
+ printf("%lu\tQueue version number.\n", (u_long)sp->qs_version);
+ dl("Fixed-length record size.\n", (u_long)sp->qs_re_len);
+ if (isprint(sp->qs_re_pad) && !isspace(sp->qs_re_pad))
+ printf("%c\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ dl("Underlying database page size.\n", (u_long)sp->qs_pagesize);
+ dl("Number of records in the database.\n", (u_long)sp->qs_nkeys);
+ dl("Number of database pages.\n", (u_long)sp->qs_pages);
+ dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize));
+ printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno);
+ printf(
+ "%lu\tLast allocated record number.\n", (u_long)sp->qs_cur_recno);
+
+ return (0);
+}
+
+/*
+ * lock_stats --
+ * Display lock statistics.
+ */
+int
+lock_stats(dbenvp)
+ DB_ENV *dbenvp;
+{
+ DB_LOCK_STAT *sp;
+ int ret;
+
+ if (internal != NULL) {
+ __lock_dump_region(dbenvp, internal, stdout);
+ return (0);
+ }
+
+ if ((ret = lock_stat(dbenvp, &sp, NULL)) != 0) {
+ dbenvp->err(dbenvp, ret, NULL);
+ return (1);
+ }
+
+ dl("Last allocated locker ID.\n", (u_long)sp->st_lastid);
+ dl("Number of lock modes.\n", (u_long)sp->st_nmodes);
+ dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks);
+ dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers);
+ dl("Maximum number of objects possible.\n", (u_long)sp->st_maxobjects);
+ dl("Current locks.\n", (u_long)sp->st_nlocks);
+ dl("Maximum number of locks so far.\n", (u_long)sp->st_maxnlocks);
+ dl("Current number of lockers.\n", (u_long)sp->st_nlockers);
+ dl("Maximum number of lockers so far.\n", (u_long)sp->st_maxnlockers);
+ dl("Current number of lock objects.\n", (u_long)sp->st_nobjects);
+ dl("Maximum number of lock objects so far.\n",
+ (u_long)sp->st_maxnobjects);
+ dl("Number of lock requests.\n", (u_long)sp->st_nrequests);
+ dl("Number of lock releases.\n", (u_long)sp->st_nreleases);
+ dl("Number of lock requests that would have waited.\n",
+ (u_long)sp->st_nnowaits);
+ dl("Number of lock conflicts.\n", (u_long)sp->st_nconflicts);
+ dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks);
+ dl_bytes("Lock region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+ dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+
+ return (0);
+}
+
+/*
+ * log_stats --
+ * Display log statistics.
+ */
+int
+log_stats(dbenvp)
+ DB_ENV *dbenvp;
+{
+ DB_LOG_STAT *sp;
+ int ret;
+
+ if ((ret = log_stat(dbenvp, &sp, NULL)) != 0) {
+ dbenvp->err(dbenvp, ret, NULL);
+ return (1);
+ }
+
+ printf("%lx\tLog magic number.\n", (u_long)sp->st_magic);
+ printf("%lu\tLog version number.\n", (u_long)sp->st_version);
+ dl_bytes("Log region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ dl_bytes("Log record cache size",
+ (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize);
+ printf("%#o\tLog file mode.\n", sp->st_mode);
+ if (sp->st_lg_max % MEGABYTE == 0)
+ printf("%luMb\tLog file size.\n",
+ (u_long)sp->st_lg_max / MEGABYTE);
+ else if (sp->st_lg_max % 1024 == 0)
+ printf("%luKb\tLog file size.\n", (u_long)sp->st_lg_max / 1024);
+ else
+ printf("%lu\tLog file size.\n", (u_long)sp->st_lg_max);
+ dl_bytes("Log bytes written",
+ (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes);
+ dl_bytes("Log bytes written since last checkpoint",
+ (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes);
+ dl("Total log file writes.\n", (u_long)sp->st_wcount);
+ dl("Total log file writes due to overflow.\n",
+ (u_long)sp->st_wcount_fill);
+ dl("Total log file flushes.\n", (u_long)sp->st_scount);
+ printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file);
+ printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+ dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+
+ return (0);
+}
+
+/*
+ * mpool_stats --
+ * Display mpool statistics.
+ */
+int
+mpool_stats(dbenvp)
+ DB_ENV *dbenvp;
+{
+ DB_MPOOL_FSTAT **fsp;
+ DB_MPOOL_STAT *gsp;
+ int ret;
+
+ if (internal != NULL) {
+ __memp_dump_region(dbenvp, internal, stdout);
+ return (1);
+ }
+
+ if ((ret = memp_stat(dbenvp, &gsp, &fsp, NULL)) != 0) {
+ dbenvp->err(dbenvp, ret, NULL);
+ return (1);
+ }
+
+ dl_bytes("Total cache size",
+ (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes);
+ dl("Number of caches.\n", (u_long)gsp->st_ncache);
+ dl("Pool individual cache size.\n", (u_long)gsp->st_regsize);
+ dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit);
+ if (gsp->st_cache_hit + gsp->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)gsp->st_cache_hit /
+ (gsp->st_cache_hit + gsp->st_cache_miss)) * 100);
+ printf(".\n");
+ dl("Requested pages mapped into the process' address space.\n",
+ (u_long)gsp->st_map);
+ dl("Requested pages not found in the cache.\n",
+ (u_long)gsp->st_cache_miss);
+ dl("Pages created in the cache.\n", (u_long)gsp->st_page_create);
+ dl("Pages read into the cache.\n", (u_long)gsp->st_page_in);
+ dl("Pages written from the cache to the backing file.\n",
+ (u_long)gsp->st_page_out);
+ dl("Clean pages forced from the cache.\n",
+ (u_long)gsp->st_ro_evict);
+ dl("Dirty pages forced from the cache.\n",
+ (u_long)gsp->st_rw_evict);
+ dl("Dirty buffers written by trickle-sync thread.\n",
+ (u_long)gsp->st_page_trickle);
+ dl("Current clean buffer count.\n",
+ (u_long)gsp->st_page_clean);
+ dl("Current dirty buffer count.\n",
+ (u_long)gsp->st_page_dirty);
+ dl("Number of hash buckets used for page location.\n",
+ (u_long)gsp->st_hash_buckets);
+ dl("Total number of times hash chains searched for a page.\n",
+ (u_long)gsp->st_hash_searches);
+ dl("The longest hash chain searched for a page.\n",
+ (u_long)gsp->st_hash_longest);
+ dl("Total number of hash buckets examined for page location.\n",
+ (u_long)gsp->st_hash_examined);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)gsp->st_region_nowait);
+ dl("The number of region locks granted after waiting.\n",
+ (u_long)gsp->st_region_wait);
+
+ for (; fsp != NULL && *fsp != NULL; ++fsp) {
+ printf("%s\n", DB_LINE);
+ printf("Pool File: %s\n", (*fsp)->file_name);
+ dl("Page size.\n", (u_long)(*fsp)->st_pagesize);
+ dl("Requested pages found in the cache",
+ (u_long)(*fsp)->st_cache_hit);
+ if ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)(*fsp)->st_cache_hit /
+ ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss)) *
+ 100);
+ printf(".\n");
+ dl("Requested pages mapped into the process' address space.\n",
+ (u_long)(*fsp)->st_map);
+ dl("Requested pages not found in the cache.\n",
+ (u_long)(*fsp)->st_cache_miss);
+ dl("Pages created in the cache.\n",
+ (u_long)(*fsp)->st_page_create);
+ dl("Pages read into the cache.\n",
+ (u_long)(*fsp)->st_page_in);
+ dl("Pages written from the cache to the backing file.\n",
+ (u_long)(*fsp)->st_page_out);
+ }
+
+ return (0);
+}
+
+/*
+ * txn_stats --
+ * Display transaction statistics.
+ */
+int
+txn_stats(dbenvp)
+ DB_ENV *dbenvp;
+{
+ DB_TXN_STAT *sp;
+ u_int32_t i;
+ int ret;
+ const char *p;
+
+ if ((ret = txn_stat(dbenvp, &sp, NULL)) != 0) {
+ dbenvp->err(dbenvp, ret, NULL);
+ return (1);
+ }
+
+ p = sp->st_last_ckp.file == 0 ?
+ "No checkpoint LSN." : "File/offset for last checkpoint LSN.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p);
+ p = sp->st_pending_ckp.file == 0 ?
+ "No pending checkpoint LSN." :
+ "File/offset for last pending checkpoint LSN.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_pending_ckp.file,
+ (u_long)sp->st_pending_ckp.offset, p);
+ if (sp->st_time_ckp == 0)
+ printf("0\tNo checkpoint timestamp.\n");
+ else
+ printf("%.24s\tCheckpoint timestamp.\n",
+ ctime(&sp->st_time_ckp));
+ printf("%lx\tLast transaction ID allocated.\n",
+ (u_long)sp->st_last_txnid);
+ dl("Maximum number of active transactions possible.\n",
+ (u_long)sp->st_maxtxns);
+ dl("Active transactions.\n", (u_long)sp->st_nactive);
+ dl("Maximum active transactions.\n", (u_long)sp->st_maxnactive);
+ dl("Number of transactions begun.\n", (u_long)sp->st_nbegins);
+ dl("Number of transactions aborted.\n", (u_long)sp->st_naborts);
+ dl("Number of transactions committed.\n", (u_long)sp->st_ncommits);
+ dl_bytes("Transaction region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+ dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ qsort(sp->st_txnarray,
+ sp->st_nactive, sizeof(sp->st_txnarray[0]), txn_compare);
+ for (i = 0; i < sp->st_nactive; ++i)
+ printf("\tid: %lx; initial LSN file/offset %lu/%lu\n",
+ (u_long)sp->st_txnarray[i].txnid,
+ (u_long)sp->st_txnarray[i].lsn.file,
+ (u_long)sp->st_txnarray[i].lsn.offset);
+
+ return (0);
+}
+
+int
+txn_compare(a1, b1)
+ const void *a1, *b1;
+{
+ const DB_TXN_ACTIVE *a, *b;
+
+ a = a1;
+ b = b1;
+
+ if (a->txnid > b->txnid)
+ return (1);
+ if (a->txnid < b->txnid)
+ return (-1);
+ return (0);
+}
+
+/*
+ * dl --
+ * Display a big value.
+ */
+void
+dl(msg, value)
+ const char *msg;
+ u_long value;
+{
+ /*
+ * Two formats: if the value is less than 10 million, display it
+ * as is; otherwise, display it as ###M.
+ */
+ if (value < 10000000)
+ printf("%lu\t%s", value, msg);
+ else
+ printf("%luM\t%s", value / 1000000, msg);
+}
+
+/*
+ * dl_bytes --
+ * Display a big number of bytes.
+ */
+void
+dl_bytes(msg, gbytes, mbytes, bytes)
+ const char *msg;
+ u_long gbytes, mbytes, bytes;
+{
+ const char *sep;
+ u_long sbytes;
+ int showbytes;
+
+ sbytes = bytes;
+ while (bytes > MEGABYTE) {
+ ++mbytes;
+ bytes -= MEGABYTE;
+ }
+ while (mbytes > GIGABYTE / MEGABYTE) {
+ ++gbytes;
+ --mbytes;
+ }
+
+ sep = "";
+ showbytes = 0;
+ if (gbytes > 0) {
+ printf("%luGB", gbytes);
+ sep = " ";
+ showbytes = 1;
+ }
+ if (mbytes > 0) {
+ printf("%s%luMB", sep, mbytes);
+ sep = " ";
+ showbytes = 1;
+ }
+ if (bytes > 1024) {
+ printf("%s%luKB", sep, bytes / 1024);
+ bytes %= 1024;
+ sep = " ";
+ showbytes = 1;
+ }
+ if (bytes > 0)
+ printf("%s%luB", sep, bytes);
+ printf("\t%s", msg);
+ if (showbytes)
+ printf(" (%lu bytes)", sbytes);
+ printf(".\n");
+}
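A quick worked example of the breakdown, assuming the usual 2^20-byte MEGABYTE definition from db_int.h and that the call is compiled together with dl_bytes() above; the byte count is illustrative.

	/* Prints: 1MB 181KB 647B	Lock region size (1234567 bytes). */
	dl_bytes("Lock region size", (u_long)0, (u_long)0, (u_long)1234567);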
+
+/*
+ * prflags --
+ * Print out flag values.
+ */
+void
+prflags(flags, fnp)
+ u_int32_t flags;
+ const FN *fnp;
+{
+ const char *sep;
+
+ sep = "\t";
+ printf("Flags:");
+ for (; fnp->mask != 0; ++fnp)
+ if (fnp->mask & flags) {
+ printf("%s%s", sep, fnp->name);
+ sep = ", ";
+ }
+ printf("\n");
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(home, ttype)
+ char *home;
+ test_t ttype;
+{
+ int ret;
+
+ /*
+ * If our environment open fails, and we're trying to look at a
+ * shared region, it's a hard failure.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+ if (ttype != T_DB) {
+ dbenv->err(dbenv, ret, "DBENV->open%s%s",
+ home == NULL ? "" : ": ", home == NULL ? "" : home);
+ return (1);
+ }
+
+ /*
+ * We're trying to look at a database.
+ *
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ *
+ * No environment exists. Create one, but make it private so that
+ * no files are actually created.
+ *
+ * Note that we will probably just drop core if the environment
+ * we joined above does not include a memory pool. This is probably
+ * acceptable; trying to use an existing shared environment that
+ * does not contain a memory pool to look at a database can
+ * be safely construed as operator error, I think.
+ */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
+/*
+ * argcheck --
+ * Return if argument flags are okay.
+ */
+int
+argcheck(arg, ok_args)
+ char *arg;
+ const char *ok_args;
+{
+ for (; *arg != '\0'; ++arg)
+ if (strchr(ok_args, *arg) == NULL)
+ return (0);
+ return (1);
+}
+
+void
+usage()
+{
+ fprintf(stderr, "usage: db_stat %s\n",
+ "[-celmNtV] [-C Acflmo] [-d file [-s file]] [-h home] [-M Ahlm]");
+ exit (1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_upgrade/db_upgrade.c b/bdb/db_upgrade/db_upgrade.c
new file mode 100644
index 00000000000..dc29b6c7e0c
--- /dev/null
+++ b/bdb/db_upgrade/db_upgrade.c
@@ -0,0 +1,173 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_upgrade.c,v 1.13 2001/01/18 18:36:59 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+void usage __P((void));
+void version_check __P((void));
+
+const char
+ *progname = "db_upgrade"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, nflag, ret, t_ret;
+ char *home;
+
+ version_check();
+
+ dbenv = NULL;
+ flags = nflag = 0;
+ e_close = exitval = 0;
+ home = NULL;
+ while ((ch = getopt(argc, argv, "h:NsV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ if ((ret = db_env_set_panicstate(0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_set_panicstate: %s\n",
+ progname, db_strerror(ret));
+ exit (1);
+ }
+ break;
+ case 's':
+ LF_SET(DB_DUPSORT);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ usage();
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_mutexlocks");
+ goto shutdown;
+ }
+
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->upgrade(dbp, argv[0], flags)) != 0)
+ dbp->err(dbp, ret, "DB->upgrade: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+ dbp->err(dbp, t_ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+void
+usage()
+{
+ fprintf(stderr, "usage: db_upgrade [-NsV] [-h home] db_file ...\n");
+ exit (1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/db_verify/db_verify.c b/bdb/db_verify/db_verify.c
new file mode 100644
index 00000000000..3bbf14caac6
--- /dev/null
+++ b/bdb/db_verify/db_verify.c
@@ -0,0 +1,181 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_verify.c,v 1.15 2001/01/18 18:36:59 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+void usage __P((void));
+void version_check __P((void));
+
+const char
+ *progname = "db_verify"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ DB_ENV *dbenv;
+ int ch, e_close, exitval, nflag, quiet, ret, t_ret;
+ char *home;
+
+ version_check();
+
+ dbenv = NULL;
+ e_close = exitval = nflag = quiet = 0;
+ home = NULL;
+ while ((ch = getopt(argc, argv, "h:NqV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ if ((ret = db_env_set_panicstate(0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_set_panicstate: %s\n",
+ progname, db_strerror(ret));
+ exit (1);
+ }
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ usage();
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ /*
+ * XXX
+ * We'd prefer to have error output configured while calling
+ * db_env_create, but there's no way to turn it off once it's
+ * turned on.
+ */
+ if (!quiet) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ }
+
+ if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_mutexlocks");
+ goto shutdown;
+ }
+
+ /*
+ * Attach to an mpool if it exists, but if that fails, attach
+ * to a private region.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ if (!quiet) {
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ }
+ if ((ret = dbp->verify(dbp, argv[0], NULL, NULL, 0)) != 0)
+ dbp->err(dbp, ret, "DB->verify: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+ dbp->err(dbp, t_ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval);
+}
+
+void
+usage()
+{
+ fprintf(stderr, "usage: db_verify [-NqV] [-h home] db_file ...\n");
+ exit (1);
+}
+
+void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
diff --git a/bdb/dbm/dbm.c b/bdb/dbm/dbm.c
new file mode 100644
index 00000000000..e5f423572c5
--- /dev/null
+++ b/bdb/dbm/dbm.c
@@ -0,0 +1,489 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: dbm.c,v 11.7 2000/11/30 00:58:35 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+#include "db_int.h"
+
+/*
+ *
+ * This package provides dbm and ndbm compatible interfaces to DB.
+ *
+ * The DBM routines, which call the NDBM routines.
+ */
+static DBM *__cur_db;
+
+static void __db_no_open __P((void));
+
+int
+__db_dbm_init(file)
+ char *file;
+{
+ if (__cur_db != NULL)
+ (void)dbm_close(__cur_db);
+ if ((__cur_db =
+ dbm_open(file, O_CREAT | O_RDWR, __db_omode("rw----"))) != NULL)
+ return (0);
+ if ((__cur_db = dbm_open(file, O_RDONLY, 0)) != NULL)
+ return (0);
+ return (-1);
+}
+
+int
+__db_dbm_close()
+{
+ if (__cur_db != NULL) {
+ dbm_close(__cur_db);
+ __cur_db = NULL;
+ }
+ return (0);
+}
+
+datum
+__db_dbm_fetch(key)
+ datum key;
+{
+ datum item;
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = 0;
+ return (item);
+ }
+ return (dbm_fetch(__cur_db, key));
+}
+
+datum
+__db_dbm_firstkey()
+{
+ datum item;
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = 0;
+ return (item);
+ }
+ return (dbm_firstkey(__cur_db));
+}
+
+datum
+__db_dbm_nextkey(key)
+ datum key;
+{
+ datum item;
+
+ COMPQUIET(key.dsize, 0);
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = 0;
+ return (item);
+ }
+ return (dbm_nextkey(__cur_db));
+}
+
+int
+__db_dbm_delete(key)
+ datum key;
+{
+ if (__cur_db == NULL) {
+ __db_no_open();
+ return (-1);
+ }
+ return (dbm_delete(__cur_db, key));
+}
+
+int
+__db_dbm_store(key, dat)
+ datum key, dat;
+{
+ if (__cur_db == NULL) {
+ __db_no_open();
+ return (-1);
+ }
+ return (dbm_store(__cur_db, key, dat, DBM_REPLACE));
+}
+
+static void
+__db_no_open()
+{
+ (void)fprintf(stderr, "dbm: no open database.\n");
+}
+
+/*
+ * This package provides dbm and ndbm compatible interfaces to DB.
+ *
+ * The NDBM routines, which call the DB routines.
+ */
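+/*
+ * A minimal usage sketch of the ndbm-compatible interface (declarations and
+ * error handling omitted; the standard dbm_* names are assumed to map onto
+ * the __db_ndbm_* functions below through db.h):
+ *
+ *	DBM *db;
+ *
+ *	db = dbm_open("file", O_CREAT | O_RDWR, 0644);
+ *	(void)dbm_store(db, key, dat, DBM_REPLACE);
+ *	dat = dbm_fetch(db, key);
+ *	dbm_close(db);
+ */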
+/*
+ * Returns:
+ * *DBM on success
+ * NULL on failure
+ */
+DBM *
+__db_ndbm_open(file, oflags, mode)
+ const char *file;
+ int oflags, mode;
+{
+ DB *dbp;
+ DBC *dbc;
+ int ret;
+ char path[MAXPATHLEN];
+
+ /*
+ * !!!
+ * Don't use sprintf(3)/snprintf(3) -- the former is dangerous and the
+ * latter isn't standard, and we're manipulating strings handed to us
+ * by the application.
+ */
+ if (strlen(file) + strlen(DBM_SUFFIX) + 1 > sizeof(path)) {
+ __os_set_errno(ENAMETOOLONG);
+ return (NULL);
+ }
+ (void)strcpy(path, file);
+ (void)strcat(path, DBM_SUFFIX);
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ /*
+ * !!!
+ * The historic ndbm library corrected for opening O_WRONLY.
+ */
+ if (oflags & O_WRONLY) {
+ oflags &= ~O_WRONLY;
+ oflags |= O_RDWR;
+ }
+
+ if ((ret = dbp->set_pagesize(dbp, 4096)) != 0 ||
+ (ret = dbp->set_h_ffactor(dbp, 40)) != 0 ||
+ (ret = dbp->set_h_nelem(dbp, 1)) != 0 ||
+ (ret = dbp->open(dbp,
+ path, NULL, DB_HASH, __db_oflags(oflags), mode)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0) {
+ (void)dbp->close(dbp, 0);
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ return ((DBM *)dbc);
+}
+
+/*
+ * Returns:
+ * Nothing.
+ */
+void
+__db_ndbm_close(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ (void)dbc->dbp->close(dbc->dbp, 0);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_fetch(dbm, key)
+ DBM *dbm;
+ datum key;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum data;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+ _key.size = key.dsize;
+ _key.data = key.dptr;
+
+ /*
+ * Note that we can't simply use the dbc we already have to do a c_get/SET,
+ * because that cursor is the one used for sequential iteration and
+ * it has to remain stable in the face of intervening gets and puts.
+ */
+ if ((ret = dbc->dbp->get(dbc->dbp, NULL, &_key, &_data, 0)) == 0) {
+ data.dptr = _data.data;
+ data.dsize = _data.size;
+ } else {
+ data.dptr = NULL;
+ data.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_DBM_ERROR);
+ }
+ }
+ return (data);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_firstkey(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &_key, &_data, DB_FIRST)) == 0) {
+ key.dptr = _key.data;
+ key.dsize = _key.size;
+ } else {
+ key.dptr = NULL;
+ key.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_DBM_ERROR);
+ }
+ }
+ return (key);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_nextkey(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &_key, &_data, DB_NEXT)) == 0) {
+ key.dptr = _key.data;
+ key.dsize = _key.size;
+ } else {
+ key.dptr = NULL;
+ key.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_DBM_ERROR);
+ }
+ }
+ return (key);
+}
+
+/*
+ * Returns:
+ * 0 on success
+ * <0 failure
+ */
+int
+__db_ndbm_delete(dbm, key)
+ DBM *dbm;
+ datum key;
+{
+ DBC *dbc;
+ DBT _key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ _key.data = key.dptr;
+ _key.size = key.dsize;
+
+ if ((ret = dbc->dbp->del(dbc->dbp, NULL, &_key, 0)) == 0)
+ return (0);
+
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_DBM_ERROR);
+ }
+ return (-1);
+}
+
+/*
+ * Returns:
+ * 0 on success
+ * <0 failure
+ * 1 if DBM_INSERT and entry exists
+ */
+int
+__db_ndbm_store(dbm, key, data, flags)
+ DBM *dbm;
+ datum key, data;
+ int flags;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ _key.data = key.dptr;
+ _key.size = key.dsize;
+
+ memset(&_data, 0, sizeof(DBT));
+ _data.data = data.dptr;
+ _data.size = data.dsize;
+
+ if ((ret = dbc->dbp->put(dbc->dbp, NULL,
+ &_key, &_data, flags == DBM_INSERT ? DB_NOOVERWRITE : 0)) == 0)
+ return (0);
+
+ if (ret == DB_KEYEXIST)
+ return (1);
+
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_DBM_ERROR);
+ return (-1);
+}
+
+int
+__db_ndbm_error(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ return (F_ISSET(dbc->dbp, DB_DBM_ERROR));
+}
+
+int
+__db_ndbm_clearerr(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ F_CLR(dbc->dbp, DB_DBM_ERROR);
+ return (0);
+}
+
+/*
+ * Returns:
+ * 1 if read-only
+ * 0 if not read-only
+ */
+int
+__db_ndbm_rdonly(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ return (F_ISSET(dbc->dbp, DB_AM_RDONLY) ? 1 : 0);
+}
+
+/*
+ * XXX
+ * We only have a single file descriptor that we can return, not two. Return
+ * the same one for both files. Hopefully, the user is using it for locking
+ * and picked one to use at random.
+ */
+int
+__db_ndbm_dirfno(dbm)
+ DBM *dbm;
+{
+ return (dbm_pagfno(dbm));
+}
+
+int
+__db_ndbm_pagfno(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ int fd;
+
+ dbc = (DBC *)dbm;
+
+ (void)dbc->dbp->fd(dbc->dbp, &fd);
+ return (fd);
+}
diff --git a/bdb/dist/Makefile.in b/bdb/dist/Makefile.in
new file mode 100644
index 00000000000..73f82cd5648
--- /dev/null
+++ b/bdb/dist/Makefile.in
@@ -0,0 +1,999 @@
+# $Id: Makefile.in,v 11.66 2000/11/27 16:27:51 bostic Exp $
+
+srcdir= @srcdir@/..
+builddir=.
+
+##################################################
+# C, C++
+##################################################
+CPPFLAGS= -I$(builddir) -I$(srcdir)/include @CPPFLAGS@
+CFLAGS= -c $(CPPFLAGS) @CFLAGS@
+CXXFLAGS= -c $(CPPFLAGS) @CXXFLAGS@
+
+CC= @MAKEFILE_CC@
+CCLINK= @MAKEFILE_CCLINK@
+CXX= @MAKEFILE_CXX@
+
+INSTALLER= @INSTALLER@
+
+LDFLAGS= @LDFLAGS@
+LIBDB_ARGS= @LIBDB_ARGS@
+LIBJSO_LIBS= @LIBJSO_LIBS@
+LIBS= @LIBS@
+LIBSO_LIBS= @LIBSO_LIBS@
+LIBTOOL= @LIBTOOL@
+LIBTSO_LIBS= @LIBTSO_LIBS@
+LIBXSO_LIBS= @LIBXSO_LIBS@
+
+POSTLINK= @POSTLINK@
+SOLINK= @SOLINK@
+SOFLAGS= @SOFLAGS@
+SOMAJOR= @DB_VERSION_MAJOR@
+SOVERSION= @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@
+
+libdb= libdb.a
+
+libso_base= libdb
+libso_linkname= $(libso_base)-$(SOVERSION).la
+libso= $(libso_base)-$(SOVERSION).@SOSUFFIX@
+libso_target= $(libso_base)-$(SOVERSION).la
+libso_default= $(libso_base).@SOSUFFIX@
+libso_major= $(libso_base)-$(SOMAJOR).@SOSUFFIX@
+
+##################################################
+# C++
+#
+# C++ support is optional, and it can be used with or without shared libraries.
+# You must configure it using:
+# --enable-cxx
+##################################################
+libcxx= libdb_cxx.a
+libxso_base= libdb_cxx
+libxso= $(libxso_base)-$(SOVERSION).@SOSUFFIX@
+libxso_target= $(libxso_base)-$(SOVERSION).la
+libxso_default= $(libxso_base).@SOSUFFIX@
+libxso_major= $(libxso_base)-$(SOMAJOR).@SOSUFFIX@
+
+##################################################
+# JAVA
+#
+# Java support is optional and requires shared libraries.
+# You must configure it using:
+# --enable-java --enable-dynamic
+##################################################
+CLASSPATH= $(JAVA_CLASSTOP)
+JAR= @JAR@
+JAVAC= env CLASSPATH=$(CLASSPATH) @JAVAC@
+JAVACFLAGS= @JAVACFLAGS@
+JAVA_BUILTFILE= .javabuilt
+JAVA_CLASSTOP= $(srcdir)/java/classes
+JAVA_SRCDIR= $(srcdir)/java/src
+JAVA_DBREL= com/sleepycat/db
+JAVA_DBDIR= $(JAVA_SRCDIR)/$(JAVA_DBREL)
+JAVA_EXDIR= $(JAVA_SRCDIR)/com/sleepycat/examples
+
+libj_jarfile= db.jar
+libjso_base= libdb_java
+libjso= $(libjso_base)-$(SOVERSION).@SOSUFFIX@
+libjso_target= $(libjso_base)-$(SOVERSION).la
+libjso_default= $(libjso_base).@SOSUFFIX@
+libjso_major= $(libjso_base)-$(SOMAJOR).@SOSUFFIX@
+libjso_g= $(libjso_base)-$(SOVERSION)_g.@SOSUFFIX@
+
+##################################################
+# TCL
+#
+# Tcl support is optional and requires shared libraries.
+# You must configure it using:
+# --enable-tcl --with-tcl=DIR --enable-dynamic
+##################################################
+TCFLAGS= @TCFLAGS@
+libtso_base= libdb_tcl
+libtso= $(libtso_base)-$(SOVERSION).@SOSUFFIX@
+libtso_target= $(libtso_base)-$(SOVERSION).la
+libtso_default= $(libtso_base).@SOSUFFIX@
+libtso_major= $(libtso_base)-$(SOMAJOR).@SOSUFFIX@
+
+##################################################
+# DB_DUMP185 UTILITY
+#
+# The db_dump185 application should be compiled using the system's db.h file
+# (which should be a DB 1.85/1.86 include file), and the system's 1.85/1.86
+# object library. To include the right db.h, don't include -I$(builddir) on
+# the compile line. You may also need to add a local include directory and
+# local libraries, for example. Do that by adding -I options to the DB185INC
+# line, and -l options to the DB185LIB line.
+##################################################
+DB185INC= -c @CFLAGS@ -I$(srcdir)/include @CPPFLAGS@
+DB185LIB=
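+# For example, to pick up a locally installed 1.85/1.86 db.h and library,
+# the settings might look like this (paths and library name are illustrative
+# only):
+# DB185INC= -c @CFLAGS@ -I/usr/local/include -I$(srcdir)/include @CPPFLAGS@
+# DB185LIB= -L/usr/local/lib -ldb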
+
+##################################################
+# INSTALLATION DIRECTORIES AND PERMISSIONS
+##################################################
+prefix= @prefix@
+exec_prefix=@exec_prefix@
+bindir= @bindir@
+includedir=@includedir@
+libdir= @libdir@
+docdir= $(prefix)/docs
+
+dmode= 755
+emode= 555
+fmode= 444
+
+transform=@program_transform_name@
+
+##################################################
+# PATHS FOR STANDARD USER-LEVEL COMMANDS
+##################################################
+SHELL= @db_cv_path_sh@
+ar= @db_cv_path_ar@ cr
+chmod= @db_cv_path_chmod@
+cp= @db_cv_path_cp@
+ln= @db_cv_path_ln@
+mkdir= @db_cv_path_mkdir@
+ranlib= @db_cv_path_ranlib@
+rm= @db_cv_path_rm@
+strip= @db_cv_path_strip@
+
+##################################################
+# NOTHING BELOW THIS LINE SHOULD EVER NEED TO BE MODIFIED.
+##################################################
+OBJS= @ADDITIONAL_OBJS@ @LIBOBJS@ @RPC_OBJS@ bt_compare@o@ bt_conv@o@ \
+ bt_curadj@o@ bt_cursor@o@ bt_delete@o@ bt_method@o@ bt_open@o@ \
+ bt_put@o@ bt_rec@o@ bt_reclaim@o@ bt_recno@o@ bt_rsearch@o@ \
+ bt_search@o@ bt_split@o@ bt_stat@o@ bt_upgrade@o@ bt_verify@o@ \
+ btree_auto@o@ crdel_auto@o@ crdel_rec@o@ db@o@ db_am@o@ db_auto@o@ \
+ db_byteorder@o@ db_cam@o@ db_conv@o@ db_dispatch@o@ db_dup@o@ \
+ db_err@o@ db_getlong@o@ db_iface@o@ db_join@o@ db_log2@o@ \
+ db_meta@o@ db_method@o@ db_overflow@o@ db_pr@o@ db_rec@o@ \
+ db_reclaim@o@ db_ret@o@ db_salloc@o@ db_shash@o@ db_upg@o@ \
+ db_upg_opd@o@ db_vrfy@o@ db_vrfyutil@o@ dbm@o@ env_method@o@ \
+ env_open@o@ env_recover@o@ env_region@o@ hash@o@ hash_auto@o@ \
+ hash_conv@o@ hash_dup@o@ hash_func@o@ hash_meta@o@ hash_method@o@ \
+ hash_page@o@ hash_rec@o@ hash_reclaim@o@ hash_stat@o@ hash_upgrade@o@ \
+ hash_verify@o@ hsearch@o@ lock@o@ lock_conflict@o@ \
+ lock_deadlock@o@ lock_method@o@ lock_region@o@ lock_stat@o@ \
+ lock_util@o@ log@o@ log_archive@o@ log_auto@o@ log_compare@o@ \
+ log_findckp@o@ log_get@o@ log_method@o@ log_put@o@ log_rec@o@ \
+ log_register@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ mp_fopen@o@ \
+ mp_fput@o@ mp_fset@o@ mp_method@o@ mp_region@o@ mp_register@o@ \
+ mp_stat@o@ mp_sync@o@ mp_trickle@o@ mutex@o@ os_abs@o@ \
+ os_alloc@o@ os_dir@o@ os_errno@o@ os_fid@o@ os_finit@o@ \
+ os_fsync@o@ os_handle@o@ os_map@o@ os_method@o@ os_oflags@o@ \
+ os_open@o@ os_region@o@ os_rename@o@ os_root@o@ os_rpath@o@ \
+ os_rw@o@ os_seek@o@ os_sleep@o@ os_spin@o@ os_stat@o@ \
+ os_tmpdir@o@ os_unlink@o@ qam@o@ qam_auto@o@ qam_conv@o@ qam_files@o@ \
+ qam_method@o@ qam_open@o@ qam_rec@o@ qam_stat@o@ qam_upgrade@o@ \
+ qam_verify@o@ txn@o@ txn_auto@o@ txn_rec@o@ txn_region@o@ xa@o@ \
+ xa_db@o@ xa_map@o@
+
+COBJS= cxx_app@o@ cxx_except@o@ cxx_lock@o@ cxx_log@o@ cxx_mpool@o@ \
+ cxx_table@o@ cxx_txn@o@
+
+DBSOBJS=dbs@o@ dbs_am@o@ dbs_checkpoint@o@ dbs_debug@o@ dbs_handles@o@ \
+ dbs_log@o@ dbs_qam@o@ dbs_spawn@o@ dbs_trickle@o@ dbs_util@o@ \
+ dbs_yield@o@
+
+EOBJS= ex_access@o@ ex_btrec@o@ ex_dbclient@o@ ex_env@o@ ex_lock@o@ \
+ ex_mpool@o@ ex_thread@o@ ex_tpcb@o@
+
+JOBJS= java_Db@o@ java_DbEnv@o@ java_DbLock@o@ java_DbLsn@o@ java_DbTxn@o@ \
+ java_Dbc@o@ java_Dbt@o@ java_info@o@ java_locked@o@ java_util@o@
+
+RPC_OBJS=client@o@ db_server_clnt@o@ db_server_xdr@o@ gen_client@o@ \
+ gen_client_ret@o@
+RPC_SRV=db_server_proc@o@ db_server_svc@o@ db_server_util@o@ gen_db_server@o@
+
+TOBJS= tcl_compat@o@ tcl_db@o@ tcl_db_pkg@o@ tcl_dbcursor@o@ tcl_env@o@ \
+ tcl_internal@o@ tcl_lock@o@ tcl_log@o@ tcl_mp@o@ tcl_txn@o@
+
+UOBJS= db_archive@o@ db_checkpoint@o@ db_deadlock@o@ db_dump185@o@ \
+ db_dump@o@ db_load@o@ db_printlog@o@ db_recover@o@ db_stat@o@ \
+ db_upgrade@o@ db_verify@o@ util_log@o@ util_sig@o@
+
+PROGS= @ADDITIONAL_PROGS@ db_archive db_checkpoint db_deadlock \
+ db_dump db_load db_printlog db_recover db_stat db_upgrade db_verify
+
+JAVA_DBSRCS=\
+ $(JAVA_DBDIR)/Db.java $(JAVA_DBDIR)/DbAppendRecno.java \
+ $(JAVA_DBDIR)/DbBtreeCompare.java \
+ $(JAVA_DBDIR)/DbBtreePrefix.java $(JAVA_DBDIR)/DbBtreeStat.java \
+ $(JAVA_DBDIR)/DbConstants.java $(JAVA_DBDIR)/DbDeadlockException.java \
+ $(JAVA_DBDIR)/DbDupCompare.java $(JAVA_DBDIR)/DbEnv.java \
+ $(JAVA_DBDIR)/DbEnvFeedback.java $(JAVA_DBDIR)/DbErrcall.java \
+ $(JAVA_DBDIR)/DbException.java $(JAVA_DBDIR)/DbFeedback.java \
+ $(JAVA_DBDIR)/DbHash.java $(JAVA_DBDIR)/DbHashStat.java \
+ $(JAVA_DBDIR)/DbKeyRange.java $(JAVA_DBDIR)/DbLock.java \
+ $(JAVA_DBDIR)/DbLockStat.java $(JAVA_DBDIR)/DbLogStat.java \
+ $(JAVA_DBDIR)/DbLsn.java $(JAVA_DBDIR)/DbMemoryException.java \
+ $(JAVA_DBDIR)/DbMpoolFStat.java $(JAVA_DBDIR)/DbMpoolStat.java \
+ $(JAVA_DBDIR)/DbOutputStreamErrcall.java \
+ $(JAVA_DBDIR)/DbQueueStat.java $(JAVA_DBDIR)/DbRecoveryInit.java \
+ $(JAVA_DBDIR)/DbRunRecoveryException.java $(JAVA_DBDIR)/DbTxn.java \
+ $(JAVA_DBDIR)/DbTxnRecover.java $(JAVA_DBDIR)/DbTxnStat.java \
+ $(JAVA_DBDIR)/Dbc.java $(JAVA_DBDIR)/Dbt.java
+
+JAVA_EXSRCS=\
+ $(JAVA_EXDIR)/AccessExample.java \
+ $(JAVA_EXDIR)/BtRecExample.java \
+ $(JAVA_EXDIR)/EnvExample.java \
+ $(JAVA_EXDIR)/LockExample.java \
+ $(JAVA_EXDIR)/TpcbExample.java
+
+##################################################
+# Note: Berkeley DB Makefiles are configured to build either a static or
+# a dynamic library. You should not attempt to build both library types
+# in the same directory, as they have incompatible object file formats.
+# To build both static and dynamic libraries, create two separate build
+# directories, and configure and build them separately.
+##################################################
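+# For example (an illustrative sketch; it assumes the two build directories
+# are created alongside the dist directory):
+#	mkdir build_static  && cd build_static  && ../dist/configure
+#	mkdir build_dynamic && cd build_dynamic && ../dist/configure --enable-dynamic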
+all: @DEFAULT_LIB@ @ADDITIONAL_LIBS@ @ADDITIONAL_LANG@ $(PROGS)
+
+$(libdb): $(OBJS)
+ $(ar) $@ $(OBJS)
+ test ! -f $(ranlib) || $(ranlib) $@
+
+$(libcxx): $(COBJS) $(OBJS)
+ $(ar) $@ $(COBJS) $(OBJS)
+ test ! -f $(ranlib) || $(ranlib) $@
+
+$(libso_target): $(OBJS)
+ $(SOLINK) $(SOFLAGS) -o $(libso_target) \
+ $(OBJS) $(LDFLAGS) $(LIBSO_LIBS)
+
+$(libxso_target): $(COBJS) $(OBJS)
+ $(SOLINK) $(SOFLAGS) -o $(libxso_target) \
+ $(COBJS) $(OBJS) $(LDFLAGS) $(LIBXSO_LIBS)
+
+$(libjso_target): $(JOBJS) $(OBJS)
+ $(SOLINK) $(SOFLAGS) -o $(libjso_target) \
+ $(JOBJS) $(OBJS) $(LDFLAGS) $(LIBJSO_LIBS)
+
+$(libtso_target): $(TOBJS) $(OBJS)
+ $(SOLINK) $(SOFLAGS) -o $(libtso_target) \
+ $(TOBJS) $(OBJS) $(LDFLAGS) $(LIBTSO_LIBS)
+
+##################################################
+# Creating individual dependencies and actions for building class
+# files is possible, but it is very messy and error prone.
+##################################################
+java: $(JAVA_CLASSTOP) $(JAVA_BUILTFILE)
+
+$(JAVA_BUILTFILE): $(JAVA_DBSRCS) $(JAVA_EXSRCS)
+ @test -f $(rm) || (echo 'rm not found.'; exit 1)
+ @test -f $(cp) || (echo 'cp not found.'; exit 1)
+ $(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_DBSRCS)
+ @cd $(JAVA_CLASSTOP) && $(JAR) cf $(libj_jarfile) $(JAVA_DBREL)
+ $(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_EXSRCS)
+ @echo This file helps with building java using make > $(JAVA_BUILTFILE)
+
+$(JAVA_CLASSTOP):
+ @test -f $(mkdir) || (echo 'mkdir not found.'; exit 1)
+ $(mkdir) $(JAVA_CLASSTOP)
+
+##################################################
+# Utilities
+##################################################
+berkeley_db_svc: $(RPC_SRV) util_log@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) $(RPC_SRV) util_log@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_archive: db_archive@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_archive@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_checkpoint: db_checkpoint@o@ util_log@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_checkpoint@o@ util_log@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_deadlock: db_deadlock@o@ util_log@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_deadlock@o@ util_log@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_dump: db_dump@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) db_dump@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_dump185: db_dump185@o@ @LIBOBJS@
+ $(CCLINK) -o $@ $(LDFLAGS) db_dump185@o@ @LIBOBJS@ $(DB185LIB)
+ $(POSTLINK) $@
+
+db_load: db_load@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) db_load@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_printlog: db_printlog@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_printlog@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_recover: db_recover@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_recover@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_stat: db_stat@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) db_stat@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_upgrade: db_upgrade@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_upgrade@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+db_verify: db_verify@o@ util_sig@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_verify@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Example programs
+##################################################
+ex_access: ex_access@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) ex_access@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+ex_btrec: ex_btrec@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) ex_btrec@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+ex_dbclient: ex_dbclient@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) ex_dbclient@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+ex_env: ex_env@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) ex_env@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+ex_lock: ex_lock@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) ex_lock@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+ex_mpool: ex_mpool@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) ex_mpool@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+ex_thread: ex_thread@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) ex_thread@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+ex_tpcb: ex_tpcb@o@ @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) ex_tpcb@o@ $(LIBDB_ARGS) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Multi-threaded tester.
+##################################################
+dbs: $(DBSOBJS) @DEFAULT_LIB@
+ $(CCLINK) -o $@ $(LDFLAGS) $(DBSOBJS) $(LIBDB_ARGS) @DBS_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Standard Makefile targets.
+##################################################
+RMLIST= berkeley_db_svc db_dump185 dbs ex_access ex_btrec ex_dbclient \
+ ex_env ex_lock ex_mpool ex_thread ex_tpcb
+clean:
+ $(rm) -f $(OBJS)
+ $(rm) -f $(COBJS) $(DBSOBJS) $(EOBJS) $(CEOBJS) $(JOBJS)
+ $(rm) -f $(TOBJS) $(UOBJS) $(RPC_OBJS) $(RPC_SRV)
+ $(rm) -f $(PROGS) $(RMLIST)
+ $(rm) -f *@o@ *.o *.lo core *.core
+ $(rm) -rf ALL.OUT TESTDIR
+ $(rm) -rf .libs $(libdb) $(libcxx)
+ $(rm) -rf $(libso_target) $(libso) $(libso_default) $(libso_major)
+ $(rm) -rf $(libxso_target) $(libxso) $(libxso_default) $(libxso_major)
+ $(rm) -rf $(libtso_target) $(libtso) $(libtso_default) $(libtso_major)
+ $(rm) -rf $(libjso_target) $(libjso) $(libjso_default) $(libjso_major)
+
+depend obj:
+
+realclean distclean: clean
+ $(rm) -f Makefile config.cache config.log config.status db_config.h
+ $(rm) -f confdefs.h db.h db_int.h db_185.h include.tcl libtool
+
+install: all install_setup \
+ install_include install_lib install_utilities install_docs
+
+uninstall: uninstall_utilities uninstall_include uninstall_lib uninstall_docs
+
+install_setup:
+ @test -f $(chmod) || (echo 'chmod not found.'; exit 1)
+ @test -f $(cp) || (echo 'cp not found.'; exit 1)
+ @test -f $(mkdir) || (echo 'mkdir not found.'; exit 1)
+ @test -f $(rm) || (echo 'rm not found.'; exit 1)
+
+install_include:
+ @echo "Installing DB include files: $(includedir) ..."
+ @test -d $(includedir) || \
+ ($(mkdir) -p $(includedir) && $(chmod) $(dmode) $(includedir))
+ @cd $(includedir) && $(rm) -f db.h db_185.h db_cxx.h
+ @$(cp) -p db.h \
+ $(srcdir)/include/db_cxx.h @ADDITIONAL_INCS@ $(includedir)
+ @cd $(includedir) && $(chmod) $(fmode) db.h db_cxx.h @ADDITIONAL_INCS@
+
+uninstall_include:
+ -cd $(includedir) && $(rm) -f db.h db_185.h db_cxx.h
+
+install_lib: @DEFAULT_INSTALL@
+
+uninstall_lib:
+ -cd $(libdir) && $(rm) -f $(libdb) $(libcxx) \
+ $(libso_target) $(libso) $(libso_default) $(libso_major) \
+ $(libxso_target) $(libxso) $(libxso_default) $(libxso_major) \
+ $(libtso_target) $(libtso) $(libtso_default) $(libtso_major) \
+ $(libjso_target) $(libjso) $(libjso_default) $(libjso_major) \
+ $(libj_jarfile)
+
+install_static:
+ @echo "Installing DB library: $(libdir) ..."
+ @test -d $(libdir) || \
+ ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
+ @cd $(libdir) && $(rm) -f $(libdb)
+ @$(cp) -p $(libdb) $(libdir)
+ @cd $(libdir) && $(chmod) $(fmode) $(libdb)
+
+install_static_cxx:
+ @echo "Installing DB C++ static library: $(libdir) ..."
+ @test -d $(libdir) || \
+ ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
+ @cd $(libdir) && $(rm) -f $(libcxx)
+ @$(cp) -p $(libcxx) $(libdir)
+ @cd $(libdir) && $(chmod) $(fmode) $(libcxx)
+
+install_dynamic:
+ @echo "Installing DB library: $(libdir) ..."
+ @test -d $(libdir) || \
+ ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
+ @cd $(libdir) && $(rm) -f \
+ $(libso_target) $(libso) $(libso_default) $(libso_major)
+ @$(INSTALLER) $(libso_target) $(libdir)
+ @cd $(libdir) && $(ln) -s $(libso) $(libso_default)
+ @cd $(libdir) && $(ln) -s $(libso) $(libso_major)
+ @$(LIBTOOL) --mode=finish $(libdir)
+
+install_dynamic_cxx:
+ @echo "Installing DB C++ library: $(libdir) ..."
+ @test -d $(libdir) || \
+ ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
+ @cd $(libdir) && $(rm) -f \
+ $(libxso_target) $(libxso) $(libxso_default) $(libxso_major)
+ @$(INSTALLER) $(libxso_target) $(libdir)
+ @cd $(libdir) && $(ln) -s $(libxso) $(libxso_default)
+ @cd $(libdir) && $(ln) -s $(libxso) $(libxso_major)
+
+install_tcl:
+ @echo "Installing DB Tcl library: $(libdir) ..."
+ @test -d $(libdir) || \
+ ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
+ @cd $(libdir) && $(rm) -f \
+ $(libtso_target) $(libtso) $(libtso_default) $(libtso_major)
+ @$(INSTALLER) $(libtso_target) $(libdir)
+ @cd $(libdir) && $(ln) -s $(libtso) $(libtso_default)
+ @cd $(libdir) && $(ln) -s $(libtso) $(libtso_major)
+
+install_java:
+ @echo "Installing DB Java library: $(libdir) ..."
+ @test -d $(libdir) || \
+ ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
+ @cd $(libdir) && $(rm) -f \
+ $(libjso_target) $(libjso) $(libjso_default) $(libjso_major)
+ @$(INSTALLER) $(libjso_target) $(libdir)
+ @cd $(libdir) && $(ln) -s $(libjso) $(libjso_default)
+ @cd $(libdir) && $(ln) -s $(libjso) $(libjso_major)
+ @cd $(libdir) && $(ln) -s $(libjso) $(libjso_g)
+ @$(cp) $(JAVA_CLASSTOP)/$(libj_jarfile) $(libdir)
+
+install_utilities:
+ @echo "Installing DB utilities: $(bindir) ..."
+ @test -d $(bindir) || \
+ ($(mkdir) -p $(bindir) && $(chmod) $(dmode) $(bindir))
+ @cd $(bindir) && $(rm) -f $(PROGS)
+ @$(INSTALLER) -fp $(PROGS) $(bindir)
+ @cd $(bindir) && (test ! -f $(strip) || $(strip) $(PROGS))
+ @cd $(bindir) && $(chmod) $(emode) $(PROGS)
+
+uninstall_utilities:
+ -cd $(bindir) && $(rm) -f $(PROGS)
+
+install_docs:
+ @echo "Installing documentation: $(docdir) ..."
+ @test -d $(docdir) || \
+ ($(mkdir) -p $(docdir) && $(chmod) $(dmode) $(docdir))
+ @cd $(docdir) && $(rm) -rf *
+ @cd $(srcdir)/docs && $(cp) -pr * $(docdir)/
+
+uninstall_docs:
+ -cd $(docdir) && $(rm) -rf *
+
+##################################################
+# Object build rules.
+##################################################
+# Utilities
+db_archive@o@: $(srcdir)/db_archive/db_archive.c
+ $(CC) $(CFLAGS) $?
+db_checkpoint@o@: $(srcdir)/db_checkpoint/db_checkpoint.c
+ $(CC) $(CFLAGS) $?
+db_deadlock@o@: $(srcdir)/db_deadlock/db_deadlock.c
+ $(CC) $(CFLAGS) $?
+db_dump@o@: $(srcdir)/db_dump/db_dump.c
+ $(CC) $(CFLAGS) $?
+db_dump185@o@: $(srcdir)/db_dump185/db_dump185.c
+ $(CC) $(DB185INC) $?
+db_load@o@: $(srcdir)/db_load/db_load.c
+ $(CC) $(CFLAGS) $?
+db_printlog@o@: $(srcdir)/db_printlog/db_printlog.c
+ $(CC) $(CFLAGS) $?
+db_recover@o@: $(srcdir)/db_recover/db_recover.c
+ $(CC) $(CFLAGS) $?
+db_stat@o@: $(srcdir)/db_stat/db_stat.c
+ $(CC) $(CFLAGS) $?
+db_upgrade@o@: $(srcdir)/db_upgrade/db_upgrade.c
+ $(CC) $(CFLAGS) $?
+db_verify@o@: $(srcdir)/db_verify/db_verify.c
+ $(CC) $(CFLAGS) $?
+
+# Examples
+ex_access@o@: $(srcdir)/examples_c/ex_access.c
+ $(CC) $(CFLAGS) $?
+ex_btrec@o@: $(srcdir)/examples_c/ex_btrec.c
+ $(CC) $(CFLAGS) $?
+ex_dbclient@o@: $(srcdir)/examples_c/ex_dbclient.c
+ $(CC) $(CFLAGS) $?
+ex_env@o@: $(srcdir)/examples_c/ex_env.c
+ $(CC) $(CFLAGS) $?
+ex_lock@o@: $(srcdir)/examples_c/ex_lock.c
+ $(CC) $(CFLAGS) $?
+ex_mpool@o@: $(srcdir)/examples_c/ex_mpool.c
+ $(CC) $(CFLAGS) $?
+ex_thread@o@: $(srcdir)/examples_c/ex_thread.c
+ $(CC) $(CFLAGS) $?
+ex_tpcb@o@: $(srcdir)/examples_c/ex_tpcb.c
+ $(CC) $(CFLAGS) $?
+
+# DB files
+crdel_auto@o@: $(srcdir)/db/crdel_auto.c
+ $(CC) $(CFLAGS) $?
+crdel_rec@o@: $(srcdir)/db/crdel_rec.c
+ $(CC) $(CFLAGS) $?
+db@o@: $(srcdir)/db/db.c
+ $(CC) $(CFLAGS) $?
+db_am@o@: $(srcdir)/db/db_am.c
+ $(CC) $(CFLAGS) $?
+db_auto@o@: $(srcdir)/db/db_auto.c
+ $(CC) $(CFLAGS) $?
+db_cam@o@: $(srcdir)/db/db_cam.c
+ $(CC) $(CFLAGS) $?
+db_conv@o@: $(srcdir)/db/db_conv.c
+ $(CC) $(CFLAGS) $?
+db_dispatch@o@: $(srcdir)/db/db_dispatch.c
+ $(CC) $(CFLAGS) $?
+db_dup@o@: $(srcdir)/db/db_dup.c
+ $(CC) $(CFLAGS) $?
+db_iface@o@: $(srcdir)/db/db_iface.c
+ $(CC) $(CFLAGS) $?
+db_join@o@: $(srcdir)/db/db_join.c
+ $(CC) $(CFLAGS) $?
+db_meta@o@: $(srcdir)/db/db_meta.c
+ $(CC) $(CFLAGS) $?
+db_method@o@: $(srcdir)/db/db_method.c
+ $(CC) $(CFLAGS) $?
+db_overflow@o@: $(srcdir)/db/db_overflow.c
+ $(CC) $(CFLAGS) $?
+db_pr@o@: $(srcdir)/db/db_pr.c
+ $(CC) $(CFLAGS) $?
+db_rec@o@: $(srcdir)/db/db_rec.c
+ $(CC) $(CFLAGS) $?
+db_reclaim@o@: $(srcdir)/db/db_reclaim.c
+ $(CC) $(CFLAGS) $?
+db_ret@o@: $(srcdir)/db/db_ret.c
+ $(CC) $(CFLAGS) $?
+db_upg@o@: $(srcdir)/db/db_upg.c
+ $(CC) $(CFLAGS) $?
+db_upg_opd@o@: $(srcdir)/db/db_upg_opd.c
+ $(CC) $(CFLAGS) $?
+db_vrfy@o@: $(srcdir)/db/db_vrfy.c
+ $(CC) $(CFLAGS) $?
+db_vrfyutil@o@: $(srcdir)/db/db_vrfyutil.c
+ $(CC) $(CFLAGS) $?
+
+# Environment files
+db_salloc@o@: $(srcdir)/env/db_salloc.c
+ $(CC) $(CFLAGS) $?
+db_shash@o@: $(srcdir)/env/db_shash.c
+ $(CC) $(CFLAGS) $?
+env_method@o@: $(srcdir)/env/env_method.c
+ $(CC) $(CFLAGS) $?
+env_open@o@: $(srcdir)/env/env_open.c
+ $(CC) $(CFLAGS) $?
+env_recover@o@: $(srcdir)/env/env_recover.c
+ $(CC) $(CFLAGS) $?
+env_region@o@: $(srcdir)/env/env_region.c
+ $(CC) $(CFLAGS) $?
+
+# Common files
+db_byteorder@o@: $(srcdir)/common/db_byteorder.c
+ $(CC) $(CFLAGS) $?
+db_err@o@: $(srcdir)/common/db_err.c
+ $(CC) $(CFLAGS) $?
+db_getlong@o@: $(srcdir)/common/db_getlong.c
+ $(CC) $(CFLAGS) $?
+db_log2@o@: $(srcdir)/common/db_log2.c
+ $(CC) $(CFLAGS) $?
+util_log@o@: $(srcdir)/common/util_log.c
+ $(CC) $(CFLAGS) $?
+util_sig@o@: $(srcdir)/common/util_sig.c
+ $(CC) $(CFLAGS) $?
+
+# Btree files
+bt_compare@o@: $(srcdir)/btree/bt_compare.c
+ $(CC) $(CFLAGS) $?
+bt_conv@o@: $(srcdir)/btree/bt_conv.c
+ $(CC) $(CFLAGS) $?
+bt_curadj@o@: $(srcdir)/btree/bt_curadj.c
+ $(CC) $(CFLAGS) $?
+bt_cursor@o@: $(srcdir)/btree/bt_cursor.c
+ $(CC) $(CFLAGS) $?
+bt_delete@o@: $(srcdir)/btree/bt_delete.c
+ $(CC) $(CFLAGS) $?
+bt_method@o@: $(srcdir)/btree/bt_method.c
+ $(CC) $(CFLAGS) $?
+bt_open@o@: $(srcdir)/btree/bt_open.c
+ $(CC) $(CFLAGS) $?
+bt_put@o@: $(srcdir)/btree/bt_put.c
+ $(CC) $(CFLAGS) $?
+bt_rec@o@: $(srcdir)/btree/bt_rec.c
+ $(CC) $(CFLAGS) $?
+bt_reclaim@o@: $(srcdir)/btree/bt_reclaim.c
+ $(CC) $(CFLAGS) $?
+bt_recno@o@: $(srcdir)/btree/bt_recno.c
+ $(CC) $(CFLAGS) $?
+bt_rsearch@o@: $(srcdir)/btree/bt_rsearch.c
+ $(CC) $(CFLAGS) $?
+bt_search@o@: $(srcdir)/btree/bt_search.c
+ $(CC) $(CFLAGS) $?
+bt_split@o@: $(srcdir)/btree/bt_split.c
+ $(CC) $(CFLAGS) $?
+bt_stack@o@: $(srcdir)/btree/bt_stack.c
+ $(CC) $(CFLAGS) $?
+bt_stat@o@: $(srcdir)/btree/bt_stat.c
+ $(CC) $(CFLAGS) $?
+bt_upgrade@o@: $(srcdir)/btree/bt_upgrade.c
+ $(CC) $(CFLAGS) $?
+bt_verify@o@: $(srcdir)/btree/bt_verify.c
+ $(CC) $(CFLAGS) $?
+btree_auto@o@: $(srcdir)/btree/btree_auto.c
+ $(CC) $(CFLAGS) $?
+
+# Queue files
+qam@o@: $(srcdir)/qam/qam.c
+ $(CC) $(CFLAGS) $?
+qam_auto@o@: $(srcdir)/qam/qam_auto.c
+ $(CC) $(CFLAGS) $?
+qam_conv@o@: $(srcdir)/qam/qam_conv.c
+ $(CC) $(CFLAGS) $?
+qam_files@o@: $(srcdir)/qam/qam_files.c
+ $(CC) $(CFLAGS) $?
+qam_method@o@: $(srcdir)/qam/qam_method.c
+ $(CC) $(CFLAGS) $?
+qam_open@o@: $(srcdir)/qam/qam_open.c
+ $(CC) $(CFLAGS) $?
+qam_rec@o@: $(srcdir)/qam/qam_rec.c
+ $(CC) $(CFLAGS) $?
+qam_stat@o@: $(srcdir)/qam/qam_stat.c
+ $(CC) $(CFLAGS) $?
+qam_upgrade@o@: $(srcdir)/qam/qam_upgrade.c
+ $(CC) $(CFLAGS) $?
+qam_verify@o@: $(srcdir)/qam/qam_verify.c
+ $(CC) $(CFLAGS) $?
+
+# C++ files
+cxx_app@o@: $(srcdir)/cxx/cxx_app.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_except@o@: $(srcdir)/cxx/cxx_except.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_lock@o@: $(srcdir)/cxx/cxx_lock.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_log@o@: $(srcdir)/cxx/cxx_log.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_mpool@o@: $(srcdir)/cxx/cxx_mpool.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_table@o@: $(srcdir)/cxx/cxx_table.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_txn@o@: $(srcdir)/cxx/cxx_txn.cpp
+ $(CXX) $(CXXFLAGS) $?
+
+# Java files
+java_Db@o@: $(srcdir)/libdb_java/java_Db.c
+ $(CC) $(CFLAGS) $?
+java_DbEnv@o@: $(srcdir)/libdb_java/java_DbEnv.c
+ $(CC) $(CFLAGS) $?
+java_DbLock@o@: $(srcdir)/libdb_java/java_DbLock.c
+ $(CC) $(CFLAGS) $?
+java_DbLsn@o@: $(srcdir)/libdb_java/java_DbLsn.c
+ $(CC) $(CFLAGS) $?
+java_DbTxn@o@: $(srcdir)/libdb_java/java_DbTxn.c
+ $(CC) $(CFLAGS) $?
+java_Dbc@o@: $(srcdir)/libdb_java/java_Dbc.c
+ $(CC) $(CFLAGS) $?
+java_Dbt@o@: $(srcdir)/libdb_java/java_Dbt.c
+ $(CC) $(CFLAGS) $?
+java_info@o@: $(srcdir)/libdb_java/java_info.c
+ $(CC) $(CFLAGS) $?
+java_locked@o@: $(srcdir)/libdb_java/java_locked.c
+ $(CC) $(CFLAGS) $?
+java_util@o@: $(srcdir)/libdb_java/java_util.c
+ $(CC) $(CFLAGS) $?
+
+# Tcl files
+tcl_compat@o@: $(srcdir)/tcl/tcl_compat.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_db@o@: $(srcdir)/tcl/tcl_db.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_db_pkg@o@: $(srcdir)/tcl/tcl_db_pkg.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_dbcursor@o@: $(srcdir)/tcl/tcl_dbcursor.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_env@o@: $(srcdir)/tcl/tcl_env.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_internal@o@: $(srcdir)/tcl/tcl_internal.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_lock@o@: $(srcdir)/tcl/tcl_lock.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_log@o@: $(srcdir)/tcl/tcl_log.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_mp@o@: $(srcdir)/tcl/tcl_mp.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_txn@o@: $(srcdir)/tcl/tcl_txn.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+
+# Hash files
+hash_auto@o@: $(srcdir)/hash/hash_auto.c
+ $(CC) $(CFLAGS) $?
+hash@o@: $(srcdir)/hash/hash.c
+ $(CC) $(CFLAGS) $?
+hash_conv@o@: $(srcdir)/hash/hash_conv.c
+ $(CC) $(CFLAGS) $?
+hash_dup@o@: $(srcdir)/hash/hash_dup.c
+ $(CC) $(CFLAGS) $?
+hash_func@o@: $(srcdir)/hash/hash_func.c
+ $(CC) $(CFLAGS) $?
+hash_meta@o@: $(srcdir)/hash/hash_meta.c
+ $(CC) $(CFLAGS) $?
+hash_method@o@: $(srcdir)/hash/hash_method.c
+ $(CC) $(CFLAGS) $?
+hash_page@o@: $(srcdir)/hash/hash_page.c
+ $(CC) $(CFLAGS) $?
+hash_rec@o@: $(srcdir)/hash/hash_rec.c
+ $(CC) $(CFLAGS) $?
+hash_reclaim@o@: $(srcdir)/hash/hash_reclaim.c
+ $(CC) $(CFLAGS) $?
+hash_stat@o@: $(srcdir)/hash/hash_stat.c
+ $(CC) $(CFLAGS) $?
+hash_upgrade@o@: $(srcdir)/hash/hash_upgrade.c
+ $(CC) $(CFLAGS) $?
+hash_verify@o@: $(srcdir)/hash/hash_verify.c
+ $(CC) $(CFLAGS) $?
+
+# Lock files
+lock@o@: $(srcdir)/lock/lock.c
+ $(CC) $(CFLAGS) $?
+lock_conflict@o@:$(srcdir)/lock/lock_conflict.c
+ $(CC) $(CFLAGS) $?
+lock_deadlock@o@:$(srcdir)/lock/lock_deadlock.c
+ $(CC) $(CFLAGS) $?
+lock_method@o@:$(srcdir)/lock/lock_method.c
+ $(CC) $(CFLAGS) $?
+lock_region@o@:$(srcdir)/lock/lock_region.c
+ $(CC) $(CFLAGS) $?
+lock_stat@o@:$(srcdir)/lock/lock_stat.c
+ $(CC) $(CFLAGS) $?
+lock_util@o@:$(srcdir)/lock/lock_util.c
+ $(CC) $(CFLAGS) $?
+
+# Log files
+log@o@: $(srcdir)/log/log.c
+ $(CC) $(CFLAGS) $?
+log_archive@o@: $(srcdir)/log/log_archive.c
+ $(CC) $(CFLAGS) $?
+log_auto@o@: $(srcdir)/log/log_auto.c
+ $(CC) $(CFLAGS) $?
+log_compare@o@: $(srcdir)/log/log_compare.c
+ $(CC) $(CFLAGS) $?
+log_findckp@o@: $(srcdir)/log/log_findckp.c
+ $(CC) $(CFLAGS) $?
+log_get@o@: $(srcdir)/log/log_get.c
+ $(CC) $(CFLAGS) $?
+log_method@o@: $(srcdir)/log/log_method.c
+ $(CC) $(CFLAGS) $?
+log_put@o@: $(srcdir)/log/log_put.c
+ $(CC) $(CFLAGS) $?
+log_rec@o@: $(srcdir)/log/log_rec.c
+ $(CC) $(CFLAGS) $?
+log_register@o@: $(srcdir)/log/log_register.c
+ $(CC) $(CFLAGS) $?
+
+# Mpool files
+mp_alloc@o@: $(srcdir)/mp/mp_alloc.c
+ $(CC) $(CFLAGS) $?
+mp_bh@o@: $(srcdir)/mp/mp_bh.c
+ $(CC) $(CFLAGS) $?
+mp_fget@o@: $(srcdir)/mp/mp_fget.c
+ $(CC) $(CFLAGS) $?
+mp_fopen@o@: $(srcdir)/mp/mp_fopen.c
+ $(CC) $(CFLAGS) $?
+mp_fput@o@: $(srcdir)/mp/mp_fput.c
+ $(CC) $(CFLAGS) $?
+mp_fset@o@: $(srcdir)/mp/mp_fset.c
+ $(CC) $(CFLAGS) $?
+mp_method@o@: $(srcdir)/mp/mp_method.c
+ $(CC) $(CFLAGS) $?
+mp_region@o@: $(srcdir)/mp/mp_region.c
+ $(CC) $(CFLAGS) $?
+mp_register@o@: $(srcdir)/mp/mp_register.c
+ $(CC) $(CFLAGS) $?
+mp_stat@o@: $(srcdir)/mp/mp_stat.c
+ $(CC) $(CFLAGS) $?
+mp_sync@o@: $(srcdir)/mp/mp_sync.c
+ $(CC) $(CFLAGS) $?
+mp_trickle@o@: $(srcdir)/mp/mp_trickle.c
+ $(CC) $(CFLAGS) $?
+
+# Mutex files
+mutex@o@: $(srcdir)/mutex/mutex.c
+ $(CC) $(CFLAGS) $?
+mut_fcntl@o@: $(srcdir)/mutex/mut_fcntl.c
+ $(CC) $(CFLAGS) $?
+mut_pthread@o@: $(srcdir)/mutex/mut_pthread.c
+ $(CC) $(CFLAGS) $?
+mut_tas@o@: $(srcdir)/mutex/mut_tas.c
+ $(CC) $(CFLAGS) $?
+# UTS4 spinlock assembly.
+uts4_cc@o@: $(srcdir)/mutex/uts4_cc.s
+ $(AS) $(ASFLAGS) -o $@ $?
+
+# Transaction files
+txn@o@: $(srcdir)/txn/txn.c
+ $(CC) $(CFLAGS) $?
+txn_auto@o@: $(srcdir)/txn/txn_auto.c
+ $(CC) $(CFLAGS) $?
+txn_rec@o@: $(srcdir)/txn/txn_rec.c
+ $(CC) $(CFLAGS) $?
+txn_region@o@: $(srcdir)/txn/txn_region.c
+ $(CC) $(CFLAGS) $?
+
+# XA files
+xa@o@: $(srcdir)/xa/xa.c
+ $(CC) $(CFLAGS) $?
+xa_db@o@: $(srcdir)/xa/xa_db.c
+ $(CC) $(CFLAGS) $?
+xa_map@o@: $(srcdir)/xa/xa_map.c
+ $(CC) $(CFLAGS) $?
+
+# RPC client files
+client@o@: $(srcdir)/rpc_client/client.c
+ $(CC) $(CFLAGS) $?
+db_server_clnt@o@: $(srcdir)/rpc_client/db_server_clnt.c
+ $(CC) $(CFLAGS) $?
+gen_client@o@: $(srcdir)/rpc_client/gen_client.c
+ $(CC) $(CFLAGS) $?
+gen_client_ret@o@: $(srcdir)/rpc_client/gen_client_ret.c
+ $(CC) $(CFLAGS) $?
+
+# RPC server files
+db_server_proc@o@: $(srcdir)/rpc_server/db_server_proc.c
+ $(CC) $(CFLAGS) $?
+db_server_svc@o@: $(srcdir)/rpc_server/db_server_svc.c
+ $(CC) $(CFLAGS) $?
+db_server_util@o@: $(srcdir)/rpc_server/db_server_util.c
+ $(CC) $(CFLAGS) $?
+db_server_xdr@o@: $(srcdir)/rpc_server/db_server_xdr.c
+ $(CC) $(CFLAGS) $?
+gen_db_server@o@: $(srcdir)/rpc_server/gen_db_server.c
+ $(CC) $(CFLAGS) $?
+
+# Historic compatibility files
+db185@o@: $(srcdir)/db185/db185.c
+ $(CC) $(CFLAGS) $?
+dbm@o@: $(srcdir)/dbm/dbm.c
+ $(CC) $(CFLAGS) $?
+hsearch@o@: $(srcdir)/hsearch/hsearch.c
+ $(CC) $(CFLAGS) $?
+
+# OS specific files
+os_abs@o@: $(srcdir)/os/os_abs.c
+ $(CC) $(CFLAGS) $?
+os_alloc@o@: $(srcdir)/os/os_alloc.c
+ $(CC) $(CFLAGS) $?
+os_dir@o@: $(srcdir)/os/os_dir.c
+ $(CC) $(CFLAGS) $?
+os_errno@o@: $(srcdir)/os/os_errno.c
+ $(CC) $(CFLAGS) $?
+os_fid@o@: $(srcdir)/os/os_fid.c
+ $(CC) $(CFLAGS) $?
+os_finit@o@: $(srcdir)/os/os_finit.c
+ $(CC) $(CFLAGS) $?
+os_fsync@o@: $(srcdir)/os/os_fsync.c
+ $(CC) $(CFLAGS) $?
+os_handle@o@: $(srcdir)/os/os_handle.c
+ $(CC) $(CFLAGS) $?
+os_map@o@: $(srcdir)/os/os_map.c
+ $(CC) $(CFLAGS) $?
+os_method@o@: $(srcdir)/os/os_method.c
+ $(CC) $(CFLAGS) $?
+os_oflags@o@: $(srcdir)/os/os_oflags.c
+ $(CC) $(CFLAGS) $?
+os_open@o@: $(srcdir)/os/os_open.c
+ $(CC) $(CFLAGS) $?
+os_region@o@: $(srcdir)/os/os_region.c
+ $(CC) $(CFLAGS) $?
+os_rename@o@: $(srcdir)/os/os_rename.c
+ $(CC) $(CFLAGS) $?
+os_root@o@: $(srcdir)/os/os_root.c
+ $(CC) $(CFLAGS) $?
+os_rpath@o@: $(srcdir)/os/os_rpath.c
+ $(CC) $(CFLAGS) $?
+os_rw@o@: $(srcdir)/os/os_rw.c
+ $(CC) $(CFLAGS) $?
+os_seek@o@: $(srcdir)/os/os_seek.c
+ $(CC) $(CFLAGS) $?
+os_sleep@o@: $(srcdir)/os/os_sleep.c
+ $(CC) $(CFLAGS) $?
+os_spin@o@: $(srcdir)/os/os_spin.c
+ $(CC) $(CFLAGS) $?
+os_stat@o@: $(srcdir)/os/os_stat.c
+ $(CC) $(CFLAGS) $?
+os_tmpdir@o@: $(srcdir)/os/os_tmpdir.c
+ $(CC) $(CFLAGS) $?
+os_unlink@o@: $(srcdir)/os/os_unlink.c
+ $(CC) $(CFLAGS) $?
+
+# Dbs.
+dbs@o@: $(srcdir)/test_server/dbs.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_am@o@: $(srcdir)/test_server/dbs_am.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_checkpoint@o@: $(srcdir)/test_server/dbs_checkpoint.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_debug@o@: $(srcdir)/test_server/dbs_debug.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_handles@o@: $(srcdir)/test_server/dbs_handles.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_log@o@: $(srcdir)/test_server/dbs_log.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_qam@o@: $(srcdir)/test_server/dbs_qam.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_spawn@o@: $(srcdir)/test_server/dbs_spawn.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_trickle@o@: $(srcdir)/test_server/dbs_trickle.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_util@o@: $(srcdir)/test_server/dbs_util.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+dbs_yield@o@: $(srcdir)/test_server/dbs_yield.c
+ $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
+
+# Replacement files
+getcwd@o@: $(srcdir)/clib/getcwd.c
+ $(CC) $(CFLAGS) $?
+getopt@o@: $(srcdir)/clib/getopt.c
+ $(CC) $(CFLAGS) $?
+memcmp@o@: $(srcdir)/clib/memcmp.c
+ $(CC) $(CFLAGS) $?
+memcpy@o@: $(srcdir)/clib/memmove.c
+ $(CC) -DMEMCOPY $(CFLAGS) $? -o $@
+memmove@o@: $(srcdir)/clib/memmove.c
+ $(CC) -DMEMMOVE $(CFLAGS) $?
+raise@o@: $(srcdir)/clib/raise.c
+ $(CC) $(CFLAGS) $?
+strcasecmp@o@: $(srcdir)/clib/strcasecmp.c
+ $(CC) $(CFLAGS) $?
+snprintf@o@: $(srcdir)/clib/snprintf.c
+ $(CC) $(CFLAGS) $?
+strerror@o@: $(srcdir)/clib/strerror.c
+ $(CC) $(CFLAGS) $?
+vsnprintf@o@: $(srcdir)/clib/vsnprintf.c
+ $(CC) $(CFLAGS) $?
diff --git a/bdb/dist/RELEASE b/bdb/dist/RELEASE
new file mode 100644
index 00000000000..495c12637e2
--- /dev/null
+++ b/bdb/dist/RELEASE
@@ -0,0 +1,8 @@
+# $Id: RELEASE,v 11.72 2001/01/24 15:20:14 bostic Exp $
+
+DB_VERSION_MAJOR=3
+DB_VERSION_MINOR=2
+DB_VERSION_PATCH=9
+DB_RELEASE_DATE=`date "+%B %e, %Y"`
+
+DB_VERSION_STRING="Sleepycat Software: Berkeley DB $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH: ($DB_RELEASE_DATE)"
diff --git a/bdb/dist/acconfig.h b/bdb/dist/acconfig.h
new file mode 100644
index 00000000000..e30d0e3d2c2
--- /dev/null
+++ b/bdb/dist/acconfig.h
@@ -0,0 +1,108 @@
+/*
+ * $Id: acconfig.h,v 11.29 2000/09/20 16:30:33 bostic Exp $
+ */
+
+/* Define if you are building a version for running the test suite. */
+#undef CONFIG_TEST
+
+/* Define if you want a debugging version. */
+#undef DEBUG
+
+/* Define if you want a version that logs read operations. */
+#undef DEBUG_ROP
+
+/* Define if you want a version that logs write operations. */
+#undef DEBUG_WOP
+
+/* Define if you want a version with run-time diagnostic checking. */
+#undef DIAGNOSTIC
+
+/* Define if you want to mask harmless uninitialized memory read/writes. */
+#undef UMRW
+
+/* Define if fcntl/F_SETFD denies child access to file descriptors. */
+#undef HAVE_FCNTL_F_SETFD
+
+/* Define if building big-file environment (e.g., AIX, HP/UX, Solaris). */
+#undef HAVE_FILE_OFFSET_BITS
+
+/* Mutex possibilities. */
+#undef HAVE_MUTEX_68K_GCC_ASSEMBLY
+#undef HAVE_MUTEX_AIX_CHECK_LOCK
+#undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
+#undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
+#undef HAVE_MUTEX_HPPA_MSEM_INIT
+#undef HAVE_MUTEX_IA64_GCC_ASSEMBLY
+#undef HAVE_MUTEX_MACOS
+#undef HAVE_MUTEX_MSEM_INIT
+#undef HAVE_MUTEX_PPC_GCC_ASSEMBLY
+#undef HAVE_MUTEX_PTHREADS
+#undef HAVE_MUTEX_RELIANTUNIX_INITSPIN
+#undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
+#undef HAVE_MUTEX_SEMA_INIT
+#undef HAVE_MUTEX_SGI_INIT_LOCK
+#undef HAVE_MUTEX_SOLARIS_LOCK_TRY
+#undef HAVE_MUTEX_SOLARIS_LWP
+#undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
+#undef HAVE_MUTEX_THREADS
+#undef HAVE_MUTEX_UI_THREADS
+#undef HAVE_MUTEX_UTS_CC_ASSEMBLY
+#undef HAVE_MUTEX_VMS
+#undef HAVE_MUTEX_VXWORKS
+#undef HAVE_MUTEX_WIN16
+#undef HAVE_MUTEX_WIN32
+#undef HAVE_MUTEX_X86_GCC_ASSEMBLY
+
+/* Define if building on QNX. */
+#undef HAVE_QNX
+
+/* Define if building RPC client/server. */
+#undef HAVE_RPC
+
+/* Define if your sprintf returns a pointer, not a length. */
+#undef SPRINTF_RET_CHARPNT
+
+@BOTTOM@
+
+/*
+ * Big-file configuration.
+ */
+#ifdef HAVE_FILE_OFFSET_BITS
+#define _FILE_OFFSET_BITS 64
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_GETOPT
+#define getopt __db_Cgetopt
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
diff --git a/bdb/dist/aclocal/mutex.m4 b/bdb/dist/aclocal/mutex.m4
new file mode 100644
index 00000000000..5f16ee0e114
--- /dev/null
+++ b/bdb/dist/aclocal/mutex.m4
@@ -0,0 +1,395 @@
+dnl $Id: mutex.m4,v 11.20 2000/12/20 22:16:56 bostic Exp $
+
+dnl Figure out mutexes for this compiler/architecture.
+AC_DEFUN(AM_DEFINE_MUTEXES, [
+
+AC_CACHE_CHECK([for mutexes], db_cv_mutex, [dnl
+db_cv_mutex=no
+
+orig_libs=$LIBS
+
+dnl User-specified POSIX mutexes.
+dnl
+dnl Assume that -lpthread exists when the user specifies POSIX mutexes. (I
+dnl only expect this option to be used on Solaris, which has -lpthread.)
+if test "$db_cv_posixmutexes" = yes; then
+ db_cv_mutex="posix_only"
+fi
+
+dnl User-specified UI mutexes.
+dnl
+dnl Assume that -lthread exists when the user specifies UI mutexes. (I only
+dnl expect this option to be used on Solaris, which has -lthread.)
+if test "$db_cv_uimutexes" = yes; then
+ db_cv_mutex="ui_only"
+fi
+
+dnl LWP threads: _lwp_XXX
+dnl
+dnl Test for LWP threads before testing for UI/POSIX threads; we prefer them
+dnl on Solaris, for two reasons. First, the Solaris C library has UI/POSIX
+dnl interface stubs, but they're broken: configuring them for inter-process
+dnl mutexes doesn't return an error, but it doesn't work either. Second,
+dnl there's a bug in SunOS 5.7 where applications get pwrite, not pwrite64,
+dnl if they load the C library before the appropriate threads library, e.g.,
+dnl tclsh using dlopen to load the DB library. Anyway, by using LWP threads
+dnl we avoid answering lots of user questions, not to mention the bugs.
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([
+#include <synch.h>
+main(){
+ static lwp_mutex_t mi = SHAREDMUTEX;
+ static lwp_cond_t ci = SHAREDCV;
+ lwp_mutex_t mutex = mi;
+ lwp_cond_t cond = ci;
+ exit (
+ _lwp_mutex_lock(&mutex) ||
+ _lwp_mutex_unlock(&mutex));
+}], [db_cv_mutex="Solaris/lwp"])
+fi
+
+dnl UI threads: thr_XXX
+dnl
+dnl Try with and without the -lthread library.
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then
+LIBS="-lthread $LIBS"
+AC_TRY_RUN([
+#include <thread.h>
+#include <synch.h>
+main(){
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+}], [db_cv_mutex="UI/threads/library"])
+LIBS="$orig_libs"
+fi
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then
+AC_TRY_RUN([
+#include <thread.h>
+#include <synch.h>
+main(){
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+}], [db_cv_mutex="UI/threads"])
+fi
+if test "$db_cv_mutex" = "ui_only"; then
+ AC_MSG_ERROR([unable to find UI mutex interfaces])
+fi
+
+dnl POSIX.1 pthreads: pthread_XXX
+dnl
+dnl Try with and without the -lpthread library.
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then
+AC_TRY_RUN([
+#include <pthread.h>
+main(){
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}], [db_cv_mutex="POSIX/pthreads"])
+fi
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then
+LIBS="-lpthread $LIBS"
+AC_TRY_RUN([
+#include <pthread.h>
+main(){
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}], [db_cv_mutex="POSIX/pthreads/library"])
+LIBS="$orig_libs"
+fi
+if test "$db_cv_mutex" = "posix_only"; then
+ AC_MSG_ERROR([unable to find POSIX mutex interfaces])
+fi
+
+dnl msemaphore: HPPA only
+dnl Try HPPA before general msem test, it needs special alignment.
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([
+#include <sys/mman.h>
+main(){
+#if defined(__hppa)
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+#else
+ exit(1);
+#endif
+}], [db_cv_mutex="HP/msem_init"])
+fi
+
+dnl msemaphore: AIX, OSF/1
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([
+#include <sys/types.h>
+#include <sys/mman.h>;
+main(){
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+}], [db_cv_mutex="UNIX/msem_init"])
+fi
+
+dnl ReliantUNIX
+if test "$db_cv_mutex" = no; then
+LIBS="$LIBS -lmproc"
+AC_TRY_LINK([#include <ulocks.h>],
+[typedef spinlock_t tsl_t;
+spinlock_t x; initspin(&x, 1); cspinlock(&x); spinunlock(&x);],
+[db_cv_mutex="ReliantUNIX/initspin"])
+LIBS="$orig_libs"
+fi
+
+dnl SCO: UnixWare has threads in libthread, but OpenServer doesn't.
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([
+main(){
+#if defined(__USLC__)
+ exit(0);
+#endif
+ exit(1);
+}], [db_cv_mutex="SCO/x86/cc-assembly"])
+fi
+
+dnl abilock_t: SGI
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([#include <abi_mutex.h>],
+[typedef abilock_t tsl_t;
+abilock_t x; init_lock(&x); acquire_lock(&x); release_lock(&x);],
+[db_cv_mutex="SGI/init_lock"])
+fi
+
+dnl sema_t: Solaris
+dnl The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever
+dnl turn this test on, unless we find some other platform that uses the old
+dnl POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.)
+if test "$db_cv_mutex" = DOESNT_WORK; then
+AC_TRY_LINK([#include <synch.h>],
+[typedef sema_t tsl_t;
+ sema_t x;
+ sema_init(&x, 1, USYNC_PROCESS, NULL); sema_wait(&x); sema_post(&x);],
+[db_cv_mutex="UNIX/sema_init"])
+fi
+
+dnl _lock_try/_lock_clear: Solaris
+dnl On Solaris systems without Pthread or UI mutex interfaces, DB uses the
+dnl undocumented _lock_try and _lock_clear function calls instead of either the
+dnl sema_trywait(3T) or sema_wait(3T) function calls. This is because of
+dnl problems in those interfaces in some releases of the Solaris C library.
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([#include <sys/machlock.h>],
+[typedef lock_t tsl_t;
+ lock_t x;
+ _lock_try(&x); _lock_clear(&x);],
+[db_cv_mutex="Solaris/_lock_try"])
+fi
+
+dnl _check_lock/_clear_lock: AIX
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([#include <sys/atomic_op.h>],
+[int x; _check_lock(&x,0,1); _clear_lock(&x,0);],
+[db_cv_mutex="AIX/_check_lock"])
+fi
+
+dnl Alpha/gcc: OSF/1
+dnl The alpha/gcc code doesn't work as far as I know. There are
+dnl two versions, and both have problems. See Support Request #1583.
+if test "$db_cv_mutex" = DOESNT_WORK; then
+AC_TRY_RUN([main(){
+#if defined(__alpha)
+#if defined(__GNUC__)
+exit(0);
+#endif
+#endif
+exit(1);}],
+[db_cv_mutex="ALPHA/gcc-assembly"])
+fi
+
+dnl PaRisc/gcc: HP/UX
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__hppa)
+#if defined(__GNUC__)
+exit(0);
+#endif
+#endif
+exit(1);}],
+[db_cv_mutex="HPPA/gcc-assembly"])
+fi
+
+dnl PPC/gcc:
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__powerpc__)
+#if defined(__GNUC__)
+exit(0);
+#endif
+#endif
+exit(1);}],
+[db_cv_mutex="PPC/gcc-assembly"])
+fi
+
+dnl Sparc/gcc: SunOS, Solaris
+dnl The sparc/gcc code doesn't always work; specifically, I've seen assembler
+dnl failures from the stbar instruction on SunOS 4.1.4/sun4c and gcc 2.7.2.2.
+if test "$db_cv_mutex" = DOESNT_WORK; then
+AC_TRY_RUN([main(){
+#if defined(__sparc__)
+#if defined(__GNUC__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}], [db_cv_mutex="Sparc/gcc-assembly"])
+fi
+
+dnl 68K/gcc: SunOS
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if (defined(mc68020) || defined(sun3))
+#if defined(__GNUC__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}], [db_cv_mutex="68K/gcc-assembly"])
+fi
+
+dnl x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(i386) || defined(__i386__)
+#if defined(__GNUC__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}], [db_cv_mutex="x86/gcc-assembly"])
+fi
+
+dnl ia64/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__ia64)
+#if defined(__GNUC__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}], [db_cv_mutex="ia64/gcc-assembly"])
+fi
+
+dnl uts/cc: UTS
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(_UTS)
+ exit(0);
+#endif
+ exit(1);
+}], [db_cv_mutex="UTS/cc-assembly"])
+fi
+])
+
+if test "$db_cv_mutex" = no; then
+ AC_MSG_WARN(
+ [THREAD MUTEXES NOT AVAILABLE FOR THIS COMPILER/ARCHITECTURE.])
+ ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS"
+else
+ AC_DEFINE(HAVE_MUTEX_THREADS)
+fi
+
+case "$db_cv_mutex" in
+68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_68K_GCC_ASSEMBLY);;
+AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_AIX_CHECK_LOCK);;
+ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY);;
+HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_HPPA_MSEM_INIT);;
+HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY);;
+ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_IA64_GCC_ASSEMBLY);;
+POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS);;
+POSIX/pthreads/library) LIBS="-lpthread $LIBS"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS);;
+PPC/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PPC_GCC_ASSEMBLY);;
+ReliantUNIX/initspin) LIBS="$LIBS -lmproc"
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_RELIANTUNIX_INITSPIN);;
+SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY);;
+SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SGI_INIT_LOCK);;
+Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SOLARIS_LOCK_TRY);;
+Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SOLARIS_LWP);;
+Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY);;
+UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_UI_THREADS);;
+UI/threads/library) LIBS="-lthread $LIBS"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_UI_THREADS);;
+UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_MSEM_INIT);;
+UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SEMA_INIT);;
+UTS/cc-assembly) ADDITIONAL_OBJS="$ADDITIONAL_OBJS uts4_cc${o}"
+ AC_DEFINE(HAVE_MUTEX_UTS_CC_ASSEMBLY);;
+x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_X86_GCC_ASSEMBLY);;
+esac
+])dnl
diff --git a/bdb/dist/aclocal/options.m4 b/bdb/dist/aclocal/options.m4
new file mode 100644
index 00000000000..c51a3952419
--- /dev/null
+++ b/bdb/dist/aclocal/options.m4
@@ -0,0 +1,121 @@
+dnl $Id: options.m4,v 11.10 2000/07/07 15:50:39 bostic Exp $
+
+dnl Process user-specified options.
+AC_DEFUN(AM_OPTIONS_SET, [
+
+AC_MSG_CHECKING(if --disable-bigfile option specified)
+AC_ARG_ENABLE(bigfile,
+ [ --disable-bigfile Disable AIX, HP/UX, Solaris big files.],
+ [db_cv_bigfile="yes"], [db_cv_bigfile="no"])
+AC_MSG_RESULT($db_cv_bigfile)
+
+AC_MSG_CHECKING(if --enable-compat185 option specified)
+AC_ARG_ENABLE(compat185,
+ [ --enable-compat185 Build DB 1.85 compatibility API.],
+ [db_cv_compat185="$enable_compat185"], [db_cv_compat185="no"])
+AC_MSG_RESULT($db_cv_compat185)
+
+AC_MSG_CHECKING(if --enable-cxx option specified)
+AC_ARG_ENABLE(cxx,
+ [ --enable-cxx Build C++ API.],
+ [db_cv_cxx="$enable_cxx"], [db_cv_cxx="no"])
+AC_MSG_RESULT($db_cv_cxx)
+
+AC_MSG_CHECKING(if --enable-debug option specified)
+AC_ARG_ENABLE(debug,
+ [ --enable-debug Build a debugging version.],
+ [db_cv_debug="$enable_debug"], [db_cv_debug="no"])
+AC_MSG_RESULT($db_cv_debug)
+
+AC_MSG_CHECKING(if --enable-debug_rop option specified)
+AC_ARG_ENABLE(debug_rop,
+ [ --enable-debug_rop Build a version that logs read operations.],
+ [db_cv_debug_rop="$enable_debug_rop"], [db_cv_debug_rop="no"])
+AC_MSG_RESULT($db_cv_debug_rop)
+
+AC_MSG_CHECKING(if --enable-debug_wop option specified)
+AC_ARG_ENABLE(debug_wop,
+ [ --enable-debug_wop Build a version that logs write operations.],
+ [db_cv_debug_wop="$enable_debug_wop"], [db_cv_debug_wop="no"])
+AC_MSG_RESULT($db_cv_debug_wop)
+
+AC_MSG_CHECKING(if --enable-diagnostic option specified)
+AC_ARG_ENABLE(diagnostic,
+ [ --enable-diagnostic Build a version with run-time diagnostics.],
+ [db_cv_diagnostic="$enable_diagnostic"], [db_cv_diagnostic="no"])
+AC_MSG_RESULT($db_cv_diagnostic)
+
+AC_MSG_CHECKING(if --enable-dump185 option specified)
+AC_ARG_ENABLE(dump185,
+ [ --enable-dump185 Build db_dump185(1) to dump 1.85 databases.],
+ [db_cv_dump185="$enable_dump185"], [db_cv_dump185="no"])
+AC_MSG_RESULT($db_cv_dump185)
+
+AC_MSG_CHECKING(if --enable-dynamic option specified)
+AC_ARG_ENABLE(dynamic,
+ [ --enable-dynamic Build with dynamic libraries.],
+ [db_cv_dynamic="$enable_dynamic"], [db_cv_dynamic="no"])
+AC_MSG_RESULT($db_cv_dynamic)
+
+AC_MSG_CHECKING(if --enable-java option specified)
+AC_ARG_ENABLE(java,
+ [ --enable-java Build Java API.],
+ [db_cv_java="$enable_java"], [db_cv_java="no"])
+AC_MSG_RESULT($db_cv_java)
+
+AC_MSG_CHECKING(if --enable-posixmutexes option specified)
+AC_ARG_ENABLE(posixmutexes,
+ [ --enable-posixmutexes Force use of POSIX standard mutexes.],
+ [db_cv_posixmutexes="$enable_posixmutexes"], [db_cv_posixmutexes="no"])
+AC_MSG_RESULT($db_cv_posixmutexes)
+
+AC_MSG_CHECKING(if --enable-rpc option specified)
+AC_ARG_ENABLE(rpc,
+ [ --enable-rpc Build RPC client/server.],
+ [db_cv_rpc="$enable_rpc"], [db_cv_rpc="no"])
+AC_MSG_RESULT($db_cv_rpc)
+
+dnl --enable-shared is an alias for --enable-dynamic. We support it for
+dnl compatibility with other applications, e.g., Tcl.
+AC_MSG_CHECKING(if --enable-shared option specified)
+AC_ARG_ENABLE(shared,
+ [ --enable-shared Build with dynamic libraries.],
+ [db_cv_shared="$enable_shared"], [db_cv_shared="no"])
+AC_MSG_RESULT($db_cv_shared)
+if test "$db_cv_shared" != "no"; then
+ db_cv_dynamic="yes"
+fi
+
+AC_MSG_CHECKING(if --enable-tcl option specified)
+AC_ARG_ENABLE(tcl,
+ [ --enable-tcl Build Tcl API.],
+ [db_cv_tcl="$enable_tcl"], [db_cv_tcl="no"])
+AC_MSG_RESULT($db_cv_tcl)
+
+AC_MSG_CHECKING(if --enable-test option specified)
+AC_ARG_ENABLE(test,
+ [ --enable-test Configure to run the test suite.],
+ [db_cv_test="$enable_test"], [db_cv_test="no"])
+AC_MSG_RESULT($db_cv_test)
+
+AC_MSG_CHECKING(if --enable-uimutexes option specified)
+AC_ARG_ENABLE(uimutexes,
+ [ --enable-uimutexes Force use of Unix International mutexes.],
+ [db_cv_uimutexes="$enable_uimutexes"], [db_cv_uimutexes="no"])
+AC_MSG_RESULT($db_cv_uimutexes)
+
+AC_MSG_CHECKING(if --enable-umrw option specified)
+AC_ARG_ENABLE(umrw,
+ [ --enable-umrw Mask harmless uninitialized memory read/writes.],
+ [db_cv_umrw="$enable_umrw"], [db_cv_umrw="no"])
+AC_MSG_RESULT($db_cv_umrw)
+
+AC_MSG_CHECKING([if --with-tcl option specified])
+AC_ARG_WITH(tcl,
+ [ --with-tcl=DIR Directory location of tclConfig.sh.],
+ with_tclconfig=${withval}, with_tclconfig="no")
+AC_MSG_RESULT($with_tclconfig)
+if test "$with_tclconfig" != "no"; then
+ db_cv_tcl="yes"
+fi
+])dnl
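Each AC_ARG_ENABLE above maps a configure switch onto a db_cv_* shell variable that later macros consult. A hedged example of an invocation exercising a few of the options (the Tcl path is hypothetical):

    ./configure --enable-cxx --enable-dynamic --with-tcl=/usr/local/lib
    # Among the resulting checks, configure reports:
    #   checking if --enable-cxx option specified... yes
    #   checking if --with-tcl option specified... /usr/local/lib
    # and, per the final test in the macro, --with-tcl also sets db_cv_tcl=yes.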
diff --git a/bdb/dist/aclocal/programs.m4 b/bdb/dist/aclocal/programs.m4
new file mode 100644
index 00000000000..9ec04f4d8cd
--- /dev/null
+++ b/bdb/dist/aclocal/programs.m4
@@ -0,0 +1,48 @@
+dnl $Id: programs.m4,v 11.11 2000/03/30 21:20:50 bostic Exp $
+
+dnl Check for programs used in building/installation.
+AC_DEFUN(AM_PROGRAMS_SET, [
+
+AC_PATH_PROG(db_cv_path_ar, ar, missing_ar)
+if test "$db_cv_path_ar" = missing_ar; then
+ AC_MSG_ERROR([No ar utility found.])
+fi
+AC_PATH_PROG(db_cv_path_chmod, chmod, missing_chmod)
+if test "$db_cv_path_chmod" = missing_chmod; then
+ AC_MSG_ERROR([No chmod utility found.])
+fi
+AC_PATH_PROG(db_cv_path_cp, cp, missing_cp)
+if test "$db_cv_path_cp" = missing_cp; then
+ AC_MSG_ERROR([No cp utility found.])
+fi
+AC_PATH_PROG(db_cv_path_ln, ln, missing_ln)
+if test "$db_cv_path_ln" = missing_ln; then
+ AC_MSG_ERROR([No ln utility found.])
+fi
+AC_PATH_PROG(db_cv_path_mkdir, mkdir, missing_mkdir)
+if test "$db_cv_path_mkdir" = missing_mkdir; then
+ AC_MSG_ERROR([No mkdir utility found.])
+fi
+AC_PATH_PROG(db_cv_path_ranlib, ranlib, missing_ranlib)
+AC_PATH_PROG(db_cv_path_rm, rm, missing_rm)
+if test "$db_cv_path_rm" = missing_rm; then
+ AC_MSG_ERROR([No rm utility found.])
+fi
+AC_PATH_PROG(db_cv_path_sh, sh, missing_sh)
+if test "$db_cv_path_sh" = missing_sh; then
+ AC_MSG_ERROR([No sh utility found.])
+fi
+AC_PATH_PROG(db_cv_path_strip, strip, missing_strip)
+if test "$db_cv_path_strip" = missing_strip; then
+ AC_MSG_ERROR([No strip utility found.])
+fi
+
+dnl Check for programs used in testing.
+if test "$db_cv_test" = "yes"; then
+ AC_PATH_PROG(db_cv_path_kill, kill, missing_kill)
+ if test "$db_cv_path_kill" = missing_kill; then
+ AC_MSG_ERROR([No kill utility found.])
+ fi
+fi
+
+])dnl
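AM_PROGRAMS_SET resolves each build utility to a full path and aborts configuration if one is missing (only ranlib is allowed to be absent). A rough shell equivalent of what the checks amount to, as a sketch:

    # Sketch only: AC_PATH_PROG additionally caches and substitutes the path.
    for prog in ar chmod cp ln mkdir rm sh strip; do
        command -v "$prog" >/dev/null 2>&1 || {
            echo "No $prog utility found." >&2
            exit 1
        }
    done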
diff --git a/bdb/dist/aclocal/tcl.m4 b/bdb/dist/aclocal/tcl.m4
new file mode 100644
index 00000000000..3d0aec2e8ff
--- /dev/null
+++ b/bdb/dist/aclocal/tcl.m4
@@ -0,0 +1,126 @@
+dnl $Id: tcl.m4,v 11.5 2000/06/27 13:21:28 bostic Exp $
+
+dnl The SC_* macros in this file are from the unix/tcl.m4 files in the Tcl
+dnl 8.3.0 distribution, with some minor changes. For this reason, license
+dnl terms for the Berkeley DB distribution dist/aclocal/tcl.m4 file are as
+dnl follows (copied from the license.terms file in the Tcl 8.3 distribution):
+dnl
+dnl This software is copyrighted by the Regents of the University of
+dnl California, Sun Microsystems, Inc., Scriptics Corporation,
+dnl and other parties. The following terms apply to all files associated
+dnl with the software unless explicitly disclaimed in individual files.
+dnl
+dnl The authors hereby grant permission to use, copy, modify, distribute,
+dnl and license this software and its documentation for any purpose, provided
+dnl that existing copyright notices are retained in all copies and that this
+dnl notice is included verbatim in any distributions. No written agreement,
+dnl license, or royalty fee is required for any of the authorized uses.
+dnl Modifications to this software may be copyrighted by their authors
+dnl and need not follow the licensing terms described here, provided that
+dnl the new terms are clearly indicated on the first page of each file where
+dnl they apply.
+dnl
+dnl IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
+dnl FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+dnl ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
+dnl DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
+dnl POSSIBILITY OF SUCH DAMAGE.
+dnl
+dnl THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+dnl INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+dnl FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+dnl IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
+dnl NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+dnl MODIFICATIONS.
+dnl
+dnl GOVERNMENT USE: If you are acquiring this software on behalf of the
+dnl U.S. government, the Government shall have only "Restricted Rights"
+dnl in the software and related documentation as defined in the Federal
+dnl Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+dnl are acquiring the software on behalf of the Department of Defense, the
+dnl software shall be classified as "Commercial Computer Software" and the
+dnl Government shall have only "Restricted Rights" as defined in Clause
+dnl 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
+dnl authors grant the U.S. Government and others acting in its behalf
+dnl permission to use and distribute the software in accordance with the
+dnl terms specified in this license.
+
+AC_DEFUN(SC_PATH_TCLCONFIG, [
+ AC_CACHE_VAL(ac_cv_c_tclconfig,[
+
+ # First check to see if --with-tclconfig was specified.
+ if test "${with_tclconfig}" != no; then
+ if test -f "${with_tclconfig}/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)`
+ else
+ AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh])
+ fi
+ fi
+
+ # check in a few common install locations
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ for i in `ls -d /usr/local/lib 2>/dev/null` ; do
+ if test -f "$i/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd $i; pwd)`
+ break
+ fi
+ done
+ fi
+
+ ])
+
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ TCL_BIN_DIR="# no Tcl configs found"
+ AC_MSG_ERROR(can't find Tcl configuration definitions)
+ else
+ TCL_BIN_DIR=${ac_cv_c_tclconfig}
+ fi
+])
+
+AC_DEFUN(SC_LOAD_TCLCONFIG, [
+ AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh])
+
+ if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then
+ AC_MSG_RESULT([loading])
+ . $TCL_BIN_DIR/tclConfig.sh
+ else
+ AC_MSG_RESULT([file not found])
+ fi
+
+ #
+ # The eval is required to do the TCL_DBGX substitution in the
+ # TCL_LIB_FILE variable
+ #
+ eval TCL_LIB_FILE="${TCL_LIB_FILE}"
+ eval TCL_LIB_FLAG="${TCL_LIB_FLAG}"
+ eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\""
+
+ AC_SUBST(TCL_BIN_DIR)
+ AC_SUBST(TCL_SRC_DIR)
+ AC_SUBST(TCL_LIB_FILE)
+
+ AC_SUBST(TCL_TCLSH)
+ TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}"
+])
+
+dnl Optional Tcl API.
+AC_DEFUN(AM_TCL_LOAD, [
+if test "$db_cv_tcl" != no; then
+ if test "$db_cv_dynamic" != "yes"; then
+ AC_MSG_ERROR([--with-tcl requires --enable-dynamic])
+ fi
+
+ AC_SUBST(TCFLAGS)
+
+ SC_PATH_TCLCONFIG
+ SC_LOAD_TCLCONFIG
+
+ if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then
+ TCFLAGS="-I$TCL_PREFIX/include"
+ fi
+
+ LIBS="$LIBS $TCL_LIB_SPEC $TCL_LIBS"
+
+ ADDITIONAL_LIBS="$ADDITIONAL_LIBS \$(libtso_target)"
+ DEFAULT_INSTALL="${DEFAULT_INSTALL} install_tcl"
+fi])
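The evals in SC_LOAD_TCLCONFIG are needed because tclConfig.sh stores values such as TCL_LIB_FILE with a literal, unexpanded ${TCL_DBGX} inside them; a second round of shell expansion substitutes the debug suffix. A small sketch with made-up values:

    TCL_DBGX=g
    TCL_LIB_FILE='libtcl8.3${TCL_DBGX}.so'    # as read from tclConfig.sh (illustrative)
    eval TCL_LIB_FILE="${TCL_LIB_FILE}"
    echo "$TCL_LIB_FILE"                      # -> libtcl8.3g.so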
diff --git a/bdb/dist/aclocal/types.m4 b/bdb/dist/aclocal/types.m4
new file mode 100644
index 00000000000..a9a03ab6d87
--- /dev/null
+++ b/bdb/dist/aclocal/types.m4
@@ -0,0 +1,139 @@
+dnl $Id: types.m4,v 11.4 1999/12/04 19:18:28 bostic Exp $
+
+dnl Check for the standard shorthand types.
+AC_DEFUN(AM_SHORTHAND_TYPES, [dnl
+
+AC_SUBST(ssize_t_decl)
+AC_CACHE_CHECK([for ssize_t], db_cv_ssize_t, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], ssize_t foo;,
+ [db_cv_ssize_t=yes], [db_cv_ssize_t=no])])
+if test "$db_cv_ssize_t" = no; then
+ ssize_t_decl="typedef int ssize_t;"
+fi
+
+AC_SUBST(u_char_decl)
+AC_CACHE_CHECK([for u_char], db_cv_uchar, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], u_char foo;,
+ [db_cv_uchar=yes], [db_cv_uchar=no])])
+if test "$db_cv_uchar" = no; then
+ u_char_decl="typedef unsigned char u_char;"
+fi
+
+AC_SUBST(u_short_decl)
+AC_CACHE_CHECK([for u_short], db_cv_ushort, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], u_short foo;,
+ [db_cv_ushort=yes], [db_cv_ushort=no])])
+if test "$db_cv_ushort" = no; then
+ u_short_decl="typedef unsigned short u_short;"
+fi
+
+AC_SUBST(u_int_decl)
+AC_CACHE_CHECK([for u_int], db_cv_uint, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], u_int foo;,
+ [db_cv_uint=yes], [db_cv_uint=no])])
+if test "$db_cv_uint" = no; then
+ u_int_decl="typedef unsigned int u_int;"
+fi
+
+AC_SUBST(u_long_decl)
+AC_CACHE_CHECK([for u_long], db_cv_ulong, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], u_long foo;,
+ [db_cv_ulong=yes], [db_cv_ulong=no])])
+if test "$db_cv_ulong" = no; then
+ u_long_decl="typedef unsigned long u_long;"
+fi
+
+dnl DB/Vi use specific integer sizes.
+AC_SUBST(u_int8_decl)
+AC_CACHE_CHECK([for u_int8_t], db_cv_uint8, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], u_int8_t foo;,
+ [db_cv_uint8=yes],
+ AC_TRY_RUN([main(){exit(sizeof(unsigned char) != 1);}],
+ [db_cv_uint8="unsigned char"], [db_cv_uint8=no]))])
+if test "$db_cv_uint8" = no; then
+ AC_MSG_ERROR(No unsigned 8-bit integral type.)
+fi
+if test "$db_cv_uint8" != yes; then
+ u_int8_decl="typedef $db_cv_uint8 u_int8_t;"
+fi
+
+AC_SUBST(u_int16_decl)
+AC_CACHE_CHECK([for u_int16_t], db_cv_uint16, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], u_int16_t foo;,
+ [db_cv_uint16=yes],
+AC_TRY_RUN([main(){exit(sizeof(unsigned short) != 2);}],
+ [db_cv_uint16="unsigned short"],
+AC_TRY_RUN([main(){exit(sizeof(unsigned int) != 2);}],
+ [db_cv_uint16="unsigned int"], [db_cv_uint16=no])))])
+if test "$db_cv_uint16" = no; then
+ AC_MSG_ERROR([No unsigned 16-bit integral type.])
+fi
+if test "$db_cv_uint16" != yes; then
+ u_int16_decl="typedef $db_cv_uint16 u_int16_t;"
+fi
+
+AC_SUBST(int16_decl)
+AC_CACHE_CHECK([for int16_t], db_cv_int16, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], int16_t foo;,
+ [db_cv_int16=yes],
+AC_TRY_RUN([main(){exit(sizeof(short) != 2);}],
+ [db_cv_int16="short"],
+AC_TRY_RUN([main(){exit(sizeof(int) != 2);}],
+ [db_cv_int16="int"], [db_cv_int16=no])))])
+if test "$db_cv_int16" = no; then
+ AC_MSG_ERROR([No signed 16-bit integral type.])
+fi
+if test "$db_cv_int16" != yes; then
+ int16_decl="typedef $db_cv_int16 int16_t;"
+fi
+
+AC_SUBST(u_int32_decl)
+AC_CACHE_CHECK([for u_int32_t], db_cv_uint32, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], u_int32_t foo;,
+ [db_cv_uint32=yes],
+AC_TRY_RUN([main(){exit(sizeof(unsigned int) != 4);}],
+ [db_cv_uint32="unsigned int"],
+AC_TRY_RUN([main(){exit(sizeof(unsigned long) != 4);}],
+ [db_cv_uint32="unsigned long"], [db_cv_uint32=no])))])
+if test "$db_cv_uint32" = no; then
+ AC_MSG_ERROR([No unsigned 32-bit integral type.])
+fi
+if test "$db_cv_uint32" != yes; then
+ u_int32_decl="typedef $db_cv_uint32 u_int32_t;"
+fi
+
+AC_SUBST(int32_decl)
+AC_CACHE_CHECK([for int32_t], db_cv_int32, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], int32_t foo;,
+ [db_cv_int32=yes],
+AC_TRY_RUN([main(){exit(sizeof(int) != 4);}],
+ [db_cv_int32="int"],
+AC_TRY_RUN([main(){exit(sizeof(long) != 4);}],
+ [db_cv_int32="long"], [db_cv_int32=no])))])
+if test "$db_cv_int32" = no; then
+ AC_MSG_ERROR([No signed 32-bit integral type.])
+fi
+if test "$db_cv_int32" != yes; then
+ int32_decl="typedef $db_cv_int32 int32_t;"
+fi
+
+dnl Figure out largest integral type.
+AC_SUBST(db_align_t_decl)
+AC_CACHE_CHECK([for largest integral type], db_cv_align_t, [dnl
+AC_TRY_COMPILE([#include <sys/types.h>], long long foo;,
+ [db_cv_align_t="unsigned long long"], [db_cv_align_t="unsigned long"])])
+db_align_t_decl="typedef $db_cv_align_t db_align_t;"
+
+dnl Figure out integral type the same size as a pointer.
+AC_SUBST(db_alignp_t_decl)
+AC_CACHE_CHECK([for integral type equal to pointer size], db_cv_alignp_t, [dnl
+db_cv_alignp_t=$db_cv_align_t
+AC_TRY_RUN([main(){exit(sizeof(unsigned int) != sizeof(char *));}],
+ [db_cv_alignp_t="unsigned int"])
+AC_TRY_RUN([main(){exit(sizeof(unsigned long) != sizeof(char *));}],
+ [db_cv_alignp_t="unsigned long"])
+AC_TRY_RUN([main(){exit(sizeof(unsigned long long) != sizeof(char *));}],
+ [db_cv_alignp_t="unsigned long long"])])
+db_alignp_t_decl="typedef $db_cv_alignp_t db_alignp_t;"
+
+])dnl
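Each AC_TRY_RUN probe above compiles and runs a one-line program whose exit status says whether a candidate type has the required width. The same probe written out by hand, as a stand-alone sketch (cc is assumed to be on the PATH):

    printf 'int main() { return sizeof(unsigned int) != 4; }\n' > conftest.c
    cc conftest.c -o conftest &&
        ./conftest && echo 'u_int32_t could be: unsigned int'
    rm -f conftest.c conftest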
diff --git a/bdb/dist/build/chk.def b/bdb/dist/build/chk.def
new file mode 100755
index 00000000000..abd12b83908
--- /dev/null
+++ b/bdb/dist/build/chk.def
@@ -0,0 +1,50 @@
+#!/bin/sh -
+#
+# $Id: chk.def,v 1.4 2000/12/12 18:20:59 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any interfaces
+# to the Win32 libdb.def file.
+
+# Run from the top-level directory.
+[ -f db_config.h ] && cd ..
+
+f=build_win32/libdb.def
+t1=/tmp/__1
+t2=/tmp/__2
+
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/db_xa_switch/d' \
+ -e '/^__/d' -e '/^;/d' |
+ sort > ${t1}
+
+egrep __P include/db.src |
+ sed '/^[a-z]/!d' |
+ awk '{print $2}' |
+ sed 's/^\*//' |
+ sed '/^__/d' | sort > ${t2}
+
+if cmp -s ${t1} ${t2} ; then
+ :
+else
+ echo "<<< libdb.def"
+ echo ">>> DB include files"
+ diff ${t1} ${t2}
+fi
+
+# Check to make sure we don't have any extras in the libdb.def file.
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/__db_global_values/d' > ${t1}
+
+for i in `cat ${t1}`; do
+ if egrep $i */*.c > /dev/null; then
+ :
+ else
+ echo "$f: $i not found in DB sources"
+ fi
+done
+
+rm -f ${t1} ${t2}
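chk.def and the other chk.* scripts below are consistency checks meant to be run from the top of an extracted source tree; the test at the top lets them also be started from a build directory containing db_config.h. A usage sketch with a hypothetical tree name:

    cd db-X.Y.Z                     # hypothetical top-level source directory
    sh dist/build/chk.def           # silence means libdb.def and db.src agree
    sh dist/build/chk.define        # lists #defines not used by any source file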
diff --git a/bdb/dist/build/chk.define b/bdb/dist/build/chk.define
new file mode 100755
index 00000000000..9e2aa0db498
--- /dev/null
+++ b/bdb/dist/build/chk.define
@@ -0,0 +1,55 @@
+#!/bin/sh -
+#
+# $Id: chk.define,v 1.8 2000/12/12 18:20:59 bostic Exp $
+#
+# Check to make sure that all #defines are actually used.
+
+# Run from the top-level directory.
+[ -f db_config.h ] && cd ..
+
+t1=/tmp/__1
+
+egrep '^#define' include/*.h |
+ sed -e '/db_185.h/d' -e '/xa.h/d' |
+ awk '{print $2}' |
+ sed -e '/^B_DELETE/d' \
+ -e '/^B_MAX/d' \
+ -e '/^CIRCLEQ/d' \
+ -e '/^DB_RO_ACCESS/d' \
+ -e '/^DEFINE_DB_CLASS/d' \
+ -e '/^LIST/d' \
+ -e '/^LOG_OP/d' \
+ -e '/^MINFILL/d' \
+ -e '/^MUTEX_FIELDS/d' \
+ -e '/^NCACHED2X/d' \
+ -e '/^NCACHED30/d' \
+ -e '/^PAIR_MASK/d' \
+ -e '/^POWER_OF_TWO/d' \
+ -e '/^P_16_COPY/d' \
+ -e '/^P_32_COPY/d' \
+ -e '/^P_32_SWAP/d' \
+ -e '/^SH_CIRCLEQ/d' \
+ -e '/^SH_LIST/d' \
+ -e '/^SH_TAILQ/d' \
+ -e '/^TAILQ/d' \
+ -e '/UNUSED/d' \
+ -e '/^WRAPPED_CLASS/d' \
+ -e '/^XA_$/d' \
+ -e '/^_DB_SERVER_H_RPCGEN/d' \
+ -e '/_AUTO_H$/d' \
+ -e '/_H_$/d' \
+ -e '/ext_h_/d' \
+ -e '/^i_/d' \
+ -e 's/(.*//' | sort > ${t1}
+
+for i in `cat ${t1}`; do
+ if egrep -w $i */*.c */*.cpp > /dev/null; then
+ :;
+ else
+ f=`egrep -l $i include/*.h |
+ sed 's;include/;;' | tr -s "[:space:]" " "`
+ echo "$i: $f"
+ fi
+done | sort +1
+
+rm -f ${t1}
diff --git a/bdb/dist/build/chk.offt b/bdb/dist/build/chk.offt
new file mode 100755
index 00000000000..ea7f0b914fc
--- /dev/null
+++ b/bdb/dist/build/chk.offt
@@ -0,0 +1,19 @@
+#!/bin/sh -
+#
+# $Id: chk.offt,v 1.3 2000/12/12 18:20:59 bostic Exp $
+#
+# Make sure that no off_t's have snuck into the release.
+
+# Run from the top-level directory.
+[ -f db_config.h ] && cd ..
+
+egrep -w off_t */*.[ch] |
+sed -e "/#undef off_t/d" \
+ -e "/mp_fopen.c:.*can't use off_t's here/d" \
+ -e "/mp_fopen.c:.*size or type off_t's or/d" \
+ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
+ -e "/os_map.c:.*(off_t)0))/d" \
+ -e "/os_rw.c:.*(off_t)db_iop->pgno/d" \
+ -e "/os_seek.c:.*off_t offset;/d" \
+ -e "/os_seek.c:.*offset = /d" \
+ -e "/test_vxworks\/vx_mutex.c:/d"
diff --git a/bdb/dist/build/chk.srcfiles b/bdb/dist/build/chk.srcfiles
new file mode 100755
index 00000000000..cfd6f955ea7
--- /dev/null
+++ b/bdb/dist/build/chk.srcfiles
@@ -0,0 +1,29 @@
+#!/bin/sh -
+#
+# $Id: chk.srcfiles,v 1.3 2000/12/12 18:20:59 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any files to the list
+# of source files Win32 uses to build its dsp files.
+
+# Run from the top-level directory.
+[ -f db_config.h ] && cd ..
+
+f=dist/srcfiles.in
+t1=/tmp/__1
+t2=/tmp/__2
+
+sed -e '/^[ #]/d' \
+ -e '/^$/d' < $f |
+ awk '{print $1}' > ${t1}
+find . -type f |
+ sed -e 's/^\.\///' |
+ egrep '\.c$|\.cpp$|\.def$|\.rc$' |
+ sed -e '/dist\/build\/chk.def/d' \
+ -e '/perl.DB_File\/version.c/d' |
+ sort > ${t2}
+
+cmp ${t1} ${t2} > /dev/null ||
+(echo "<<< srcfiles.in >>> existing files" && \
+ diff ${t1} ${t2} | tee /tmp/_f)
+
+rm -f ${t1} ${t2}
diff --git a/bdb/dist/build/chk.tags b/bdb/dist/build/chk.tags
new file mode 100755
index 00000000000..fa67927e731
--- /dev/null
+++ b/bdb/dist/build/chk.tags
@@ -0,0 +1,39 @@
+#!/bin/sh -
+#
+# $Id: chk.tags,v 1.5 2000/12/22 15:35:32 bostic Exp $
+#
+# Check to make sure we don't need any more symbolic links to tags files.
+
+# Run from the top-level directory.
+[ -f db_config.h ] && cd ..
+
+t1=/tmp/__1
+t2=/tmp/__2
+
+(ls -F | egrep / | sort |
+ sed -e 's/\///' \
+ -e '/^CVS$/d' \
+ -e '/^build_vxworks$/d' \
+ -e '/^build_win32$/d' \
+ -e '/^docs$/d' \
+ -e '/^docs.text$/d' \
+ -e '/^docs_src$/d' \
+ -e '/^docs_text$/d' \
+ -e '/^java$/d' \
+ -e '/^perl.BerkeleyDB$/d' \
+ -e '/^perl.DB_File$/d' \
+ -e '/^test$/d' \
+ -e '/^test_purify$/d' \
+ -e '/^test_thread$/d' \
+ -e '/^test_vxworks$/d') > ${t1}
+
+(ls */tags | sed 's/\/tags$//' | sort) > ${t2}
+if diff ${t1} ${t2} > /dev/null; then
+ :
+else
+ echo "<<< source tree"
+ echo ">>> tags files"
+ diff ${t1} ${t2}
+fi
+
+rm -f ${t1} ${t2}
diff --git a/bdb/dist/build/script b/bdb/dist/build/script
new file mode 100644
index 00000000000..8eef3099f08
--- /dev/null
+++ b/bdb/dist/build/script
@@ -0,0 +1,94 @@
+#!/bin/sh -
+# $Id: script,v 1.21 2001/01/19 18:13:16 bostic Exp $
+#
+# Build the distribution archives.
+#
+# A set of commands intended to be cut and pasted into a csh window.
+
+# Development tree, release home.
+setenv D /a/db
+
+# Update the release number.
+cd $D/dist
+vi RELEASE
+setenv VERSION \
+`sh -c '. RELEASE; echo $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH'`
+echo "Version: $VERSION"
+
+# Make sure the source tree is up-to-date, generate new support files, and
+# commit anything that's changed.
+cd $D && cvs -q update
+cd $D/dist && sh s_all
+cd $D && cvs -q commit
+
+# Build the documentation.
+cd $D/docs_src && make clean
+cd $D/docs_src && make
+cd $D/docs_src && make && make check
+
+# Copy a development tree into a release tree.
+setenv R /a/db-$VERSION
+rm -rf $R && mkdir -p $R
+cd $D && tar cf - \
+`cvs -q status | sed -n -e "/Repository/s;.*/CVSROOT/db/;;" -e "s/,v//p"` | \
+(cd $R && tar xpf -)
+
+# Fix symbolic links and permissions.
+cd $R/dist && sh s_perm
+cd $R/dist && sh s_symlink
+
+# Build the documents.
+cd $R/docs_src && make
+
+# Build a version.
+cd $R && rm -rf build_run && mkdir build_run
+cd $R/build_run && ~bostic/bin/dbconf && make >& mklog
+
+# Smoke test.
+./ex_access
+
+# Check the install
+make prefix=`pwd`/BDB install
+
+# Run distribution check scripts
+$R/dist/build/chk.def
+$R/dist/build/chk.define
+$R/dist/build/chk.offt
+$R/dist/build/chk.srcfiles
+$R/dist/build/chk.tags
+
+# Clean up the tree.
+cd $R && rm -rf build_run docs_src
+cd $R && rm -rf test_thread test_purify test_server test_vxworks test/TODO
+cd $R && rm -rf test/upgrade/databases && mkdir test/upgrade/databases
+
+# ACQUIRE ROOT PRIVILEGES
+cd $R && find . -type d | xargs chmod 775
+cd $R && find . -type f | xargs chmod 444
+cd $R/dist && sh s_perm
+chown -R 100.100 $R
+# DISCARD ROOT PRIVILEGES
+
+# Compare this release with the last one.
+set LR=3.1.X
+cd $R/.. && gzcat /a/releases/db-${LR}.tar.gz | tar xf -
+cd $R/../db-${LR} && find . | sort > /tmp/__OLD
+cd $R && find . | sort > /tmp/__NEW
+diff -c /tmp/__OLD /tmp/__NEW
+
+# Create the tar archive release.
+setenv T "$R/../db-$VERSION.tar.gz"
+cd $R/.. && tar cf - db-$VERSION | gzip --best > $T
+chmod 444 $T
+
+# Create the zip archive release.
+#
+# Remove symbolic links to tags files. They're large and we don't want to
+# store real symbolic links in the archive for portability reasons.
+# ACQUIRE ROOT PRIVILEGES
+cd $R && rm -f `find . -type l -name 'tags'`
+# DISCARD ROOT PRIVILEGES
+
+setenv T "$R/../db-$VERSION.zip"
+cd $R/.. && zip -r - db-$VERSION > $T
+chmod 444 $T
diff --git a/bdb/dist/config.guess b/bdb/dist/config.guess
new file mode 100755
index 00000000000..08e8a750ac6
--- /dev/null
+++ b/bdb/dist/config.guess
@@ -0,0 +1,1289 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000
+# Free Software Foundation, Inc.
+
+version='2000-09-05'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Written by Per Bothner <bothner@cygnus.com>.
+# Please send patches to <config-patches@gnu.org>.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# The plan is that this can be called by configure scripts if you
+# don't specify an explicit system type (host/target name).
+#
+# Only a few systems have been added to this list; please add others
+# (but try to keep the structure clean).
+#
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of this system.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit"
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case "$1" in
+ --version | --vers* | -V )
+ echo "$version" ; exit 0 ;;
+ --help | --h* | -h )
+ echo "$usage"; exit 0 ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ exec >&2
+ echo "$me: invalid option $1"
+ echo "$help"
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+# Use $HOST_CC if defined. $CC may point to a cross-compiler
+if test x"$CC_FOR_BUILD" = x; then
+ if test x"$HOST_CC" != x; then
+ CC_FOR_BUILD="$HOST_CC"
+ else
+ if test x"$CC" != x; then
+ CC_FOR_BUILD="$CC"
+ else
+ CC_FOR_BUILD=cc
+ fi
+ fi
+fi
+
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 8/24/94.)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+dummy=dummy-$$
+trap 'rm -f $dummy.c $dummy.o $dummy; exit 1' 1 2 15
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # Netbsd (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ # Determine the machine/vendor (is the vendor relevant).
+ case "${UNAME_MACHINE}" in
+ amiga) machine=m68k-unknown ;;
+ arm32) machine=arm-unknown ;;
+ atari*) machine=m68k-atari ;;
+ sun3*) machine=m68k-sun ;;
+ mac68k) machine=m68k-apple ;;
+ macppc) machine=powerpc-apple ;;
+ hp3[0-9][05]) machine=m68k-hp ;;
+ ibmrt|romp-ibm) machine=romp-ibm ;;
+ *) machine=${UNAME_MACHINE}-unknown ;;
+ esac
+ # The Operating System including object format.
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep __ELF__ >/dev/null
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ # The OS release
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit 0 ;;
+ alpha:OSF1:*:*)
+ if test $UNAME_RELEASE = "V4.0"; then
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ fi
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ cat <<EOF >$dummy.s
+ .data
+\$Lformat:
+ .byte 37,100,45,37,120,10,0 # "%d-%x\n"
+
+ .text
+ .globl main
+ .align 4
+ .ent main
+main:
+ .frame \$30,16,\$26,0
+ ldgp \$29,0(\$27)
+ .prologue 1
+ .long 0x47e03d80 # implver \$0
+ lda \$2,-1
+ .long 0x47e20c21 # amask \$2,\$1
+ lda \$16,\$Lformat
+ mov \$0,\$17
+ not \$1,\$18
+ jsr \$26,printf
+ ldgp \$29,0(\$26)
+ mov 0,\$16
+ jsr \$26,exit
+ .end main
+EOF
+ $CC_FOR_BUILD $dummy.s -o $dummy 2>/dev/null
+ if test "$?" = 0 ; then
+ case `./$dummy` in
+ 0-0)
+ UNAME_MACHINE="alpha"
+ ;;
+ 1-0)
+ UNAME_MACHINE="alphaev5"
+ ;;
+ 1-1)
+ UNAME_MACHINE="alphaev56"
+ ;;
+ 1-101)
+ UNAME_MACHINE="alphapca56"
+ ;;
+ 2-303)
+ UNAME_MACHINE="alphaev6"
+ ;;
+ 2-307)
+ UNAME_MACHINE="alphaev67"
+ ;;
+ esac
+ fi
+ rm -f $dummy.s $dummy
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[VTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ exit 0 ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit 0 ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit 0 ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit 0;;
+ amiga:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit 0 ;;
+ arc64:OpenBSD:*:*)
+ echo mips64el-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ arc:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ hkmips:OpenBSD:*:*)
+ echo mips-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ pmax:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ sgi:OpenBSD:*:*)
+ echo mips-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ wgrisc:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit 0 ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit 0;;
+ SR2?01:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit 0;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit 0 ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit 0 ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ i86pc:SunOS:5.*:*)
+ echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit 0 ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit 0 ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(head -1 /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit 0 ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit 0 ;;
+ atari*:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually everything (everything which is not
+ # "atarist" or "atariste" at least should have a processor
+ # > m68000). The system name ranges from "MiNT" through "FreeMiNT"
+ # to the lowercase version "mint" (or "freemint"). Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible with TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit 0 ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit 0 ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit 0 ;;
+ sun3*:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mac68k:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvme68k:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvme88k:OpenBSD:*:*)
+ echo m88k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit 0 ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit 0 ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit 0 ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit 0 ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit 0 ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy \
+ && ./$dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \
+ && rm $dummy.c $dummy && exit 0
+ rm -f $dummy.c $dummy
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit 0 ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit 0 ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit 0 ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit 0 ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit 0 ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit 0 ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit 0 ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit 0 ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit 0 ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit 0 ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i?86:AIX:*:*)
+ echo i386-ibm-aix
+ exit 0 ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy && ./$dummy && rm $dummy.c $dummy && exit 0
+ rm -f $dummy.c $dummy
+ echo rs6000-ibm-aix3.2.5
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit 0 ;;
+ *:AIX:*:4)
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | head -1 | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -EHl ${IBM_CPU_ID} | grep POWER >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=4.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit 0 ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit 0 ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit 0 ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit 0 ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit 0 ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit 0 ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit 0 ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit 0 ;;
+ 9000/[34678]??:HP-UX:*:*)
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null ) && HP_ARCH=`./$dummy`
+ rm -f $dummy.c $dummy
+ esac
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit 0 ;;
+ 3050*:HI-UX:*:*)
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy && ./$dummy && rm $dummy.c $dummy && exit 0
+ rm -f $dummy.c $dummy
+ echo unknown-hitachi-hiuxwe2
+ exit 0 ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit 0 ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit 0 ;;
+ *9??*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit 0 ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit 0 ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit 0 ;;
+ i?86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit 0 ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit 0 ;;
+ hppa*:OpenBSD:*:*)
+ echo hppa-unknown-openbsd
+ exit 0 ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit 0 ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit 0 ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit 0 ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit 0 ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit 0 ;;
+ CRAY*X-MP:*:*:*)
+ echo xmp-cray-unicos
+ exit 0 ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE}
+ exit 0 ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/
+ exit 0 ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*T3E:*:*:*)
+ echo alpha-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY-2:*:*:*)
+ echo cray2-cray-unicos
+ exit 0 ;;
+ F300:UNIX_System_V:*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "f300-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit 0 ;;
+ F301:UNIX_System_V:*:*)
+ echo f301-fujitsu-uxpv`echo $UNAME_RELEASE | sed 's/ .*//'`
+ exit 0 ;;
+ hp300:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ i?86:BSD/386:*:* | i?86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ *:FreeBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit 0 ;;
+ *:OpenBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-openbsd`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ exit 0 ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit 0 ;;
+ i*:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit 0 ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit 0 ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+ # change UNAME_MACHINE based on the output of uname instead of i386?
+ echo i386-pc-interix
+ exit 0 ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit 0 ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit 0 ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ *:GNU:*:*)
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit 0 ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit 0 ;;
+ *:Linux:*:*)
+
+ # The BFD linker knows what the default object file format is, so
+ # first see if it will tell us. cd to the root directory to prevent
+ # problems with other programs or directories called `ld' in the path.
+ ld_help_string=`cd /; ld --help 2>&1`
+ ld_supported_emulations=`echo $ld_help_string \
+ | sed -ne '/supported emulations:/!d
+ s/[ ][ ]*/ /g
+ s/.*supported emulations: *//
+ s/ .*//
+ p'`
+ case "$ld_supported_emulations" in
+ *ia64)
+ echo "${UNAME_MACHINE}-unknown-linux"
+ exit 0
+ ;;
+ i?86linux)
+ echo "${UNAME_MACHINE}-pc-linux-gnuaout"
+ exit 0
+ ;;
+ elf_i?86)
+ TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
+ ;;
+ i?86coff)
+ echo "${UNAME_MACHINE}-pc-linux-gnucoff"
+ exit 0
+ ;;
+ sparclinux)
+ echo "${UNAME_MACHINE}-unknown-linux-gnuaout"
+ exit 0
+ ;;
+ armlinux)
+ echo "${UNAME_MACHINE}-unknown-linux-gnuaout"
+ exit 0
+ ;;
+ elf32arm*)
+ echo "${UNAME_MACHINE}-unknown-linux-gnuoldld"
+ exit 0
+ ;;
+ armelf_linux*)
+ echo "${UNAME_MACHINE}-unknown-linux-gnu"
+ exit 0
+ ;;
+ m68klinux)
+ echo "${UNAME_MACHINE}-unknown-linux-gnuaout"
+ exit 0
+ ;;
+ elf32ppc | elf32ppclinux)
+ # Determine Lib Version
+ cat >$dummy.c <<EOF
+#include <features.h>
+#if defined(__GLIBC__)
+extern char __libc_version[];
+extern char __libc_release[];
+#endif
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+#if defined(__GLIBC__)
+ printf("%s %s\n", __libc_version, __libc_release);
+#else
+ printf("unkown\n");
+#endif
+ return 0;
+}
+EOF
+ LIBC=""
+ $CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null
+ if test "$?" = 0 ; then
+ ./$dummy | grep 1\.99 > /dev/null
+ if test "$?" = 0 ; then
+ LIBC="libc1"
+ fi
+ fi
+ rm -f $dummy.c $dummy
+ echo powerpc-unknown-linux-gnu${LIBC}
+ exit 0
+ ;;
+ shelf_linux)
+ echo "${UNAME_MACHINE}-unknown-linux-gnu"
+ exit 0
+ ;;
+ esac
+
+ if test "${UNAME_MACHINE}" = "alpha" ; then
+ cat <<EOF >$dummy.s
+ .data
+ \$Lformat:
+ .byte 37,100,45,37,120,10,0 # "%d-%x\n"
+
+ .text
+ .globl main
+ .align 4
+ .ent main
+ main:
+ .frame \$30,16,\$26,0
+ ldgp \$29,0(\$27)
+ .prologue 1
+ .long 0x47e03d80 # implver \$0
+ lda \$2,-1
+ .long 0x47e20c21 # amask \$2,\$1
+ lda \$16,\$Lformat
+ mov \$0,\$17
+ not \$1,\$18
+ jsr \$26,printf
+ ldgp \$29,0(\$26)
+ mov 0,\$16
+ jsr \$26,exit
+ .end main
+EOF
+ LIBC=""
+ $CC_FOR_BUILD $dummy.s -o $dummy 2>/dev/null
+ if test "$?" = 0 ; then
+ case `./$dummy` in
+ 0-0)
+ UNAME_MACHINE="alpha"
+ ;;
+ 1-0)
+ UNAME_MACHINE="alphaev5"
+ ;;
+ 1-1)
+ UNAME_MACHINE="alphaev56"
+ ;;
+ 1-101)
+ UNAME_MACHINE="alphapca56"
+ ;;
+ 2-303)
+ UNAME_MACHINE="alphaev6"
+ ;;
+ 2-307)
+ UNAME_MACHINE="alphaev67"
+ ;;
+ esac
+
+ objdump --private-headers $dummy | \
+ grep ld.so.1 > /dev/null
+ if test "$?" = 0 ; then
+ LIBC="libc1"
+ fi
+ fi
+ rm -f $dummy.s $dummy
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} ; exit 0
+ elif test "${UNAME_MACHINE}" = "mips" ; then
+ cat >$dummy.c <<EOF
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+#ifdef __MIPSEB__
+ printf ("%s-unknown-linux-gnu\n", argv[1]);
+#endif
+#ifdef __MIPSEL__
+ printf ("%sel-unknown-linux-gnu\n", argv[1]);
+#endif
+ return 0;
+}
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null && ./$dummy "${UNAME_MACHINE}" && rm $dummy.c $dummy && exit 0
+ rm -f $dummy.c $dummy
+ elif test "${UNAME_MACHINE}" = "s390"; then
+ echo s390-ibm-linux && exit 0
+ elif test "${UNAME_MACHINE}" = "x86_64"; then
+ echo x86_64-unknown-linux-gnu && exit 0
+ else
+ # Either a pre-BFD a.out linker (linux-gnuoldld)
+ # or one that does not give us useful --help.
+ # GCC wants to distinguish between linux-gnuoldld and linux-gnuaout.
+ # If ld does not provide *any* "supported emulations:"
+ # that means it is gnuoldld.
+ echo "$ld_help_string" | grep >/dev/null 2>&1 "supported emulations:"
+ test $? != 0 && echo "${UNAME_MACHINE}-pc-linux-gnuoldld" && exit 0
+
+ case "${UNAME_MACHINE}" in
+ i?86)
+ VENDOR=pc;
+ ;;
+ *)
+ VENDOR=unknown;
+ ;;
+ esac
+ # Determine whether the default compiler is a.out or elf
+ cat >$dummy.c <<EOF
+#include <features.h>
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+#ifdef __ELF__
+# ifdef __GLIBC__
+# if __GLIBC__ >= 2
+ printf ("%s-${VENDOR}-linux-gnu\n", argv[1]);
+# else
+ printf ("%s-${VENDOR}-linux-gnulibc1\n", argv[1]);
+# endif
+# else
+ printf ("%s-${VENDOR}-linux-gnulibc1\n", argv[1]);
+# endif
+#else
+ printf ("%s-${VENDOR}-linux-gnuaout\n", argv[1]);
+#endif
+ return 0;
+}
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null && ./$dummy "${UNAME_MACHINE}" && rm $dummy.c $dummy && exit 0
+ rm -f $dummy.c $dummy
+ test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0
+ fi ;;
+# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. earlier versions
+# are messed up and put the nodename in both sysname and nodename.
+ i?86:DYNIX/ptx:4*:*)
+ echo i386-sequent-sysv4
+ exit 0 ;;
+ i?86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit 0 ;;
+ i?86:*:4.*:* | i?86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit 0 ;;
+ i?86:*:5:7*)
+ # Fixed at (any) Pentium or better
+ UNAME_MACHINE=i586
+ if [ ${UNAME_SYSTEM} = "UnixWare" ] ; then
+ echo ${UNAME_MACHINE}-sco-sysv${UNAME_RELEASE}uw${UNAME_VERSION}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ i?86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|egrep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|egrep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|egrep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|egrep '^Machine.*Pent ?II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|egrep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit 0 ;;
+ i?86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit 0 ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m always prints 'pc' for DJGPP but says nothing about
+ # the processor, so we play it safe and assume i386.
+ echo i386-pc-msdosdjgpp
+ exit 0 ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit 0 ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit 0 ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit 0 ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit 0 ;;
+ M68*:*:R3V[567]*:*)
+ test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;;
+ 3[34]??:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 4850:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4.3${OS_REL} && exit 0
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4 && exit 0 ;;
+ m68*:LynxOS:2.*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit 0 ;;
+ i?86:LynxOS:2.*:* | i?86:LynxOS:3.[01]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ rs6000:LynxOS:2.*:* | PowerPC:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit 0 ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit 0 ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit 0 ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit 0 ;;
+ PENTIUM:CPunix:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit 0 ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit 0 ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit 0 ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit 0 ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit 0 ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit 0 ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit 0 ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit 0 ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit 0 ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit 0 ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit 0 ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit 0 ;;
+ *:Darwin:*:*)
+ echo `uname -p`-apple-darwin${UNAME_RELEASE}
+ exit 0 ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ if test "${UNAME_MACHINE}" = "x86pc"; then
+ UNAME_MACHINE=pc
+ fi
+ echo `uname -p`-${UNAME_MACHINE}-nto-qnx
+ exit 0 ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit 0 ;;
+ NSR-[KW]:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit 0 ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit 0 ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit 0 ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit 0 ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+#if !defined (ultrix)
+ printf ("vax-dec-bsd\n"); exit (0);
+#else
+ printf ("vax-dec-ultrix\n"); exit (0);
+#endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null && ./$dummy && rm $dummy.c $dummy && exit 0
+rm -f $dummy.c $dummy
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit 0 ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit 0 ;;
+ c34*)
+ echo c34-convex-bsd
+ exit 0 ;;
+ c38*)
+ echo c38-convex-bsd
+ exit 0 ;;
+ c4*)
+ echo c4-convex-bsd
+ exit 0 ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+The $version version of this script cannot recognize your system type.
+Please download the most up to date version of the config scripts:
+
+ ftp://ftp.gnu.org/pub/gnu/config/
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess version = $version
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "version='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/bdb/dist/config.hin b/bdb/dist/config.hin
new file mode 100644
index 00000000000..d153bc0f872
--- /dev/null
+++ b/bdb/dist/config.hin
@@ -0,0 +1,231 @@
+/* config.hin. Generated automatically from configure.in by autoheader. */
+
+/* Define to empty if the keyword does not work. */
+#undef const
+
+/* Define if your struct stat has st_blksize. */
+#undef HAVE_ST_BLKSIZE
+
+/* Define to `int' if <sys/types.h> doesn't define. */
+#undef mode_t
+
+/* Define to `long' if <sys/types.h> doesn't define. */
+#undef off_t
+
+/* Define to `int' if <sys/types.h> doesn't define. */
+#undef pid_t
+
+/* Define to `unsigned' if <sys/types.h> doesn't define. */
+#undef size_t
+
+/* Define if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+#undef STAT_MACROS_BROKEN
+
+/* Define if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Define if you can safely include both <sys/time.h> and <time.h>. */
+#undef TIME_WITH_SYS_TIME
+
+/* Define if your processor stores words with the most significant
+ byte first (like Motorola and SPARC, unlike Intel and VAX). */
+#undef WORDS_BIGENDIAN
+
+/* Define if you are building a version for running the test suite. */
+#undef CONFIG_TEST
+
+/* Define if you want a debugging version. */
+#undef DEBUG
+
+/* Define if you want a version that logs read operations. */
+#undef DEBUG_ROP
+
+/* Define if you want a version that logs write operations. */
+#undef DEBUG_WOP
+
+/* Define if you want a version with run-time diagnostic checking. */
+#undef DIAGNOSTIC
+
+/* Define if you want to mask harmless uninitialized memory read/writes. */
+#undef UMRW
+
+/* Define if fcntl/F_SETFD denies child access to file descriptors. */
+#undef HAVE_FCNTL_F_SETFD
+
+/* Define if building a big-file environment (e.g., AIX, HP/UX, Solaris). */
+#undef HAVE_FILE_OFFSET_BITS
+
+/* Mutex possibilities. */
+#undef HAVE_MUTEX_68K_GCC_ASSEMBLY
+#undef HAVE_MUTEX_AIX_CHECK_LOCK
+#undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
+#undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
+#undef HAVE_MUTEX_HPPA_MSEM_INIT
+#undef HAVE_MUTEX_IA64_GCC_ASSEMBLY
+#undef HAVE_MUTEX_MACOS
+#undef HAVE_MUTEX_MSEM_INIT
+#undef HAVE_MUTEX_PPC_GCC_ASSEMBLY
+#undef HAVE_MUTEX_PTHREADS
+#undef HAVE_MUTEX_RELIANTUNIX_INITSPIN
+#undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
+#undef HAVE_MUTEX_SEMA_INIT
+#undef HAVE_MUTEX_SGI_INIT_LOCK
+#undef HAVE_MUTEX_SOLARIS_LOCK_TRY
+#undef HAVE_MUTEX_SOLARIS_LWP
+#undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
+#undef HAVE_MUTEX_THREADS
+#undef HAVE_MUTEX_UI_THREADS
+#undef HAVE_MUTEX_UTS_CC_ASSEMBLY
+#undef HAVE_MUTEX_VMS
+#undef HAVE_MUTEX_VXWORKS
+#undef HAVE_MUTEX_WIN16
+#undef HAVE_MUTEX_WIN32
+#undef HAVE_MUTEX_X86_GCC_ASSEMBLY
+
+/* Define if building on QNX. */
+#undef HAVE_QNX
+
+/* Define if building RPC client/server. */
+#undef HAVE_RPC
+
+/* Define if your sprintf returns a pointer, not a length. */
+#undef SPRINTF_RET_CHARPNT
+
+/* Define if you have the getcwd function. */
+#undef HAVE_GETCWD
+
+/* Define if you have the getopt function. */
+#undef HAVE_GETOPT
+
+/* Define if you have the getuid function. */
+#undef HAVE_GETUID
+
+/* Define if you have the memcmp function. */
+#undef HAVE_MEMCMP
+
+/* Define if you have the memcpy function. */
+#undef HAVE_MEMCPY
+
+/* Define if you have the memmove function. */
+#undef HAVE_MEMMOVE
+
+/* Define if you have the mlock function. */
+#undef HAVE_MLOCK
+
+/* Define if you have the mmap function. */
+#undef HAVE_MMAP
+
+/* Define if you have the munlock function. */
+#undef HAVE_MUNLOCK
+
+/* Define if you have the munmap function. */
+#undef HAVE_MUNMAP
+
+/* Define if you have the pread function. */
+#undef HAVE_PREAD
+
+/* Define if you have the pstat_getdynamic function. */
+#undef HAVE_PSTAT_GETDYNAMIC
+
+/* Define if you have the pwrite function. */
+#undef HAVE_PWRITE
+
+/* Define if you have the qsort function. */
+#undef HAVE_QSORT
+
+/* Define if you have the raise function. */
+#undef HAVE_RAISE
+
+/* Define if you have the sched_yield function. */
+#undef HAVE_SCHED_YIELD
+
+/* Define if you have the select function. */
+#undef HAVE_SELECT
+
+/* Define if you have the shmget function. */
+#undef HAVE_SHMGET
+
+/* Define if you have the snprintf function. */
+#undef HAVE_SNPRINTF
+
+/* Define if you have the strcasecmp function. */
+#undef HAVE_STRCASECMP
+
+/* Define if you have the strerror function. */
+#undef HAVE_STRERROR
+
+/* Define if you have the strtoul function. */
+#undef HAVE_STRTOUL
+
+/* Define if you have the sysconf function. */
+#undef HAVE_SYSCONF
+
+/* Define if you have the vsnprintf function. */
+#undef HAVE_VSNPRINTF
+
+/* Define if you have the yield function. */
+#undef HAVE_YIELD
+
+/* Define if you have the <dirent.h> header file. */
+#undef HAVE_DIRENT_H
+
+/* Define if you have the <ndir.h> header file. */
+#undef HAVE_NDIR_H
+
+/* Define if you have the <sys/dir.h> header file. */
+#undef HAVE_SYS_DIR_H
+
+/* Define if you have the <sys/ndir.h> header file. */
+#undef HAVE_SYS_NDIR_H
+
+/* Define if you have the <sys/select.h> header file. */
+#undef HAVE_SYS_SELECT_H
+
+/* Define if you have the <sys/time.h> header file. */
+#undef HAVE_SYS_TIME_H
+
+/* Define if you have the nsl library (-lnsl). */
+#undef HAVE_LIBNSL
+
+/*
+ * Big-file configuration.
+ */
+#ifdef HAVE_FILE_OFFSET_BITS
+#define _FILE_OFFSET_BITS 64
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations, or force them to use ours, based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_GETOPT
+#define getopt __db_Cgetopt
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
diff --git a/bdb/dist/config.sub b/bdb/dist/config.sub
new file mode 100755
index 00000000000..42fc991d08a
--- /dev/null
+++ b/bdb/dist/config.sub
@@ -0,0 +1,1328 @@
+#! /bin/sh
+# Configuration validation subroutine script, version 1.1.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000
+# Free Software Foundation, Inc.
+
+version='2000-09-11'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Please send patches to <config-patches@gnu.org>.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
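+#
+# As an illustration only (worked out by hand from the case tables below,
+# not part of the upstream script): the alias "sun4sol2" canonicalizes to
+# "sparc-sun-solaris2", and "i486v4" to "i486-pc-sysv4".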
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit"
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case "$1" in
+ --version | --vers* | -V )
+ echo "$version" ; exit 0 ;;
+ --help | --h* | -h )
+ echo "$usage"; exit 0 ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ exec >&2
+ echo "$me: invalid option $1"
+ echo "$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit 0;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis)
+ os=
+ basic_machine=$1
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ tahoe | i860 | ia64 | m32r | m68k | m68000 | m88k | ns32k | arc | arm \
+ | arme[lb] | armv[2345] | armv[345][lb] | pyramid | mn10200 | mn10300 | tron | a29k \
+ | 580 | i960 | h8300 \
+ | x86 | ppcbe | mipsbe | mipsle | shbe | shle | armbe | armle \
+ | hppa | hppa1.0 | hppa1.1 | hppa2.0 | hppa2.0w | hppa2.0n \
+ | hppa64 \
+ | alpha | alphaev[4-8] | alphaev56 | alphapca5[67] \
+ | alphaev6[78] \
+ | we32k | ns16k | clipper | i370 | sh | sh[34] \
+ | powerpc | powerpcle \
+ | 1750a | dsp16xx | pdp11 | mips16 | mips64 | mipsel | mips64el \
+ | mips64orion | mips64orionel | mipstx39 | mipstx39el \
+ | mips64vr4300 | mips64vr4300el | mips64vr4100 | mips64vr4100el \
+	| mips64vr5000 | mips64vr5000el | mcore \
+ | sparc | sparclet | sparclite | sparc64 | sparcv9 | v850 | c4x \
+ | thumb | d10v | d30v | fr30 | avr)
+ basic_machine=$basic_machine-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12)
+ # Motorola 68HC11/12.
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | z8k | v70 | h8500 | w65 | pj | pjl)
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i[234567]86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ # FIXME: clean up the formatting here.
+ vax-* | tahoe-* | i[234567]86-* | i860-* | ia64-* | m32r-* | m68k-* | m68000-* \
+ | m88k-* | sparc-* | ns32k-* | fx80-* | arc-* | arm-* | c[123]* \
+ | mips-* | pyramid-* | tron-* | a29k-* | romp-* | rs6000-* \
+ | power-* | none-* | 580-* | cray2-* | h8300-* | h8500-* | i960-* \
+ | xmp-* | ymp-* \
+ | x86-* | ppcbe-* | mipsbe-* | mipsle-* | shbe-* | shle-* | armbe-* | armle-* \
+ | hppa-* | hppa1.0-* | hppa1.1-* | hppa2.0-* | hppa2.0w-* \
+ | hppa2.0n-* | hppa64-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphapca5[67]-* \
+ | alphaev6[78]-* \
+ | we32k-* | cydra-* | ns16k-* | pn-* | np1-* | xps100-* \
+ | clipper-* | orion-* \
+ | sparclite-* | pdp11-* | sh-* | powerpc-* | powerpcle-* \
+ | sparc64-* | sparcv9-* | sparc86x-* | mips16-* | mips64-* | mipsel-* \
+ | mips64el-* | mips64orion-* | mips64orionel-* \
+ | mips64vr4100-* | mips64vr4100el-* | mips64vr4300-* | mips64vr4300el-* \
+ | mipstx39-* | mipstx39el-* | mcore-* \
+ | f301-* | armv*-* | s390-* | sv1-* | t3e-* \
+ | m88110-* | m680[01234]0-* | m683?2-* | m68360-* | z8k-* | d10v-* \
+ | thumb-* | v850-* | d30v-* | tic30-* | c30-* | fr30-* \
+ | bs2000-* | tic54x-* | c54x-* | x86_64-*)
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ cray2)
+ basic_machine=cray2-cray
+ os=-unicos
+ ;;
+ [ctj]90-cray)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+# I'm not sure what "Sysv32" means. Should this be sysv3.2?
+ i[34567]86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i[34567]86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i[34567]86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i[34567]86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ i386-go32 | go32)
+ basic_machine=i386-unknown
+ os=-go32
+ ;;
+ i386-mingw32 | mingw32)
+ basic_machine=i386-unknown
+ os=-mingw32
+ ;;
+ i[34567]86-pw32 | pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mipsel*-linux*)
+ basic_machine=mipsel-unknown
+ os=-linux-gnu
+ ;;
+ mips*-linux*)
+ basic_machine=mips-unknown
+ os=-linux-gnu
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ mmix*)
+ basic_machine=mmix-knuth
+ os=-mmixware
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ msdos)
+ basic_machine=i386-unknown
+ os=-msdos
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pentium | p5 | k5 | k6 | nexen)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexen-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=rs6000-ibm
+ ;;
+ ppc) basic_machine=powerpc-unknown
+ ;;
+ ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sparclite-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=t3e-cray
+ os=-unicos
+ ;;
+ tic54x | c54x*)
+ basic_machine=tic54x-unknown
+ os=-coff
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ xmp)
+ basic_machine=xmp-cray
+ os=-unicos
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ mips)
+ if [ x$os = x-linux-gnu ]; then
+ basic_machine=mips-unknown
+ else
+ basic_machine=mips-mips
+ fi
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh3 | sh4)
+		basic_machine=sh-unknown
+ ;;
+ sparc | sparcv9)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ c4x*)
+ basic_machine=c4x-none
+ os=-coff
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+	# The portable systems come first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -netbsd* | -openbsd* | -freebsd* | -riscix* \
+ | -lynxos* | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -linux-gnu* | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i[34567]86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto*)
+ os=-nto-qnx
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -*MiNT)
+ os=-mint
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine, or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ # This also exists in the configure program, but was not the
+ # default.
+ # os=-sunos4
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f301-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -vxsim* | -vxworks*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -*MiNT)
+ vendor=atari
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "version='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/bdb/dist/configure.in b/bdb/dist/configure.in
new file mode 100644
index 00000000000..6c2a0c5d0bf
--- /dev/null
+++ b/bdb/dist/configure.in
@@ -0,0 +1,501 @@
+dnl $Id: configure.in,v 11.77 2001/01/18 19:05:25 bostic Exp $
+dnl Process this file with autoconf to produce a configure script.
+
+AC_INIT(../db/db.c)
+AC_CONFIG_HEADER(db_config.h:config.hin)
+
+dnl Configure setup.
+AC_PROG_INSTALL()
+AC_CANONICAL_HOST
+AC_ARG_PROGRAM()
+
+dnl We cannot build in the top-level directory.
+AC_MSG_CHECKING(if building in the top-level directory)
+[ test -d db_archive ] && AC_MSG_ERROR([
+Berkeley DB cannot be built in the top-level distribution directory.])
+AC_MSG_RESULT(no)
+
+dnl Substitution variables.
+AC_SUBST(ADDITIONAL_INCS)
+AC_SUBST(ADDITIONAL_LANG)
+AC_SUBST(ADDITIONAL_LIBS)
+AC_SUBST(ADDITIONAL_OBJS)
+AC_SUBST(ADDITIONAL_PROGS)
+AC_SUBST(CFLAGS)
+AC_SUBST(CPPFLAGS)
+AC_SUBST(CXX)
+AC_SUBST(CXXFLAGS)
+AC_SUBST(DBS_LIBS)
+AC_SUBST(DEFAULT_INSTALL)
+AC_SUBST(DEFAULT_LIB)
+AC_SUBST(INSTALLER)
+AC_SUBST(INSTALL_LIBS)
+AC_SUBST(JAR)
+AC_SUBST(JAVAC)
+AC_SUBST(JAVACFLAGS)
+AC_SUBST(LDFLAGS)
+AC_SUBST(LIBDB_ARGS)
+AC_SUBST(LIBJSO_LIBS)
+AC_SUBST(LIBS)
+AC_SUBST(LIBSO_LIBS)
+AC_SUBST(LIBTOOL)
+AC_SUBST(LIBTSO_LIBS)
+AC_SUBST(LIBXSO_LIBS)
+AC_SUBST(MAKEFILE_CC)
+AC_SUBST(MAKEFILE_CCLINK)
+AC_SUBST(MAKEFILE_CXX)
+AC_SUBST(POSTLINK)
+AC_SUBST(RPC_OBJS)
+AC_SUBST(SOFLAGS)
+AC_SUBST(SOLINK)
+AC_SUBST(SOSUFFIX)
+
+dnl $o is set to ".o" or ".lo", and is the file suffix used in the
+dnl Makefile instead of .o
+AC_SUBST(o)
+o=.o
+INSTALLER="\$(cp)"
+DEFAULT_LIB="\$(libdb)"
+DEFAULT_INSTALL="install_static"
+
+dnl Set the version.
+AM_VERSION_SET
+
+dnl Set the default installation location.
+AC_PREFIX_DEFAULT(/usr/local/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@)
+
+dnl Process all options before using them. This is necessary because there
+dnl are dependencies among them.
+AM_OPTIONS_SET
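+dnl For illustration only (the full option list is defined by AM_OPTIONS_SET,
+dnl which lives outside this file): an invocation such as
+dnl	../dist/configure --enable-dynamic --enable-java
+dnl presumably sets db_cv_dynamic and db_cv_java, which the tests below check.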
+
+dnl This is where we handle stuff that autoconf can't handle: compiler,
+dnl preprocessor and load flags, libraries that the standard tests don't
+dnl look for. The default optimization is -O. We would like to set the
+dnl default optimization for systems using gcc to -O2, but we can't. By
+dnl the time we know we're using gcc, it's too late to set optimization
+dnl flags.
+dnl
+dnl There are additional libraries we need for some compiler/architecture
+dnl combinations.
+dnl
+dnl Some architectures require DB to be compiled with special flags and/or
+dnl libraries for threaded applications.
+dnl
+dnl The makefile CC may be different than the CC used in config testing,
+dnl because the makefile CC may be set to use $(LIBTOOL).
+dnl
+dnl XXX
+dnl Don't override anything if it's already set from the environment.
+optimize_def="-O"
+case "$host_os" in
+aix4.*) optimize_def="-O2"
+ CC=${CC-"xlc_r"}
+ CPPFLAGS="-D_THREAD_SAFE $CPPFLAGS"
+ LIBTSO_LIBS="\$(LIBS)";;
+bsdi3*) CC=${CC-"shlicc2"}
+ optimize_def="-O2"
+ LIBS="-lipc $LIBS";;
+bsdi*) optimize_def="-O2";;
+freebsd*) optimize_def="-O2"
+ CPPFLAGS="-D_THREAD_SAFE $CPPFLAGS"
+ LIBS="-pthread";;
+hpux*) CPPFLAGS="-D_REENTRANT $CPPFLAGS";;
+irix*) optimize_def="-O2"
+ CPPFLAGS="-D_SGI_MP_SOURCE $CPPFLAGS";;
+linux*) optimize_def="-O2"
+ CFLAGS="-D_GNU_SOURCE"
+ CPPFLAGS="-D_REENTRANT $CPPFLAGS";;
+mpeix*) CPPFLAGS="-D_POSIX_SOURCE -D_SOCKET_SOURCE $CPPFLAGS"
+ LIBS="-lsocket -lsvipc $LIBS";;
+osf*) CPPFLAGS="-D_REENTRANT $CPPFLAGS";;
+*qnx) AC_DEFINE(HAVE_QNX);;
+sco3.2v4*) CC=${CC-"cc -belf"}
+ LIBS="-lsocket -lnsl_s $LIBS";;
+sco*) CC=${CC-"cc -belf"}
+ LIBS="-lsocket -lnsl $LIBS";;
+solaris*) CPPFLAGS="-D_REENTRANT $CPPFLAGS";;
+esac
+
+dnl Set CFLAGS/CXXFLAGS. We MUST set the flags before we call autoconf
+dnl compiler configuration macros, because if we don't, they set CFLAGS
+dnl to no optimization and -g, which isn't what we want.
+CFLAGS=${CFLAGS-$optimize_def}
+CXXFLAGS=${CXXFLAGS-"$CFLAGS"}
+
+dnl If the user wants a debugging environment, add -g to the CFLAGS value.
+dnl
+dnl XXX
+dnl Some compilers can't mix optimizing and debug flags. The only way to
+dnl handle this is to specify CFLAGS in the environment before configuring.
+if test "$db_cv_debug" = yes; then
+ AC_DEFINE(DEBUG)
+ CFLAGS="$CFLAGS -g"
+ CXXFLAGS="$CXXFLAGS -g"
+fi
+
+dnl The default compiler is cc (NOT gcc), the default CFLAGS is as specified
+dnl above, NOT what is set by AC_PROG_CC, as it won't set optimization flags.
+dnl We still call AC_PROG_CC so that we get the other side-effects.
+AC_CHECK_PROG(CC, cc, cc)
+AC_CHECK_PROG(CC, gcc, gcc)
+AC_PROG_CC
+
+dnl Because of dynamic library building, the ${CC} used for config tests
+dnl may be different than the ${CC} we want to put in the Makefile.
+dnl The latter is known as ${MAKEFILE_CC} in this script.
+MAKEFILE_CC=${CC}
+MAKEFILE_CCLINK="\$(CC)"
+MAKEFILE_CXX="nocxx"
+
+dnl Set some #defines based on configuration options.
+if test "$db_cv_diagnostic" = yes; then
+ AC_DEFINE(DIAGNOSTIC)
+fi
+if test "$db_cv_debug_rop" = yes; then
+ AC_DEFINE(DEBUG_ROP)
+fi
+if test "$db_cv_debug_wop" = yes; then
+ AC_DEFINE(DEBUG_WOP)
+fi
+if test "$db_cv_umrw" = yes; then
+ AC_DEFINE(UMRW)
+fi
+if test "$db_cv_test" = yes; then
+ AC_DEFINE(CONFIG_TEST)
+fi
+
+dnl See if we need the C++ compiler at all. If so, we'd like to find one that
+dnl interoperates with the C compiler we chose. Since we preferred cc over gcc,
+dnl we'll also prefer the vendor's compiler over g++/gcc. If we're wrong, the
+dnl user can set CC and CXX in their environment before running configure.
+dnl
+dnl AC_PROG_CXX sets CXX, but it uses $CXX and $CCC (in that order) as its
+dnl first choices.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$GCC" != "yes"; then
+ case "$host_os" in
+ aix*) AC_CHECK_PROG(CCC, xlC_r, xlC_r);;
+ hpux*) AC_CHECK_PROG(CCC, aCC, aCC);;
+ osf*) AC_CHECK_PROG(CCC, cxx, cxx);;
+ solaris*) AC_CHECK_PROG(CCC, CC, CC);;
+ esac
+ fi
+ AC_PROG_CXX
+ MAKEFILE_CXX=${CXX}
+fi
+
+dnl XXX
+dnl Versions of GCC up to 2.8.0 required -fhandle-exceptions, but it is
+dnl renamed as -fexceptions and is the default in versions 2.8.0 and after.
+dnl
+dnl $GXX may be set as a result of enabling C++ or Java.
+if test "$GXX" = "yes"; then
+ CXXVERSION=`${MAKEFILE_CXX} --version`
+ case ${CXXVERSION} in
+ 1.*|2.[[01234567]].*|*-1.*|*-2.[[01234567]].* )
+ CXXFLAGS="-fhandle-exceptions $CXXFLAGS";;
+ * ) CXXFLAGS="-fexceptions $CXXFLAGS";;
+ esac
+fi
+
+dnl Export our compiler preferences for the libtool configuration.
+export CC CCC
+CCC=CXX
+
+dnl Dynamic library and libtool configuration; optional, but required for
+dnl Tcl or Java support.
+LIBDB_ARGS="libdb.a"
+LIBTOOL="nolibtool"
+POSTLINK="@true"
+SOSUFFIX="so"
+if test "$db_cv_dynamic" = "yes"; then
+ SAVE_CC="${MAKEFILE_CC}"
+ SAVE_CXX="${MAKEFILE_CXX}"
+
+ # Configure libtool.
+ AC_MSG_CHECKING(libtool configuration)
+ AC_MSG_RESULT([])
+ ${CONFIG_SHELL-/bin/sh} $srcdir/ltconfig \
+ --no-verify $srcdir/ltmain.sh \
+ --output=./libtool $host_os \
+ --disable-static \
+ || AC_MSG_ERROR([libtool configure failed])
+
+ SOSUFFIX=`sed -e '/^library_names_spec=/!d' -e 's/.*\.\([[a-zA-Z0-9_]]*\).*/\1/' ./libtool`
+ DEFAULT_LIB="\$(libso_target)"
+ DEFAULT_INSTALL="install_dynamic"
+ LIBDB_ARGS="\$(libso_linkname)"
+ LIBTOOL="\$(SHELL) ./libtool"
+
+ MAKEFILE_CC="\$(LIBTOOL) --mode=compile ${SAVE_CC}"
+ MAKEFILE_CXX="\$(LIBTOOL) --mode=compile ${SAVE_CXX}"
+ MAKEFILE_CCLINK="\$(LIBTOOL) --mode=link ${SAVE_CC}"
+
+ INSTALLER="\$(LIBTOOL) --mode=install cp"
+ POSTLINK="\$(LIBTOOL) --mode=execute true"
+ SOLINK="\$(LIBTOOL) --mode=link ${SAVE_CC} -avoid-version"
+ SOFLAGS="-rpath \$(libdir)"
+ o=".lo"
+fi
+
+dnl Optional C++ API.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$db_cv_dynamic" = "yes"; then
+ ADDITIONAL_LIBS="$ADDITIONAL_LIBS \$(libxso_target)"
+ DEFAULT_INSTALL="${DEFAULT_INSTALL} install_dynamic_cxx"
+ else
+ ADDITIONAL_LIBS="$ADDITIONAL_LIBS \$(libcxx)"
+ DEFAULT_INSTALL="${DEFAULT_INSTALL} install_static_cxx"
+ fi
+fi
+
+dnl Optional Java API.
+if test "$db_cv_java" = "yes"; then
+ if test "$db_cv_dynamic" != "yes"; then
+ AC_MSG_ERROR([--enable-java requires --enable-dynamic])
+ fi
+
+ AC_CHECK_PROG(JAVAC, javac, javac, nojavac)
+ if test "$JAVAC" = "nojavac"; then
+ AC_MSG_ERROR([no javac compiler in PATH])
+ fi
+ AC_CHECK_PROG(JAR, jar, jar, nojar)
+ if test "$JAR" = "nojar"; then
+ AC_MSG_ERROR([no jar utility in PATH])
+ fi
+ AC_PATH_PROG(JAVACABS, javac, nojavac)
+ ADDITIONAL_LIBS="$ADDITIONAL_LIBS \$(libjso_target)"
+ ADDITIONAL_LANG="$ADDITIONAL_LANG java"
+ DEFAULT_INSTALL="${DEFAULT_INSTALL} install_java"
+
+dnl find the include directory relative to the javac executable
+ while ls -ld "$JAVACABS" 2>/dev/null | grep " -> " >/dev/null; do
+ AC_MSG_CHECKING(symlink for $JAVACABS)
+ JAVACLINK=`ls -ld $JAVACABS | sed 's/.* -> //'`
+ case "$JAVACLINK" in
+ /*) JAVACABS="$JAVACLINK";;
+dnl 'X' avoids triggering unwanted echo options.
+ *) JAVACABS=`echo "X$JAVACABS" | sed -e 's/^X//' -e 's:[[^/]]*$::'`"$JAVACLINK";;
+ esac
+ AC_MSG_RESULT($JAVACABS)
+ done
+ JTOPDIR=`echo "$JAVACABS" | sed -e 's://*:/:g' -e 's:/[[^/]]*$::'`
+ if test -f "$JTOPDIR/include/jni.h"; then
+		CPPFLAGS="$CPPFLAGS -I$JTOPDIR/include"
+ else
+ JTOPDIR=`echo "$JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ if test -f "$JTOPDIR/include/jni.h"; then
+ CPPFLAGS="$CPPFLAGS -I$JTOPDIR/include"
+ else
+ AC_MSG_ERROR([cannot find java include files])
+ fi
+ fi
+
+dnl get the likely subdirectories for system specific java includes
+ case "$host_os" in
+ solaris*) JINCSUBDIRS="solaris";;
+ linux*) JINCSUBDIRS="linux genunix";;
+ *) JINCSUBDIRS="genunix";;
+ esac
+
+ for JINCSUBDIR in $JINCSUBDIRS
+ do
+ if test -d "$JTOPDIR/include/$JINCSUBDIR"; then
+ CPPFLAGS="$CPPFLAGS -I$JTOPDIR/include/$JINCSUBDIR"
+ fi
+ done
+else
+ JAVAC=nojavac
+fi
+
+dnl Optional RPC client/server.
+if test "$db_cv_rpc" = "yes"; then
+ AC_DEFINE(HAVE_RPC)
+
+ RPC_OBJS="\$(RPC_OBJS)"
+ ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS"
+
+ case "$host_os" in
+ hpux*)
+ AC_CHECK_FUNC(svc_run,,
+ AC_CHECK_LIB(nsl, svc_run,
+ LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"));;
+ solaris*)
+ AC_CHECK_FUNC(svc_run,, AC_CHECK_LIB(nsl, svc_run));;
+ esac
+fi
+
+AM_TCL_LOAD
+
+dnl Optional DB 1.85 compatibility API.
+if test "$db_cv_compat185" = "yes"; then
+ ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS"
+ ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS"
+fi
+
+dnl Optional utilities.
+if test "$db_cv_dump185" = "yes"; then
+ ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS"
+fi
+
+dnl Test Server.
+dnl Include -lpthread if the library exists.
+AC_CHECK_LIB(pthread, pthread_create, DBS_LIBS=-lpthread)
+
+dnl Checks for typedefs, structures, and system/compiler characteristics.
+AC_C_BIGENDIAN
+AC_C_CONST
+AC_HEADER_STAT
+AC_HEADER_TIME
+AC_STRUCT_ST_BLKSIZE
+AC_TYPE_MODE_T
+AC_TYPE_OFF_T
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+
+dnl Define any short-hand types we're missing.
+AM_SHORTHAND_TYPES
+
+dnl Checks for header files.
+AC_HEADER_DIRENT
+AC_CHECK_HEADERS(sys/select.h)
+AC_CHECK_HEADERS(sys/time.h)
+
+dnl Check for mutexes. We do this here because it changes $LIBS.
+AM_DEFINE_MUTEXES
+
+dnl Checks for system functions for which we have replacements.
+dnl
+dnl XXX
+dnl The only portable getcwd call is getcwd(char *, size_t), where the
+dnl buffer is non-NULL -- Solaris can't handle a NULL buffer, and they
+dnl deleted getwd().
+AC_REPLACE_FUNCS(getcwd getopt memcmp memcpy memmove)
+AC_REPLACE_FUNCS(raise snprintf strcasecmp strerror vsnprintf)
+
+dnl XXX
+dnl Nasty hack. AC_REPLACE_FUNCS added entries of the form xxx.o to the
+dnl LIBOBJS variable. They have to be xxx.lo if we are building shared
+dnl libraries. Use sed, configure already requires it.
+tmp="`echo \"$LIBOBJS\" | sed \"s/\.o/${o}/g\"`"
+LIBOBJS="$tmp"
+
+dnl Check for system functions we optionally use.
+AC_CHECK_FUNCS(getuid pstat_getdynamic sysconf sched_yield strtoul yield)
+
+dnl Pread/pwrite.
+dnl
+dnl HP-UX has pread/pwrite, but it doesn't work with bigfile support.
+case "$host_os" in
+hpux*)
+ AC_MSG_WARN([pread/pwrite interfaces ignored on $host_os.]);;
+*) AC_CHECK_FUNCS(pread pwrite)
+esac
+
+dnl Check for fcntl(2) to deny child process access to file descriptors.
+AC_CACHE_CHECK([for fcntl/F_SETFD], db_cv_fcntl_f_setfd, [dnl
+AC_TRY_RUN([
+#include <sys/types.h>
+#include <fcntl.h>
+main(){exit(fcntl(1, F_SETFD, 1) == -1);}],
+ [db_cv_fcntl_f_setfd=yes], [db_cv_fcntl_f_setfd=no])])
+if test "$db_cv_fcntl_f_setfd" = yes; then
+ AC_DEFINE(HAVE_FCNTL_F_SETFD)
+fi
+
+dnl A/UX has a broken getopt(3).
+case "$host_os" in
+aux*) ADDITIONAL_OBJS="getopt${o} $ADDITIONAL_OBJS";;
+esac
+
+dnl Checks for system functions for which we don't have replacements.
+
+dnl We require qsort(3) and select(2).
+AC_CHECK_FUNCS(qsort, , AC_MSG_ERROR([No qsort library function.]))
+AC_CHECK_FUNCS(select, , AC_MSG_ERROR([No select library function.]))
+
+dnl Some versions of sprintf return a pointer to the first argument instead
+dnl of a character count. We assume that the return value of snprintf and
+dnl vsprintf etc. will be the same as sprintf, and check the easy one.
+AC_CACHE_CHECK([for int type sprintf return value], db_cv_sprintf_count, [dnl
+AC_TRY_RUN([main(){char buf[20]; exit(sprintf(buf, "XXX") != 3);}],
+ [db_cv_sprintf_count=yes], [db_cv_sprintf_count=no])])
+if test "$db_cv_sprintf_count" = no; then
+ AC_DEFINE(SPRINTF_RET_CHARPNT)
+fi
+
+dnl Vendors are doing 64-bit lseek in different ways.
+dnl AIX, HP/UX, Solaris and Linux all use _FILE_OFFSET_BITS
+dnl to specify a "big-file" environment.
+dnl
+dnl You can't build C++ with big-file support on HP-UX, the include files
+dnl are wrong. On Solaris 8, <fcntl.h> included with big-file support
+dnl is not compatible with C++.
+if test "$db_cv_bigfile" = no; then
+ case "$host_os" in
+ solaris2.8|hpux*)
+ if test "$db_cv_cxx" = "yes"; then
+ AC_MSG_WARN([Large file and C++ API support are incompatible on HP-UX])
+ AC_MSG_WARN([and Solaris 8; large file support has been turned off.])
+ else
+ AC_DEFINE(HAVE_FILE_OFFSET_BITS)
+ fi;;
+ aix*|solaris*|linux*)
+ AC_DEFINE(HAVE_FILE_OFFSET_BITS);;
+ esac
+fi
+
+dnl Figure out how to create shared regions.
+dnl
+dnl First, we look for mmap.
+dnl
+dnl BSD/OS has mlock(2), but it doesn't work until the 4.1 release.
+dnl
+dnl Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol
+dnl is defined in the C library) but does not support munmap(2). Don't
+dnl try to use mmap if we can't find munmap.
+dnl
+dnl Ultrix has mmap(2), but it doesn't work.
+mmap_ok=no
+case "$host_os" in
+bsdi3*|bsdi4.0)
+ AC_MSG_WARN([mlock(2) interface ignored on BSD/OS 3.X and 4.0.])
+ mmap_ok=yes
+ AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
+ultrix*)
+ AC_MSG_WARN([mmap(2) interface ignored on Ultrix.]);;
+*)
+ mmap_ok=yes
+ AC_CHECK_FUNCS(mlock munlock)
+ AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
+esac
+
+dnl Second, we look for shmget.
+dnl
+dnl SunOS has the shmget(2) interfaces, but there appears to be a missing
+dnl #include <debug/debug.h> file, so we ignore them.
+shmget_ok=no
+case "$host_os" in
+sunos*)
+ AC_MSG_WARN([shmget(2) interface ignored on SunOS.]);;
+*)
+ shmget_ok=yes
+ AC_CHECK_FUNCS(shmget, , shmget_ok=no);;
+esac
+
+dnl We require either mmap/munmap(2) or shmget(2).
+if test "$mmap_ok" = no -a "$shmget_ok" = no; then
+	AC_MSG_WARN([Neither mmap/munmap(2) nor shmget(2) library functions.])
+fi
+
+dnl Check for programs used in building and installation.
+AM_PROGRAMS_SET
+
+CREATE_LIST="Makefile
+ include.tcl:../test/include.tcl
+ db.h:../include/db.src
+ db_int.h:../include/db_int.src"
+if test "$db_cv_compat185" = "yes"; then
+ CREATE_LIST="${CREATE_LIST} db_185.h:../include/db_185.h"
+fi
+AC_OUTPUT(${CREATE_LIST})
diff --git a/bdb/dist/gen_rec.awk b/bdb/dist/gen_rec.awk
new file mode 100644
index 00000000000..5953ee05120
--- /dev/null
+++ b/bdb/dist/gen_rec.awk
@@ -0,0 +1,475 @@
+#!/bin/sh -
+#
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: gen_rec.awk,v 11.26 2001/01/08 21:06:46 bostic Exp $
+#
+
+# This awk script generates all the log, print, and read routines for the DB
+# logging. It also generates a template for the recovery functions (these
+# functions must still be edited, but are highly stylized and the initial
+# template gets you a fair way along the path).
+#
+# For a given file prefix.src, we generate a file prefix_auto.c, and a file
+# prefix_auto.h that contains:
+#
+# external declarations for the file's functions
+# defines for the physical record types
+# (logical types are defined in each subsystem manually)
+# structures to contain the data unmarshalled from the log.
+#
+# This awk script requires that five variables be set when it is called:
+#
+# source_file -- the C source file being created
+# subsystem -- the subsystem prefix, e.g., "db"
+# header_file -- the C #include file being created
+# template_file -- the template file being created
+# template_dir -- the directory to find the source template
+#
+# And stdin must be the input file that defines the recovery setup.
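+#
+# As a purely illustrative (hypothetical) example of the input format parsed
+# below, a record description has the shape:
+#
+#	PREFIX	db
+#	BEGIN	example	50
+#	ARG	fileid	int32_t	ld
+#	DBT	key	DBT	s
+#	END
+#
+# For such a record this script would emit a DB_db_example record-type
+# #define, a __db_example_args structure, and __db_example_log, _print and
+# _read routines, following the rules below.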
+
+BEGIN {
+ if (source_file == "" || subsystem == "" ||
+ header_file == "" || template_file == "" || template_dir == "") {
+ print "Usage: gen_rec.awk requires five variables to be set:"
+ print "\tsource_file\t-- the C source file being created"
+ print "\tsubsystem\t-- the subsystem prefix, e.g., \"db\""
+ print "\theader_file\t-- the C #include file being created"
+ print "\ttemplate_file\t-- the template file being created"
+ print "\ttemplate_dir\t-- the directory to find the source template"
+ exit
+ }
+ FS="[\t ][\t ]*"
+ CFILE=source_file
+ NAME=subsystem
+ HFILE=header_file
+ TFILE=template_file
+ TDIR=template_dir
+}
+/^[ ]*PREFIX/ {
+ prefix = $2
+ num_funcs = 0;
+
+ # Start .c file.
+ printf("/* Do not edit: automatically built by gen_rec.awk. */\n") \
+ > CFILE
+
+ # Start .h file, make the entire file conditional.
+ printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \
+ > HFILE
+ printf("#ifndef\t%s_AUTO_H\n#define\t%s_AUTO_H\n", prefix, prefix) \
+ >> HFILE;
+
+ # Write recovery template file headers
+ # This assumes we're doing DB recovery.
+ printf("#include \"db_config.h\"\n\n") > TFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
+ printf("#include <sys/types.h>\n\n") >> TFILE
+ printf("#include <string.h>\n") >> TFILE
+ printf("#endif\n\n") >> TFILE
+ printf("#include \"db_int.h\"\n") >> TFILE
+ printf("#include \"db_page.h\"\n") >> TFILE
+ printf("#include \"%s.h\"\n", prefix) >> TFILE
+ printf("#include \"log.h\"\n\n") >> TFILE
+}
+/^[ ]*INCLUDE/ {
+ if ($3 == "")
+ printf("%s\n", $2) >> CFILE
+ else
+ printf("%s %s\n", $2, $3) >> CFILE
+}
+/^[ ]*(BEGIN|DEPRECATED)/ {
+ if (in_begin) {
+ print "Invalid format: missing END statement"
+ exit
+ }
+ in_begin = 1;
+ is_dbt = 0;
+ is_deprecated = ($1 == "DEPRECATED");
+ nvars = 0;
+
+ thisfunc = $2;
+ funcname = sprintf("%s_%s", prefix, $2);
+
+ rectype = $3;
+
+ funcs[num_funcs] = funcname;
+ funcs_dep[num_funcs] = is_deprecated;
+ ++num_funcs;
+}
+/^[ ]*(ARG|DBT|POINTER)/ {
+ vars[nvars] = $2;
+ types[nvars] = $3;
+ atypes[nvars] = $1;
+ modes[nvars] = $1;
+ formats[nvars] = $NF;
+ for (i = 4; i < NF; i++)
+ types[nvars] = sprintf("%s %s", types[nvars], $i);
+
+ if ($1 == "ARG")
+ sizes[nvars] = sprintf("sizeof(%s)", $2);
+ else if ($1 == "POINTER")
+ sizes[nvars] = sprintf("sizeof(*%s)", $2);
+ else { # DBT
+ sizes[nvars] = \
+ sprintf("sizeof(u_int32_t) + (%s == NULL ? 0 : %s->size)", \
+ $2, $2);
+ is_dbt = 1;
+ }
+ nvars++;
+}
+/^[ ]*END/ {
+ if (!in_begin) {
+ print "Invalid format: missing BEGIN statement"
+ exit;
+ }
+
+ # Declare the record type.
+ printf("\n#define\tDB_%s\t%d\n", funcname, rectype) >> HFILE
+
+ # Structure declaration.
+ printf("typedef struct _%s_args {\n", funcname) >> HFILE
+
+ # Here are the required fields for every structure
+ printf("\tu_int32_t type;\n\tDB_TXN *txnid;\n") >> HFILE
+ printf("\tDB_LSN prev_lsn;\n") >>HFILE
+
+ # Here are the specified fields.
+ for (i = 0; i < nvars; i++) {
+ t = types[i];
+ if (modes[i] == "POINTER") {
+ ndx = index(t, "*");
+ t = substr(types[i], 0, ndx - 2);
+ }
+ printf("\t%s\t%s;\n", t, vars[i]) >> HFILE
+ }
+ printf("} __%s_args;\n\n", funcname) >> HFILE
+
+ # Output the log, print and read functions.
+ if (!is_deprecated)
+ log_function();
+ print_function();
+ read_function();
+
+ # Recovery template
+ cmd = sprintf("sed -e s/PREF/%s/ -e s/FUNC/%s/ < %s/rec_ctemp >> %s",
+ prefix, thisfunc, TDIR, TFILE)
+ system(cmd);
+
+ # Done writing stuff, reset and continue.
+ in_begin = 0;
+}
+
+END {
+ # Print initialization routine; function prototype
+ printf("int __%s_init_print __P((DB_ENV *));\n", prefix) >> HFILE;
+
+ # Create the routine to call db_add_recovery(print_fn, id)
+ printf("int\n__%s_init_print(dbenv)\n", prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n{\n\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv,\n") >> CFILE;
+ printf("\t __%s_print, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+
+ # Recover initialization routine
+ printf("int __%s_init_recover __P((DB_ENV *));\n", prefix) >> HFILE;
+
+ # Create the routine to call db_add_recovery(func, id)
+ printf("int\n__%s_init_recover(dbenv)\n", prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n{\n\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv,\n") >> CFILE;
+ if (funcs_dep[i] == 1)
+ printf("\t __deprecated_recover, DB_%s)) != 0)\n", \
+ funcs[i]) >> CFILE;
+ else
+ printf("\t __%s_recover, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+
+ # End the conditional for the HFILE
+ printf("#endif\n") >> HFILE;
+}
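+
+# For reference (a summary of the printf calls above, not literal generated
+# output): with prefix "db", the awk END block above produces
+# __db_init_print() and __db_init_recover(), each registering every record
+# type via __db_add_recovery(); records flagged DEPRECATED are wired to
+# __deprecated_recover instead of their own recovery function.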
+
+function log_function() {
+ # Write the log function; function prototype
+ printf("int __%s_log __P((", funcname) >> HFILE;
+ printf("DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t") >> HFILE;
+ for (i = 0; i < nvars; i++) {
+ printf(", ") >> HFILE;
+ if (modes[i] == "DBT")
+ printf("const ") >> HFILE;
+ printf("%s", types[i]) >> HFILE;
+ if (modes[i] == "DBT")
+ printf(" *") >> HFILE;
+ }
+ printf("));\n") >> HFILE;
+
+ # Function declaration
+ printf("int\n__%s_log(dbenv, txnid, ret_lsnp, flags", \
+ funcname) >> CFILE;
+ for (i = 0; i < nvars; i++) {
+ printf(",") >> CFILE;
+ if ((i % 6) == 0)
+ printf("\n\t") >> CFILE;
+ else
+ printf(" ") >> CFILE;
+ printf("%s", vars[i]) >> CFILE;
+ }
+ printf(")\n") >> CFILE;
+
+ # Now print the parameters
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDB_TXN *txnid;\n\tDB_LSN *ret_lsnp;\n") >> CFILE;
+ printf("\tu_int32_t flags;\n") >> CFILE;
+ for (i = 0; i < nvars; i++) {
+ if (modes[i] == "DBT")
+ printf("\tconst %s *%s;\n", types[i], vars[i]) >> CFILE;
+ else
+ printf("\t%s %s;\n", types[i], vars[i]) >> CFILE;
+ }
+
+ # Function body and local decls
+ printf("{\n") >> CFILE;
+ printf("\tDBT logrec;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp, null_lsn;\n") >> CFILE;
+ if (is_dbt == 1)
+ printf("\tu_int32_t zero;\n") >> CFILE;
+ printf("\tu_int32_t rectype, txn_num;\n") >> CFILE;
+ printf("\tint ret;\n") >> CFILE;
+ printf("\tu_int8_t *bp;\n\n") >> CFILE;
+
+ # Initialization
+ printf("\trectype = DB_%s;\n", funcname) >> CFILE;
+ printf("\tif (txnid != NULL &&\n") >> CFILE;
+ printf("\t TAILQ_FIRST(&txnid->kids) != NULL &&\n") >> CFILE;
+ printf("\t (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)\n")\
+ >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ printf("\ttxn_num = txnid == NULL ? 0 : txnid->txnid;\n") >> CFILE;
+ printf("\tif (txnid == NULL) {\n") >> CFILE;
+ printf("\t\tZERO_LSN(null_lsn);\n") >> CFILE;
+ printf("\t\tlsnp = &null_lsn;\n") >> CFILE;
+ printf("\t} else\n\t\tlsnp = &txnid->last_lsn;\n") >> CFILE;
+
+ # Malloc
+ printf("\tlogrec.size = sizeof(rectype) + ") >> CFILE;
+ printf("sizeof(txn_num) + sizeof(DB_LSN)") >> CFILE;
+ for (i = 0; i < nvars; i++)
+ printf("\n\t + %s", sizes[i]) >> CFILE;
+ printf(";\n\tif ((ret = ") >> CFILE;
+ printf(\
+ "__os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)\n")\
+ >> CFILE;
+ printf("\t\treturn (ret);\n\n") >> CFILE;
+
+ # Copy args into buffer
+ printf("\tbp = logrec.data;\n") >> CFILE;
+ printf("\tmemcpy(bp, &rectype, sizeof(rectype));\n") >> CFILE;
+ printf("\tbp += sizeof(rectype);\n") >> CFILE;
+ printf("\tmemcpy(bp, &txn_num, sizeof(txn_num));\n") >> CFILE;
+ printf("\tbp += sizeof(txn_num);\n") >> CFILE;
+ printf("\tmemcpy(bp, lsnp, sizeof(DB_LSN));\n") >> CFILE;
+ printf("\tbp += sizeof(DB_LSN);\n") >> CFILE;
+
+ for (i = 0; i < nvars; i ++) {
+ if (modes[i] == "ARG") {
+ printf("\tmemcpy(bp, &%s, %s);\n", \
+ vars[i], sizes[i]) >> CFILE;
+ printf("\tbp += %s;\n", sizes[i]) >> CFILE;
+ } else if (modes[i] == "DBT") {
+ printf("\tif (%s == NULL) {\n", vars[i]) >> CFILE;
+ printf("\t\tzero = 0;\n") >> CFILE;
+ printf("\t\tmemcpy(bp, &zero, sizeof(u_int32_t));\n") \
+ >> CFILE;
+ printf("\t\tbp += sizeof(u_int32_t);\n") >> CFILE;
+ printf("\t} else {\n") >> CFILE;
+ printf("\t\tmemcpy(bp, &%s->size, ", vars[i]) >> CFILE;
+ printf("sizeof(%s->size));\n", vars[i]) >> CFILE;
+ printf("\t\tbp += sizeof(%s->size);\n", vars[i]) \
+ >> CFILE;
+ printf("\t\tmemcpy(bp, %s->data, %s->size);\n", \
+ vars[i], vars[i]) >> CFILE;
+ printf("\t\tbp += %s->size;\n\t}\n", vars[i]) >> CFILE;
+ } else { # POINTER
+ printf("\tif (%s != NULL)\n", vars[i]) >> CFILE;
+ printf("\t\tmemcpy(bp, %s, %s);\n", vars[i], \
+ sizes[i]) >> CFILE;
+ printf("\telse\n") >> CFILE;
+ printf("\t\tmemset(bp, 0, %s);\n", sizes[i]) >> CFILE;
+ printf("\tbp += %s;\n", sizes[i]) >> CFILE;
+ }
+ }
+
+ # Error checking
+ printf("\tDB_ASSERT((u_int32_t)") >> CFILE;
+ printf("(bp - (u_int8_t *)logrec.data) == logrec.size);\n") >> CFILE;
+
+ # Issue log call
+ # The logging system cannot call the public log_put routine
+ # due to mutual exclusion constraints. So, if we are
+ # generating code for the log subsystem, use the internal
+ # __log_put.
+ if (prefix == "log")
+		printf("\tret = __log_put(dbenv, ret_lsnp, ") >> CFILE;
+ else
+ printf("\tret = log_put(dbenv, ret_lsnp, ") >> CFILE;
+ printf("(DBT *)&logrec, flags);\n") >> CFILE;
+
+	# Update the transaction's last_lsn
+ printf("\tif (txnid != NULL)\n") >> CFILE;
+ printf("\t\ttxnid->last_lsn = *ret_lsnp;\n") >> CFILE;
+
+ # Free and return
+ printf("\t__os_free(logrec.data, logrec.size);\n") >> CFILE;
+ printf("\treturn (ret);\n}\n\n") >> CFILE;
+}
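+
+# As an illustration only: for the same hypothetical record "foo" with one
+# DBT field "data" ("foo" and "data" are placeholders), the prototype that
+# log_function() emits into HFILE would look roughly like this:
+#
+#	int __foo_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *));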
+
+function print_function() {
+ # Write the print function; function prototype
+ printf("int __%s_print", funcname) >> HFILE;
+ printf(" __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));\n") \
+ >> HFILE;
+
+ # Function declaration
+ printf("int\n__%s_print(dbenv, ", funcname) >> CFILE;
+ printf("dbtp, lsnp, notused2, notused3)\n") >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDBT *dbtp;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp;\n") >> CFILE;
+ printf("\tdb_recops notused2;\n\tvoid *notused3;\n{\n") >> CFILE;
+
+ # Locals
+ printf("\t__%s_args *argp;\n", funcname) >> CFILE;
+ printf("\tu_int32_t i;\n\tu_int ch;\n\tint ret;\n\n") >> CFILE;
+
+ # Get rid of complaints about unused parameters.
+ printf("\ti = 0;\n\tch = 0;\n") >> CFILE;
+ printf("\tnotused2 = DB_TXN_ABORT;\n\tnotused3 = NULL;\n\n") >> CFILE;
+
+ # Call read routine to initialize structure
+ printf("\tif ((ret = __%s_read(dbenv, dbtp->data, &argp)) != 0)\n", \
+ funcname) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ # Print values in every record
+ printf("\tprintf(\"[%%lu][%%lu]%s: ", funcname) >> CFILE;
+ printf("rec: %%lu txnid %%lx ") >> CFILE;
+ printf("prevlsn [%%lu][%%lu]\\n\",\n") >> CFILE;
+ printf("\t (u_long)lsnp->file,\n") >> CFILE;
+ printf("\t (u_long)lsnp->offset,\n") >> CFILE;
+ printf("\t (u_long)argp->type,\n") >> CFILE;
+ printf("\t (u_long)argp->txnid->txnid,\n") >> CFILE;
+ printf("\t (u_long)argp->prev_lsn.file,\n") >> CFILE;
+ printf("\t (u_long)argp->prev_lsn.offset);\n") >> CFILE;
+
+ # Now print fields of argp
+ for (i = 0; i < nvars; i ++) {
+ printf("\tprintf(\"\\t%s: ", vars[i]) >> CFILE;
+
+ if (modes[i] == "DBT") {
+ printf("\");\n") >> CFILE;
+ printf("\tfor (i = 0; i < ") >> CFILE;
+ printf("argp->%s.size; i++) {\n", vars[i]) >> CFILE;
+ printf("\t\tch = ((u_int8_t *)argp->%s.data)[i];\n", \
+ vars[i]) >> CFILE;
+ printf("\t\tif (isprint(ch) || ch == 0xa)\n") >> CFILE;
+ printf("\t\t\tputchar(ch);\n") >> CFILE;
+ printf("\t\telse\n") >> CFILE;
+ printf("\t\t\tprintf(\"%%#x \", ch);\n") >> CFILE;
+ printf("\t}\n\tprintf(\"\\n\");\n") >> CFILE;
+ } else if (types[i] == "DB_LSN *") {
+ printf("[%%%s][%%%s]\\n\",\n", \
+ formats[i], formats[i]) >> CFILE;
+ printf("\t (u_long)argp->%s.file,", \
+ vars[i]) >> CFILE;
+ printf(" (u_long)argp->%s.offset);\n", \
+ vars[i]) >> CFILE;
+ } else {
+ if (formats[i] == "lx")
+ printf("0x") >> CFILE;
+ printf("%%%s\\n\", ", formats[i]) >> CFILE;
+ if (formats[i] == "lx" || formats[i] == "lu")
+ printf("(u_long)") >> CFILE;
+ if (formats[i] == "ld")
+ printf("(long)") >> CFILE;
+ printf("argp->%s);\n", vars[i]) >> CFILE;
+ }
+ }
+ printf("\tprintf(\"\\n\");\n") >> CFILE;
+ printf("\t__os_free(argp, 0);\n") >> CFILE;
+ printf("\treturn (0);\n") >> CFILE;
+ printf("}\n\n") >> CFILE;
+}
+
+function read_function() {
+ # Write the read function; function prototype
+ printf("int __%s_read __P((DB_ENV *, void *, ", funcname) >> HFILE;
+ printf("__%s_args **));\n", funcname) >> HFILE;
+
+ # Function declaration
+ printf("int\n__%s_read(dbenv, recbuf, argpp)\n", funcname) >> CFILE;
+
+ # Now print the parameters
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tvoid *recbuf;\n") >> CFILE;
+ printf("\t__%s_args **argpp;\n", funcname) >> CFILE;
+
+ # Function body and local decls
+ printf("{\n\t__%s_args *argp;\n", funcname) >> CFILE;
+ printf("\tu_int8_t *bp;\n") >> CFILE;
+ printf("\tint ret;\n") >> CFILE;
+
+ printf("\n\tret = __os_malloc(dbenv, sizeof(") >> CFILE;
+ printf("__%s_args) +\n\t sizeof(DB_TXN), NULL, &argp);\n", \
+ funcname) >> CFILE;
+ printf("\tif (ret != 0)\n\t\treturn (ret);\n") >> CFILE;
+
+ # Set up the pointers to the txnid and the prev lsn
+ printf("\targp->txnid = (DB_TXN *)&argp[1];\n") >> CFILE;
+
+ # First get the record type, prev_lsn, and txnid fields.
+
+ printf("\tbp = recbuf;\n") >> CFILE;
+ printf("\tmemcpy(&argp->type, bp, sizeof(argp->type));\n") >> CFILE;
+ printf("\tbp += sizeof(argp->type);\n") >> CFILE;
+ printf("\tmemcpy(&argp->txnid->txnid, bp, ") >> CFILE;
+ printf("sizeof(argp->txnid->txnid));\n") >> CFILE;
+ printf("\tbp += sizeof(argp->txnid->txnid);\n") >> CFILE;
+ printf("\tmemcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));\n") >> CFILE;
+ printf("\tbp += sizeof(DB_LSN);\n") >> CFILE;
+
+ # Now get rest of data.
+ for (i = 0; i < nvars; i ++) {
+ if (modes[i] == "DBT") {
+ printf("\tmemset(&argp->%s, 0, sizeof(argp->%s));\n", \
+ vars[i], vars[i]) >> CFILE;
+ printf("\tmemcpy(&argp->%s.size, ", vars[i]) >> CFILE;
+ printf("bp, sizeof(u_int32_t));\n") >> CFILE;
+ printf("\tbp += sizeof(u_int32_t);\n") >> CFILE;
+ printf("\targp->%s.data = bp;\n", vars[i]) >> CFILE;
+ printf("\tbp += argp->%s.size;\n", vars[i]) >> CFILE;
+ } else if (modes[i] == "ARG") {
+ printf("\tmemcpy(&argp->%s, bp, %s%s));\n", \
+ vars[i], "sizeof(argp->", vars[i]) >> CFILE;
+ printf("\tbp += sizeof(argp->%s);\n", vars[i]) >> CFILE;
+ } else { # POINTER
+ printf("\tmemcpy(&argp->%s, bp, ", vars[i]) >> CFILE;
+ printf(" sizeof(argp->%s));\n", vars[i]) >> CFILE;
+ printf("\tbp += sizeof(argp->%s);\n", vars[i]) >> CFILE;
+ }
+ }
+
+ # Free and return
+ printf("\t*argpp = argp;\n") >> CFILE;
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+}
diff --git a/bdb/dist/gen_rpc.awk b/bdb/dist/gen_rpc.awk
new file mode 100644
index 00000000000..6c3bffc1aa4
--- /dev/null
+++ b/bdb/dist/gen_rpc.awk
@@ -0,0 +1,1482 @@
+#
+# $Id: gen_rpc.awk,v 11.25 2001/01/02 20:04:55 sue Exp $
+# Awk script for generating client/server RPC code.
+#
+# This awk script generates most of the RPC routines for DB client/server
+# use. It also generates a template for server and client procedures. These
+# functions must still be edited, but are highly stylized and the initial
+# template gets you a fair way along the path.
+#
+# This awk script requires that these variables be set when it is called:
+#
+# client_file -- the C source file being created for client code
+#	cproto_file	-- the header file created for client prototypes
+# ctmpl_file -- the C template file being created for client code
+# sed_file -- the sed file created to alter server proc code
+# server_file -- the C source file being created for server code
+#	sproto_file	-- the header file created for server prototypes
+# stmpl_file -- the C template file being created for server code
+# xdr_file -- the XDR message file created
+#
+# And stdin must be the input file that defines the RPC setup.
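+#
+# As an illustrative, hypothetical invocation (the file names below are
+# placeholders; the real ones come from the distribution build machinery):
+#
+#	awk -f gen_rpc.awk \
+#	    -v client_file=client.c -v cproto_file=client_ext.in \
+#	    -v ctmpl_file=client_tmpl.c -v sed_file=rpc.sed \
+#	    -v server_file=server.c -v sproto_file=server_ext.in \
+#	    -v stmpl_file=server_tmpl.c -v xdr_file=db_server.x \
+#	    < rpc.src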
+BEGIN {
+ if (client_file == "" || cproto_file == "" || ctmpl_file == "" ||
+ sed_file == "" || server_file == "" ||
+ sproto_file == "" || stmpl_file == "" || xdr_file == "") {
+ print "Usage: gen_rpc.awk requires these variables be set:"
+ print "\tclient_file\t-- the client C source file being created"
+ print "\tcproto_file\t-- the client prototype header created"
+ print "\tctmpl_file\t-- the client template file being created"
+ print "\tsed_file\t-- the sed command file being created"
+ print "\tserver_file\t-- the server C source file being created"
+ print "\tsproto_file\t-- the server prototype header created"
+ print "\tstmpl_file\t-- the server template file being created"
+ print "\txdr_file\t-- the XDR message file being created"
+ error = 1; exit
+ }
+
+ FS="\t\t*"
+ CFILE=client_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > CFILE
+
+ CHFILE=cproto_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > CHFILE
+
+ TFILE = ctmpl_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > TFILE
+
+ SFILE = server_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > SFILE
+
+ SHFILE=sproto_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > SHFILE
+
+ # Server procedure template and a sed file to massage an existing
+ # template source file to change args.
+	# SEDFILE should have the same name as PFILE, but with a .c suffix.
+ #
+ PFILE = stmpl_file
+ SEDFILE = sed_file
+ printf("") > SEDFILE
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > PFILE
+
+ XFILE = xdr_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > XFILE
+ nendlist = 1;
+}
+END {
+ printf("#endif /* HAVE_RPC */\n") >> CFILE
+ printf("#endif /* HAVE_RPC */\n") >> TFILE
+ printf("program DB_SERVERPROG {\n") >> XFILE
+ printf("\tversion DB_SERVERVERS {\n") >> XFILE
+
+ for (i = 1; i < nendlist; ++i)
+ printf("\t\t%s;\n", endlist[i]) >> XFILE
+
+ printf("\t} = 1;\n") >> XFILE
+ printf("} = 351457;\n") >> XFILE
+}
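+
+# Sketch of the effect: for a message block named "foo" registered as the
+# first entry ("foo" is a placeholder), the END block above appends a
+# procedure line of this general form to the XDR program definition:
+#
+#	__foo_reply __DB_foo(__foo_msg) = 1;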
+
+/^[ ]*BEGIN/ {
+ name = $2;
+ msgid = $3;
+ nofunc_code = 0;
+ funcvars = 0;
+ gen_code = 1;
+ ret_code = 0;
+ if ($4 == "NOCLNTCODE")
+ gen_code = 0;
+ if ($4 == "NOFUNC")
+ nofunc_code = 1;
+ if ($4 == "RETCODE")
+ ret_code = 1;
+
+ nvars = 0;
+ rvars = 0;
+ newvars = 0;
+ db_handle = 0;
+ env_handle = 0;
+ dbc_handle = 0;
+ txn_handle = 0;
+ mp_handle = 0;
+ dbt_handle = 0;
+ xdr_free = 0;
+}
+/^[ ]*ARG/ {
+ rpc_type[nvars] = $2;
+ c_type[nvars] = $3;
+ pr_type[nvars] = $3;
+ args[nvars] = $4;
+ func_arg[nvars] = 0;
+ if (rpc_type[nvars] == "LIST") {
+ list_type[nvars] = $5;
+ } else
+ list_type[nvars] = 0;
+
+ if (c_type[nvars] == "DBT *")
+ dbt_handle = 1;
+
+ if (c_type[nvars] == "DB_ENV *") {
+ ctp_type[nvars] = "CT_ENV";
+ env_handle = 1;
+ env_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB *") {
+ ctp_type[nvars] = "CT_DB";
+ db_handle = 1;
+ db_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DBC *") {
+ ctp_type[nvars] = "CT_CURSOR";
+ dbc_handle = 1;
+ dbc_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB_TXN *") {
+ ctp_type[nvars] = "CT_TXN";
+ txn_handle = 1;
+ txn_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB_MPOOLFILE *") {
+ mp_handle = 1;
+ mp_idx = nvars;
+ }
+
+ ++nvars;
+}
+/^[ ]*FUNCPROT/ {
+ pr_type[nvars] = $2;
+}
+/^[ ]*FUNCARG/ {
+ rpc_type[nvars] = "IGNORE";
+ c_type[nvars] = $2;
+ args[nvars] = sprintf("func%d", funcvars);
+ func_arg[nvars] = 1;
+ ++funcvars;
+ ++nvars;
+}
+/^[ ]*RET/ {
+ ret_type[rvars] = $2;
+ retc_type[rvars] = $3;
+ retargs[rvars] = $4;
+ if (ret_type[rvars] == "LIST" || ret_type[rvars] == "DBT") {
+ xdr_free = 1;
+ }
+ if (ret_type[rvars] == "LIST") {
+ retlist_type[rvars] = $5;
+ } else
+ retlist_type[rvars] = 0;
+
+ ++rvars;
+}
+/^[ ]*END/ {
+ #
+ # =====================================================
+ # Generate Client Nofunc code first if necessary
+	# NOTE: This code must come first, because we don't want to
+	# generate any code other than this function; so before we write
+	# anything out to the XDR and server files, we just generate this
+	# and move on if this is all we are doing.
+ #
+ if (nofunc_code == 1) {
+ #
+ # First time through, put out the general illegal function
+ #
+ if (first_nofunc == 0) {
+ printf("int __dbcl_rpc_illegal ") >> CHFILE
+ printf("__P((DB_ENV *, char *));\n") >> CHFILE
+ printf("int\n__dbcl_rpc_illegal(dbenv, name)\n") \
+ >> CFILE
+ printf("\tDB_ENV *dbenv;\n\tchar *name;\n") >> CFILE
+ printf("{\n\t__db_err(dbenv,\n") >> CFILE
+ printf("\t \"%%s method meaningless in RPC") >> CFILE
+ printf(" environment\", name);\n") >> CFILE
+ printf("\treturn (__db_eopnotsup(dbenv));\n") >> CFILE
+ printf("}\n\n") >> CFILE
+ first_nofunc = 1
+ }
+ #
+ # If we are doing a list, spit out prototype decl.
+ #
+ for (i = 0; i < nvars; i++) {
+ if (rpc_type[i] != "LIST")
+ continue;
+ printf("static int __dbcl_%s_%slist __P((", \
+ name, args[i]) >> CFILE
+ printf("__%s_%slist **, ", name, args[i]) >> CFILE
+ if (list_type[i] == "STRING")
+ printf("%s));\n", c_type[i]) >> CFILE
+ if (list_type[i] == "INT")
+ printf("u_int32_t));\n") >> CFILE
+ if (list_type[i] == "ID")
+ printf("%s));\n", c_type[i]) >> CFILE
+ printf("static void __dbcl_%s_%sfree __P((", \
+ name, args[i]) >> CFILE
+ printf("__%s_%slist **));\n", name, args[i]) >> CFILE
+ }
+ #
+ # Spit out PUBLIC prototypes.
+ #
+ printf("int __dbcl_%s __P((",name) >> CHFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, pr_type[i]) >> CHFILE
+ sep = ", ";
+ }
+ printf("));\n") >> CHFILE
+ #
+ # Spit out function name/args.
+ #
+ printf("int\n") >> CFILE
+ printf("__dbcl_%s(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf(")\n") >> CFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) \
+ >> CFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> CFILE
+
+ #
+ # Call error function and return EINVAL
+ #
+ printf("{\n") >> CFILE
+
+ #
+ # If we don't have a local env, set one.
+ #
+ if (env_handle == 0) {
+ printf("\tDB_ENV *dbenv;\n\n") >> CFILE
+ if (db_handle)
+ printf("\tdbenv = %s->dbenv;\n", \
+ args[db_idx]) >> CFILE
+ else if (dbc_handle)
+ printf("\tdbenv = %s->dbp->dbenv;\n", \
+ args[dbc_idx]) >> CFILE
+ else if (txn_handle)
+ printf("\tdbenv = %s->mgrp->dbenv;\n", \
+ args[txn_idx]) >> CFILE
+ else if (mp_handle)
+ printf("\tdbenv = %s->dbmp->dbenv;\n", \
+ args[mp_idx]) >> CFILE
+ else
+ printf("\tdbenv = NULL;\n") >> CFILE
+ }
+ #
+ # Quiet the compiler for all variables.
+ #
+ # NOTE: Index 'i' starts at 1, not 0. Our first arg is
+ # the handle we need to get to the env, and we do not want
+ # to COMPQUIET that one.
+ for (i = 1; i < nvars; ++i) {
+ if (rpc_type[i] == "CONST" || rpc_type[i] == "DBT" ||
+ rpc_type[i] == "LIST" || rpc_type[i] == "STRING") {
+ printf("\tCOMPQUIET(%s, NULL);\n", args[i]) \
+ >> CFILE
+ }
+ if (rpc_type[i] == "INT" || rpc_type[i] == "IGNORE" ||
+ rpc_type[i] == "ID") {
+ printf("\tCOMPQUIET(%s, 0);\n", args[i]) \
+ >> CFILE
+ }
+ }
+
+ if (!env_handle) {
+ printf("\treturn (__dbcl_rpc_illegal(dbenv, ") >> CFILE
+ printf("\"%s\"));\n", name) >> CFILE
+ } else
+ printf("\treturn (__dbcl_rpc_illegal(%s, \"%s\"));\n", \
+ args[env_idx], name) >> CFILE
+ printf("}\n\n") >> CFILE
+
+ next;
+ }
+
+ #
+ # =====================================================
+ # XDR messages.
+ #
+ printf("\n") >> XFILE
+ #
+ # If there are any lists, generate the structure to contain them.
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("struct __%s_%slist {\n", name, args[i]) >> XFILE
+ printf("\topaque ent<>;\n") >> XFILE
+ printf("\t__%s_%slist *next;\n", name, args[i]) >> XFILE
+ printf("};\n\n") >> XFILE
+ }
+ }
+ printf("struct __%s_msg {\n", name) >> XFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tunsigned int %scl_id;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tstring %s<>;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\tunsigned int %s;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\tunsigned int %sdlen;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sdoff;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sflags;\n", args[i]) >> XFILE
+ printf("\topaque %sdata<>;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("\t__%s_%slist *%slist;\n", \
+ name, args[i], args[i]) >> XFILE
+ }
+ }
+ printf("};\n") >> XFILE
+
+ printf("\n") >> XFILE
+ #
+ # If there are any lists, generate the structure to contain them.
+ #
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "LIST") {
+ printf("struct __%s_%sreplist {\n", \
+ name, retargs[i]) >> XFILE
+ printf("\topaque ent<>;\n") >> XFILE
+ printf("\t__%s_%sreplist *next;\n", \
+ name, retargs[i]) >> XFILE
+ printf("};\n\n") >> XFILE
+ }
+ }
+ #
+ # Generate the reply message
+ #
+ printf("struct __%s_reply {\n", name) >> XFILE
+ printf("\tunsigned int status;\n") >> XFILE
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "ID") {
+ printf("\tunsigned int %scl_id;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "STRING") {
+ printf("\tstring %s<>;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "INT") {
+ printf("\tunsigned int %s;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "DBL") {
+ printf("\tdouble %s;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "DBT") {
+ printf("\topaque %sdata<>;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "LIST") {
+ printf("\t__%s_%sreplist *%slist;\n", \
+ name, retargs[i], retargs[i]) >> XFILE
+ }
+ }
+ printf("};\n") >> XFILE
+
+ endlist[nendlist] = \
+ sprintf("__%s_reply __DB_%s(__%s_msg) = %d", \
+ name, name, name, nendlist);
+ nendlist++;
+
+ #
+ # =====================================================
+ # File headers, if necessary.
+ #
+ if (first == 0) {
+ printf("#include \"db_config.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#ifdef HAVE_RPC\n") >> CFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE
+ printf("#include <sys/types.h>\n") >> CFILE
+ printf("#include <rpc/rpc.h>\n") >> CFILE
+ printf("#include <rpc/xdr.h>\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include <errno.h>\n") >> CFILE
+ printf("#include <string.h>\n") >> CFILE
+ printf("#endif\n") >> CFILE
+ printf("#include \"db_server.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include \"db_int.h\"\n") >> CFILE
+ printf("#include \"db_page.h\"\n") >> CFILE
+ printf("#include \"db_ext.h\"\n") >> CFILE
+ printf("#include \"mp.h\"\n") >> CFILE
+ printf("#include \"rpc_client_ext.h\"\n") >> CFILE
+ printf("#include \"txn.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ n = split(CHFILE, hpieces, "/");
+ printf("#include \"%s\"\n", hpieces[n]) >> CFILE
+ printf("\n") >> CFILE
+
+ printf("#include \"db_config.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#ifdef HAVE_RPC\n") >> TFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
+ printf("#include <sys/types.h>\n") >> TFILE
+ printf("#include <rpc/rpc.h>\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#include <errno.h>\n") >> TFILE
+ printf("#include <string.h>\n") >> TFILE
+ printf("#endif\n") >> TFILE
+ printf("#include \"db_server.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#include \"db_int.h\"\n") >> TFILE
+ printf("#include \"db_page.h\"\n") >> TFILE
+ printf("#include \"db_ext.h\"\n") >> TFILE
+ printf("#include \"txn.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+ n = split(CHFILE, hpieces, "/");
+ printf("#include \"%s\"\n", hpieces[n]) >> TFILE
+ printf("\n") >> TFILE
+
+ printf("#include \"db_config.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> SFILE
+ printf("#include <sys/types.h>\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include <rpc/rpc.h>\n") >> SFILE
+ printf("#include <rpc/xdr.h>\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include <errno.h>\n") >> SFILE
+ printf("#include <string.h>\n") >> SFILE
+ printf("#endif\n") >> SFILE
+ printf("#include \"db_server.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include \"db_int.h\"\n") >> SFILE
+ printf("#include \"db_server_int.h\"\n") >> SFILE
+ printf("#include \"rpc_server_ext.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+ n = split(SHFILE, hpieces, "/");
+ printf("#include \"%s\"\n", hpieces[n]) >> SFILE
+ printf("\n") >> SFILE
+
+ printf("#include \"db_config.h\"\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> PFILE
+ printf("#include <sys/types.h>\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include <rpc/rpc.h>\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include <errno.h>\n") >> PFILE
+ printf("#include <string.h>\n") >> PFILE
+ printf("#include \"db_server.h\"\n") >> PFILE
+ printf("#endif\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include \"db_int.h\"\n") >> PFILE
+ printf("#include \"db_server_int.h\"\n") >> PFILE
+ printf("#include \"rpc_server_ext.h\"\n") >> PFILE
+ printf("\n") >> PFILE
+ n = split(SHFILE, hpieces, "/");
+ printf("#include \"%s\"\n", hpieces[n]) >> PFILE
+ printf("\n") >> PFILE
+
+ first = 1;
+ }
+
+ #
+ # =====================================================
+ # Server functions.
+ #
+ # If we are doing a list, send out local list prototypes.
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] != "LIST")
+ continue;
+ if (list_type[i] != "STRING" && list_type[i] != "INT" &&
+ list_type[i] != "ID")
+ continue;
+ printf("int __db_%s_%slist __P((", name, args[i]) >> SFILE
+ printf("__%s_%slist *, ", name, args[i]) >> SFILE
+ if (list_type[i] == "STRING") {
+ printf("char ***));\n") >> SFILE
+ }
+ if (list_type[i] == "INT" || list_type[i] == "ID") {
+ printf("u_int32_t **));\n") >> SFILE
+ }
+ printf("void __db_%s_%sfree __P((", name, args[i]) >> SFILE
+ if (list_type[i] == "STRING")
+ printf("char **));\n\n") >> SFILE
+ if (list_type[i] == "INT" || list_type[i] == "ID")
+ printf("u_int32_t *));\n\n") >> SFILE
+
+ }
+ #
+ # First spit out PUBLIC prototypes for server functions.
+ #
+ printf("__%s_reply * __db_%s_%d __P((__%s_msg *));\n", \
+ name, name, msgid, name) >> SHFILE
+
+ printf("__%s_reply *\n", name) >> SFILE
+ printf("__db_%s_%d(req)\n", name, msgid) >> SFILE
+ printf("\t__%s_msg *req;\n", name) >> SFILE;
+ printf("{\n") >> SFILE
+ doing_list = 0;
+ #
+ # If we are doing a list, decompose it for server proc we'll call.
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] != "LIST")
+ continue;
+ doing_list = 1;
+ if (list_type[i] == "STRING")
+ printf("\tchar **__db_%slist;\n", args[i]) >> SFILE
+ if (list_type[i] == "ID" || list_type[i] == "INT")
+ printf("\tu_int32_t *__db_%slist;\n", args[i]) >> SFILE
+ }
+ if (doing_list)
+ printf("\tint ret;\n") >> SFILE
+ printf("\tstatic __%s_reply reply; /* must be static */\n", \
+ name) >> SFILE
+ if (xdr_free) {
+ printf("\tstatic int __%s_free = 0; /* must be static */\n\n", \
+ name) >> SFILE
+ printf("\tif (__%s_free)\n", name) >> SFILE
+ printf("\t\txdr_free((xdrproc_t)xdr___%s_reply, (void *)&reply);\n", \
+ name) >> SFILE
+ printf("\t__%s_free = 0;\n", name) >> SFILE
+ printf("\n\t/* Reinitialize allocated fields */\n") >> SFILE
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "LIST") {
+ printf("\treply.%slist = NULL;\n", \
+ retargs[i]) >> SFILE
+ }
+ if (ret_type[i] == "DBT") {
+ printf("\treply.%sdata.%sdata_val = NULL;\n", \
+ retargs[i], retargs[i]) >> SFILE
+ }
+ }
+ }
+
+ need_out = 0;
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\n\tif ((ret = __db_%s_%slist(", \
+ name, args[i]) >> SFILE
+ printf("req->%slist, &__db_%slist)) != 0)\n", \
+ args[i], args[i]) >> SFILE
+ printf("\t\tgoto out;\n") >> SFILE
+ need_out = 1;
+ }
+ }
+
+ #
+ # Compose server proc to call. Decompose message components as args.
+ #
+ printf("\n\t__%s_%d_proc(", name, msgid) >> SFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("%sreq->%scl_id", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("%s(*req->%s == '\\0') ? NULL : req->%s", \
+ sep, args[i], args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("%sreq->%s", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("%s__db_%slist", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("%sreq->%sdlen", sep, args[i]) >> SFILE
+ sep = ",\n\t ";
+ printf("%sreq->%sdoff", sep, args[i]) >> SFILE
+ printf("%sreq->%sflags", sep, args[i]) >> SFILE
+ printf("%sreq->%sdata.%sdata_val", \
+ sep, args[i], args[i]) >> SFILE
+ printf("%sreq->%sdata.%sdata_len", \
+ sep, args[i], args[i]) >> SFILE
+ }
+ sep = ",\n\t ";
+ }
+ printf("%s&reply", sep) >> SFILE
+ if (xdr_free)
+ printf("%s&__%s_free);\n", sep, name) >> SFILE
+ else
+ printf(");\n\n") >> SFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t__db_%s_%sfree(__db_%slist);\n", \
+ name, args[i], args[i]) >> SFILE
+ }
+ }
+ if (need_out) {
+ printf("\nout:\n") >> SFILE
+ }
+ printf("\treturn (&reply);\n") >> SFILE
+ printf("}\n\n") >> SFILE
+
+ #
+ # If we are doing a list, write list functions for this op.
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] != "LIST")
+ continue;
+ if (list_type[i] != "STRING" && list_type[i] != "INT" &&
+ list_type[i] != "ID")
+ continue;
+ printf("int\n") >> SFILE
+ printf("__db_%s_%slist(locp, ppp)\n", name, args[i]) >> SFILE
+ printf("\t__%s_%slist *locp;\n", name, args[i]) >> SFILE
+ if (list_type[i] == "STRING") {
+ printf("\tchar ***ppp;\n{\n") >> SFILE
+ printf("\tchar **pp;\n") >> SFILE
+ }
+ if (list_type[i] == "INT" || list_type[i] == "ID") {
+ printf("\tu_int32_t **ppp;\n{\n") >> SFILE
+ printf("\tu_int32_t *pp;\n") >> SFILE
+ }
+ printf("\tint cnt, ret, size;\n") >> SFILE
+ printf("\t__%s_%slist *nl;\n\n", name, args[i]) >> SFILE
+ printf("\tfor (cnt = 0, nl = locp;") >> SFILE
+ printf(" nl != NULL; cnt++, nl = nl->next)\n\t\t;\n\n") >> SFILE
+ printf("\tif (cnt == 0) {\n") >> SFILE
+ printf("\t\t*ppp = NULL;\n") >> SFILE
+ printf("\t\treturn (0);\n\t}\n") >> SFILE
+ printf("\tsize = sizeof(*pp) * (cnt + 1);\n") >> SFILE
+ printf("\tif ((ret = __os_malloc(NULL, size, ") >> SFILE
+ printf("NULL, ppp)) != 0)\n") >> SFILE
+ printf("\t\treturn (ret);\n") >> SFILE
+ printf("\tmemset(*ppp, 0, size);\n") >> SFILE
+ printf("\tfor (pp = *ppp, nl = locp;") >> SFILE
+ printf(" nl != NULL; nl = nl->next, pp++) {\n") >> SFILE
+ if (list_type[i] == "STRING") {
+ printf("\t\tif ((ret = __os_malloc(NULL ,") >> SFILE
+ printf("nl->ent.ent_len + 1, NULL, pp)) != 0)\n") \
+ >> SFILE
+ printf("\t\t\tgoto out;\n") >> SFILE
+ printf("\t\tif ((ret = __os_strdup(NULL, ") >> SFILE
+ printf("(char *)nl->ent.ent_val, pp)) != 0)\n") >> SFILE
+ printf("\t\t\tgoto out;\n") >> SFILE
+ }
+ if (list_type[i] == "INT" || list_type[i] == "ID")
+ printf("\t\t*pp = *(u_int32_t *)nl->ent.ent_val;\n") \
+ >> SFILE
+ printf("\t}\n") >> SFILE
+ printf("\treturn (0);\n") >> SFILE
+ if (list_type[i] == "STRING") {
+ printf("out:\n") >> SFILE
+ printf("\t__db_%s_%sfree(*ppp);\n", \
+ name, args[i]) >> SFILE
+ printf("\treturn (ret);\n") >> SFILE
+ }
+ printf("}\n\n") >> SFILE
+
+ printf("void\n") >> SFILE
+ printf("__db_%s_%sfree(pp)\n", name, args[i]) >> SFILE
+
+ if (list_type[i] == "STRING")
+ printf("\tchar **pp;\n") >> SFILE
+ if (list_type[i] == "INT" || list_type[i] == "ID")
+ printf("\tu_int32_t *pp;\n") >> SFILE
+
+ printf("{\n") >> SFILE
+ printf("\tsize_t size;\n") >> SFILE
+
+ if (list_type[i] == "STRING")
+ printf("\tchar **p;\n\n") >> SFILE
+ if (list_type[i] == "INT" || list_type[i] == "ID")
+ printf("\tu_int32_t *p;\n\n") >> SFILE
+
+ printf("\tif (pp == NULL)\n\t\treturn;\n") >> SFILE
+ printf("\tsize = sizeof(*p);\n") >> SFILE
+ printf("\tfor (p = pp; *p != 0; p++) {\n") >> SFILE
+ printf("\t\tsize += sizeof(*p);\n") >> SFILE
+
+ if (list_type[i] == "STRING")
+ printf("\t\t__os_free(*p, strlen(*p)+1);\n") >> SFILE
+ printf("\t}\n") >> SFILE
+ printf("\t__os_free(pp, size);\n") >> SFILE
+ printf("}\n\n") >> SFILE
+ }
+
+ #
+ # =====================================================
+ # Generate Procedure Template Server code
+ #
+ # Produce SED file commands if needed at the same time
+ #
+ # Start with PUBLIC prototypes
+ #
+ printf("void __%s_%d_proc __P((", name, msgid) >> SHFILE
+ sep = "";
+ argcount = 0;
+ for (i = 0; i < nvars; ++i) {
+ argcount++;
+ split_lines(1);
+ if (argcount == 0) {
+ sep = "";
+ }
+ if (rpc_type[i] == "IGNORE")
+ continue;
+ if (rpc_type[i] == "ID") {
+ printf("%slong", sep) >> SHFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("%schar *", sep) >> SHFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("%su_int32_t", sep) >> SHFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "STRING") {
+ printf("%schar **", sep) >> SHFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "INT") {
+ printf("%su_int32_t *", sep) >> SHFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "ID") {
+ printf("%su_int32_t *", sep) >> SHFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("%su_int32_t", sep) >> SHFILE
+ sep = ", ";
+ argcount++;
+ split_lines(1);
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%su_int32_t", sep) >> SHFILE
+ argcount++;
+ split_lines(1);
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%su_int32_t", sep) >> SHFILE
+ argcount++;
+ split_lines(1);
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%svoid *", sep) >> SHFILE
+ argcount++;
+ split_lines(1);
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%su_int32_t", sep) >> SHFILE
+ }
+ sep = ", ";
+ }
+ printf("%s__%s_reply *", sep, name) >> SHFILE
+ if (xdr_free) {
+ printf("%sint *));\n", sep) >> SHFILE
+ } else {
+ printf("));\n") >> SHFILE
+ }
+ #
+ # Spit out function name and arg list
+ #
+ printf("/^\\/\\* BEGIN __%s_%d_proc/,/^\\/\\* END __%s_%d_proc/c\\\n", \
+ name, msgid, name, msgid) >> SEDFILE
+
+ printf("/* BEGIN __%s_%d_proc */\n", name, msgid) >> PFILE
+ printf("/* BEGIN __%s_%d_proc */\\\n", name, msgid) >> SEDFILE
+ printf("void\n") >> PFILE
+ printf("void\\\n") >> SEDFILE
+ printf("__%s_%d_proc(", name, msgid) >> PFILE
+ printf("__%s_%d_proc(", name, msgid) >> SEDFILE
+ sep = "";
+ argcount = 0;
+ for (i = 0; i < nvars; ++i) {
+ argcount++;
+ split_lines(0);
+ if (argcount == 0) {
+ sep = "";
+ }
+ if (rpc_type[i] == "IGNORE")
+ continue;
+ if (rpc_type[i] == "ID") {
+ printf("%s%scl_id", sep, args[i]) >> PFILE
+ printf("%s%scl_id", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("%s%slist", sep, args[i]) >> PFILE
+ printf("%s%slist", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("%s%sdlen", sep, args[i]) >> PFILE
+ printf("%s%sdlen", sep, args[i]) >> SEDFILE
+ sep = ", ";
+ argcount++;
+ split_lines(0);
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sdoff", sep, args[i]) >> PFILE
+ printf("%s%sdoff", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines(0);
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sflags", sep, args[i]) >> PFILE
+ printf("%s%sflags", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines(0);
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sdata", sep, args[i]) >> PFILE
+ printf("%s%sdata", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines(0);
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%ssize", sep, args[i]) >> PFILE
+ printf("%s%ssize", sep, args[i]) >> SEDFILE
+ }
+ sep = ", ";
+ }
+ printf("%sreplyp",sep) >> PFILE
+ printf("%sreplyp",sep) >> SEDFILE
+ if (xdr_free) {
+ printf("%sfreep)\n",sep) >> PFILE
+ printf("%sfreep)\\\n",sep) >> SEDFILE
+ } else {
+ printf(")\n") >> PFILE
+ printf(")\\\n") >> SEDFILE
+ }
+ #
+ # Spit out arg types/names;
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tlong %scl_id;\n", args[i]) >> PFILE
+ printf("\\\tlong %scl_id;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tchar *%s;\n", args[i]) >> PFILE
+ printf("\\\tchar *%s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\tu_int32_t %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "STRING") {
+ printf("\tchar ** %slist;\n", args[i]) >> PFILE
+ printf("\\\tchar ** %slist;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "INT") {
+ printf("\tu_int32_t * %slist;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t * %slist;\\\n", \
+ args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "ID") {
+ printf("\tu_int32_t * %slist;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t * %slist;\\\n", args[i]) \
+ >> SEDFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\tu_int32_t %sdlen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sdlen;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sdoff;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sdoff;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sflags;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sflags;\\\n", args[i]) >> SEDFILE
+ printf("\tvoid *%sdata;\n", args[i]) >> PFILE
+ printf("\\\tvoid *%sdata;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %ssize;\\\n", args[i]) >> SEDFILE
+ }
+ }
+ printf("\t__%s_reply *replyp;\n",name) >> PFILE
+ printf("\\\t__%s_reply *replyp;\\\n",name) >> SEDFILE
+ if (xdr_free) {
+ printf("\tint * freep;\n") >> PFILE
+ printf("\\\tint * freep;\\\n") >> SEDFILE
+ }
+
+ printf("/* END __%s_%d_proc */\n", name, msgid) >> PFILE
+ printf("/* END __%s_%d_proc */\n", name, msgid) >> SEDFILE
+
+ #
+ # Function body
+ #
+ printf("{\n") >> PFILE
+ printf("\tint ret;\n") >> PFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\t%s %s;\n", c_type[i], args[i]) >> PFILE
+ printf("\tct_entry *%s_ctp;\n", args[i]) >> PFILE
+ }
+ }
+ printf("\n") >> PFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tACTIVATE_CTP(%s_ctp, %scl_id, %s);\n", \
+ args[i], args[i], ctp_type[i]) >> PFILE
+ printf("\t%s = (%s)%s_ctp->ct_anyp;\n", \
+ args[i], c_type[i], args[i]) >> PFILE
+ }
+ }
+ printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> PFILE
+ printf("\treplyp->status = ret;\n") >> PFILE
+ printf("\treturn;\n") >> PFILE
+ printf("}\n\n") >> PFILE
+
+ #
+ # If we don't want client code generated, go on to next.
+ #
+ if (gen_code == 0)
+ next;
+
+ #
+ # =====================================================
+ # Generate Client code
+ #
+ # If we are doing a list, spit out prototype decl.
+ #
+ for (i = 0; i < nvars; i++) {
+ if (rpc_type[i] != "LIST")
+ continue;
+ printf("static int __dbcl_%s_%slist __P((", \
+ name, args[i]) >> CFILE
+ printf("__%s_%slist **, ", name, args[i]) >> CFILE
+ if (list_type[i] == "STRING")
+ printf("%s));\n", c_type[i]) >> CFILE
+ if (list_type[i] == "INT")
+ printf("u_int32_t));\n") >> CFILE
+ if (list_type[i] == "ID")
+ printf("%s));\n", c_type[i]) >> CFILE
+ printf("static void __dbcl_%s_%sfree __P((", \
+ name, args[i]) >> CFILE
+ printf("__%s_%slist **));\n", name, args[i]) >> CFILE
+ }
+ #
+ # Spit out PUBLIC prototypes.
+ #
+ printf("int __dbcl_%s __P((",name) >> CHFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, pr_type[i]) >> CHFILE
+ sep = ", ";
+ }
+ printf("));\n") >> CHFILE
+ #
+ # Spit out function name/args.
+ #
+ printf("int\n") >> CFILE
+ printf("__dbcl_%s(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf(")\n") >> CFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) >> CFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> CFILE
+
+ printf("{\n") >> CFILE
+ printf("\tCLIENT *cl;\n") >> CFILE
+ printf("\t__%s_msg req;\n", name) >> CFILE
+ printf("\tstatic __%s_reply *replyp = NULL;\n", name) >> CFILE;
+ printf("\tint ret;\n") >> CFILE
+ if (!env_handle)
+ printf("\tDB_ENV *dbenv;\n") >> CFILE
+
+ printf("\n") >> CFILE
+ printf("\tret = 0;\n") >> CFILE
+ if (!env_handle) {
+ printf("\tdbenv = NULL;\n") >> CFILE
+ if (db_handle)
+ printf("\tdbenv = %s->dbenv;\n", args[db_idx]) >> CFILE
+ else if (dbc_handle)
+ printf("\tdbenv = %s->dbp->dbenv;\n", \
+ args[dbc_idx]) >> CFILE
+ else if (txn_handle)
+ printf("\tdbenv = %s->mgrp->dbenv;\n", \
+ args[txn_idx]) >> CFILE
+ printf("\tif (dbenv == NULL || dbenv->cl_handle == NULL) {\n") \
+ >> CFILE
+ printf("\t\t__db_err(dbenv, \"No server environment.\");\n") \
+ >> CFILE
+ } else {
+ printf("\tif (%s == NULL || %s->cl_handle == NULL) {\n", \
+ args[env_idx], args[env_idx]) >> CFILE
+ printf("\t\t__db_err(%s, \"No server environment.\");\n", \
+ args[env_idx]) >> CFILE
+ }
+ printf("\t\treturn (DB_NOSERVER);\n") >> CFILE
+ printf("\t}\n") >> CFILE
+ printf("\n") >> CFILE
+
+ #
+ # Free old reply if there was one.
+ #
+ printf("\tif (replyp != NULL) {\n") >> CFILE
+ printf("\t\txdr_free((xdrproc_t)xdr___%s_reply, (void *)replyp);\n", \
+ name) >> CFILE
+ printf("\t\treplyp = NULL;\n\t}\n") >> CFILE
+ if (!env_handle)
+ printf("\tcl = (CLIENT *)dbenv->cl_handle;\n") >> CFILE
+ else
+ printf("\tcl = (CLIENT *)%s->cl_handle;\n", \
+ args[env_idx]) >> CFILE
+
+ printf("\n") >> CFILE
+
+ #
+ # If there is a function arg, check that it is NULL
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (func_arg[i] != 1)
+ continue;
+ printf("\tif (%s != NULL) {\n", args[i]) >> CFILE
+ printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE
+ printf("\"User functions not supported in RPC.\");\n") >> CFILE
+ printf("\t\treturn (EINVAL);\n\t}\n") >> CFILE
+ }
+
+ #
+ # Compose message components
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tif (%s == NULL)\n", args[i]) >> CFILE
+ printf("\t\treq.%scl_id = 0;\n\telse\n", \
+ args[i]) >> CFILE
+ if (c_type[i] == "DB_TXN *") {
+ printf("\t\treq.%scl_id = %s->txnid;\n", \
+ args[i], args[i]) >> CFILE
+ } else {
+ printf("\t\treq.%scl_id = %s->cl_id;\n", \
+ args[i], args[i]) >> CFILE
+ }
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\treq.%s = %s;\n", args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tif (%s == NULL)\n", args[i]) >> CFILE
+ printf("\t\treq.%s = \"\";\n", args[i]) >> CFILE
+ printf("\telse\n") >> CFILE
+ printf("\t\treq.%s = (char *)%s;\n", \
+ args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\treq.%sdlen = %s->dlen;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\treq.%sdoff = %s->doff;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\treq.%sflags = %s->flags;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\treq.%sdata.%sdata_val = %s->data;\n", \
+ args[i], args[i], args[i]) >> CFILE
+ printf("\treq.%sdata.%sdata_len = %s->size;\n", \
+ args[i], args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("\tif ((ret = __dbcl_%s_%slist(", \
+ name, args[i]) >> CFILE
+ printf("&req.%slist, %s)) != 0)\n", \
+ args[i], args[i]) >> CFILE
+ printf("\t\tgoto out;\n") >> CFILE
+ }
+ }
+
+ printf("\n") >> CFILE
+ printf("\treplyp = __db_%s_%d(&req, cl);\n", name, msgid) >> CFILE
+ printf("\tif (replyp == NULL) {\n") >> CFILE
+ if (!env_handle) {
+ printf("\t\t__db_err(dbenv, ") >> CFILE
+ printf("clnt_sperror(cl, \"Berkeley DB\"));\n") >> CFILE
+ } else {
+ printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE
+ printf("clnt_sperror(cl, \"Berkeley DB\"));\n") >> CFILE
+ }
+ printf("\t\tret = DB_NOSERVER;\n") >> CFILE
+ printf("\t\tgoto out;\n") >> CFILE
+ printf("\t}\n") >> CFILE
+
+ if (ret_code == 0) {
+ printf("\tret = replyp->status;\n") >> CFILE
+ } else {
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t__dbcl_%s_%sfree(&req.%slist);\n", \
+ name, args[i], args[i]) >> CFILE
+ }
+ }
+ printf("\treturn (__dbcl_%s_ret(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf("%sreplyp));\n", sep) >> CFILE
+ }
+ printf("out:\n") >> CFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t__dbcl_%s_%sfree(&req.%slist);\n", \
+ name, args[i], args[i]) >> CFILE
+ }
+ }
+ printf("\treturn (ret);\n") >> CFILE
+ printf("}\n\n") >> CFILE
+
+ #
+ # If we are doing a list, write list functions for op.
+ #
+ for (i = 0; i < nvars; i++) {
+ if (rpc_type[i] != "LIST")
+ continue;
+ printf("int\n__dbcl_%s_%slist(locp, pp)\n", \
+ name, args[i]) >> CFILE
+ printf("\t__%s_%slist **locp;\n", name, args[i]) >> CFILE
+ if (list_type[i] == "STRING")
+ printf("\t%s pp;\n{\n\t%s p;\n", \
+ c_type[i], c_type[i]) >> CFILE
+ if (list_type[i] == "INT")
+ printf("\tu_int32_t *pp;\n{\n\tu_int32_t *p, *q;\n") \
+ >> CFILE
+ if (list_type[i] == "ID")
+ printf("\t%s pp;\n{\n\t%s p;\n\tu_int32_t *q;\n", \
+ c_type[i], c_type[i]) >> CFILE
+
+ printf("\tint ret;\n") >> CFILE
+ printf("\t__%s_%slist *nl, **nlp;\n\n", name, args[i]) >> CFILE
+ printf("\t*locp = NULL;\n") >> CFILE
+ printf("\tif (pp == NULL)\n\t\treturn (0);\n") >> CFILE
+ printf("\tnlp = locp;\n") >> CFILE
+ printf("\tfor (p = pp; *p != 0; p++) {\n") >> CFILE
+ printf("\t\tif ((ret = __os_malloc(NULL, ") >> CFILE
+ printf("sizeof(*nl), NULL, nlp)) != 0)\n") >> CFILE
+ printf("\t\t\tgoto out;\n") >> CFILE
+ printf("\t\tnl = *nlp;\n") >> CFILE
+ printf("\t\tnl->next = NULL;\n") >> CFILE
+ printf("\t\tnl->ent.ent_val = NULL;\n") >> CFILE
+ printf("\t\tnl->ent.ent_len = 0;\n") >> CFILE
+ if (list_type[i] == "STRING") {
+ printf("\t\tif ((ret = __os_strdup(NULL, ") >> CFILE
+ printf("*p, &nl->ent.ent_val)) != 0)\n") >> CFILE
+ printf("\t\t\tgoto out;\n") >> CFILE
+ printf("\t\tnl->ent.ent_len = strlen(*p)+1;\n") >> CFILE
+ }
+ if (list_type[i] == "INT") {
+ printf("\t\tif ((ret = __os_malloc(NULL, ") >> CFILE
+ printf("sizeof(%s), NULL, &nl->ent.ent_val)) != 0)\n", \
+ c_type[i]) >> CFILE
+ printf("\t\t\tgoto out;\n") >> CFILE
+ printf("\t\tq = (u_int32_t *)nl->ent.ent_val;\n") \
+ >> CFILE
+ printf("\t\t*q = *p;\n") >> CFILE
+ printf("\t\tnl->ent.ent_len = sizeof(%s);\n", \
+ c_type[i]) >> CFILE
+ }
+ if (list_type[i] == "ID") {
+ printf("\t\tif ((ret = __os_malloc(NULL, ") >> CFILE
+ printf("sizeof(u_int32_t),") >> CFILE
+ printf(" NULL, &nl->ent.ent_val)) != 0)\n") >> CFILE
+ printf("\t\t\tgoto out;\n") >> CFILE
+ printf("\t\tq = (u_int32_t *)nl->ent.ent_val;\n") \
+ >> CFILE
+ printf("\t\t*q = (*p)->cl_id;\n") >> CFILE
+ printf("\t\tnl->ent.ent_len = sizeof(u_int32_t);\n") \
+ >> CFILE
+ }
+ printf("\t\tnlp = &nl->next;\n") >> CFILE
+ printf("\t}\n") >> CFILE
+ printf("\treturn (0);\n") >> CFILE
+ printf("out:\n") >> CFILE
+ printf("\t__dbcl_%s_%sfree(locp);\n", name, args[i]) >> CFILE
+ printf("\treturn (ret);\n") >> CFILE
+
+ printf("}\n\n") >> CFILE
+
+ printf("void\n__dbcl_%s_%sfree(locp)\n", name, args[i]) >> CFILE
+ printf("\t__%s_%slist **locp;\n", name, args[i]) >> CFILE
+ printf("{\n") >> CFILE
+ printf("\t__%s_%slist *nl, *nl1;\n\n", name, args[i]) >> CFILE
+ printf("\tif (locp == NULL)\n\t\treturn;\n") >> CFILE
+ printf("\tfor (nl = *locp; nl != NULL; nl = nl1) {\n") >> CFILE
+ printf("\t\tnl1 = nl->next;\n") >> CFILE
+ printf("\t\tif (nl->ent.ent_val)\n") >> CFILE
+ printf("\t\t\t__os_free(nl->ent.ent_val, nl->ent.ent_len);\n") \
+ >> CFILE
+ printf("\t\t__os_free(nl, sizeof(*nl));\n") >> CFILE
+ printf("\t}\n}\n\n") >> CFILE
+ }
+ #
+ # Generate Client Template code
+ #
+ if (ret_code) {
+ #
+ # If we are doing a list, write prototypes
+ #
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] != "LIST")
+ continue;
+ if (retlist_type[i] != "STRING" &&
+ retlist_type[i] != "INT" && list_type[i] != "ID")
+ continue;
+ printf("int __db_%s_%sreplist __P((", \
+ name, retargs[i]) >> TFILE
+ printf("__%s_%sreplist, ", \
+ name, retargs[i]) >> TFILE
+ if (retlist_type[i] == "STRING") {
+ printf("char ***));\n") >> TFILE
+ }
+ if (retlist_type[i] == "INT" ||
+ retlist_type[i] == "ID") {
+ printf("u_int32_t **));\n") >> TFILE
+ }
+ printf("void __db_%s_%sfree __P((", \
+ name, retargs[i]) >> TFILE
+ if (retlist_type[i] == "STRING")
+ printf("char **));\n") >> TFILE
+ if (retlist_type[i] == "INT" || retlist_type[i] == "ID")
+ printf("u_int32_t *));\n\n") >> TFILE
+ }
+
+ printf("int __dbcl_%s_ret __P((", name) >> CHFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, pr_type[i]) >> CHFILE
+ sep = ", ";
+ }
+ printf("%s__%s_reply *));\n", sep, name) >> CHFILE
+
+ printf("int\n") >> TFILE
+ printf("__dbcl_%s_ret(", name) >> TFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> TFILE
+ sep = ", ";
+ }
+ printf("%sreplyp)\n",sep) >> TFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) \
+ >> TFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> TFILE
+ printf("\t__%s_reply *replyp;\n", name) >> TFILE;
+ printf("{\n") >> TFILE
+ printf("\tint ret;\n") >> TFILE
+ #
+ # Local vars in template
+ #
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "ID" || ret_type[i] == "STRING" ||
+ ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ printf("\t%s %s;\n", \
+ retc_type[i], retargs[i]) >> TFILE
+ } else if (ret_type[i] == "LIST") {
+ if (retlist_type[i] == "STRING")
+ printf("\tchar **__db_%slist;\n", \
+ retargs[i]) >> TFILE
+ if (retlist_type[i] == "ID" ||
+ retlist_type[i] == "INT")
+ printf("\tu_int32_t *__db_%slist;\n", \
+ retargs[i]) >> TFILE
+ } else {
+ printf("\t/* %s %s; */\n", \
+ ret_type[i], retargs[i]) >> TFILE
+ }
+ }
+ #
+ # Client return code
+ #
+ printf("\n") >> TFILE
+ printf("\tif (replyp->status != 0)\n") >> TFILE
+ printf("\t\treturn (replyp->status);\n") >> TFILE
+ for (i = 0; i < rvars; ++i) {
+ varname = "";
+ if (ret_type[i] == "ID") {
+ varname = sprintf("%scl_id", retargs[i]);
+ }
+ if (ret_type[i] == "STRING") {
+ varname = retargs[i];
+ }
+ if (ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ varname = retargs[i];
+ }
+ if (ret_type[i] == "DBT") {
+ varname = sprintf("%sdata", retargs[i]);
+ }
+ if (ret_type[i] == "ID" || ret_type[i] == "STRING" ||
+ ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ printf("\t%s = replyp->%s;\n", \
+ retargs[i], varname) >> TFILE
+ } else if (ret_type[i] == "LIST") {
+ printf("\n\tif ((ret = __db_%s_%slist(", \
+ name, retargs[i]) >> TFILE
+ printf("replyp->%slist, &__db_%slist)) != 0)", \
+ retargs[i], retargs[i]) >> TFILE
+ printf("\n\t\treturn (ret);\n") >> TFILE
+ printf("\n\t/*\n") >> TFILE
+ printf("\t * XXX Handle list\n") >> TFILE
+ printf("\t */\n\n") >> TFILE
+ printf("\t__db_%s_%sfree(__db_%slist);\n", \
+ name, retargs[i], retargs[i]) >> TFILE
+ } else {
+ printf("\t/* Handle replyp->%s; */\n", \
+ varname) >> TFILE
+ }
+ }
+ printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> TFILE
+ printf("\treturn (replyp->status);\n") >> TFILE
+ printf("}\n\n") >> TFILE
+ #
+ # If we are doing a list, write list functions for this op.
+ #
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] != "LIST")
+ continue;
+ if (retlist_type[i] != "STRING" &&
+ retlist_type[i] != "INT" && list_type[i] != "ID")
+ continue;
+ printf("int\n") >> TFILE
+ printf("__db_%s_%sreplist(locp, ppp)\n", \
+ name, retargs[i]) >> TFILE
+ printf("\t__%s_%sreplist *locp;\n", \
+ name, retargs[i]) >> TFILE
+ if (retlist_type[i] == "STRING") {
+ printf("\tchar ***ppp;\n{\n") >> TFILE
+ printf("\tchar **pp;\n") >> TFILE
+ }
+ if (retlist_type[i] == "INT" ||
+ retlist_type[i] == "ID") {
+ printf("\tu_int32_t **ppp;\n{\n") >> TFILE
+ printf("\tu_int32_t *pp;\n") >> TFILE
+ }
+
+ printf("\tint cnt, ret, size;\n") >> TFILE
+ printf("\t__%s_%sreplist *nl;\n\n", \
+ name, retargs[i]) >> TFILE
+ printf("\tfor (cnt = 0, nl = locp; ") >> TFILE
+ printf("nl != NULL; cnt++, nl = nl->next)\n\t\t;\n\n") \
+ >> TFILE
+ printf("\tif (cnt == 0) {\n") >> TFILE
+ printf("\t\t*ppp = NULL;\n") >> TFILE
+ printf("\t\treturn (0);\n\t}\n") >> TFILE
+ printf("\tsize = sizeof(*pp) * cnt;\n") >> TFILE
+ printf("\tif ((ret = __os_malloc(NULL, ") >> TFILE
+ printf("size, NULL, ppp)) != 0)\n") >> TFILE
+ printf("\t\treturn (ret);\n") >> TFILE
+ printf("\tmemset(*ppp, 0, size);\n") >> TFILE
+ printf("\tfor (pp = *ppp, nl = locp; ") >> TFILE
+ printf("nl != NULL; nl = nl->next, pp++) {\n") >> TFILE
+ if (retlist_type[i] == "STRING") {
+ printf("\t\tif ((ret = __os_malloc(NULL, ") \
+ >> TFILE
+ printf("nl->ent.ent_len + 1, NULL,") >> TFILE
+ printf(" pp)) != 0)\n") >> TFILE
+ printf("\t\t\tgoto out;\n") >> TFILE
+ printf("\t\tif ((ret = __os_strdup(") >> TFILE
+ printf("NULL, (char *)nl->ent.ent_val,") \
+ >> TFILE
+ printf(" pp)) != 0)\n") >> TFILE
+ printf("\t\t\tgoto out;\n") >> TFILE
+ }
+ if (retlist_type[i] == "INT" ||
+ retlist_type[i] == "ID") {
+ printf("\t\t*pp = *(u_int32_t *)") >> TFILE
+ printf("nl->ent.ent_val;\n") >> TFILE
+ }
+ printf("\t}\n") >> TFILE
+ printf("\treturn (0);\n") >> TFILE
+ printf("out:\n") >> TFILE
+ printf("\t__db_%s_%sfree(*ppp);\n", \
+ name, retargs[i]) >> TFILE
+ printf("\treturn (ret);\n") >> TFILE
+ printf("}\n\n") >> TFILE
+
+ printf("void\n") >> TFILE
+ printf("__db_%s_%sfree(pp)\n", \
+ name, retargs[i]) >> TFILE
+
+ if (retlist_type[i] == "STRING")
+ printf("\tchar **pp;\n") >> TFILE
+ if (retlist_type[i] == "INT" || retlist_type[i] == "ID")
+ printf("\tu_int32_t *pp;\n") >> TFILE
+
+ printf("{\n") >> TFILE
+ printf("\tsize_t size;\n") >> TFILE
+
+ if (retlist_type[i] == "STRING")
+ printf("\tchar **p;\n\n") >> TFILE
+ if (retlist_type[i] == "INT" || retlist_type[i] == "ID")
+ printf("\tu_int32_t *p;\n\n") >> TFILE
+
+ printf("\tif (pp == NULL)\n\t\treturn;\n") >> TFILE
+ printf("\tsize = sizeof(*p);\n") >> TFILE
+ printf("\tfor (p = pp; *p != 0; p++) {\n") >> TFILE
+ printf("\t\tsize += sizeof(*p);\n") >> TFILE
+
+ if (retlist_type[i] == "STRING")
+ printf("\t\t__os_free(*p, strlen(*p)+1);\n") \
+ >> TFILE
+ printf("\t}\n") >> TFILE
+ printf("\t__os_free(pp, size);\n") >> TFILE
+ printf("}\n\n") >> TFILE
+ }
+ }
+}
+
+#
+# split_lines --
+# Add line separators to pretty-print the output.
+function split_lines(is_public) {
+ if (argcount > 3) {
+ # Reset the counter, remove any trailing whitespace from
+ # the separator.
+ argcount = 0;
+ sub("[ ]$", "", sep)
+
+ if (is_public) {
+ printf("%s\n\t", sep) >> SHFILE
+ } else {
+ printf("%s\n\t\t", sep) >> PFILE
+ printf("%s\\\n\\\t\\\t", sep) >> SEDFILE
+ }
+ }
+}
diff --git a/bdb/dist/install-sh b/bdb/dist/install-sh
new file mode 100755
index 00000000000..b41a2459161
--- /dev/null
+++ b/bdb/dist/install-sh
@@ -0,0 +1,251 @@
+#!/bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission. M.I.T. makes no representations about the
+# suitability of this software for any purpose. It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with the install programs of many operating systems.
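+#
+# A minimal, hypothetical example of use (relying only on the options this
+# script actually parses below):
+#
+#	./install-sh -c -m 0644 libdb.a /usr/local/lib/libdb.a
+#	./install-sh -d /usr/local/include/db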
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transform_arg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd="$stripprog"
+ shift
+ continue;;
+
+ -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+ shift
+ continue;;
+
+ -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+ shift
+ continue;;
+
+ *) if [ x"$src" = x ]
+ then
+ src=$1
+ else
+ # this colon is to work around a 386BSD /bin/sh bug
+ :
+ dst=$1
+ fi
+ shift
+ continue;;
+ esac
+done
+
+if [ x"$src" = x ]
+then
+ echo "install: no input file specified"
+ exit 1
+else
+ true
+fi
+
+if [ x"$dir_arg" != x ]; then
+ dst=$src
+ src=""
+
+ if [ -d $dst ]; then
+ instcmd=:
+ chmodcmd=""
+ else
+ instcmd=$mkdirprog
+ fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+ if [ -f $src -o -d $src ]
+ then
+ true
+ else
+ echo "install: $src does not exist"
+ exit 1
+ fi
+
+ if [ x"$dst" = x ]
+ then
+ echo "install: no destination specified"
+ exit 1
+ else
+ true
+ fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+ if [ -d $dst ]
+ then
+ dst="$dst"/`basename $src`
+ else
+ true
+ fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+ '
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+ pathcomp="${pathcomp}${1}"
+ shift
+
+ if [ ! -d "${pathcomp}" ] ;
+ then
+ $mkdirprog "${pathcomp}"
+ else
+ true
+ fi
+
+ pathcomp="${pathcomp}/"
+done
+fi
+
+if [ x"$dir_arg" != x ]
+then
+ $doit $instcmd $dst &&
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+ if [ x"$transformarg" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ dstfile=`basename $dst $transformbasename |
+ sed $transformarg`$transformbasename
+ fi
+
+# don't allow the sed command to completely eliminate the filename
+
+ if [ x"$dstfile" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ true
+ fi
+
+# Make a temp file name in the proper directory.
+
+ dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+
+ $doit $instcmd $src $dsttmp &&
+
+ trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing. If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+
+ $doit $rmcmd -f $dstdir/$dstfile &&
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
+
+fi &&
+
+
+exit 0
diff --git a/bdb/dist/ltconfig b/bdb/dist/ltconfig
new file mode 100644
index 00000000000..f78afda0e1f
--- /dev/null
+++ b/bdb/dist/ltconfig
@@ -0,0 +1,3136 @@
+#! /bin/sh
+
+# ltconfig - Create a system-specific libtool.
+# Copyright (C) 1996-1999 Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# A lot of this script is taken from autoconf-2.10.
+
+# Check that we are running under the correct shell.
+SHELL=${CONFIG_SHELL-/bin/sh}
+echo=echo
+if test "X$1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X$1" = X--fallback-echo; then
+  # Avoid an inline document here; it may be left over.
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell.
+ exec "$SHELL" "$0" --no-reexec ${1+"$@"}
+fi
+
+if test "X$1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+$*
+EOF
+ exit 0
+fi
+
+# Find the correct PATH separator. Usually this is `:', but
+# DJGPP uses `;' like DOS.
+if test "X${PATH_SEPARATOR+set}" != Xset; then
+ UNAME=${UNAME-`uname 2>/dev/null`}
+ case X$UNAME in
+ *-DOS) PATH_SEPARATOR=';' ;;
+ *) PATH_SEPARATOR=':' ;;
+ esac
+fi
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+if test "X${echo_test_string+set}" != Xset; then
+ # find a string as large as possible, as long as the shell can cope with it
+ for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do
+ # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
+ if (echo_test_string="`eval $cmd`") 2>/dev/null &&
+ echo_test_string="`eval $cmd`" &&
+ (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null; then
+ break
+ fi
+ done
+fi
+
+if test "X`($echo '\t') 2>/dev/null`" != 'X\t' ||
+ test "X`($echo "$echo_test_string") 2>/dev/null`" != X"$echo_test_string"; then
+ # The Solaris, AIX, and Digital Unix default echo programs unquote
+ # backslashes. This makes it impossible to quote backslashes using
+ # echo "$something" | sed 's/\\/\\\\/g'
+ #
+ # So, first we look for a working echo in the user's PATH.
+
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
+ for dir in $PATH /usr/ucb; do
+ if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
+ test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
+ test "X`($dir/echo "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
+ echo="$dir/echo"
+ break
+ fi
+ done
+ IFS="$save_ifs"
+
+ if test "X$echo" = Xecho; then
+ # We didn't find a better echo, so look for alternatives.
+ if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' &&
+ test "X`(print -r "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
+ # This shell has a builtin print -r that does the trick.
+ echo='print -r'
+ elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) &&
+ test "X$CONFIG_SHELL" != X/bin/ksh; then
+ # If we have ksh, try running ltconfig again with it.
+ ORIGINAL_CONFIG_SHELL="${CONFIG_SHELL-/bin/sh}"
+ export ORIGINAL_CONFIG_SHELL
+ CONFIG_SHELL=/bin/ksh
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$0" --no-reexec ${1+"$@"}
+ else
+ # Try using printf.
+ echo='printf "%s\n"'
+ if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ test "X`($echo "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
+ # Cool, printf works
+ :
+ elif test "X`("$ORIGINAL_CONFIG_SHELL" "$0" --fallback-echo '\t') 2>/dev/null`" = 'X\t' &&
+ test "X`("$ORIGINAL_CONFIG_SHELL" "$0" --fallback-echo "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
+ CONFIG_SHELL="$ORIGINAL_CONFIG_SHELL"
+ export CONFIG_SHELL
+ SHELL="$CONFIG_SHELL"
+ export SHELL
+ echo="$CONFIG_SHELL $0 --fallback-echo"
+ elif test "X`("$CONFIG_SHELL" "$0" --fallback-echo '\t') 2>/dev/null`" = 'X\t' &&
+ test "X`("$CONFIG_SHELL" "$0" --fallback-echo "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
+ echo="$CONFIG_SHELL $0 --fallback-echo"
+ else
+ # maybe with a smaller string...
+ prev=:
+
+ for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do
+ if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null; then
+ break
+ fi
+ prev="$cmd"
+ done
+
+ if test "$prev" != 'sed 50q "$0"'; then
+ echo_test_string=`eval $prev`
+ export echo_test_string
+ exec "${ORIGINAL_CONFIG_SHELL}" "$0" ${1+"$@"}
+ else
+ # Oops. We lost completely, so just stick with echo.
+ echo=echo
+ fi
+ fi
+ fi
+ fi
+fi
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e s/^X//'
+sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g'
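+#
+# Illustrative example (not part of the original script): piping the
+# hypothetical string
+#   cc -DNAME="foo" $extra
+# through $sed_quote_subst yields
+#   cc -DNAME=\"foo\" \$extra
+# i.e. double quotes, backquotes, dollar signs and backslashes are escaped.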
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# The name of this program.
+progname=`$echo "X$0" | $Xsed -e 's%^.*/%%'`
+
+# Constants:
+PROGRAM=ltconfig
+PACKAGE=libtool
+VERSION=1.3.5
+TIMESTAMP=" (1.385.2.206 2000/05/27 11:12:27)"
+ac_compile='${CC-cc} -c $CFLAGS $CPPFLAGS conftest.$ac_ext 1>&5'
+ac_link='${CC-cc} -o conftest $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS 1>&5'
+rm="rm -f"
+
+help="Try \`$progname --help' for more information."
+
+# Global variables:
+default_ofile=libtool
+can_build_shared=yes
+enable_shared=yes
+# All known linkers require a `.a' archive for static linking (except M$VC,
+# which needs `.lib').
+enable_static=yes
+enable_fast_install=yes
+enable_dlopen=unknown
+enable_win32_dll=no
+ltmain=
+silent=
+srcdir=
+ac_config_guess=
+ac_config_sub=
+host=
+nonopt=
+ofile="$default_ofile"
+verify_host=yes
+with_gcc=no
+with_gnu_ld=no
+need_locks=yes
+ac_ext=c
+objext=o
+libext=a
+exeext=
+cache_file=
+
+old_AR="$AR"
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+old_CPPFLAGS="$CPPFLAGS"
+old_LDFLAGS="$LDFLAGS"
+old_LD="$LD"
+old_LN_S="$LN_S"
+old_LIBS="$LIBS"
+old_NM="$NM"
+old_RANLIB="$RANLIB"
+old_DLLTOOL="$DLLTOOL"
+old_OBJDUMP="$OBJDUMP"
+old_AS="$AS"
+
+# Parse the command line options.
+args=
+prev=
+for option
+do
+ case "$option" in
+ -*=*) optarg=`echo "$option" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
+ *) optarg= ;;
+ esac
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ eval "$prev=\$option"
+ prev=
+ continue
+ fi
+
+ case "$option" in
+ --help) cat <<EOM
+Usage: $progname [OPTION]... [HOST [LTMAIN]]
+
+Generate a system-specific libtool script.
+
+ --debug enable verbose shell tracing
+ --disable-shared do not build shared libraries
+ --disable-static do not build static libraries
+ --disable-fast-install do not optimize for fast installation
+ --enable-dlopen enable dlopen support
+ --enable-win32-dll enable building dlls on win32 hosts
+ --help display this help and exit
+ --no-verify do not verify that HOST is a valid host type
+-o, --output=FILE specify the output file [default=$default_ofile]
+ --quiet same as \`--silent'
+ --silent do not print informational messages
+ --srcdir=DIR find \`config.guess' in DIR
+ --version output version information and exit
+ --with-gcc assume that the GNU C compiler will be used
+ --with-gnu-ld assume that the C compiler uses the GNU linker
+ --disable-lock disable file locking
+ --cache-file=FILE configure cache file
+
+LTMAIN is the \`ltmain.sh' shell script fragment or \`ltmain.c' program
+that provides basic libtool functionality.
+
+HOST is the canonical host system name [default=guessed].
+EOM
+ exit 0
+ ;;
+
+ --debug)
+ echo "$progname: enabling shell trace mode"
+ set -x
+ ;;
+
+ --disable-shared) enable_shared=no ;;
+
+ --disable-static) enable_static=no ;;
+
+ --disable-fast-install) enable_fast_install=no ;;
+
+ --enable-dlopen) enable_dlopen=yes ;;
+
+ --enable-win32-dll) enable_win32_dll=yes ;;
+
+ --quiet | --silent) silent=yes ;;
+
+ --srcdir) prev=srcdir ;;
+ --srcdir=*) srcdir="$optarg" ;;
+
+ --no-verify) verify_host=no ;;
+
+ --output | -o) prev=ofile ;;
+ --output=*) ofile="$optarg" ;;
+
+ --version) echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"; exit 0 ;;
+
+ --with-gcc) with_gcc=yes ;;
+ --with-gnu-ld) with_gnu_ld=yes ;;
+
+ --disable-lock) need_locks=no ;;
+
+ --cache-file=*) cache_file="$optarg" ;;
+
+ -*)
+ echo "$progname: unrecognized option \`$option'" 1>&2
+ echo "$help" 1>&2
+ exit 1
+ ;;
+
+ *)
+ if test -z "$ltmain"; then
+ ltmain="$option"
+ elif test -z "$host"; then
+# This generates an unnecessary warning for sparc-sun-solaris4.1.3_U1
+# if test -n "`echo $option| sed 's/[-a-z0-9.]//g'`"; then
+# echo "$progname: warning \`$option' is not a valid host type" 1>&2
+# fi
+ host="$option"
+ else
+ echo "$progname: too many arguments" 1>&2
+ echo "$help" 1>&2
+ exit 1
+ fi ;;
+ esac
+done
+
+if test -z "$ltmain"; then
+ echo "$progname: you must specify a LTMAIN file" 1>&2
+ echo "$help" 1>&2
+ exit 1
+fi
+
+if test ! -f "$ltmain"; then
+ echo "$progname: \`$ltmain' does not exist" 1>&2
+ echo "$help" 1>&2
+ exit 1
+fi
+
+# Quote any args containing shell metacharacters.
+ltconfig_args=
+for arg
+do
+ case "$arg" in
+ *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?]*)
+ ltconfig_args="$ltconfig_args '$arg'" ;;
+ *) ltconfig_args="$ltconfig_args $arg" ;;
+ esac
+done
+
+# A relevant subset of AC_INIT.
+
+# File descriptor usage:
+# 0 standard input
+# 1 file creation
+# 2 errors and warnings
+# 3 some systems may open it to /dev/tty
+# 4 used on the Kubota Titan
+# 5 compiler messages saved in config.log
+# 6 checking for... messages and results
+if test "$silent" = yes; then
+ exec 6>/dev/null
+else
+ exec 6>&1
+fi
+exec 5>>./config.log
+
+# NLS nuisances.
+# Only set LANG and LC_ALL to C if already set.
+# These must not be set unconditionally because not all systems understand
+# e.g. LANG=C (notably SCO).
+if test "X${LC_ALL+set}" = Xset; then LC_ALL=C; export LC_ALL; fi
+if test "X${LANG+set}" = Xset; then LANG=C; export LANG; fi
+
+if test -n "$cache_file" && test -r "$cache_file"; then
+ echo "loading cache $cache_file within ltconfig"
+ . $cache_file
+fi
+
+if (echo "testing\c"; echo 1,2,3) | grep c >/dev/null; then
+ # Stardent Vistra SVR4 grep lacks -e, says ghazi@caip.rutgers.edu.
+ if (echo -n testing; echo 1,2,3) | sed s/-n/xn/ | grep xn >/dev/null; then
+ ac_n= ac_c='
+' ac_t=' '
+ else
+ ac_n=-n ac_c= ac_t=
+ fi
+else
+ ac_n= ac_c='\c' ac_t=
+fi
+
+if test -z "$srcdir"; then
+ # Assume the source directory is the same one as the path to LTMAIN.
+ srcdir=`$echo "X$ltmain" | $Xsed -e 's%/[^/]*$%%'`
+ test "$srcdir" = "$ltmain" && srcdir=.
+fi
+
+trap "$rm conftest*; exit 1" 1 2 15
+if test "$verify_host" = yes; then
+ # Check for config.guess and config.sub.
+ ac_aux_dir=
+ for ac_dir in $srcdir $srcdir/.. $srcdir/../..; do
+ if test -f $ac_dir/config.guess; then
+ ac_aux_dir=$ac_dir
+ break
+ fi
+ done
+ if test -z "$ac_aux_dir"; then
+ echo "$progname: cannot find config.guess in $srcdir $srcdir/.. $srcdir/../.." 1>&2
+ echo "$help" 1>&2
+ exit 1
+ fi
+ ac_config_guess=$ac_aux_dir/config.guess
+ ac_config_sub=$ac_aux_dir/config.sub
+
+ # Make sure we can run config.sub.
+ if $SHELL $ac_config_sub sun4 >/dev/null 2>&1; then :
+ else
+ echo "$progname: cannot run $ac_config_sub" 1>&2
+ echo "$help" 1>&2
+ exit 1
+ fi
+
+ echo $ac_n "checking host system type""... $ac_c" 1>&6
+
+ host_alias=$host
+ case "$host_alias" in
+ "")
+ if host_alias=`$SHELL $ac_config_guess`; then :
+ else
+ echo "$progname: cannot guess host type; you must specify one" 1>&2
+ echo "$help" 1>&2
+ exit 1
+ fi ;;
+ esac
+ host=`$SHELL $ac_config_sub $host_alias`
+ echo "$ac_t$host" 1>&6
+
+  # Make sure the host was verified.
+ test -z "$host" && exit 1
+
+elif test -z "$host"; then
+ echo "$progname: you must specify a host type if you use \`--no-verify'" 1>&2
+ echo "$help" 1>&2
+ exit 1
+else
+ host_alias=$host
+fi
+
+# Transform linux* to *-*-linux-gnu*, to support old configure scripts.
+case "$host_os" in
+linux-gnu*) ;;
+linux*) host=`echo $host | sed 's/^\(.*-.*-linux\)\(.*\)$/\1-gnu\2/'`
+esac
+
+host_cpu=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+host_vendor=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+host_os=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+
+case "$host_os" in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR cru $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+# Set a sane default for `AR'.
+test -z "$AR" && AR=ar
+
+# Set a sane default for `OBJDUMP'.
+test -z "$OBJDUMP" && OBJDUMP=objdump
+
+# If RANLIB is not set, then run the test.
+if test "${RANLIB+set}" != "set"; then
+ result=no
+
+ echo $ac_n "checking for ranlib... $ac_c" 1>&6
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
+ for dir in $PATH; do
+ test -z "$dir" && dir=.
+ if test -f $dir/ranlib || test -f $dir/ranlib$ac_exeext; then
+ RANLIB="ranlib"
+ result="ranlib"
+ break
+ fi
+ done
+ IFS="$save_ifs"
+
+ echo "$ac_t$result" 1>&6
+fi
+
+if test -n "$RANLIB"; then
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+ old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds"
+fi
+
+# Set sane defaults for `DLLTOOL', `OBJDUMP', and `AS', used on cygwin.
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+test -z "$OBJDUMP" && OBJDUMP=objdump
+test -z "$AS" && AS=as
+
+# Check to see if we are using GCC.
+if test "$with_gcc" != yes || test -z "$CC"; then
+ # If CC is not set, then try to find GCC or a usable CC.
+ if test -z "$CC"; then
+ echo $ac_n "checking for gcc... $ac_c" 1>&6
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
+ for dir in $PATH; do
+ test -z "$dir" && dir=.
+ if test -f $dir/gcc || test -f $dir/gcc$ac_exeext; then
+ CC="gcc"
+ break
+ fi
+ done
+ IFS="$save_ifs"
+
+ if test -n "$CC"; then
+ echo "$ac_t$CC" 1>&6
+ else
+ echo "$ac_t"no 1>&6
+ fi
+ fi
+
+ # Not "gcc", so try "cc", rejecting "/usr/ucb/cc".
+ if test -z "$CC"; then
+ echo $ac_n "checking for cc... $ac_c" 1>&6
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
+ cc_rejected=no
+ for dir in $PATH; do
+ test -z "$dir" && dir=.
+ if test -f $dir/cc || test -f $dir/cc$ac_exeext; then
+ if test "$dir/cc" = "/usr/ucb/cc"; then
+ cc_rejected=yes
+ continue
+ fi
+ CC="cc"
+ break
+ fi
+ done
+ IFS="$save_ifs"
+ if test $cc_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $CC
+ shift
+ if test $# -gt 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same name, so the bogon will be chosen
+ # first if we set CC to just the name; use the full file name.
+ shift
+ set dummy "$dir/cc" "$@"
+ shift
+ CC="$@"
+ fi
+ fi
+
+ if test -n "$CC"; then
+ echo "$ac_t$CC" 1>&6
+ else
+ echo "$ac_t"no 1>&6
+ fi
+
+ if test -z "$CC"; then
+ echo "$progname: error: no acceptable cc found in \$PATH" 1>&2
+ exit 1
+ fi
+ fi
+
+ # Now see if the compiler is really GCC.
+ with_gcc=no
+ echo $ac_n "checking whether we are using GNU C... $ac_c" 1>&6
+ echo "$progname:581: checking whether we are using GNU C" >&5
+
+ $rm conftest.c
+ cat > conftest.c <<EOF
+#ifdef __GNUC__
+ yes;
+#endif
+EOF
+ if { ac_try='${CC-cc} -E conftest.c'; { (eval echo $progname:589: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
+ with_gcc=yes
+ fi
+ $rm conftest.c
+ echo "$ac_t$with_gcc" 1>&6
+fi
+
+# Allow CC to be a program name with arguments.
+set dummy $CC
+compiler="$2"
+
+echo $ac_n "checking for object suffix... $ac_c" 1>&6
+$rm conftest*
+echo 'int i = 1;' > conftest.c
+echo "$progname:603: checking for object suffix" >& 5
+if { (eval echo $progname:604: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; }; then
+ # Append any warnings to the config.log.
+ cat conftest.err 1>&5
+
+ for ac_file in conftest.*; do
+ case $ac_file in
+ *.c) ;;
+ *) objext=`echo $ac_file | sed -e s/conftest.//` ;;
+ esac
+ done
+else
+ cat conftest.err 1>&5
+ echo "$progname: failed program was:" >&5
+ cat conftest.c >&5
+fi
+$rm conftest*
+echo "$ac_t$objext" 1>&6
+
+echo $ac_n "checking for executable suffix... $ac_c" 1>&6
+if eval "test \"`echo '$''{'ac_cv_exeext'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ ac_cv_exeext="no"
+ $rm conftest*
+ echo 'main () { return 0; }' > conftest.c
+ echo "$progname:629: checking for executable suffix" >& 5
+ if { (eval echo $progname:630: \"$ac_link\") 1>&5; (eval $ac_link) 2>conftest.err; }; then
+ # Append any warnings to the config.log.
+ cat conftest.err 1>&5
+
+ for ac_file in conftest.*; do
+ case $ac_file in
+ *.c | *.err | *.$objext ) ;;
+ *) ac_cv_exeext=.`echo $ac_file | sed -e s/conftest.//` ;;
+ esac
+ done
+ else
+ cat conftest.err 1>&5
+ echo "$progname: failed program was:" >&5
+ cat conftest.c >&5
+ fi
+ $rm conftest*
+fi
+if test "X$ac_cv_exeext" = Xno; then
+ exeext=""
+else
+ exeext="$ac_cv_exeext"
+fi
+echo "$ac_t$ac_cv_exeext" 1>&6
+
+echo $ac_n "checking for $compiler option to produce PIC... $ac_c" 1>&6
+pic_flag=
+special_shlib_compile_flags=
+wl=
+link_static_flag=
+no_builtin_flag=
+
+if test "$with_gcc" = yes; then
+ wl='-Wl,'
+ link_static_flag='-static'
+
+ case "$host_os" in
+ beos* | irix5* | irix6* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+ aix*)
+    # Below there is a dirty hack to force normal static linking with -ldl.
+    # The problem is that libdl is dynamically linked against both libc and
+    # libC (the AIX C++ library), and libC is obviously not included in the
+    # library list by gcc.  This causes undefined symbols with the -static
+    # flag.  This hack allows C programs to be linked with "-static -ldl",
+    # but we are not sure about C++ programs.
+ link_static_flag="$link_static_flag ${wl}-lC"
+ ;;
+ cygwin* | mingw* | os2*)
+ # We can build DLLs from non-PIC.
+ ;;
+ amigaos*)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ pic_flag='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ pic_flag=-Kconform_pic
+ fi
+ ;;
+ *)
+ pic_flag='-fPIC'
+ ;;
+ esac
+else
+ # PORTME Check for PIC flags for the system compiler.
+ case "$host_os" in
+ aix3* | aix4*)
+ # All AIX code is PIC.
+ link_static_flag='-bnso -bI:/lib/syscalls.exp'
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ # Is there a better link_static_flag that works with the bundled CC?
+ wl='-Wl,'
+ link_static_flag="${wl}-a ${wl}archive"
+ pic_flag='+Z'
+ ;;
+
+ irix5* | irix6*)
+ wl='-Wl,'
+ link_static_flag='-non_shared'
+ # PIC (with -KPIC) is the default.
+ ;;
+
+ cygwin* | mingw* | os2*)
+ # We can build DLLs from non-PIC.
+ ;;
+
+ osf3* | osf4* | osf5*)
+ # All OSF/1 code is PIC.
+ wl='-Wl,'
+ link_static_flag='-non_shared'
+ ;;
+
+ sco3.2v5*)
+ pic_flag='-Kpic'
+ link_static_flag='-dn'
+ special_shlib_compile_flags='-belf'
+ ;;
+
+ solaris*)
+ pic_flag='-KPIC'
+ link_static_flag='-Bstatic'
+ wl='-Wl,'
+ ;;
+
+ sunos4*)
+ pic_flag='-PIC'
+ link_static_flag='-Bstatic'
+ wl='-Qoption ld '
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ pic_flag='-KPIC'
+ link_static_flag='-Bstatic'
+ wl='-Wl,'
+ ;;
+
+ uts4*)
+ pic_flag='-pic'
+ link_static_flag='-Bstatic'
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec ;then
+ pic_flag='-Kconform_pic'
+ link_static_flag='-Bstatic'
+ fi
+ ;;
+ *)
+ can_build_shared=no
+ ;;
+ esac
+fi
+
+if test -n "$pic_flag"; then
+ echo "$ac_t$pic_flag" 1>&6
+
+ # Check to make sure the pic_flag actually works.
+ echo $ac_n "checking if $compiler PIC flag $pic_flag works... $ac_c" 1>&6
+ $rm conftest*
+ echo "int some_variable = 0;" > conftest.c
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $pic_flag -DPIC"
+ echo "$progname:776: checking if $compiler PIC flag $pic_flag works" >&5
+ if { (eval echo $progname:777: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; } && test -s conftest.$objext; then
+ # Append any warnings to the config.log.
+ cat conftest.err 1>&5
+
+ case "$host_os" in
+ hpux9* | hpux10* | hpux11*)
+ # On HP-UX, both CC and GCC only warn that PIC is supported... then they
+ # create non-PIC objects. So, if there were any warnings, we assume that
+ # PIC is not supported.
+ if test -s conftest.err; then
+ echo "$ac_t"no 1>&6
+ can_build_shared=no
+ pic_flag=
+ else
+ echo "$ac_t"yes 1>&6
+ pic_flag=" $pic_flag"
+ fi
+ ;;
+ *)
+ echo "$ac_t"yes 1>&6
+ pic_flag=" $pic_flag"
+ ;;
+ esac
+ else
+ # Append any errors to the config.log.
+ cat conftest.err 1>&5
+ can_build_shared=no
+ pic_flag=
+ echo "$ac_t"no 1>&6
+ fi
+ CFLAGS="$save_CFLAGS"
+ $rm conftest*
+else
+ echo "$ac_t"none 1>&6
+fi
+
+# Check to see if options -o and -c are simultaneously supported by compiler
+echo $ac_n "checking if $compiler supports -c -o file.o... $ac_c" 1>&6
+$rm -r conftest 2>/dev/null
+mkdir conftest
+cd conftest
+$rm conftest*
+echo "int some_variable = 0;" > conftest.c
+mkdir out
+# According to Tom Tromey, Ian Lance Taylor reported there are C compilers
+# that will create temporary files in the current directory regardless of
+# the output directory. Thus, making CWD read-only will cause this test
+# to fail, enabling locking or at least warning the user not to do parallel
+# builds.
+chmod -w .
+save_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS -o out/conftest2.o"
+echo "$progname:829: checking if $compiler supports -c -o file.o" >&5
+if { (eval echo $progname:830: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>out/conftest.err; } && test -s out/conftest2.o; then
+
+  # The compiler can only warn about and ignore an unrecognized option,
+  # so say no if there are warnings.
+ if test -s out/conftest.err; then
+ echo "$ac_t"no 1>&6
+ compiler_c_o=no
+ else
+ echo "$ac_t"yes 1>&6
+ compiler_c_o=yes
+ fi
+else
+ # Append any errors to the config.log.
+ cat out/conftest.err 1>&5
+ compiler_c_o=no
+ echo "$ac_t"no 1>&6
+fi
+CFLAGS="$save_CFLAGS"
+chmod u+w .
+$rm conftest* out/*
+rmdir out
+cd ..
+rmdir conftest
+$rm -r conftest 2>/dev/null
+
+if test x"$compiler_c_o" = x"yes"; then
+ # Check to see if we can write to a .lo
+ echo $ac_n "checking if $compiler supports -c -o file.lo... $ac_c" 1>&6
+ $rm conftest*
+ echo "int some_variable = 0;" > conftest.c
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -c -o conftest.lo"
+ echo "$progname:862: checking if $compiler supports -c -o file.lo" >&5
+if { (eval echo $progname:863: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; } && test -s conftest.lo; then
+
+  # The compiler can only warn about and ignore an unrecognized option,
+  # so say no if there are warnings.
+ if test -s conftest.err; then
+ echo "$ac_t"no 1>&6
+ compiler_o_lo=no
+ else
+ echo "$ac_t"yes 1>&6
+ compiler_o_lo=yes
+ fi
+ else
+ # Append any errors to the config.log.
+ cat conftest.err 1>&5
+ compiler_o_lo=no
+ echo "$ac_t"no 1>&6
+ fi
+ CFLAGS="$save_CFLAGS"
+ $rm conftest*
+else
+ compiler_o_lo=no
+fi
+
+# Check to see if we can do hard links to lock some files if needed
+hard_links="nottested"
+if test "$compiler_c_o" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ echo $ac_n "checking if we can lock with hard links... $ac_c" 1>&6
+ hard_links=yes
+ $rm conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ echo "$ac_t$hard_links" 1>&6
+ $rm conftest*
+ if test "$hard_links" = no; then
+ echo "*** WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+
+if test "$with_gcc" = yes; then
+ # Check to see if options -fno-rtti -fno-exceptions are supported by compiler
+ echo $ac_n "checking if $compiler supports -fno-rtti -fno-exceptions ... $ac_c" 1>&6
+ $rm conftest*
+ echo "int some_variable = 0;" > conftest.c
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -fno-rtti -fno-exceptions -c conftest.c"
+ echo "$progname:914: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
+ if { (eval echo $progname:915: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; } && test -s conftest.o; then
+
+    # The compiler can only warn about and ignore an unrecognized option,
+    # so say no if there are warnings.
+ if test -s conftest.err; then
+ echo "$ac_t"no 1>&6
+ compiler_rtti_exceptions=no
+ else
+ echo "$ac_t"yes 1>&6
+ compiler_rtti_exceptions=yes
+ fi
+ else
+ # Append any errors to the config.log.
+ cat conftest.err 1>&5
+ compiler_rtti_exceptions=no
+ echo "$ac_t"no 1>&6
+ fi
+ CFLAGS="$save_CFLAGS"
+ $rm conftest*
+
+ if test "$compiler_rtti_exceptions" = "yes"; then
+ no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions'
+ else
+ no_builtin_flag=' -fno-builtin'
+ fi
+
+fi
+
+# Check for any special shared library compilation flags.
+if test -n "$special_shlib_compile_flags"; then
+ echo "$progname: warning: \`$CC' requires \`$special_shlib_compile_flags' to build shared libraries" 1>&2
+ if echo "$old_CC $old_CFLAGS " | egrep -e "[ ]$special_shlib_compile_flags[ ]" >/dev/null; then :
+ else
+ echo "$progname: add \`$special_shlib_compile_flags' to the CC or CFLAGS env variable and reconfigure" 1>&2
+ can_build_shared=no
+ fi
+fi
+
+echo $ac_n "checking if $compiler static flag $link_static_flag works... $ac_c" 1>&6
+$rm conftest*
+echo 'main(){return(0);}' > conftest.c
+save_LDFLAGS="$LDFLAGS"
+LDFLAGS="$LDFLAGS $link_static_flag"
+echo "$progname:958: checking if $compiler static flag $link_static_flag works" >&5
+if { (eval echo $progname:959: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then
+ echo "$ac_t$link_static_flag" 1>&6
+else
+ echo "$ac_t"none 1>&6
+ link_static_flag=
+fi
+LDFLAGS="$save_LDFLAGS"
+$rm conftest*
+
+if test -z "$LN_S"; then
+ # Check to see if we can use ln -s, or we need hard links.
+ echo $ac_n "checking whether ln -s works... $ac_c" 1>&6
+ $rm conftest.dat
+ if ln -s X conftest.dat 2>/dev/null; then
+ $rm conftest.dat
+ LN_S="ln -s"
+ else
+ LN_S=ln
+ fi
+ if test "$LN_S" = "ln -s"; then
+ echo "$ac_t"yes 1>&6
+ else
+ echo "$ac_t"no 1>&6
+ fi
+fi
+
+# Make sure LD is an absolute path.
+if test -z "$LD"; then
+ ac_prog=ld
+ if test "$with_gcc" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ echo $ac_n "checking for ld used by GCC... $ac_c" 1>&6
+ echo "$progname:991: checking for ld used by GCC" >&5
+ ac_prog=`($CC -print-prog-name=ld) 2>&5`
+ case "$ac_prog" in
+ # Accept absolute paths.
+ [\\/]* | [A-Za-z]:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we are not using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+ elif test "$with_gnu_ld" = yes; then
+ echo $ac_n "checking for GNU ld... $ac_c" 1>&6
+ echo "$progname:1015: checking for GNU ld" >&5
+ else
+ echo $ac_n "checking for non-GNU ld""... $ac_c" 1>&6
+ echo "$progname:1018: checking for non-GNU ld" >&5
+ fi
+
+ if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
+ for ac_dir in $PATH; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ if "$LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
+ test "$with_gnu_ld" != no && break
+ else
+ test "$with_gnu_ld" != yes && break
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+ fi
+
+ if test -n "$LD"; then
+ echo "$ac_t$LD" 1>&6
+ else
+ echo "$ac_t"no 1>&6
+ fi
+
+ if test -z "$LD"; then
+ echo "$progname: error: no acceptable ld found in \$PATH" 1>&2
+ exit 1
+ fi
+fi
+
+# Check to see if it really is or is not GNU ld.
+echo $ac_n "checking if the linker ($LD) is GNU ld... $ac_c" 1>&6
+# I'd rather use --version here, but apparently some GNU ld's only accept -v.
+if $LD -v 2>&1 </dev/null | egrep '(GNU|with BFD)' 1>&5; then
+ with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+echo "$ac_t$with_gnu_ld" 1>&6
+
+# See if the linker supports building shared libraries.
+echo $ac_n "checking whether the linker ($LD) supports shared libraries... $ac_c" 1>&6
+
+allow_undefined_flag=
+no_undefined_flag=
+need_lib_prefix=unknown
+need_version=unknown
+# When you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments.
+archive_cmds=
+archive_expsym_cmds=
+old_archive_from_new_cmds=
+export_dynamic_flag_spec=
+whole_archive_flag_spec=
+thread_safe_flag_spec=
+hardcode_libdir_flag_spec=
+hardcode_libdir_separator=
+hardcode_direct=no
+hardcode_minus_L=no
+hardcode_shlibpath_var=unsupported
+runpath_var=
+always_export_symbols=no
+export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | sed '\''s/.* //'\'' | sort | uniq > $export_symbols'
+# include_expsyms should be a list of space-separated symbols to be *always*
+# included in the symbol list
+include_expsyms=
+# exclude_expsyms can be an egrep regular expression of symbols to exclude;
+# it will be wrapped by ` (' and `)$', so one must not match beginning or
+# end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+# as well as any symbol that contains `d'.
+exclude_expsyms="_GLOBAL_OFFSET_TABLE_"
+# Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+# platforms (ab)use it in PIC code, but their linkers get confused if
+# the symbol is explicitly referenced. Since portable code cannot
+# rely on this symbol name, it's probably fine to never include it in
+# preloaded symbol tables.
+
+case "$host_os" in
+cygwin* | mingw*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$with_gcc" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+
+esac
+
+ld_shlibs=yes
+if test "$with_gnu_ld" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # See if GNU ld supports shared libraries.
+ case "$host_os" in
+ aix3* | aix4*)
+ # On AIX, the GNU linker is very broken
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared library support.  If you
+*** really care about shared libraries, you may want to modify your PATH
+*** so that a non-GNU linker is found, and then restart.
+
+EOF
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $objdir/a2ixlibrary.data~$AR cru $lib $libobjs~$RANLIB $lib~(cd $objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+
+    # Samuel A. Falvo II <kc5tja@dolphin.openprojects.net> reports
+    # that the semantics of dynamic libraries on AmigaOS, at least up
+    # to version 4, are to share data among multiple programs linked
+    # with the same dynamic library.  Since this doesn't match the
+    # behavior of shared libraries on other platforms, we cannot use
+    # them.
+ ld_shlibs=no
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ allow_undefined_flag=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ archive_cmds='$CC -nostart $libobjs $deplibs $linkopts ${wl}-soname $wl$soname -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ cygwin* | mingw*)
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+
+ # Extract the symbol export list from an `--export-all' def file,
+ # then regenerate the def file from the symbol export list, so that
+ # the compiled dll only exports the symbol export list.
+ # Be careful not to strip the DATA tag left by newer dlltools.
+ export_symbols_cmds='test -f $objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $0 > $objdir/$soname-ltdll.c~
+ test -f $objdir/$soname-ltdll.$objext || (cd $objdir && $CC -c $soname-ltdll.c)~
+ $DLLTOOL --export-all --exclude-symbols DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12 --output-def $objdir/$soname-def $objdir/$soname-ltdll.$objext $libobjs $convenience~
+ sed -e "1,/EXPORTS/d" -e "s/ @ [0-9]*//" -e "s/ *;.*$//" < $objdir/$soname-def > $export_symbols'
+
+ # If DATA tags from a recent dlltool are present, honour them!
+ archive_expsym_cmds='echo EXPORTS > $objdir/$soname-def~
+ _lt_hint=1;
+ cat $export_symbols | while read symbol; do
+ set dummy \$symbol;
+ case \$# in
+ 2) echo " \$2 @ \$_lt_hint ; " >> $objdir/$soname-def;;
+ *) echo " \$2 @ \$_lt_hint \$3 ; " >> $objdir/$soname-def;;
+ esac;
+ _lt_hint=`expr 1 + \$_lt_hint`;
+ done~
+ test -f $objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $0 > $objdir/$soname-ltdll.c~
+ test -f $objdir/$soname-ltdll.$objext || (cd $objdir && $CC -c $soname-ltdll.c)~
+ $CC -Wl,--base-file,$objdir/$soname-base -Wl,--dll -nostartfiles -Wl,-e,__cygwin_dll_entry@12 -o $lib $objdir/$soname-ltdll.$objext $libobjs $deplibs $linkopts~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12 --def $objdir/$soname-def --base-file $objdir/$soname-base --output-exp $objdir/$soname-exp~
+ $CC -Wl,--base-file,$objdir/$soname-base $objdir/$soname-exp -Wl,--dll -nostartfiles -Wl,-e,__cygwin_dll_entry@12 -o $lib $objdir/$soname-ltdll.$objext $libobjs $deplibs $linkopts~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12 --def $objdir/$soname-def --base-file $objdir/$soname-base --output-exp $objdir/$soname-exp~
+ $CC $objdir/$soname-exp -Wl,--dll -nostartfiles -Wl,-e,__cygwin_dll_entry@12 -o $lib $objdir/$soname-ltdll.$objext $libobjs $deplibs $linkopts'
+
+ old_archive_from_new_cmds='$DLLTOOL --as=$AS --dllname $soname --def $objdir/$soname-def --output-lib $objdir/$libname.a'
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ archive_cmds='$LD -Bshareable $libobjs $deplibs $linkopts -o $lib'
+ # can we support soname and/or expsyms with a.out? -oliva
+ fi
+ ;;
+
+ solaris* | sysv5*)
+ if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared library support.  We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+EOF
+ elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ sunos4*)
+ archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linkopts'
+ wlarc=
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+
+ if test "$ld_shlibs" = yes; then
+ runpath_var=LD_RUN_PATH
+ hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir'
+ export_dynamic_flag_spec='${wl}--export-dynamic'
+ case $host_os in
+ cygwin* | mingw*)
+      # dlltool doesn't understand --whole-archive et al.
+ whole_archive_flag_spec=
+ ;;
+ *)
+      # ancient GNU ld didn't support --whole-archive et al.
+ if $LD --help 2>&1 | egrep 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ whole_archive_flag_spec=
+ fi
+ ;;
+ esac
+ fi
+else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case "$host_os" in
+ aix3*)
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ archive_expsym_cmds='$LD -o $objdir/$soname $libobjs $deplibs $linkopts -bE:$export_symbols -T512 -H512 -bM:SRE~$AR cru $lib $objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$with_gcc" = yes && test -z "$link_static_flag"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+
+ aix4*)
+ hardcode_libdir_flag_spec='${wl}-b ${wl}nolibpath ${wl}-b ${wl}libpath:$libdir:/usr/lib:/lib'
+ hardcode_libdir_separator=':'
+ if test "$with_gcc" = yes; then
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" && \
+ strings "$collect2name" | grep resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ hardcode_direct=yes
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ shared_flag='-shared'
+ else
+ shared_flag='${wl}-bM:SRE'
+ hardcode_direct=yes
+ fi
+ allow_undefined_flag=' ${wl}-berok'
+ archive_cmds="\$CC $shared_flag"' -o $objdir/$soname $libobjs $deplibs $linkopts ${wl}-bexpall ${wl}-bnoentry${allow_undefined_flag}'
+ archive_expsym_cmds="\$CC $shared_flag"' -o $objdir/$soname $libobjs $deplibs $linkopts ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}'
+ #### local change for Sleepycat DB:
+ # On AIX 4.3.2 (at least), -bexpall exports too much,
+ # causing symbol conflicts. This was:
+ # case "$host_os" in aix4.[01]|aix4.[01].*)
+ case "$host_os" in aix4.*)
+ # According to Greg Wooledge, -bexpall is only supported from AIX 4.2 on
+ always_export_symbols=yes ;;
+ esac
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $objdir/a2ixlibrary.data~$AR cru $lib $libobjs~$RANLIB $lib~(cd $objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ # see comment about different semantics on the GNU ld section
+ ld_shlibs=no
+ ;;
+
+ cygwin* | mingw*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $lib $libobjs $linkopts `echo "$deplibs" | sed -e '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ old_archive_from_new_cmds='true'
+ # FIXME: Should let the user specify the lib program.
+ old_archive_cmds='lib /OUT:$oldlib$oldobjs'
+ fix_srcfile_path='`cygpath -w $srcfile`'
+ ;;
+
+ freebsd1*)
+ ld_shlibs=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linkopts /usr/lib/c++rt0.o'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linkopts'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+  # FreeBSD 3 and greater use gcc -shared to build shared libraries.
+ freebsd*)
+ archive_cmds='$CC -shared -o $lib $libobjs $deplibs $linkopts'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ case "$host_os" in
+ hpux9*) archive_cmds='$rm $objdir/$soname~$LD -b +b $install_libdir -o $objdir/$soname $libobjs $deplibs $linkopts~test $objdir/$soname = $lib || mv $objdir/$soname $lib' ;;
+ *) archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linkopts' ;;
+ esac
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ hardcode_minus_L=yes # Not in the search PATH, but as the default
+ # location of the library.
+ export_dynamic_flag_spec='${wl}-E'
+ ;;
+
+ irix5* | irix6*)
+ if test "$with_gcc" = yes; then
+ archive_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
+ else
+ archive_cmds='$LD -shared $libobjs $deplibs $linkopts -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linkopts' # a.out
+ else
+ archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linkopts' # ELF
+ fi
+ hardcode_libdir_flag_spec='${wl}-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ openbsd*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linkopts'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ allow_undefined_flag=unsupported
+ archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $objdir/$libname.def~$echo DATA >> $objdir/$libname.def~$echo " SINGLE NONSHARED" >> $objdir/$libname.def~$echo EXPORTS >> $objdir/$libname.def~emxexp $libobjs >> $objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $linkopts $objdir/$libname.def'
+ old_archive_from_new_cmds='emximp -o $objdir/$libname.a $objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$with_gcc" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $linkopts ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linkopts -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+
+ osf4* | osf5*) # As osf3* with the addition of the -msym flag
+ if test "$with_gcc" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $linkopts ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linkopts -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+ rhapsody*)
+ archive_cmds='$CC -bundle -undefined suppress -o $lib $libobjs $deplibs $linkopts'
+ hardcode_libdir_flags_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ sco3.2v5*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ ;;
+
+ solaris*)
+ no_undefined_flag=' -z text'
+    # $CC -shared without GNU ld will not create a library from C++
+    # object files and a static libstdc++; better avoid it for now.
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linkopts'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linkopts~$rm $lib.exp'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_shlibpath_var=no
+ case "$host_os" in
+ solaris2.[0-5] | solaris2.[0-5].*) ;;
+ *) # Supported since Solaris 2.6 (maybe 2.5.1?)
+ whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;;
+ esac
+ ;;
+
+ sunos4*)
+ archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linkopts'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $linkopts'
+ else
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+    hardcode_direct=no # Motorola manual says yes, but my tests say they lie
+ ;;
+
+ sysv4.3*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
+ hardcode_shlibpath_var=no
+ export_dynamic_flag_spec='-Bexport'
+ ;;
+
+ sysv5*)
+ no_undefined_flag=' -z text'
+    # $CC -shared without GNU ld will not create a library from C++
+    # object files and a static libstdc++; better avoid it for now.
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linkopts'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linkopts~$rm $lib.exp'
+ hardcode_libdir_flag_spec=
+ hardcode_shlibpath_var=no
+ runpath_var='LD_RUN_PATH'
+ ;;
+
+ uts4*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ dgux*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ ld_shlibs=yes
+ fi
+ ;;
+
+ sysv4.2uw2*)
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linkopts'
+ hardcode_direct=yes
+ hardcode_minus_L=no
+ hardcode_shlibpath_var=no
+ hardcode_runpath_var=yes
+ runpath_var=LD_RUN_PATH
+ ;;
+
+ unixware7*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+fi
+echo "$ac_t$ld_shlibs" 1>&6
+test "$ld_shlibs" = no && can_build_shared=no
+
+if test -z "$NM"; then
+ echo $ac_n "checking for BSD-compatible nm... $ac_c" 1>&6
+ case "$NM" in
+ [\\/]* | [A-Za-z]:[\\/]*) ;; # Let the user override the test with a path.
+ *)
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
+ for ac_dir in $PATH /usr/ucb /usr/ccs/bin /bin; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/nm || test -f $ac_dir/nm$ac_exeext; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ if ($ac_dir/nm -B /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
+ NM="$ac_dir/nm -B"
+ break
+ elif ($ac_dir/nm -p /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
+ NM="$ac_dir/nm -p"
+ break
+ else
+ NM=${NM="$ac_dir/nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$NM" && NM=nm
+ ;;
+ esac
+ echo "$ac_t$NM" 1>&6
+fi
+
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+echo $ac_n "checking command to parse $NM output... $ac_c" 1>&6
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[BCDEGRST]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([_A-Za-z][_A-Za-z0-9]*\)'
+
+# Transform the above into a raw symbol and a C symbol.
+symxfrm='\1 \2\3 \3'
+
+# Transform an extracted symbol line into a proper C declaration
+global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern char \1;/p'"
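+# Worked example (illustrative only, assuming an empty symbol prefix and a
+# hypothetical nm output line): the pipe assembled below from $symcode,
+# $sympat and $symxfrm turns
+#   00000004 D nm_test_var
+# into "D nm_test_var nm_test_var", and $global_symbol_to_cdecl then
+# produces
+#   extern char nm_test_var;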
+
+# Define system-specific variables.
+case "$host_os" in
+aix*)
+ symcode='[BCDT]'
+ ;;
+cygwin* | mingw*)
+ symcode='[ABCDGISTW]'
+ ;;
+hpux*) # Its linker distinguishes data from code symbols
+ global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern char \1();/p' -e 's/^. .* \(.*\)$/extern char \1;/p'"
+ ;;
+irix*)
+ symcode='[BCDEGRST]'
+ ;;
+solaris*)
+ symcode='[BDT]'
+ ;;
+sysv4)
+ symcode='[DFNSTU]'
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+if $NM -V 2>&1 | egrep '(GNU|with BFD)' > /dev/null; then
+ symcode='[ABCDGISTW]'
+fi
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Write the raw and C identifiers.
+ global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode\)[ ][ ]*\($ac_symprfx\)$sympat$/$symxfrm/p'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+ $rm conftest*
+ cat > conftest.c <<EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(){}
+#ifdef __cplusplus
+}
+#endif
+main(){nm_test_var='a';nm_test_func();return(0);}
+EOF
+
+ echo "$progname:1653: checking if global_symbol_pipe works" >&5
+ if { (eval echo $progname:1654: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; } && test -s conftest.$objext; then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if { echo "$progname:1657: eval \"$NM conftest.$objext | $global_symbol_pipe > $nlist\"" >&5; eval "$NM conftest.$objext | $global_symbol_pipe > $nlist 2>&5"; } && test -s "$nlist"; then
+
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if egrep ' nm_test_var$' "$nlist" >/dev/null; then
+ if egrep ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<EOF > conftest.c
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+ # Now generate the symbol file.
+ eval "$global_symbol_to_cdecl"' < "$nlist" >> conftest.c'
+
+ cat <<EOF >> conftest.c
+#if defined (__STDC__) && __STDC__
+# define lt_ptr_t void *
+#else
+# define lt_ptr_t char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr_t address;
+}
+lt_preloaded_symbols[] =
+{
+EOF
+ sed 's/^. \(.*\) \(.*\)$/ {"\2", (lt_ptr_t) \&\2},/' < "$nlist" >> conftest.c
+ cat <<\EOF >> conftest.c
+ {0, (lt_ptr_t) 0}
+};
+
+#ifdef __cplusplus
+}
+#endif
+EOF
+ # Now try linking the two files.
+ mv conftest.$objext conftstm.$objext
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="conftstm.$objext"
+ CFLAGS="$CFLAGS$no_builtin_flag"
+ if { (eval echo $progname:1709: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then
+ pipe_works=yes
+ else
+ echo "$progname: failed program was:" >&5
+ cat conftest.c >&5
+ fi
+ LIBS="$save_LIBS"
+ else
+ echo "cannot find nm_test_func in $nlist" >&5
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&5
+ fi
+ else
+ echo "cannot run $global_symbol_pipe" >&5
+ fi
+ else
+ echo "$progname: failed program was:" >&5
+ cat conftest.c >&5
+ fi
+ $rm conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ global_symbol_pipe=
+ fi
+done
+if test "$pipe_works" = yes; then
+ echo "${ac_t}ok" 1>&6
+else
+ echo "${ac_t}failed" 1>&6
+fi
+
+if test -z "$global_symbol_pipe"; then
+ global_symbol_to_cdecl=
+fi
+
+# Check hardcoding attributes.
+echo $ac_n "checking how to hardcode library paths into programs... $ac_c" 1>&6
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" || \
+ test -n "$runpath_var"; then
+
+  # We can hardcode non-existent directories.
+ if test "$hardcode_direct" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$hardcode_shlibpath_var" != no &&
+ test "$hardcode_minus_L" != no; then
+ # Linking always hardcodes the temporary library directory.
+ hardcode_action=relink
+ else
+    # We can link without hardcoding, and we can hardcode nonexistent directories.
+ hardcode_action=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ hardcode_action=unsupported
+fi
+echo "$ac_t$hardcode_action" 1>&6
+
+
+reload_flag=
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+echo $ac_n "checking for $LD option to reload object files... $ac_c" 1>&6
+# PORTME Some linkers may need a different reload flag.
+reload_flag='-r'
+echo "$ac_t$reload_flag" 1>&6
+test -n "$reload_flag" && reload_flag=" $reload_flag"
+
+# PORTME Fill in your ld.so characteristics
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+file_magic_cmd=
+file_magic_test_file=
+deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# `none' -- dependencies not supported.
+# `unknown' -- same as `none', but documents that we really don't know.
+# `pass_all' -- all dependencies passed with no checks.
+# `test_compile' -- check by making a test program.
+# `file_magic [regex]' -- check by looking for files in the library path
+# that respond to the $file_magic_cmd with the given egrep regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
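+# Illustrative note, not part of the upstream ltconfig sources: a sketch of
+# how a `file_magic' method is applied, using the solaris* settings below
+# (the regex and test file come from that case):
+#     /usr/bin/file /lib/libc.so | \
+#       egrep 'ELF [0-9][0-9]-bit [LM]SB dynamic lib' >/dev/null && echo ok
+# A dependent library is accepted as a shared object only when the regex
+# matches; the sanity check near the end of this script runs the same kind
+# of test against $file_magic_test_file.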
+echo $ac_n "checking dynamic linker characteristics... $ac_c" 1>&6
+case "$host_os" in
+aix3*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}.so$major'
+ ;;
+
+aix4*)
+ version_type=linux
+  # AIX has no versioning support, so currently we cannot hardcode a correct
+  # soname into the executable. Versioning support could probably be added to
+  # collect2, so additional links may be useful in the future.
+  # We preserve .a as the extension for shared libraries, even though the
+  # AIX 4.2 and later linker supports .so.
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.a'
+ shlibpath_var=LIBPATH
+ deplibs_check_method=pass_all
+ ;;
+
+amigaos*)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done'
+ ;;
+
+beos*)
+ library_names_spec='${libname}.so'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ deplibs_check_method=pass_all
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+bsdi4*)
+ version_type=linux
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+ file_magic_cmd=/usr/bin/file
+ file_magic_test_file=/shlib/libc.so
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ export_dynamic_flag_spec=-rdynamic
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw*)
+ version_type=windows
+ need_version=no
+ need_lib_prefix=no
+ if test "$with_gcc" = yes; then
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll $libname.a'
+ else
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll $libname.lib'
+ fi
+ dynamic_linker='Win32 ld.exe'
+ deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ file_magic_cmd='${OBJDUMP} -f'
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+freebsd1*)
+ dynamic_linker=no
+ ;;
+
+freebsd*)
+ objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout`
+ version_type=freebsd-$objformat
+ case "$version_type" in
+ freebsd-elf*)
+ deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB shared object'
+ file_magic_cmd=/usr/bin/file
+ file_magic_test_file=`echo /usr/lib/libc.so*`
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ deplibs_check_method=unknown
+ library_names_spec='${libname}${release}.so$versuffix $libname.so$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case "$host_os" in
+ freebsd2* | freebsd3.[01]* | freebsdelf3.[01]*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ *) # from 3.2 on
+ shlibpath_overrides_runpath=no
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so${major} ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ dynamic_linker="$host_os dld.sl"
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}.sl$versuffix ${libname}${release}.sl$major $libname.sl'
+ soname_spec='${libname}${release}.sl$major'
+ # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ postinstall_cmds='chmod 555 $lib'
+
+ #### local change for Sleepycat DB: [#1990]
+ # The following 3 lines added, otherwise dependent libraries are not allowed
+ # on HP. We use dependent libraries in a very straightforward way, to
+ # incorporate -lnsl into libtcl.sl, and for testing only.
+ deplibs_check_method='file_magic PA-RISC[1-9][0-9.]* shared library'
+ file_magic_cmd=/usr/bin/file
+ file_magic_test_file=`echo /lib/libc.sl*`
+ ;;
+
+irix5* | irix6*)
+ version_type=irix
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}.so.$major'
+ library_names_spec='${libname}${release}.so.$versuffix ${libname}${release}.so.$major ${libname}${release}.so $libname.so'
+ case "$host_os" in
+ irix5*)
+ libsuff= shlibsuff=
+ # this will be overridden with pass_all, but let us keep it just in case
+ deplibs_check_method="file_magic ELF 32-bit MSB dynamic lib MIPS - version 1"
+ ;;
+ *)
+ case "$LD" in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 ") libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 ") libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 ") libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ file_magic_cmd=/usr/bin/file
+ file_magic_test_file=`echo /lib${libsuff}/libc.so*`
+ deplibs_check_method='pass_all'
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux-gnuoldld* | linux-gnuaout* | linux-gnucoff*)
+ dynamic_linker=no
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ deplibs_check_method=pass_all
+
+ if test -f /lib/ld.so.1; then
+ dynamic_linker='GNU ld.so'
+ else
+ # Only the GNU ld.so supports shared libraries on MkLinux.
+ case "$host_cpu" in
+ powerpc*) dynamic_linker=no ;;
+ *) dynamic_linker='Linux ld.so' ;;
+ esac
+ fi
+ ;;
+
+netbsd*)
+ version_type=sunos
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+openbsd*)
+ version_type=sunos
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ need_version=no
+ fi
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+os2*)
+ libname_spec='$name'
+ need_lib_prefix=no
+ library_names_spec='$libname.dll $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_version=no
+ soname_spec='${libname}${release}.so'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ # this will be overridden with pass_all, but let us keep it just in case
+ deplibs_check_method='file_magic COFF format alpha shared library'
+ file_magic_cmd=/usr/bin/file
+ file_magic_test_file=/shlib/libc.so
+ deplibs_check_method='pass_all'
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+rhapsody*)
+ version_type=sunos
+ library_names_spec='${libname}.so'
+ soname_spec='${libname}.so'
+ shlibpath_var=DYLD_LIBRARY_PATH
+ deplibs_check_method=pass_all
+ ;;
+
+sco3.2v5*)
+ version_type=osf
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ #### local change for Sleepycat DB:
+ # The following line added, otherwise dependent libraries are not allowed
+ # on SCO. We use dependent libraries in a very straightforward way.
+ deplibs_check_method='pass_all'
+ ;;
+
+solaris*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ deplibs_check_method="file_magic ELF [0-9][0-9]-bit [LM]SB dynamic lib"
+ file_magic_cmd=/usr/bin/file
+ file_magic_test_file=/lib/libc.so
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case "$host_vendor" in
+ sequent)
+ file_magic_cmd='/bin/file'
+ deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
+ ;;
+ ncr)
+ deplibs_check_method='pass_all'
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]'
+ file_magic_cmd=/usr/bin/file
+ file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ esac
+ ;;
+
+uts4*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+dgux*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+sysv4*MP*)
+ if test -d /usr/nec ;then
+ version_type=linux
+ library_names_spec='$libname.so.$versuffix $libname.so.$major $libname.so'
+ soname_spec='$libname.so.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+#### local change for Sleepycat DB:
+# Add in the QNX support from QNX.
+nto-qnx)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ deplibs_check_method='pass_all'
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+echo "$ac_t$dynamic_linker" 1>&6
+test "$dynamic_linker" = no && can_build_shared=no
+
+# Report the final consequences.
+echo "checking if libtool supports shared libraries... $can_build_shared" 1>&6
+
+# Only try to build win32 dlls if AC_LIBTOOL_WIN32_DLL was used in
+# configure.in; otherwise, build only static libraries.
+case "$host_os" in
+cygwin* | mingw* | os2*)
+ if test x$can_build_shared = xyes; then
+ test x$enable_win32_dll = xno && can_build_shared=no
+ echo "checking if package supports dlls... $can_build_shared" 1>&6
+ fi
+;;
+esac
+
+if test -n "$file_magic_test_file" && test -n "$file_magic_cmd"; then
+ case "$deplibs_check_method" in
+ "file_magic "*)
+ file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ egrep "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+EOF
+ fi ;;
+ esac
+fi
+
+echo $ac_n "checking whether to build shared libraries... $ac_c" 1>&6
+test "$can_build_shared" = "no" && enable_shared=no
+
+# On AIX, shared libraries and static libraries use the same namespace, and
+# are all built from PIC.
+case "$host_os" in
+aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+aix4*)
+ test "$enable_shared" = yes && enable_static=no
+ ;;
+esac
+
+echo "$ac_t$enable_shared" 1>&6
+
+# Make sure either enable_shared or enable_static is yes.
+test "$enable_shared" = yes || enable_static=yes
+
+echo "checking whether to build static libraries... $enable_static" 1>&6
+
+if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+
+echo $ac_n "checking for objdir... $ac_c" 1>&6
+rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+echo "$ac_t$objdir" 1>&6
+
+if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+if eval "test \"`echo '$''{'lt_cv_dlopen'+set}'`\" != set"; then
+ lt_cv_dlopen=no lt_cv_dlopen_libs=
+echo $ac_n "checking for dlopen in -ldl""... $ac_c" 1>&6
+echo "$progname:2248: checking for dlopen in -ldl" >&5
+ac_lib_var=`echo dl'_'dlopen | sed 'y%./+-%__p_%'`
+if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ ac_save_LIBS="$LIBS"
+LIBS="-ldl $LIBS"
+cat > conftest.$ac_ext <<EOF
+#line 2256 "ltconfig"
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen();
+
+int main() {
+dlopen()
+; return 0; }
+EOF
+if { (eval echo $progname:2269: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_lib_$ac_lib_var=yes"
+else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_lib_$ac_lib_var=no"
+fi
+rm -f conftest*
+LIBS="$ac_save_LIBS"
+
+fi
+if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
+else
+ echo "$ac_t""no" 1>&6
+echo $ac_n "checking for dlopen""... $ac_c" 1>&6
+echo "$progname:2288: checking for dlopen" >&5
+if eval "test \"`echo '$''{'ac_cv_func_dlopen'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2293 "ltconfig"
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char dlopen(); below. */
+#include <assert.h>
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen();
+
+int main() {
+
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_dlopen) || defined (__stub___dlopen)
+choke me
+#else
+dlopen();
+#endif
+
+; return 0; }
+EOF
+if { (eval echo $progname:2318: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_func_dlopen=yes"
+else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_func_dlopen=no"
+fi
+rm -f conftest*
+fi
+if eval "test \"`echo '$ac_cv_func_'dlopen`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ lt_cv_dlopen="dlopen"
+else
+ echo "$ac_t""no" 1>&6
+echo $ac_n "checking for dld_link in -ldld""... $ac_c" 1>&6
+echo "$progname:2335: checking for dld_link in -ldld" >&5
+ac_lib_var=`echo dld'_'dld_link | sed 'y%./+-%__p_%'`
+if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ ac_save_LIBS="$LIBS"
+LIBS="-ldld $LIBS"
+cat > conftest.$ac_ext <<EOF
+#line 2343 "ltconfig"
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dld_link();
+
+int main() {
+dld_link()
+; return 0; }
+EOF
+if { (eval echo $progname:2356: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_lib_$ac_lib_var=yes"
+else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_lib_$ac_lib_var=no"
+fi
+rm -f conftest*
+LIBS="$ac_save_LIBS"
+
+fi
+if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"
+else
+ echo "$ac_t""no" 1>&6
+echo $ac_n "checking for shl_load""... $ac_c" 1>&6
+echo "$progname:2375: checking for shl_load" >&5
+if eval "test \"`echo '$''{'ac_cv_func_shl_load'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2380 "ltconfig"
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char shl_load(); below. */
+#include <assert.h>
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char shl_load();
+
+int main() {
+
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_shl_load) || defined (__stub___shl_load)
+choke me
+#else
+shl_load();
+#endif
+
+; return 0; }
+EOF
+if { (eval echo $progname:2405: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_func_shl_load=yes"
+else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_func_shl_load=no"
+fi
+rm -f conftest*
+fi
+
+if eval "test \"`echo '$ac_cv_func_'shl_load`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ lt_cv_dlopen="shl_load"
+else
+ echo "$ac_t""no" 1>&6
+echo $ac_n "checking for shl_load in -ldld""... $ac_c" 1>&6
+echo "$progname:2423: checking for shl_load in -ldld" >&5
+ac_lib_var=`echo dld'_'shl_load | sed 'y%./+-%__p_%'`
+if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ ac_save_LIBS="$LIBS"
+LIBS="-ldld $LIBS"
+cat > conftest.$ac_ext <<EOF
+#line 2431 "ltconfig"
+#include "confdefs.h"
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char shl_load();
+
+int main() {
+shl_load()
+; return 0; }
+EOF
+if { (eval echo $progname:2445: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+ rm -rf conftest*
+ eval "ac_cv_lib_$ac_lib_var=yes"
+else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_lib_$ac_lib_var=no"
+fi
+rm -f conftest*
+LIBS="$ac_save_LIBS"
+
+fi
+if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+fi
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ fi
+
+ case "$lt_cv_dlopen" in
+ dlopen)
+for ac_hdr in dlfcn.h; do
+ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+echo "$progname:2488: checking for $ac_hdr" >&5
+if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2493 "ltconfig"
+#include <$ac_hdr>
+int fnord = 0;
+EOF
+ac_try="$ac_compile >/dev/null 2>conftest.out"
+{ (eval echo $progname:2498: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+if test -z "$ac_err"; then
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=yes"
+else
+ echo "$ac_err" >&5
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ eval "ac_cv_header_$ac_safe=no"
+fi
+rm -f conftest*
+fi
+if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+done
+
+ if test "x$ac_cv_header_dlfcn_h" = xyes; then
+ CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+ fi
+ eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ echo $ac_n "checking whether a program can dlopen itself""... $ac_c" 1>&6
+echo "$progname:2526: checking whether a program can dlopen itself" >&5
+if test "${lt_cv_dlopen_self+set}" = set; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ lt_cv_dlopen_self=cross
+ else
+ cat > conftest.c <<EOF
+#line 2534 "ltconfig"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LTDL_GLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LTDL_GLOBAL DL_GLOBAL
+# else
+# define LTDL_GLOBAL 0
+# endif
+#endif
+
+/* We may have to define LTDL_LAZY_OR_NOW on the command line if we
+   find out it does not work on some platforms. */
+#ifndef LTDL_LAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LTDL_LAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LTDL_LAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LTDL_LAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LTDL_LAZY_OR_NOW DL_NOW
+# else
+# define LTDL_LAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+fnord() { int i=42;}
+main() { void *self, *ptr1, *ptr2; self=dlopen(0,LTDL_GLOBAL|LTDL_LAZY_OR_NOW);
+ if(self) { ptr1=dlsym(self,"fnord"); ptr2=dlsym(self,"_fnord");
+ if(ptr1 || ptr2) { dlclose(self); exit(0); } } exit(1); }
+
+EOF
+if { (eval echo $progname:2580: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null
+then
+ lt_cv_dlopen_self=yes
+else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ lt_cv_dlopen_self=no
+fi
+rm -fr conftest*
+fi
+
+fi
+
+echo "$ac_t""$lt_cv_dlopen_self" 1>&6
+
+ if test "$lt_cv_dlopen_self" = yes; then
+ LDFLAGS="$LDFLAGS $link_static_flag"
+ echo $ac_n "checking whether a statically linked program can dlopen itself""... $ac_c" 1>&6
+echo "$progname:2599: checking whether a statically linked program can dlopen itself" >&5
+if test "${lt_cv_dlopen_self_static+set}" = set; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ lt_cv_dlopen_self_static=cross
+ else
+ cat > conftest.c <<EOF
+#line 2607 "ltconfig"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LTDL_GLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LTDL_GLOBAL DL_GLOBAL
+# else
+# define LTDL_GLOBAL 0
+# endif
+#endif
+
+/* We may have to define LTDL_LAZY_OR_NOW on the command line if we
+   find out it does not work on some platforms. */
+#ifndef LTDL_LAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LTDL_LAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LTDL_LAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LTDL_LAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LTDL_LAZY_OR_NOW DL_NOW
+# else
+# define LTDL_LAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+fnord() { int i=42;}
+main() { void *self, *ptr1, *ptr2; self=dlopen(0,LTDL_GLOBAL|LTDL_LAZY_OR_NOW);
+ if(self) { ptr1=dlsym(self,"fnord"); ptr2=dlsym(self,"_fnord");
+ if(ptr1 || ptr2) { dlclose(self); exit(0); } } exit(1); }
+
+EOF
+if { (eval echo $progname:2653: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null
+then
+ lt_cv_dlopen_self_static=yes
+else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ lt_cv_dlopen_self_static=no
+fi
+rm -fr conftest*
+fi
+
+fi
+
+echo "$ac_t""$lt_cv_dlopen_self_static" 1>&6
+fi
+ ;;
+ esac
+
+ case "$lt_cv_dlopen_self" in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case "$lt_cv_dlopen_self_static" in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+
+# Copy echo and quote the copy, instead of the original, because it is
+# used later.
+ltecho="$echo"
+if test "X$ltecho" = "X$CONFIG_SHELL $0 --fallback-echo"; then
+ ltecho="$CONFIG_SHELL \$0 --fallback-echo"
+fi
+LTSHELL="$SHELL"
+
+LTCONFIG_VERSION="$VERSION"
+
+# Only quote variables if we're using ltmain.sh.
+case "$ltmain" in
+*.sh)
+ # Now quote all the things that may contain metacharacters.
+ for var in ltecho old_CC old_CFLAGS old_CPPFLAGS \
+ old_LD old_LDFLAGS old_LIBS \
+ old_NM old_RANLIB old_LN_S old_DLLTOOL old_OBJDUMP old_AS \
+ AR CC LD LN_S NM LTSHELL LTCONFIG_VERSION \
+ reload_flag reload_cmds wl \
+ pic_flag link_static_flag no_builtin_flag export_dynamic_flag_spec \
+ thread_safe_flag_spec whole_archive_flag_spec libname_spec \
+ library_names_spec soname_spec \
+ RANLIB old_archive_cmds old_archive_from_new_cmds old_postinstall_cmds \
+ old_postuninstall_cmds archive_cmds archive_expsym_cmds postinstall_cmds postuninstall_cmds \
+ file_magic_cmd export_symbols_cmds deplibs_check_method allow_undefined_flag no_undefined_flag \
+ finish_cmds finish_eval global_symbol_pipe global_symbol_to_cdecl \
+ hardcode_libdir_flag_spec hardcode_libdir_separator \
+ sys_lib_search_path_spec sys_lib_dlsearch_path_spec \
+ compiler_c_o compiler_o_lo need_locks exclude_expsyms include_expsyms; do
+
+ case "$var" in
+ reload_cmds | old_archive_cmds | old_archive_from_new_cmds | \
+ old_postinstall_cmds | old_postuninstall_cmds | \
+ export_symbols_cmds | archive_cmds | archive_expsym_cmds | \
+ postinstall_cmds | postuninstall_cmds | \
+ finish_cmds | sys_lib_search_path_spec | sys_lib_dlsearch_path_spec)
+ # Double-quote double-evaled strings.
+ eval "$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\""
+ ;;
+ *)
+ eval "$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\""
+ ;;
+ esac
+ done
+
+ case "$ltecho" in
+ *'\$0 --fallback-echo"')
+ ltecho=`$echo "X$ltecho" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'`
+ ;;
+ esac
+
+ trap "$rm \"$ofile\"; exit 1" 1 2 15
+ echo "creating $ofile"
+ $rm "$ofile"
+ cat <<EOF > "$ofile"
+#! $SHELL
+
+# `$echo "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP)
+# NOTE: Changes made to this file will be lost: look at ltconfig or ltmain.sh.
+#
+# Copyright (C) 1996-1999 Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="sed -e s/^X//"
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X\${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+### BEGIN LIBTOOL CONFIG
+EOF
+ cfgfile="$ofile"
+ ;;
+
+*)
+ # Double-quote the variables that need it (for aesthetics).
+ for var in old_CC old_CFLAGS old_CPPFLAGS \
+ old_LD old_LDFLAGS old_LIBS \
+ old_NM old_RANLIB old_LN_S old_DLLTOOL old_OBJDUMP old_AS; do
+ eval "$var=\\\"\$var\\\""
+ done
+
+ # Just create a config file.
+ cfgfile="$ofile.cfg"
+ trap "$rm \"$cfgfile\"; exit 1" 1 2 15
+ echo "creating $cfgfile"
+ $rm "$cfgfile"
+ cat <<EOF > "$cfgfile"
+# `$echo "$cfgfile" | sed 's%^.*/%%'` - Libtool configuration file.
+# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP)
+EOF
+ ;;
+esac
+
+cat <<EOF >> "$cfgfile"
+# Libtool was configured as follows, on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+#
+# CC=$old_CC CFLAGS=$old_CFLAGS CPPFLAGS=$old_CPPFLAGS \\
+# LD=$old_LD LDFLAGS=$old_LDFLAGS LIBS=$old_LIBS \\
+# NM=$old_NM RANLIB=$old_RANLIB LN_S=$old_LN_S \\
+# DLLTOOL=$old_DLLTOOL OBJDUMP=$old_OBJDUMP AS=$old_AS \\
+# $0$ltconfig_args
+#
+# Compiler and other test output produced by $progname, useful for
+# debugging $progname, is in ./config.log if it exists.
+
+# The version of $progname that generated this script.
+LTCONFIG_VERSION=$LTCONFIG_VERSION
+
+# Shell to use when invoking shell scripts.
+SHELL=$LTSHELL
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# The host system.
+host_alias=$host_alias
+host=$host
+
+# An echo program that does not interpret backslashes.
+echo=$ltecho
+
+# The archiver.
+AR=$AR
+
+# The default C compiler.
+CC=$CC
+
+# The linker used to build libraries.
+LD=$LD
+
+# Whether we need hard or soft links.
+LN_S=$LN_S
+
+# A BSD-compatible nm program.
+NM=$NM
+
+# Used on cygwin: DLL creation program.
+DLLTOOL="$DLLTOOL"
+
+# Used on cygwin: object dumper.
+OBJDUMP="$OBJDUMP"
+
+# Used on cygwin: assembler.
+AS="$AS"
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# How to create reloadable object files.
+reload_flag=$reload_flag
+reload_cmds=$reload_cmds
+
+# How to pass a linker flag through the compiler.
+wl=$wl
+
+# Object file suffix (normally "o").
+objext="$objext"
+
+# Old archive suffix (normally "a").
+libext="$libext"
+
+# Executable file suffix (normally "").
+exeext="$exeext"
+
+# Additional compiler flags for building library objects.
+pic_flag=$pic_flag
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$compiler_c_o
+
+# Can we write directly to a .lo ?
+compiler_o_lo=$compiler_o_lo
+
+# Must we lock files when doing compilation ?
+need_locks=$need_locks
+
+# Do we need the lib prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Whether dlopen is supported.
+dlopen=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$link_static_flag
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$no_builtin_flag
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$whole_archive_flag_spec
+
+# Compiler flag to generate thread-safe objects.
+thread_safe_flag_spec=$thread_safe_flag_spec
+
+# Library versioning type.
+version_type=$version_type
+
+# Format of library name prefix.
+libname_spec=$libname_spec
+
+# List of archive names. First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME.
+library_names_spec=$library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$soname_spec
+
+# Commands used to build and install an old-style archive.
+RANLIB=$RANLIB
+old_archive_cmds=$old_archive_cmds
+old_postinstall_cmds=$old_postinstall_cmds
+old_postuninstall_cmds=$old_postuninstall_cmds
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$old_archive_from_new_cmds
+
+# Commands used to build and install a shared archive.
+archive_cmds=$archive_cmds
+archive_expsym_cmds=$archive_expsym_cmds
+postinstall_cmds=$postinstall_cmds
+postuninstall_cmds=$postuninstall_cmds
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$deplibs_check_method
+
+# Command to use when deplibs_check_method == file_magic.
+file_magic_cmd=$file_magic_cmd
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$allow_undefined_flag
+
+# Flag that forces no undefined symbols.
+no_undefined_flag=$no_undefined_flag
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$finish_cmds
+
+# Same as above, but a single script fragment to be evaled but not shown.
+finish_eval=$finish_eval
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$global_symbol_pipe
+
+# Transform the output of nm into a proper C declaration.
+global_symbol_to_cdecl=$global_symbol_to_cdecl
+
+# This is the shared library runtime path variable.
+runpath_var=$runpath_var
+
+# This is the shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec=$hardcode_libdir_flag_spec
+
+# Whether we need a single -rpath flag with a separated argument.
+hardcode_libdir_separator=$hardcode_libdir_separator
+
+# Set to yes if using DIR/libNAME.so during linking hardcodes DIR into the
+# resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to yes if using the -LDIR flag during linking hardcodes DIR into the
+# resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into
+# the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Compile-time system search path for libraries
+sys_lib_search_path_spec=$sys_lib_search_path_spec
+
+# Run-time system search path for libraries
+sys_lib_dlsearch_path_spec=$sys_lib_dlsearch_path_spec
+
+# Fix the shell variable \$srcfile for the compiler.
+fix_srcfile_path="$fix_srcfile_path"
+
+# Set to yes if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$export_symbols_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$include_expsyms
+
+EOF
+
+case "$ltmain" in
+*.sh)
+ echo '### END LIBTOOL CONFIG' >> "$ofile"
+ echo >> "$ofile"
+ case "$host_os" in
+ aix3*)
+ cat <<\EOF >> "$ofile"
+
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+EOF
+ ;;
+ esac
+
+ # Append the ltmain.sh script.
+ sed '$q' "$ltmain" >> "$ofile" || (rm -f "$ofile"; exit 1)
+  # We use sed instead of cat because bash on DJGPP gets confused if
+  # it finds mixed CR/LF and LF-only lines. Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+
+ chmod +x "$ofile"
+ ;;
+
+*)
+ # Compile the libtool program.
+ echo "FIXME: would compile $ltmain"
+ ;;
+esac
+
+test -n "$cache_file" || exit 0
+
+# AC_CACHE_SAVE
+trap '' 1 2 15
+cat > confcache <<\EOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs. It is not useful on other systems.
+# If it contains results you don't want to keep, you may remove or edit it.
+#
+# By default, configure uses ./config.cache as the cache file,
+# creating it if it does not exist already. You can give configure
+# the --cache-file=FILE option to use a different cache file; that is
+# what configure does when it calls configure scripts in
+# subdirectories, so they share the cache.
+# Giving --cache-file=/dev/null disables caching, for debugging configure.
+# config.status only pays attention to the cache file if you give it the
+# --recheck option to rerun configure.
+#
+EOF
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, don't put newlines in cache variables' values.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(set) 2>&1 |
+ case `(ac_space=' '; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote substitution
+ # turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ -e "s/'/'\\\\''/g" \
+ -e "s/^\\([a-zA-Z0-9_]*_cv_[a-zA-Z0-9_]*\\)=\\(.*\\)/\\1=\${\\1='\\2'}/p"
+ ;;
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n -e 's/^\([a-zA-Z0-9_]*_cv_[a-zA-Z0-9_]*\)=\(.*\)/\1=${\1=\2}/p'
+ ;;
+ esac >> confcache
+if cmp -s $cache_file confcache; then
+ :
+else
+ if test -w $cache_file; then
+ echo "updating cache $cache_file"
+ cat confcache > $cache_file
+ else
+ echo "not updating unwritable cache $cache_file"
+ fi
+fi
+rm -f confcache
+
+exit 0
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
+#! /bin/bash
diff --git a/bdb/dist/ltmain.sh b/bdb/dist/ltmain.sh
new file mode 100644
index 00000000000..4feadbfb759
--- /dev/null
+++ b/bdb/dist/ltmain.sh
@@ -0,0 +1,4029 @@
+# ltmain.sh - Provide generalized library-building support services.
+# NOTE: Changing this file will not affect anything until you rerun ltconfig.
+#
+# Copyright (C) 1996-1999 Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Check that we have a working $echo.
+if test "X$1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X$1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell, and then maybe $echo will work.
+ exec $SHELL "$0" --no-reexec ${1+"$@"}
+fi
+
+if test "X$1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+$*
+EOF
+ exit 0
+fi
+
+# The name of this program.
+progname=`$echo "$0" | sed 's%^.*/%%'`
+modename="$progname"
+
+# Constants.
+PROGRAM=ltmain.sh
+PACKAGE=libtool
+VERSION=1.3.5
+TIMESTAMP=" (1.385.2.206 2000/05/27 11:12:27)"
+
+default_mode=
+help="Try \`$progname --help' for more information."
+magic="%%%MAGIC variable%%%"
+mkdir="mkdir"
+mv="mv -f"
+rm="rm -f"
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e 1s/^X//'
+sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
+SP2NL='tr \040 \012'
+NL2SP='tr \015\012 \040\040'
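+# Illustrative note, not part of the upstream ltmain.sh sources: a minimal
+# example of the quoting helpers above.  Because $Xsed strips the protective
+# leading X and $sed_quote_subst escapes \, `, " and $, the command
+#     $echo 'Xsay "$HOME"' | $Xsed -e "$sed_quote_subst"
+# prints
+#     say \"\$HOME\"
+# which is safe to re-embed inside a double-quoted shell string.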
+
+# NLS nuisances.
+# Only set LANG and LC_ALL to C if already set.
+# These must not be set unconditionally because not all systems understand
+# e.g. LANG=C (notably SCO).
+# We save the old values to restore during execute mode.
+if test "${LC_ALL+set}" = set; then
+ save_LC_ALL="$LC_ALL"; LC_ALL=C; export LC_ALL
+fi
+if test "${LANG+set}" = set; then
+ save_LANG="$LANG"; LANG=C; export LANG
+fi
+
+if test "$LTCONFIG_VERSION" != "$VERSION"; then
+ echo "$modename: ltconfig version \`$LTCONFIG_VERSION' does not match $PROGRAM version \`$VERSION'" 1>&2
+ echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
+ exit 1
+fi
+
+if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
+ echo "$modename: not configured to build any kind of library" 1>&2
+ echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
+ exit 1
+fi
+
+# Global variables.
+mode=$default_mode
+nonopt=
+prev=
+prevopt=
+run=
+show="$echo"
+show_help=
+execute_dlfiles=
+lo2o="s/\\.lo\$/.${objext}/"
+o2lo="s/\\.${objext}\$/.lo/"
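+# Illustrative note, not part of the upstream ltmain.sh sources: with the
+# usual objext of `o', the two substitutions above simply swap suffixes:
+#     $echo "Xfoo.lo" | $Xsed -e "$lo2o"   # -> foo.o
+#     $echo "Xfoo.o"  | $Xsed -e "$o2lo"   # -> foo.lo
+# They are used below to derive the real object name from a libtool object
+# and vice versa.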
+
+# Parse our command line options once, thoroughly.
+while test $# -gt 0
+do
+ arg="$1"
+ shift
+
+ case "$arg" in
+ -*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
+ *) optarg= ;;
+ esac
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ case "$prev" in
+ execute_dlfiles)
+ eval "$prev=\"\$$prev \$arg\""
+ ;;
+ *)
+ eval "$prev=\$arg"
+ ;;
+ esac
+
+ prev=
+ prevopt=
+ continue
+ fi
+
+ # Have we seen a non-optional argument yet?
+ case "$arg" in
+ --help)
+ show_help=yes
+ ;;
+
+ --version)
+ echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
+ exit 0
+ ;;
+
+ --config)
+ sed -e '1,/^### BEGIN LIBTOOL CONFIG/d' -e '/^### END LIBTOOL CONFIG/,$d' $0
+ exit 0
+ ;;
+
+ --debug)
+ echo "$progname: enabling shell trace mode"
+ set -x
+ ;;
+
+ --dry-run | -n)
+ run=:
+ ;;
+
+ --features)
+ echo "host: $host"
+ if test "$build_libtool_libs" = yes; then
+ echo "enable shared libraries"
+ else
+ echo "disable shared libraries"
+ fi
+ if test "$build_old_libs" = yes; then
+ echo "enable static libraries"
+ else
+ echo "disable static libraries"
+ fi
+ exit 0
+ ;;
+
+ --finish) mode="finish" ;;
+
+ --mode) prevopt="--mode" prev=mode ;;
+ --mode=*) mode="$optarg" ;;
+
+ --quiet | --silent)
+ show=:
+ ;;
+
+ -dlopen)
+ prevopt="-dlopen"
+ prev=execute_dlfiles
+ ;;
+
+ -*)
+ $echo "$modename: unrecognized option \`$arg'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+
+ *)
+ nonopt="$arg"
+ break
+ ;;
+ esac
+done
+
+if test -n "$prevopt"; then
+ $echo "$modename: option \`$prevopt' requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+fi
+
+if test -z "$show_help"; then
+
+ # Infer the operation mode.
+ if test -z "$mode"; then
+ case "$nonopt" in
+ *cc | *++ | gcc* | *-gcc*)
+ mode=link
+ for arg
+ do
+ case "$arg" in
+ -c)
+ mode=compile
+ break
+ ;;
+ esac
+ done
+ ;;
+ *db | *dbx | *strace | *truss)
+ mode=execute
+ ;;
+ *install*|cp|mv)
+ mode=install
+ ;;
+ *rm)
+ mode=uninstall
+ ;;
+ *)
+ # If we have no mode, but dlfiles were specified, then do execute mode.
+ test -n "$execute_dlfiles" && mode=execute
+
+ # Just use the default operation mode.
+ if test -z "$mode"; then
+ if test -n "$nonopt"; then
+ $echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2
+ else
+ $echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2
+ fi
+ fi
+ ;;
+ esac
+ fi
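+  # Illustrative note, not part of the upstream ltmain.sh sources: with no
+  # explicit --mode, the case statement above classifies invocations such as
+  #     libtool gcc -c hello.c                          (compile, due to -c)
+  #     libtool gcc -o hello hello.lo                   (link)
+  #     libtool install -c hello /usr/local/bin/hello   (install)
+  # while an explicit --mode=MODE always bypasses the guess.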
+
+ # Only execute mode is allowed to have -dlopen flags.
+ if test -n "$execute_dlfiles" && test "$mode" != execute; then
+ $echo "$modename: unrecognized option \`-dlopen'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Change the help message to a mode-specific one.
+ generic_help="$help"
+ help="Try \`$modename --help --mode=$mode' for more information."
+
+ # These modes are in order of execution frequency so that they run quickly.
+ case "$mode" in
+ # libtool compile mode
+ compile)
+ modename="$modename: compile"
+ # Get the compilation command and the source file.
+ base_compile=
+ lastarg=
+ srcfile="$nonopt"
+ suppress_output=
+
+ user_target=no
+ for arg
+ do
+ # Accept any command-line options.
+ case "$arg" in
+ -o)
+ if test "$user_target" != "no"; then
+ $echo "$modename: you cannot specify \`-o' more than once" 1>&2
+ exit 1
+ fi
+ user_target=next
+ ;;
+
+ -static)
+ build_old_libs=yes
+ continue
+ ;;
+ esac
+
+ case "$user_target" in
+ next)
+ # The next one is the -o target name
+ user_target=yes
+ continue
+ ;;
+ yes)
+ # We got the output file
+ user_target=set
+ libobj="$arg"
+ continue
+ ;;
+ esac
+
+ # Accept the current argument as the source file.
+ lastarg="$srcfile"
+ srcfile="$arg"
+
+ # Aesthetically quote the previous argument.
+
+ # Backslashify any backslashes, double quotes, and dollar signs.
+ # These are the only characters that are still specially
+      # interpreted inside of double-quoted strings.
+ lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
+
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly in scan
+ # sets, so we specify it separately.
+ case "$lastarg" in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ lastarg="\"$lastarg\""
+ ;;
+ esac
+
+ # Add the previous argument to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ done
+
+ case "$user_target" in
+ set)
+ ;;
+ no)
+ # Get the name of the library object.
+ libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'`
+ ;;
+ *)
+ $echo "$modename: you must specify a target with \`-o'" 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Recognize several different file suffixes.
+ # If the user specifies -o file.o, it is replaced with file.lo
+ xform='[cCFSfmso]'
+ case "$libobj" in
+ *.ada) xform=ada ;;
+ *.adb) xform=adb ;;
+ *.ads) xform=ads ;;
+ *.asm) xform=asm ;;
+ *.c++) xform=c++ ;;
+ *.cc) xform=cc ;;
+ *.cpp) xform=cpp ;;
+ *.cxx) xform=cxx ;;
+ *.f90) xform=f90 ;;
+ *.for) xform=for ;;
+ esac
+
+ libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
+
+ case "$libobj" in
+ *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
+ *)
+ $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
+ exit 1
+ ;;
+ esac
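+    # Illustrative note, not part of the upstream ltmain.sh sources: for a
+    # plain C source compiled without -o, e.g. `libtool gcc -c hello.c',
+    # libobj starts out as `hello.c', the default xform pattern rewrites it
+    # to `hello.lo', and the $lo2o substitution in the case above then gives
+    # obj=`hello.o', so both object names are known from here on.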
+
+ if test -z "$base_compile"; then
+ $echo "$modename: you must specify a compilation command" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Delete any leftover library objects.
+ if test "$build_old_libs" = yes; then
+ removelist="$obj $libobj"
+ else
+ removelist="$libobj"
+ fi
+
+ $run $rm $removelist
+ trap "$run $rm $removelist; exit 1" 1 2 15
+
+ # Calculate the filename of the output object if compiler does
+ # not support -o with -c
+ if test "$compiler_c_o" = no; then
+ output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\..*$%%'`.${objext}
+ lockfile="$output_obj.lock"
+ removelist="$removelist $output_obj $lockfile"
+ trap "$run $rm $removelist; exit 1" 1 2 15
+ else
+ need_locks=no
+ lockfile=
+ fi
+
+    # Lock this critical section if it is needed.
+    # We hard-link this script file as the lock, to avoid creating a new file.
+ if test "$need_locks" = yes; then
+ until ln "$0" "$lockfile" 2>/dev/null; do
+ $show "Waiting for $lockfile to be removed"
+ sleep 2
+ done
+ elif test "$need_locks" = warn; then
+ if test -f "$lockfile"; then
+ echo "\
+*** ERROR, $lockfile exists and contains:
+`cat $lockfile 2>/dev/null`
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+ echo $srcfile > "$lockfile"
+ fi
+
+ if test -n "$fix_srcfile_path"; then
+ eval srcfile=\"$fix_srcfile_path\"
+ fi
+
+ # Only build a PIC object if we are building libtool libraries.
+ if test "$build_libtool_libs" = yes; then
+ # Without this assignment, base_compile gets emptied.
+ fbsd_hideous_sh_bug=$base_compile
+
+ # All platforms use -DPIC, to notify preprocessed assembler code.
+ command="$base_compile $srcfile $pic_flag -DPIC"
+ if test "$build_old_libs" = yes; then
+ lo_libobj="$libobj"
+ dir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$dir" = "X$libobj"; then
+ dir="$objdir"
+ else
+ dir="$dir/$objdir"
+ fi
+ libobj="$dir/"`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
+
+ if test -d "$dir"; then
+ $show "$rm $libobj"
+ $run $rm $libobj
+ else
+ $show "$mkdir $dir"
+ $run $mkdir $dir
+ status=$?
+ if test $status -ne 0 && test ! -d $dir; then
+ exit $status
+ fi
+ fi
+ fi
+ if test "$compiler_o_lo" = yes; then
+ output_obj="$libobj"
+ command="$command -o $output_obj"
+ elif test "$compiler_c_o" = yes; then
+ output_obj="$obj"
+ command="$command -o $output_obj"
+ fi
+
+ $run $rm "$output_obj"
+ $show "$command"
+ if $run eval "$command"; then :
+ else
+ test -n "$output_obj" && $run $rm $removelist
+ exit 1
+ fi
+
+ if test "$need_locks" = warn &&
+ test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
+ echo "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+
+ # Just move the object if needed, then go on to compile the next one
+ if test x"$output_obj" != x"$libobj"; then
+ $show "$mv $output_obj $libobj"
+ if $run $mv $output_obj $libobj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # If we have no pic_flag, then copy the object into place and finish.
+ if test -z "$pic_flag" && test "$build_old_libs" = yes; then
+ # Rename the .lo from within objdir to obj
+ if test -f $obj; then
+ $show $rm $obj
+ $run $rm $obj
+ fi
+
+ $show "$mv $libobj $obj"
+ if $run $mv $libobj $obj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+
+ xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$obj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$obj" | $Xsed -e "s%.*/%%"`
+ libobj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
+ # Now arrange that obj and lo_libobj become the same file
+ $show "(cd $xdir && $LN_S $baseobj $libobj)"
+ if $run eval '(cd $xdir && $LN_S $baseobj $libobj)'; then
+ exit 0
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # Allow error messages only from the first compilation.
+ suppress_output=' >/dev/null 2>&1'
+ fi
+
+ # Only build a position-dependent object if we build old libraries.
+ if test "$build_old_libs" = yes; then
+ command="$base_compile $srcfile"
+ if test "$compiler_c_o" = yes; then
+ command="$command -o $obj"
+ output_obj="$obj"
+ fi
+
+ # Suppress compiler output if we already did a PIC compilation.
+ command="$command$suppress_output"
+ $run $rm "$output_obj"
+ $show "$command"
+ if $run eval "$command"; then :
+ else
+ $run $rm $removelist
+ exit 1
+ fi
+
+ if test "$need_locks" = warn &&
+ test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
+ echo "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+
+ # Just move the object if needed
+ if test x"$output_obj" != x"$obj"; then
+ $show "$mv $output_obj $obj"
+ if $run $mv $output_obj $obj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we do not
+ # accidentally link it into a program.
+ if test "$build_libtool_libs" != yes; then
+ $show "echo timestamp > $libobj"
+ $run eval "echo timestamp > \$libobj" || exit $?
+ else
+ # Move the .lo from within objdir
+ $show "$mv $libobj $lo_libobj"
+ if $run $mv $libobj $lo_libobj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+ fi
+
+ # Unlock the critical section if it was locked
+ if test "$need_locks" != no; then
+ $rm "$lockfile"
+ fi
+
+ exit 0
+ ;;
+
+ # libtool link mode
+ link)
+ modename="$modename: link"
+ case "$host" in
+ *-*-cygwin* | *-*-mingw* | *-*-os2*)
+ # It is impossible to link a dll without this setting, and
+ # we shouldn't force the makefile maintainer to figure out
+ # which system we are compiling for in order to pass an extra
+ # flag for every libtool invocation.
+ # allow_undefined=no
+
+ # FIXME: Unfortunately, there are problems with the above when trying
+ # to make a dll which has undefined symbols, in which case not
+ # even a static library is built. For now, we need to specify
+ # -no-undefined on the libtool link line when we can be certain
+ # that all symbols are satisfied, otherwise we get a static library.
+ allow_undefined=yes
+
+ # This is a source program that is used to create dlls on Windows
+ # Don't remove or modify the starting and closing comments
+# /* ltdll.c starts here */
+# #define WIN32_LEAN_AND_MEAN
+# #include <windows.h>
+# #undef WIN32_LEAN_AND_MEAN
+# #include <stdio.h>
+#
+# #ifndef __CYGWIN__
+# # ifdef __CYGWIN32__
+# # define __CYGWIN__ __CYGWIN32__
+# # endif
+# #endif
+#
+# #ifdef __cplusplus
+# extern "C" {
+# #endif
+# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved);
+# #ifdef __cplusplus
+# }
+# #endif
+#
+# #ifdef __CYGWIN__
+# #include <cygwin/cygwin_dll.h>
+# DECLARE_CYGWIN_DLL( DllMain );
+# #endif
+# HINSTANCE __hDllInstance_base;
+#
+# BOOL APIENTRY
+# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved)
+# {
+# __hDllInstance_base = hInst;
+# return TRUE;
+# }
+# /* ltdll.c ends here */
+ # This is a source program that is used to create import libraries
+ # on Windows for dlls which lack them. Don't remove or modify the
+ # starting and closing comments
+# /* impgen.c starts here */
+# /* Copyright (C) 1999 Free Software Foundation, Inc.
+#
+# This file is part of GNU libtool.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# */
+#
+# #include <stdio.h> /* for printf() */
+# #include <unistd.h> /* for open(), lseek(), read() */
+# #include <fcntl.h> /* for O_RDONLY, O_BINARY */
+# #include <string.h> /* for strdup() */
+# #include <stdlib.h> /* for malloc() */
+#
+# static unsigned int
+# pe_get16 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[2];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 2);
+# return b[0] + (b[1]<<8);
+# }
+#
+# static unsigned int
+# pe_get32 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[4];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 4);
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# static unsigned int
+# pe_as32 (ptr)
+# void *ptr;
+# {
+# unsigned char *b = ptr;
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# int
+# main (argc, argv)
+# int argc;
+# char *argv[];
+# {
+# int dll;
+# unsigned long pe_header_offset, opthdr_ofs, num_entries, i;
+# unsigned long export_rva, export_size, nsections, secptr, expptr;
+# unsigned long name_rvas, nexp;
+# unsigned char *expdata, *erva;
+# char *filename, *dll_name;
+#
+# filename = argv[1];
+#
+# dll = open(filename, O_RDONLY|O_BINARY);
+# if (dll < 0)
+# return 1;
+#
+# dll_name = filename;
+#
+# for (i=0; filename[i]; i++)
+# if (filename[i] == '/' || filename[i] == '\\' || filename[i] == ':')
+# dll_name = filename + i +1;
+#
+# pe_header_offset = pe_get32 (dll, 0x3c);
+# opthdr_ofs = pe_header_offset + 4 + 20;
+# num_entries = pe_get32 (dll, opthdr_ofs + 92);
+#
+# if (num_entries < 1) /* no exports */
+# return 1;
+#
+# export_rva = pe_get32 (dll, opthdr_ofs + 96);
+# export_size = pe_get32 (dll, opthdr_ofs + 100);
+# nsections = pe_get16 (dll, pe_header_offset + 4 +2);
+# secptr = (pe_header_offset + 4 + 20 +
+# pe_get16 (dll, pe_header_offset + 4 + 16));
+#
+# expptr = 0;
+# for (i = 0; i < nsections; i++)
+# {
+# char sname[8];
+# unsigned long secptr1 = secptr + 40 * i;
+# unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+# unsigned long vsize = pe_get32 (dll, secptr1 + 16);
+# unsigned long fptr = pe_get32 (dll, secptr1 + 20);
+# lseek(dll, secptr1, SEEK_SET);
+# read(dll, sname, 8);
+# if (vaddr <= export_rva && vaddr+vsize > export_rva)
+# {
+# expptr = fptr + (export_rva - vaddr);
+# if (export_rva + export_size > vaddr + vsize)
+# export_size = vsize - (export_rva - vaddr);
+# break;
+# }
+# }
+#
+# expdata = (unsigned char*)malloc(export_size);
+# lseek (dll, expptr, SEEK_SET);
+# read (dll, expdata, export_size);
+# erva = expdata - export_rva;
+#
+# nexp = pe_as32 (expdata+24);
+# name_rvas = pe_as32 (expdata+32);
+#
+# printf ("EXPORTS\n");
+# for (i = 0; i<nexp; i++)
+# {
+# unsigned long name_rva = pe_as32 (erva+name_rvas+i*4);
+# printf ("\t%s @ %ld ;\n", erva+name_rva, 1+ i);
+# }
+#
+# return 0;
+# }
+# /* impgen.c ends here */
+ ;;
+ *)
+ allow_undefined=yes
+ ;;
+ esac
+ compile_command="$nonopt"
+ finalize_command="$nonopt"
+
+ compile_rpath=
+ finalize_rpath=
+ compile_shlibpath=
+ finalize_shlibpath=
+ convenience=
+ old_convenience=
+ deplibs=
+ linkopts=
+
+ if test -n "$shlibpath_var"; then
+ # get the directories listed in $shlibpath_var
+ eval lib_search_path=\`\$echo \"X \${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
+ else
+ lib_search_path=
+ fi
+ # now prepend the system-specific ones
+ eval lib_search_path=\"$sys_lib_search_path_spec\$lib_search_path\"
+ eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+ avoid_version=no
+ dlfiles=
+ dlprefiles=
+ dlself=no
+ export_dynamic=no
+ export_symbols=
+ export_symbols_regex=
+ generated=
+ libobjs=
+ link_against_libtool_libs=
+ ltlibs=
+ module=no
+ objs=
+ prefer_static_libs=no
+ preload=no
+ prev=
+ prevarg=
+ release=
+ rpath=
+ xrpath=
+ perm_rpath=
+ temp_rpath=
+ thread_safe=no
+ vinfo=
+
+ # We need to know -static, to get the right output filenames.
+ for arg
+ do
+ case "$arg" in
+ -all-static | -static)
+ if test "X$arg" = "X-all-static"; then
+ if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
+ $echo "$modename: warning: complete static linking is impossible in this configuration" 1>&2
+ fi
+ if test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ else
+ if test -z "$pic_flag" && test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ fi
+ build_libtool_libs=no
+ build_old_libs=yes
+ prefer_static_libs=yes
+ break
+ ;;
+ esac
+ done
+
+ # See if our shared archives depend on static archives.
+ test -n "$old_archive_from_new_cmds" && build_old_libs=yes
+
+ # Go through the arguments, transforming them on the way.
+ while test $# -gt 0; do
+ arg="$1"
+ shift
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ case "$prev" in
+ output)
+ compile_command="$compile_command @OUTPUT@"
+ finalize_command="$finalize_command @OUTPUT@"
+ ;;
+ esac
+
+ case "$prev" in
+ dlfiles|dlprefiles)
+ if test "$preload" = no; then
+ # Add the symbol object into the linking commands.
+ compile_command="$compile_command @SYMFILE@"
+ finalize_command="$finalize_command @SYMFILE@"
+ preload=yes
+ fi
+ case "$arg" in
+ *.la | *.lo) ;; # We handle these cases below.
+ force)
+ if test "$dlself" = no; then
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ self)
+ if test "$prev" = dlprefiles; then
+ dlself=yes
+ elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
+ dlself=yes
+ else
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ *)
+ if test "$prev" = dlfiles; then
+ dlfiles="$dlfiles $arg"
+ else
+ dlprefiles="$dlprefiles $arg"
+ fi
+ prev=
+ ;;
+ esac
+ ;;
+ expsyms)
+ export_symbols="$arg"
+ if test ! -f "$arg"; then
+ $echo "$modename: symbol file \`$arg' does not exist"
+ exit 1
+ fi
+ prev=
+ continue
+ ;;
+ expsyms_regex)
+ export_symbols_regex="$arg"
+ prev=
+ continue
+ ;;
+ release)
+ release="-$arg"
+ prev=
+ continue
+ ;;
+ rpath | xrpath)
+ # We need an absolute path.
+ case "$arg" in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ $echo "$modename: only absolute run-paths are allowed" 1>&2
+ exit 1
+ ;;
+ esac
+ if test "$prev" = rpath; then
+ case "$rpath " in
+ *" $arg "*) ;;
+ *) rpath="$rpath $arg" ;;
+ esac
+ else
+ case "$xrpath " in
+ *" $arg "*) ;;
+ *) xrpath="$xrpath $arg" ;;
+ esac
+ fi
+ prev=
+ continue
+ ;;
+ *)
+ eval "$prev=\"\$arg\""
+ prev=
+ continue
+ ;;
+ esac
+ fi
+
+ prevarg="$arg"
+
+ case "$arg" in
+ -all-static)
+ if test -n "$link_static_flag"; then
+ compile_command="$compile_command $link_static_flag"
+ finalize_command="$finalize_command $link_static_flag"
+ fi
+ continue
+ ;;
+
+ -allow-undefined)
+ # FIXME: remove this flag sometime in the future.
+ $echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2
+ continue
+ ;;
+
+ -avoid-version)
+ avoid_version=yes
+ continue
+ ;;
+
+ -dlopen)
+ prev=dlfiles
+ continue
+ ;;
+
+ -dlpreopen)
+ prev=dlprefiles
+ continue
+ ;;
+
+ -export-dynamic)
+ export_dynamic=yes
+ continue
+ ;;
+
+ -export-symbols | -export-symbols-regex)
+ if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+ $echo "$modename: not more than one -exported-symbols argument allowed"
+ exit 1
+ fi
+ if test "X$arg" = "X-export-symbols"; then
+ prev=expsyms
+ else
+ prev=expsyms_regex
+ fi
+ continue
+ ;;
+
+ -L*)
+ dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
+ # We need an absolute path.
+ case "$dir" in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
+ $echo "$modename: passing it literally to the linker, although it might fail" 1>&2
+ absdir="$dir"
+ fi
+ dir="$absdir"
+ ;;
+ esac
+ case " $deplibs " in
+ *" $arg "*) ;;
+ *) deplibs="$deplibs $arg";;
+ esac
+ case " $lib_search_path " in
+ *" $dir "*) ;;
+ *) lib_search_path="$lib_search_path $dir";;
+ esac
+ case "$host" in
+ *-*-cygwin* | *-*-mingw* | *-*-os2*)
+ dllsearchdir=`cd "$dir" && pwd || echo "$dir"`
+ case ":$dllsearchpath:" in
+ ::) dllsearchpath="$dllsearchdir";;
+ *":$dllsearchdir:"*) ;;
+ *) dllsearchpath="$dllsearchpath:$dllsearchdir";;
+ esac
+ ;;
+ esac
+ ;;
+
+ -l*)
+ if test "$arg" = "-lc"; then
+ case "$host" in
+ *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos*)
+ # These systems don't actually have a C library (as such)
+ continue
+ ;;
+ esac
+ elif test "$arg" = "-lm"; then
+ case "$host" in
+ *-*-cygwin* | *-*-beos*)
+ # These systems don't actually have a math library (as such)
+ continue
+ ;;
+ esac
+ fi
+ deplibs="$deplibs $arg"
+ ;;
+
+ -module)
+ module=yes
+ continue
+ ;;
+
+ -no-undefined)
+ allow_undefined=no
+ continue
+ ;;
+
+ -o) prev=output ;;
+
+ -release)
+ prev=release
+ continue
+ ;;
+
+ -rpath)
+ prev=rpath
+ continue
+ ;;
+
+ -R)
+ prev=xrpath
+ continue
+ ;;
+
+ -R*)
+ dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
+ # We need an absolute path.
+ case "$dir" in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ $echo "$modename: only absolute run-paths are allowed" 1>&2
+ exit 1
+ ;;
+ esac
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) xrpath="$xrpath $dir" ;;
+ esac
+ continue
+ ;;
+
+ -static)
+ # If we have no pic_flag, then this is the same as -all-static.
+ if test -z "$pic_flag" && test -n "$link_static_flag"; then
+ compile_command="$compile_command $link_static_flag"
+ finalize_command="$finalize_command $link_static_flag"
+ fi
+ continue
+ ;;
+
+ -thread-safe)
+ thread_safe=yes
+ continue
+ ;;
+
+ -version-info)
+ prev=vinfo
+ continue
+ ;;
+
+ # Some other compiler flag.
+ -* | +*)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case "$arg" in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ ;;
+
+ *.o | *.obj | *.a | *.lib)
+ # A standard object.
+ objs="$objs $arg"
+ ;;
+
+ *.lo)
+ # A library object.
+ if test "$prev" = dlfiles; then
+ dlfiles="$dlfiles $arg"
+ if test "$build_libtool_libs" = yes && test "$dlopen" = yes; then
+ prev=
+ continue
+ else
+ # If libtool objects are unsupported, then we need to preload.
+ prev=dlprefiles
+ fi
+ fi
+
+ if test "$prev" = dlprefiles; then
+ # Preload the old-style object.
+ dlprefiles="$dlprefiles "`$echo "X$arg" | $Xsed -e "$lo2o"`
+ prev=
+ fi
+ libobjs="$libobjs $arg"
+ ;;
+
+ *.la)
+ # A libtool-controlled library.
+
+ dlname=
+ libdir=
+ library_names=
+ old_library=
+
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $arg | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$arg' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+
+ # If the library was installed with an old release of libtool,
+ # it will not redefine variable installed.
+ installed=yes
+
+ # Read the .la file
+ # If there is no directory component, then add one.
+ case "$arg" in
+ */* | *\\*) . $arg ;;
+ *) . ./$arg ;;
+ esac
+
+ # Get the name of the library we link against.
+ linklib=
+ for l in $old_library $library_names; do
+ linklib="$l"
+ done
+
+ if test -z "$linklib"; then
+ $echo "$modename: cannot find name of link library for \`$arg'" 1>&2
+ exit 1
+ fi
+
+ # Find the relevant object directory and library name.
+ name=`$echo "X$arg" | $Xsed -e 's%^.*/%%' -e 's/\.la$//' -e 's/^lib//'`
+
+ if test "X$installed" = Xyes; then
+ dir="$libdir"
+ else
+ dir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$dir" = "X$arg"; then
+ dir="$objdir"
+ else
+ dir="$dir/$objdir"
+ fi
+ fi
+
+ if test -n "$dependency_libs"; then
+ # Extract -R and -L from dependency_libs
+ temp_deplibs=
+ for deplib in $dependency_libs; do
+ case "$deplib" in
+ -R*) temp_xrpath=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
+ case " $rpath $xrpath " in
+ *" $temp_xrpath "*) ;;
+ *) xrpath="$xrpath $temp_xrpath";;
+ esac;;
+ -L*) case "$compile_command $temp_deplibs " in
+ *" $deplib "*) ;;
+ *) temp_deplibs="$temp_deplibs $deplib";;
+ esac
+ temp_dir=`$echo "X$deplib" | $Xsed -e 's/^-L//'`
+ case " $lib_search_path " in
+ *" $temp_dir "*) ;;
+ *) lib_search_path="$lib_search_path $temp_dir";;
+ esac
+ ;;
+ *) temp_deplibs="$temp_deplibs $deplib";;
+ esac
+ done
+ dependency_libs="$temp_deplibs"
+ fi
+
+ if test -z "$libdir"; then
+ # It is a libtool convenience library, so add in its objects.
+ convenience="$convenience $dir/$old_library"
+ old_convenience="$old_convenience $dir/$old_library"
+ deplibs="$deplibs$dependency_libs"
+ compile_command="$compile_command $dir/$old_library$dependency_libs"
+ finalize_command="$finalize_command $dir/$old_library$dependency_libs"
+ continue
+ fi
+
+ # This library was specified with -dlopen.
+ if test "$prev" = dlfiles; then
+ dlfiles="$dlfiles $arg"
+ if test -z "$dlname" || test "$dlopen" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlname, no dlopen support or we're linking statically,
+ # we need to preload.
+ prev=dlprefiles
+ else
+ # We should not create a dependency on this library, but we
+ # may need any libraries it requires.
+ compile_command="$compile_command$dependency_libs"
+ finalize_command="$finalize_command$dependency_libs"
+ prev=
+ continue
+ fi
+ fi
+
+ # The library was specified with -dlpreopen.
+ if test "$prev" = dlprefiles; then
+ # Prefer using a static library (so that no silly _DYNAMIC symbols
+ # are required to link).
+ if test -n "$old_library"; then
+ dlprefiles="$dlprefiles $dir/$old_library"
+ else
+ dlprefiles="$dlprefiles $dir/$linklib"
+ fi
+ prev=
+ fi
+
+ if test -n "$library_names" &&
+ { test "$prefer_static_libs" = no || test -z "$old_library"; }; then
+ link_against_libtool_libs="$link_against_libtool_libs $arg"
+ if test -n "$shlibpath_var"; then
+ # Make sure the rpath contains only unique directories.
+ case "$temp_rpath " in
+ *" $dir "*) ;;
+ *) temp_rpath="$temp_rpath $dir" ;;
+ esac
+ fi
+
+ # We need an absolute path.
+ case "$dir" in
+ [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
+ $echo "$modename: passing it literally to the linker, although it might fail" 1>&2
+ absdir="$dir"
+ fi
+ ;;
+ esac
+
+ # This is the magic to use -rpath.
+ # Skip directories that are in the system default run-time
+ # search path, unless they have been requested with -R.
+ case " $sys_lib_dlsearch_path " in
+ *" $absdir "*) ;;
+ *)
+ case "$compile_rpath " in
+ *" $absdir "*) ;;
+ *) compile_rpath="$compile_rpath $absdir"
+ esac
+ ;;
+ esac
+
+ case " $sys_lib_dlsearch_path " in
+ *" $libdir "*) ;;
+ *)
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir"
+ esac
+ ;;
+ esac
+
+ lib_linked=yes
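+ # Pick how the uninstalled shared library is referenced, based on the
+ # hardcode_action and hardcode_* capabilities detected at configure time:
+ # name it by full path, pass -L<dir> -l<name>, or record its directory
+ # in the shared-library search path (compile_shlibpath).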
+ case "$hardcode_action" in
+ immediate | unsupported)
+ if test "$hardcode_direct" = no; then
+ compile_command="$compile_command $dir/$linklib"
+ deplibs="$deplibs $dir/$linklib"
+ case "$host" in
+ *-*-cygwin* | *-*-mingw* | *-*-os2*)
+ dllsearchdir=`cd "$dir" && pwd || echo "$dir"`
+ if test -n "$dllsearchpath"; then
+ dllsearchpath="$dllsearchpath:$dllsearchdir"
+ else
+ dllsearchpath="$dllsearchdir"
+ fi
+ ;;
+ esac
+ elif test "$hardcode_minus_L" = no; then
+ case "$host" in
+ *-*-sunos*)
+ compile_shlibpath="$compile_shlibpath$dir:"
+ ;;
+ esac
+ case "$compile_command " in
+ *" -L$dir "*) ;;
+ *) compile_command="$compile_command -L$dir";;
+ esac
+ compile_command="$compile_command -l$name"
+ deplibs="$deplibs -L$dir -l$name"
+ elif test "$hardcode_shlibpath_var" = no; then
+ case ":$compile_shlibpath:" in
+ *":$dir:"*) ;;
+ *) compile_shlibpath="$compile_shlibpath$dir:";;
+ esac
+ compile_command="$compile_command -l$name"
+ deplibs="$deplibs -l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+
+ relink)
+ if test "$hardcode_direct" = yes; then
+ compile_command="$compile_command $absdir/$linklib"
+ deplibs="$deplibs $absdir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ case "$compile_command " in
+ *" -L$absdir "*) ;;
+ *) compile_command="$compile_command -L$absdir";;
+ esac
+ compile_command="$compile_command -l$name"
+ deplibs="$deplibs -L$absdir -l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ case ":$compile_shlibpath:" in
+ *":$absdir:"*) ;;
+ *) compile_shlibpath="$compile_shlibpath$absdir:";;
+ esac
+ compile_command="$compile_command -l$name"
+ deplibs="$deplibs -l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+
+ *)
+ lib_linked=no
+ ;;
+ esac
+
+ if test "$lib_linked" != yes; then
+ $echo "$modename: configuration error: unsupported hardcode properties"
+ exit 1
+ fi
+
+ # Finalize command for both is simple: just hardcode it.
+ if test "$hardcode_direct" = yes; then
+ finalize_command="$finalize_command $libdir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ case "$finalize_command " in
+ *" -L$libdir "*) ;;
+ *) finalize_command="$finalize_command -L$libdir";;
+ esac
+ finalize_command="$finalize_command -l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ case ":$finalize_shlibpath:" in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:";;
+ esac
+ finalize_command="$finalize_command -l$name"
+ else
+ # We cannot seem to hardcode it, guess we'll fake it.
+ case "$finalize_command " in
+ *" -L$dir "*) ;;
+ *) finalize_command="$finalize_command -L$libdir";;
+ esac
+ finalize_command="$finalize_command -l$name"
+ fi
+ else
+ # Transform directly to old archives if we don't build new libraries.
+ if test -n "$pic_flag" && test -z "$old_library"; then
+ $echo "$modename: cannot find static library for \`$arg'" 1>&2
+ exit 1
+ fi
+
+ # Here we assume that one of hardcode_direct or hardcode_minus_L
+ # is not unsupported. This is valid on all known static and
+ # shared platforms.
+ if test "$hardcode_direct" != unsupported; then
+ test -n "$old_library" && linklib="$old_library"
+ compile_command="$compile_command $dir/$linklib"
+ finalize_command="$finalize_command $dir/$linklib"
+ else
+ case "$compile_command " in
+ *" -L$dir "*) ;;
+ *) compile_command="$compile_command -L$dir";;
+ esac
+ compile_command="$compile_command -l$name"
+ case "$finalize_command " in
+ *" -L$dir "*) ;;
+ *) finalize_command="$finalize_command -L$dir";;
+ esac
+ finalize_command="$finalize_command -l$name"
+ fi
+ fi
+
+ # Add in any libraries that this one depends upon.
+ compile_command="$compile_command$dependency_libs"
+ finalize_command="$finalize_command$dependency_libs"
+ continue
+ ;;
+
+ # Some other compiler argument.
+ *)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case "$arg" in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ ;;
+ esac
+
+ # Now actually substitute the argument into the commands.
+ if test -n "$arg"; then
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ fi
+ done
+
+ if test -n "$prev"; then
+ $echo "$modename: the \`$prevarg' option requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
+ eval arg=\"$export_dynamic_flag_spec\"
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ fi
+
+ oldlibs=
+ # calculate the name of the file, without its directory
+ outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
+ libobjs_save="$libobjs"
+
+ case "$output" in
+ "")
+ $echo "$modename: you must specify an output file" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+
+ *.a | *.lib)
+ if test -n "$link_against_libtool_libs"; then
+ $echo "$modename: error: cannot link libtool libraries into archives" 1>&2
+ exit 1
+ fi
+
+ if test -n "$deplibs"; then
+ $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2
+ fi
+
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$rpath"; then
+ $echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$xrpath"; then
+ $echo "$modename: warning: \`-R' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+ $echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2
+ fi
+
+ # Now set the variables for building old libraries.
+ build_libtool_libs=no
+ oldlibs="$output"
+ ;;
+
+ *.la)
+ # Make sure we only generate libraries of the form `libNAME.la'.
+ case "$outputname" in
+ lib*)
+ name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
+ eval libname=\"$libname_spec\"
+ ;;
+ *)
+ if test "$module" = no; then
+ $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+ if test "$need_lib_prefix" != no; then
+ # Add the "lib" prefix for modules if required
+ name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
+ eval libname=\"$libname_spec\"
+ else
+ libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
+ fi
+ ;;
+ esac
+
+ output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$output_objdir" = "X$output"; then
+ output_objdir="$objdir"
+ else
+ output_objdir="$output_objdir/$objdir"
+ fi
+
+ if test -n "$objs"; then
+ $echo "$modename: cannot build libtool library \`$output' from non-libtool objects:$objs" 2>&1
+ exit 1
+ fi
+
+ # How the heck are we supposed to write a wrapper for a shared library?
+ if test -n "$link_against_libtool_libs"; then
+ $echo "$modename: error: cannot link shared libraries into libtool libraries" 1>&2
+ exit 1
+ fi
+
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen' is ignored for libtool libraries" 1>&2
+ fi
+
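+ # `set dummy $rpath' puts the -rpath value(s) into $2, $3, ...; the leading
+ # `dummy' word keeps values that start with `-' from being taken as options
+ # to `set', and $# -gt 2 means more than one -rpath was given.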
+ set dummy $rpath
+ if test $# -gt 2; then
+ $echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2
+ fi
+ install_libdir="$2"
+
+ oldlibs=
+ if test -z "$rpath"; then
+ if test "$build_libtool_libs" = yes; then
+ # Building a libtool convenience library.
+ libext=al
+ oldlibs="$output_objdir/$libname.$libext $oldlibs"
+ build_libtool_libs=convenience
+ build_old_libs=yes
+ fi
+ dependency_libs="$deplibs"
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for convenience libraries" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2
+ fi
+ else
+
+ # Parse the version information argument.
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS=':'
+ set dummy $vinfo 0 0 0
+ IFS="$save_ifs"
+
+ if test -n "$8"; then
+ $echo "$modename: too many parameters to \`-version-info'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ current="$2"
+ revision="$3"
+ age="$4"
+
+ # Check that each of the things are valid numbers.
+ case "$current" in
+ 0 | [1-9] | [1-9][0-9]*) ;;
+ *)
+ $echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ case "$revision" in
+ 0 | [1-9] | [1-9][0-9]*) ;;
+ *)
+ $echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ case "$age" in
+ 0 | [1-9] | [1-9][0-9]*) ;;
+ *)
+ $echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ if test $age -gt $current; then
+ $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ fi
+
+ # Calculate the version variables.
+ major=
+ versuffix=
+ verstring=
+ case "$version_type" in
+ none) ;;
+
+ irix)
+ major=`expr $current - $age + 1`
+ versuffix="$major.$revision"
+ verstring="sgi$major.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$revision
+ while test $loop != 0; do
+ iface=`expr $revision - $loop`
+ loop=`expr $loop - 1`
+ verstring="sgi$major.$iface:$verstring"
+ done
+ ;;
+
+ linux)
+ major=.`expr $current - $age`
+ versuffix="$major.$age.$revision"
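+ # For example, `-version-info 3:2:1' (current:revision:age) gives
+ # major=.2 and versuffix=.2.1.2 here, so the shared library is
+ # typically named libNAME.so.2.1.2 on linux-style hosts.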
+ ;;
+
+ osf)
+ major=`expr $current - $age`
+ versuffix=".$current.$age.$revision"
+ verstring="$current.$age.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$age
+ while test $loop != 0; do
+ iface=`expr $current - $loop`
+ loop=`expr $loop - 1`
+ verstring="$verstring:${iface}.0"
+ done
+
+ # Make executables depend on our current version.
+ verstring="$verstring:${current}.0"
+ ;;
+
+ sunos)
+ major=".$current"
+ versuffix=".$current.$revision"
+ ;;
+
+ freebsd-aout)
+ major=".$current"
+ versuffix=".$current.$revision";
+ ;;
+
+ freebsd-elf)
+ major=".$current"
+ versuffix=".$current";
+ ;;
+
+ windows)
+ # Like Linux, but with '-' rather than '.', since we only
+ # want one extension on Windows 95.
+ major=`expr $current - $age`
+ versuffix="-$major-$age-$revision"
+ ;;
+
+ *)
+ $echo "$modename: unknown library version type \`$version_type'" 1>&2
+ echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Clear the version info if we defaulted, and they specified a release.
+ if test -z "$vinfo" && test -n "$release"; then
+ major=
+ verstring="0.0"
+ if test "$need_version" = no; then
+ versuffix=
+ else
+ versuffix=".0.0"
+ fi
+ fi
+
+ # Remove version info from name if versioning should be avoided
+ if test "$avoid_version" = yes && test "$need_version" = no; then
+ major=
+ versuffix=
+ verstring=""
+ fi
+
+ # Check to see if the archive will have undefined symbols.
+ if test "$allow_undefined" = yes; then
+ if test "$allow_undefined_flag" = unsupported; then
+ $echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2
+ build_libtool_libs=no
+ build_old_libs=yes
+ fi
+ else
+ # Don't allow undefined symbols.
+ allow_undefined_flag="$no_undefined_flag"
+ fi
+
+ dependency_libs="$deplibs"
+ case "$host" in
+ *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos*)
+ # these systems don't actually have a c library (as such)!
+ ;;
+
+ #### local change for Sleepycat DB: [#2380]
+ # The following case is added, since the linker's -pthread
+ # option implicitly controls use of -lc or -lc_r.
+ *freebsd*)
+ # defer to whether the user wants -lc, or -lc_r
+ ;;
+
+ *)
+ # Add libc to deplibs on all other systems.
+ deplibs="$deplibs -lc"
+ ;;
+ esac
+ fi
+
+ # Create the output directory, or remove our outputs if we need to.
+ if test -d $output_objdir; then
+ $show "${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*"
+ $run ${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*
+ else
+ $show "$mkdir $output_objdir"
+ $run $mkdir $output_objdir
+ status=$?
+ if test $status -ne 0 && test ! -d $output_objdir; then
+ exit $status
+ fi
+ fi
+
+ # Now set the variables for building old libraries.
+ if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
+ oldlibs="$oldlibs $output_objdir/$libname.$libext"
+
+ # Transform .lo files to .o files.
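+ # ($SP2NL and $NL2SP convert the space-separated object list to one
+ # name per line and back, so the sed expressions see whole filenames.)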
+ oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
+ fi
+
+ if test "$build_libtool_libs" = yes; then
+ # Transform deplibs into only deplibs that can be linked in shared.
+ name_save=$name
+ libname_save=$libname
+ release_save=$release
+ versuffix_save=$versuffix
+ major_save=$major
+ # I'm not sure if I'm treating the release correctly. I think
+ # release should show up in the -l (i.e. -lgmp5) so we don't want to
+ # add it in twice. Is that correct?
+ release=""
+ versuffix=""
+ major=""
+ newdeplibs=
+ droppeddeps=no
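+ # $deplibs_check_method (set at configure time) decides how each deplib
+ # is vetted for shared linking: pass_all accepts everything, test_compile
+ # links a trivial program and inspects ldd output, file_magic matches
+ # candidate library files against a regex, and none/unknown drops all
+ # inter-library dependencies.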
+ case "$deplibs_check_method" in
+ pass_all)
+ # Don't check for shared/static. Everything works.
+ # This might be a little naive. We might want to check
+ # whether the library exists or not. But this is on
+ # osf3 & osf4 and I'm not really sure... Just
+ # implementing what was already the behaviour.
+ newdeplibs=$deplibs
+ ;;
+ test_compile)
+ # This code stresses the "libraries are programs" paradigm to its
+ # limits. Maybe even breaks it. We compile a program, linking it
+ # against the deplibs as a proxy for the library. Then we can check
+ # whether they linked in statically or dynamically with ldd.
+ $rm conftest.c
+ cat > conftest.c <<EOF
+ int main() { return 0; }
+EOF
+ $rm conftest
+ $CC -o conftest conftest.c $deplibs
+ if test $? -eq 0 ; then
+ ldd_output=`ldd conftest`
+ for i in $deplibs; do
+ name="`expr $i : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test "$name" != "" ; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ deplib_matches=`eval \\$echo \"$library_names_spec\"`
+ set dummy $deplib_matches
+ deplib_match=$2
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ newdeplibs="$newdeplibs $i"
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $i."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ newdeplibs="$newdeplibs $i"
+ fi
+ done
+ else
+ # Error occurred in the first compile. Let's try to salvage the situation:
+ # compile a separate program for each library.
+ for i in $deplibs; do
+ name="`expr $i : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test "$name" != "" ; then
+ $rm conftest
+ $CC -o conftest conftest.c $i
+ # Did it work?
+ if test $? -eq 0 ; then
+ ldd_output=`ldd conftest`
+ libname=`eval \\$echo \"$libname_spec\"`
+ deplib_matches=`eval \\$echo \"$library_names_spec\"`
+ set dummy $deplib_matches
+ deplib_match=$2
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ newdeplibs="$newdeplibs $i"
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $i."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning! Library $i is needed by this library but I was not able to"
+ echo "*** make it link in! You will probably need to install it or some"
+ echo "*** library that it depends on before this library will be fully"
+ echo "*** functional. Installing it before continuing would be even better."
+ fi
+ else
+ newdeplibs="$newdeplibs $i"
+ fi
+ done
+ fi
+ ;;
+ file_magic*)
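+ # Strip the leading `file_magic' keyword from $deplibs_check_method,
+ # leaving just the regex used to recognize a suitable shared library.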
+ set dummy $deplibs_check_method
+ file_magic_regex="`expr \"$deplibs_check_method\" : \"$2 \(.*\)\"`"
+ for a_deplib in $deplibs; do
+ name="`expr $a_deplib : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test "$name" != "" ; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ for i in $lib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ # Follow soft links.
+ if ls -lLd "$potent_lib" 2>/dev/null \
+ | grep " -> " >/dev/null; then
+ continue
+ fi
+ # The statement above tries to avoid entering an
+ # endless loop below, in case of cyclic links.
+ # We might still enter an endless loop, since a link
+ # loop can be closed while we follow links,
+ # but so what?
+ potlib="$potent_lib"
+ while test -h "$potlib" 2>/dev/null; do
+ potliblink=`ls -ld $potlib | sed 's/.* -> //'`
+ case "$potliblink" in
+ [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
+ *) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
+ esac
+ done
+ if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \
+ | sed 10q \
+ | egrep "$file_magic_regex" > /dev/null; then
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $a_deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ # Add a -L argument.
+ newdeplibs="$newdeplibs $a_deplib"
+ fi
+ done # Gone through all deplibs.
+ ;;
+ none | unknown | *)
+ newdeplibs=""
+ if $echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
+ -e 's/ -[LR][^ ]*//g' -e 's/[ ]//g' |
+ grep . >/dev/null; then
+ echo
+ if test "X$deplibs_check_method" = "Xnone"; then
+ echo "*** Warning: inter-library dependencies are not supported in this platform."
+ else
+ echo "*** Warning: inter-library dependencies are not known to be supported."
+ fi
+ echo "*** All declared inter-library dependencies are being dropped."
+ droppeddeps=yes
+ fi
+ ;;
+ esac
+ versuffix=$versuffix_save
+ major=$major_save
+ release=$release_save
+ libname=$libname_save
+ name=$name_save
+
+ if test "$droppeddeps" = yes; then
+ if test "$module" = yes; then
+ echo
+ echo "*** Warning: libtool could not satisfy all declared inter-library"
+ echo "*** dependencies of module $libname. Therefore, libtool will create"
+ echo "*** a static module, that should work as long as the dlopening"
+ echo "*** application is linked with the -dlopen flag."
+ if test -z "$global_symbol_pipe"; then
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ else
+ echo "*** The inter-library dependencies that have been dropped here will be"
+ echo "*** automatically added whenever a program is linked with this library"
+ echo "*** or is declared to -dlopen it."
+ fi
+ fi
+ # Done checking deplibs!
+ deplibs=$newdeplibs
+ fi
+
+ # All the library-specific variables (install_libdir is set above).
+ library_names=
+ old_library=
+ dlname=
+
+ # Test again, we may have decided not to build it any more
+ if test "$build_libtool_libs" = yes; then
+ # Get the real and link names of the library.
+ eval library_names=\"$library_names_spec\"
+ set dummy $library_names
+ realname="$2"
+ shift; shift
+
+ if test -n "$soname_spec"; then
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+
+ lib="$output_objdir/$realname"
+ for link
+ do
+ linknames="$linknames $link"
+ done
+
+ # Ensure that we have .o objects for linkers which dislike .lo
+ # (e.g. aix) in case we are running --disable-static
+ for obj in $libobjs; do
+ xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$obj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
+ oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
+ if test ! -f $xdir/$oldobj; then
+ $show "(cd $xdir && ${LN_S} $baseobj $oldobj)"
+ $run eval '(cd $xdir && ${LN_S} $baseobj $oldobj)' || exit $?
+ fi
+ done
+
+ # Use standard objects if they are pic
+ test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
+ $show "generating symbol list for \`$libname.la'"
+ export_symbols="$output_objdir/$libname.exp"
+ $run $rm $export_symbols
+ eval cmds=\"$export_symbols_cmds\"
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ if test -n "$export_symbols_regex"; then
+ $show "egrep -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\""
+ $run eval 'egrep -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+ $show "$mv \"${export_symbols}T\" \"$export_symbols\""
+ $run eval '$mv "${export_symbols}T" "$export_symbols"'
+ fi
+ fi
+ fi
+
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ $run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"'
+ fi
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+ else
+ gentop="$output_objdir/${outputname}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ for xlib in $convenience; do
+ # Extract the objects.
+ case "$xlib" in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ libobjs="$libobjs "`find $xdir -name \*.o -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+ fi
+
+ if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
+ eval flag=\"$thread_safe_flag_spec\"
+ linkopts="$linkopts $flag"
+ fi
+
+ # Do each of the archive commands.
+ if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+ eval cmds=\"$archive_expsym_cmds\"
+ else
+ eval cmds=\"$archive_cmds\"
+ fi
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+
+ # Create links to the real library.
+ for linkname in $linknames; do
+ if test "$realname" != "$linkname"; then
+ $show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)"
+ $run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $?
+ fi
+ done
+
+ # If -module or -export-dynamic was specified, set the dlname.
+ if test "$module" = yes || test "$export_dynamic" = yes; then
+ # On all known operating systems, these are identical.
+ dlname="$soname"
+ fi
+ fi
+ ;;
+
+ *.lo | *.o | *.obj)
+ if test -n "$link_against_libtool_libs"; then
+ $echo "$modename: error: cannot link libtool libraries into objects" 1>&2
+ exit 1
+ fi
+
+ if test -n "$deplibs"; then
+ $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
+ fi
+
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$rpath"; then
+ $echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$xrpath"; then
+ $echo "$modename: warning: \`-R' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for objects" 1>&2
+ fi
+
+ case "$output" in
+ *.lo)
+ if test -n "$objs"; then
+ $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
+ exit 1
+ fi
+ libobj="$output"
+ obj=`$echo "X$output" | $Xsed -e "$lo2o"`
+ ;;
+ *)
+ libobj=
+ obj="$output"
+ ;;
+ esac
+
+ # Delete the old objects.
+ $run $rm $obj $libobj
+
+ # Objects from convenience libraries. This assumes
+ # single-version convenience libraries. Whenever we create
+ # different ones for PIC/non-PIC, we'll have to duplicate
+ # the extraction.
+ reload_conv_objs=
+ gentop=
+ # reload_cmds runs $LD directly, so let us get rid of
+ # -Wl from whole_archive_flag_spec
+ wl=
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\"
+ else
+ gentop="$output_objdir/${obj}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ for xlib in $convenience; do
+ # Extract the objects.
+ case "$xlib" in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ reload_conv_objs="$reload_objs "`find $xdir -name \*.o -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+ fi
+
+ # Create the old-style object.
+ reload_objs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs"
+
+ output="$obj"
+ eval cmds=\"$reload_cmds\"
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+
+ # Exit if we aren't doing a library object file.
+ if test -z "$libobj"; then
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ exit 0
+ fi
+
+ if test "$build_libtool_libs" != yes; then
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we don't
+ # accidentally link it into a program.
+ $show "echo timestamp > $libobj"
+ $run eval "echo timestamp > $libobj" || exit $?
+ exit 0
+ fi
+
+ if test -n "$pic_flag"; then
+ # Only do commands if we really have different PIC objects.
+ reload_objs="$libobjs $reload_conv_objs"
+ output="$libobj"
+ eval cmds=\"$reload_cmds\"
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ else
+ # Just create a symlink.
+ $show $rm $libobj
+ $run $rm $libobj
+ xdir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$libobj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
+ oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
+ $show "(cd $xdir && $LN_S $oldobj $baseobj)"
+ $run eval '(cd $xdir && $LN_S $oldobj $baseobj)' || exit $?
+ fi
+
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ exit 0
+ ;;
+
+ # Anything else should be a program.
+ *)
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for programs" 1>&2
+ fi
+
+ if test "$preload" = yes; then
+ if test "$dlopen" = unknown && test "$dlopen_self" = unknown &&
+ test "$dlopen_self_static" = unknown; then
+ $echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
+ fi
+ fi
+
+ if test -n "$rpath$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ for libdir in $rpath $xrpath; do
+ # This is the magic to use -rpath.
+ case "$compile_rpath " in
+ *" $libdir "*) ;;
+ *) compile_rpath="$compile_rpath $libdir" ;;
+ esac
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir" ;;
+ esac
+ done
+ fi
+
+ # Now hardcode the library paths
+ rpath=
+ hardcode_libdirs=
+ for libdir in $compile_rpath $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case "$hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator" in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ rpath="$rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) perm_rpath="$perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ compile_rpath="$rpath"
+
+ rpath=
+ hardcode_libdirs=
+ for libdir in $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case "$hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator" in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ rpath="$rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$finalize_perm_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ finalize_rpath="$rpath"
+
+ output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$output_objdir" = "X$output"; then
+ output_objdir="$objdir"
+ else
+ output_objdir="$output_objdir/$objdir"
+ fi
+
+ # Create the binary in the object directory, then wrap it.
+ if test ! -d $output_objdir; then
+ $show "$mkdir $output_objdir"
+ $run $mkdir $output_objdir
+ status=$?
+ if test $status -ne 0 && test ! -d $output_objdir; then
+ exit $status
+ fi
+ fi
+
+ if test -n "$libobjs" && test "$build_old_libs" = yes; then
+ # Transform all the library objects into standard objects.
+ compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ fi
+
+ dlsyms=
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ if test -n "$NM" && test -n "$global_symbol_pipe"; then
+ dlsyms="${outputname}S.c"
+ else
+ $echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2
+ fi
+ fi
+
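+ # The generated ${outputname}S.c defines lt_preloaded_symbols, a table
+ # mapping each dlpreopened symbol name to its address, so dlsym-style
+ # lookups can be emulated for preloaded modules without real dlopen
+ # support.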
+ if test -n "$dlsyms"; then
+ case "$dlsyms" in
+ "") ;;
+ *.c)
+ # Discover the nlist of each of the dlfiles.
+ nlist="$output_objdir/${outputname}.nm"
+
+ $show "$rm $nlist ${nlist}S ${nlist}T"
+ $run $rm "$nlist" "${nlist}S" "${nlist}T"
+
+ # Parse the name list into a source file.
+ $show "creating $output_objdir/$dlsyms"
+
+ test -z "$run" && $echo > "$output_objdir/$dlsyms" "\
+/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */
+/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+/* Prevent the only kind of declaration conflicts we can make. */
+#define lt_preloaded_symbols some_other_symbol
+
+/* External symbol declarations for the compiler. */\
+"
+
+ if test "$dlself" = yes; then
+ $show "generating symbol list for \`$output'"
+
+ test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
+
+ # Add our own program objects to the symbol list.
+ progfiles=`$echo "X$objs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ for arg in $progfiles; do
+ $show "extracting global C symbols from \`$arg'"
+ $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -n "$exclude_expsyms"; then
+ $run eval 'egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+ $run eval '$mv "$nlist"T "$nlist"'
+ fi
+
+ if test -n "$export_symbols_regex"; then
+ $run eval 'egrep -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+ $run eval '$mv "$nlist"T "$nlist"'
+ fi
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ export_symbols="$output_objdir/$output.exp"
+ $run $rm $export_symbols
+ $run eval "sed -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+ else
+ $run eval "sed -e 's/\([][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$output.exp"'
+ $run eval 'grep -f "$output_objdir/$output.exp" < "$nlist" > "$nlist"T'
+ $run eval 'mv "$nlist"T "$nlist"'
+ fi
+ fi
+
+ for arg in $dlprefiles; do
+ $show "extracting global C symbols from \`$arg'"
+ name=`echo "$arg" | sed -e 's%^.*/%%'`
+ $run eval 'echo ": $name " >> "$nlist"'
+ $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -z "$run"; then
+ # Make sure we have at least an empty file.
+ test -f "$nlist" || : > "$nlist"
+
+ if test -n "$exclude_expsyms"; then
+ egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+ $mv "$nlist"T "$nlist"
+ fi
+
+ # Try sorting and uniquifying the output.
+ if grep -v "^: " < "$nlist" | sort +2 | uniq > "$nlist"S; then
+ :
+ else
+ grep -v "^: " < "$nlist" > "$nlist"S
+ fi
+
+ if test -f "$nlist"S; then
+ eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
+ else
+ echo '/* NONE */' >> "$output_objdir/$dlsyms"
+ fi
+
+ $echo >> "$output_objdir/$dlsyms" "\
+
+#undef lt_preloaded_symbols
+
+#if defined (__STDC__) && __STDC__
+# define lt_ptr_t void *
+#else
+# define lt_ptr_t char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr_t address;
+}
+lt_preloaded_symbols[] =
+{\
+"
+
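+ # `: NAME ' marker lines become placeholder {"NAME", 0} entries; the
+ # nm output lines from $global_symbol_pipe become {"symbol", &symbol}
+ # entries, with both the string and the address taken from the line's
+ # last field.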
+ sed -n -e 's/^: \([^ ]*\) $/ {\"\1\", (lt_ptr_t) 0},/p' \
+ -e 's/^. \([^ ]*\) \([^ ]*\)$/ {"\2", (lt_ptr_t) \&\2},/p' \
+ < "$nlist" >> "$output_objdir/$dlsyms"
+
+ $echo >> "$output_objdir/$dlsyms" "\
+ {0, (lt_ptr_t) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+ fi
+
+ pic_flag_for_symtable=
+ case "$host" in
+ # compiling the symbol table file with pic_flag works around
+ # a FreeBSD bug that causes programs to crash when -lm is
+ # linked before any other PIC object. But we must not use
+ # pic_flag when linking with -static. The problem exists in
+ # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
+ *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+ case "$compile_command " in
+ *" -static "*) ;;
+ *) pic_flag_for_symtable=" $pic_flag -DPIC -DFREEBSD_WORKAROUND";;
+ esac;;
+ *-*-hpux*)
+ case "$compile_command " in
+ *" -static "*) ;;
+ *) pic_flag_for_symtable=" $pic_flag -DPIC";;
+ esac
+ esac
+
+ # Now compile the dynamic symbol file.
+ $show "(cd $output_objdir && $CC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")"
+ $run eval '(cd $output_objdir && $CC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $?
+
+ # Clean up the generated files.
+ $show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T"
+ $run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T"
+
+ # Transform the symbol file into the correct name.
+ compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
+ finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
+ ;;
+ *)
+ $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2
+ exit 1
+ ;;
+ esac
+ else
+ # We keep going just in case the user didn't refer to
+ # lt_preloaded_symbols. The linker will fail if global_symbol_pipe
+ # really was required.
+
+ # Nullify the symbol file.
+ compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
+ finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
+ fi
+
+ if test -z "$link_against_libtool_libs" || test "$build_libtool_libs" != yes; then
+ # Replace the output file specification.
+ compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ link_command="$compile_command$compile_rpath"
+
+ # We have no uninstalled library dependencies, so finalize right now.
+ $show "$link_command"
+ $run eval "$link_command"
+ status=$?
+
+ # Delete the generated files.
+ if test -n "$dlsyms"; then
+ $show "$rm $output_objdir/${outputname}S.${objext}"
+ $run $rm "$output_objdir/${outputname}S.${objext}"
+ fi
+
+ exit $status
+ fi
+
+ if test -n "$shlibpath_var"; then
+ # We should set the shlibpath_var
+ rpath=
+ for dir in $temp_rpath; do
+ case "$dir" in
+ [\\/]* | [A-Za-z]:[\\/]*)
+ # Absolute path.
+ rpath="$rpath$dir:"
+ ;;
+ *)
+ # Relative path: add a thisdir entry.
+ rpath="$rpath\$thisdir/$dir:"
+ ;;
+ esac
+ done
+ temp_rpath="$rpath"
+ fi
+
+ if test -n "$compile_shlibpath$finalize_shlibpath"; then
+ compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
+ fi
+ if test -n "$finalize_shlibpath"; then
+ finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
+ fi
+
+ compile_var=
+ finalize_var=
+ if test -n "$runpath_var"; then
+ if test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ if test -n "$finalize_perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $finalize_perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ fi
+
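+ # When hardcode_action is relink, the uninstalled binary is linked with
+ # build-tree paths and must be relinked at install time; otherwise the
+ # link_command and relink_command depend on whether fast installation
+ # is enabled.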
+ if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+
+ $echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
+ $echo "$modename: \`$output' will be relinked during installation" 1>&2
+ else
+ if test "$fast_install" != no; then
+ link_command="$finalize_var$compile_command$finalize_rpath"
+ if test "$fast_install" = yes; then
+ relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
+ else
+ # fast_install is set to needless
+ relink_command=
+ fi
+ else
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+ fi
+ fi
+
+ # Replace the output file specification.
+ link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+
+ # Delete the old output files.
+ $run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
+
+ $show "$link_command"
+ $run eval "$link_command" || exit $?
+
+ # Now create the wrapper script.
+ $show "creating $output"
+
+ # Quote the relink command for shipping.
+ if test -n "$relink_command"; then
+ relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Quote $echo for shipping.
+ if test "X$echo" = "X$SHELL $0 --fallback-echo"; then
+ case "$0" in
+ [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $0 --fallback-echo";;
+ *) qecho="$SHELL `pwd`/$0 --fallback-echo";;
+ esac
+ qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
+ else
+ qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Only actually do things if our run command is non-null.
+ if test -z "$run"; then
+ # win32 will think the script is a binary if it has
+ # a .exe suffix, so we strip it off here.
+ case $output in
+ *.exe) output=`echo $output|sed 's,.exe$,,'` ;;
+ esac
+ $rm $output
+ trap "$rm $output; exit 1" 1 2 15
+
+ $echo > $output "\
+#! $SHELL
+
+# $output - temporary wrapper script for $objdir/$outputname
+# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
+#
+# The $output program cannot be directly executed until all the libtool
+# libraries that it depends on are installed.
+#
+# This wrapper script should never be moved out of the build directory.
+# If it is, it will not operate correctly.
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e 1s/^X//'
+sed_quote_subst='$sed_quote_subst'
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test \"\${CDPATH+set}\" = set; then CDPATH=:; export CDPATH; fi
+
+relink_command=\"$relink_command\"
+
+# This environment variable determines our operation mode.
+if test \"\$libtool_install_magic\" = \"$magic\"; then
+ # install mode needs the following variable:
+ link_against_libtool_libs='$link_against_libtool_libs'
+else
+ # When we are sourced in execute mode, \$file and \$echo are already set.
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ echo=\"$qecho\"
+ file=\"\$0\"
+ # Make sure echo works.
+ if test \"X\$1\" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+ elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then
+ # Yippee, \$echo works!
+ :
+ else
+ # Restart under the correct shell, and then maybe \$echo will work.
+ exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
+ fi
+ fi\
+"
+ $echo >> $output "\
+
+ # Find the directory that this script lives in.
+ thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
+ test \"x\$thisdir\" = \"x\$file\" && thisdir=.
+
+ # Follow symbolic links until we get to the real thisdir.
+ file=\`ls -ld \"\$file\" | sed -n 's/.*-> //p'\`
+ while test -n \"\$file\"; do
+ destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
+
+ # If there was a directory component, then change thisdir.
+ if test \"x\$destdir\" != \"x\$file\"; then
+ case \"\$destdir\" in
+ [\\/]* | [A-Za-z]:[\\/]*) thisdir=\"\$destdir\" ;;
+ *) thisdir=\"\$thisdir/\$destdir\" ;;
+ esac
+ fi
+
+ file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
+ file=\`ls -ld \"\$thisdir/\$file\" | sed -n 's/.*-> //p'\`
+ done
+
+ # Try to get the absolute directory name.
+ absdir=\`cd \"\$thisdir\" && pwd\`
+ test -n \"\$absdir\" && thisdir=\"\$absdir\"
+"
+
+ if test "$fast_install" = yes; then
+ echo >> $output "\
+ program=lt-'$outputname'
+ progdir=\"\$thisdir/$objdir\"
+
+ if test ! -f \"\$progdir/\$program\" || \\
+ { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | sed 1q\`; \\
+ test \"X\$file\" != \"X\$progdir/\$program\"; }; then
+
+ file=\"\$\$-\$program\"
+
+ if test ! -d \"\$progdir\"; then
+ $mkdir \"\$progdir\"
+ else
+ $rm \"\$progdir/\$file\"
+ fi"
+
+ echo >> $output "\
+
+ # relink executable if necessary
+ if test -n \"\$relink_command\"; then
+ if (cd \"\$thisdir\" && eval \$relink_command); then :
+ else
+ $rm \"\$progdir/\$file\"
+ exit 1
+ fi
+ fi
+
+ $mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
+ { $rm \"\$progdir/\$program\";
+ $mv \"\$progdir/\$file\" \"\$progdir/\$program\"; }
+ $rm \"\$progdir/\$file\"
+ fi"
+ else
+ echo >> $output "\
+ program='$outputname'
+ progdir=\"\$thisdir/$objdir\"
+"
+ fi
+
+ echo >> $output "\
+
+ if test -f \"\$progdir/\$program\"; then"
+
+ # Export our shlibpath_var if we have one.
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+ $echo >> $output "\
+ # Add our own library path to $shlibpath_var
+ $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
+
+ # Some systems cannot cope with colon-terminated $shlibpath_var
+ # The second colon is a workaround for a bug in BeOS R4 sed
+ $shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
+
+ export $shlibpath_var
+"
+ fi
+
+ # fixup the dll searchpath if we need to.
+ if test -n "$dllsearchpath"; then
+ $echo >> $output "\
+ # Add the dll search path components to the executable PATH
+ PATH=$dllsearchpath:\$PATH
+"
+ fi
+
+ $echo >> $output "\
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ # Run the actual program with our arguments.
+"
+ case $host in
+ # win32 systems need to use the prog path for dll
+ # lookup to work
+ *-*-cygwin*)
+ $echo >> $output "\
+ exec \$progdir/\$program \${1+\"\$@\"}
+"
+ ;;
+
+ # Backslashes separate directories on plain windows
+ *-*-mingw | *-*-os2*)
+ $echo >> $output "\
+ exec \$progdir\\\\\$program \${1+\"\$@\"}
+"
+ ;;
+
+ *)
+ $echo >> $output "\
+ # Export the path to the program.
+ PATH=\"\$progdir:\$PATH\"
+ export PATH
+
+ exec \$program \${1+\"\$@\"}
+"
+ ;;
+ esac
+ $echo >> $output "\
+ \$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\"
+ exit 1
+ fi
+ else
+ # The program doesn't exist.
+ \$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
+ \$echo \"This script is just a wrapper for \$program.\" 1>&2
+ echo \"See the $PACKAGE documentation for more information.\" 1>&2
+ exit 1
+ fi
+fi\
+"
+ chmod +x $output
+ fi
+ exit 0
+ ;;
+ esac
+
+ # See if we need to build an old-fashioned archive.
+ for oldlib in $oldlibs; do
+
+ if test "$build_libtool_libs" = convenience; then
+ oldobjs="$libobjs_save"
+ addlibs="$convenience"
+ build_libtool_libs=no
+ else
+ if test "$build_libtool_libs" = module; then
+ oldobjs="$libobjs_save"
+ build_libtool_libs=no
+ else
+ oldobjs="$objs "`$echo "X$libobjs_save" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`
+ fi
+ addlibs="$old_convenience"
+ fi
+
+ if test -n "$addlibs"; then
+ gentop="$output_objdir/${outputname}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ # Add in members from convenience archives.
+ for xlib in $addlibs; do
+ # Extract the objects.
+ case "$xlib" in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ oldobjs="$oldobjs "`find $xdir -name \*.${objext} -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+
+ # Do each command in the archive commands.
+ if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
+ eval cmds=\"$old_archive_from_new_cmds\"
+ else
+ # Ensure that we have .o objects in place in case we decided
+ # not to build a shared library, and have fallen back to building
+ # static libs even though --disable-static was passed!
+ for oldobj in $oldobjs; do
+ if test ! -f $oldobj; then
+ xdir=`$echo "X$oldobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$oldobj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$oldobj" | $Xsed -e 's%^.*/%%'`
+ obj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
+ $show "(cd $xdir && ${LN_S} $obj $baseobj)"
+ $run eval '(cd $xdir && ${LN_S} $obj $baseobj)' || exit $?
+ fi
+ done
+
+ eval cmds=\"$old_archive_cmds\"
+ fi
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ done
+
+ if test -n "$generated"; then
+ $show "${rm}r$generated"
+ $run ${rm}r$generated
+ fi
+
+ # Now create the libtool archive.
+ case "$output" in
+ *.la)
+ old_library=
+ test "$build_old_libs" = yes && old_library="$libname.$libext"
+ $show "creating $output"
+
+ if test -n "$xrpath"; then
+ temp_xrpath=
+ for libdir in $xrpath; do
+ temp_xrpath="$temp_xrpath -R$libdir"
+ done
+ dependency_libs="$temp_xrpath $dependency_libs"
+ fi
+
+ # Only create the output if not a dry run.
+ if test -z "$run"; then
+ for installed in no yes; do
+ if test "$installed" = yes; then
+ if test -z "$install_libdir"; then
+ break
+ fi
+ output="$output_objdir/$outputname"i
+ fi
+ $rm $output
+ $echo > $output "\
+# $outputname - a libtool library file
+# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='$dlname'
+
+# Names of this library.
+library_names='$library_names'
+
+# The name of the static archive.
+old_library='$old_library'
+
+# Libraries that this one depends upon.
+dependency_libs='$dependency_libs'
+
+# Version information for $libname.
+current=$current
+age=$age
+revision=$revision
+
+# Is this an already installed library?
+installed=$installed
+
+# Directory that this library needs to be installed in:
+libdir='$install_libdir'\
+"
+ done
+ fi
+
+ # Do a symbolic link so that the libtool archive can be found in
+ # LD_LIBRARY_PATH before the program is installed.
+ $show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
+ $run eval "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)" || exit $?
+ ;;
+ esac
+ exit 0
+ ;;
+
+ # libtool install mode
+ install)
+ modename="$modename: install"
+
+ # There may be an optional sh(1) argument at the beginning of
+ # install_prog (especially on Windows NT).
+ if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh; then
+ # Aesthetically quote it.
+ arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
+ case "$arg" in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$arg "
+ arg="$1"
+ shift
+ else
+ install_prog=
+ arg="$nonopt"
+ fi
+
+ # The real first argument should be the name of the installation program.
+ # Aesthetically quote it.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case "$arg" in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$install_prog$arg"
+
+ # We need to accept at least all the BSD install flags.
+ dest=
+ files=
+ opts=
+ prev=
+ install_type=
+ isdir=no
+ stripme=
+ for arg
+ do
+ if test -n "$dest"; then
+ files="$files $dest"
+ dest="$arg"
+ continue
+ fi
+
+ case "$arg" in
+ -d) isdir=yes ;;
+ -f) prev="-f" ;;
+ -g) prev="-g" ;;
+ -m) prev="-m" ;;
+ -o) prev="-o" ;;
+ -s)
+ stripme=" -s"
+ continue
+ ;;
+ -*) ;;
+
+ *)
+ # If the previous option needed an argument, then skip it.
+ if test -n "$prev"; then
+ prev=
+ else
+ dest="$arg"
+ continue
+ fi
+ ;;
+ esac
+
+ # Aesthetically quote the argument.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case "$arg" in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$install_prog $arg"
+ done
+
+ if test -z "$install_prog"; then
+ $echo "$modename: you must specify an install program" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test -n "$prev"; then
+ $echo "$modename: the \`$prev' option requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test -z "$files"; then
+ if test -z "$dest"; then
+ $echo "$modename: no file or destination specified" 1>&2
+ else
+ $echo "$modename: you must specify a destination" 1>&2
+ fi
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Strip any trailing slash from the destination.
+ dest=`$echo "X$dest" | $Xsed -e 's%/$%%'`
+
+ # Check to see that the destination is a directory.
+ test -d "$dest" && isdir=yes
+ if test "$isdir" = yes; then
+ destdir="$dest"
+ destname=
+ else
+ destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$destdir" = "X$dest" && destdir=.
+ destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'`
+
+ # Not a directory, so check to see that there is only one file specified.
+ set dummy $files
+ if test $# -gt 2; then
+ $echo "$modename: \`$dest' is not a directory" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+ fi
+ case "$destdir" in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ for file in $files; do
+ case "$file" in
+ *.lo) ;;
+ *)
+ $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ esac
+ done
+ ;;
+ esac
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ staticlibs=
+ future_libdirs=
+ current_libdirs=
+ for file in $files; do
+
+ # Do each installation.
+ case "$file" in
+ *.a | *.lib)
+ # Do the static libraries later.
+ staticlibs="$staticlibs $file"
+ ;;
+
+ *.la)
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$file' is not a valid libtool archive" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ library_names=
+ old_library=
+ # If there is no directory component, then add one.
+ case "$file" in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Add the libdir to current_libdirs if it is the destination.
+ if test "X$destdir" = "X$libdir"; then
+ case "$current_libdirs " in
+ *" $libdir "*) ;;
+ *) current_libdirs="$current_libdirs $libdir" ;;
+ esac
+ else
+ # Note the libdir as a future libdir.
+ case "$future_libdirs " in
+ *" $libdir "*) ;;
+ *) future_libdirs="$future_libdirs $libdir" ;;
+ esac
+ fi
+
+ dir="`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/"
+ test "X$dir" = "X$file/" && dir=
+ dir="$dir$objdir"
+
+ # See the names of the shared library.
+ set dummy $library_names
+ if test -n "$2"; then
+ realname="$2"
+ shift
+ shift
+
+ # Install the shared library and build the symlinks.
+ $show "$install_prog $dir/$realname $destdir/$realname"
+ $run eval "$install_prog $dir/$realname $destdir/$realname" || exit $?
+
+ if test $# -gt 0; then
+ # Delete the old symlinks, and create new ones.
+ for linkname
+ do
+ if test "$linkname" != "$realname"; then
+ $show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
+ $run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
+ fi
+ done
+ fi
+
+ # Do each command in the postinstall commands.
+ lib="$destdir/$realname"
+ eval cmds=\"$postinstall_cmds\"
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+
+ # Install the pseudo-library for information purposes.
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ instname="$dir/$name"i
+ $show "$install_prog $instname $destdir/$name"
+ $run eval "$install_prog $instname $destdir/$name" || exit $?
+
+ # Maybe install the static library, too.
+ test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
+ ;;
+
+ *.lo)
+ # Install (i.e. copy) a libtool object.
+
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ destfile="$destdir/$destfile"
+ fi
+
+ # Deduce the name of the destination old-style object file.
+ case "$destfile" in
+ *.lo)
+ staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
+ ;;
+ *.o | *.obj)
+ staticdest="$destfile"
+ destfile=
+ ;;
+ *)
+ $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Install the libtool object if requested.
+ if test -n "$destfile"; then
+ $show "$install_prog $file $destfile"
+ $run eval "$install_prog $file $destfile" || exit $?
+ fi
+
+ # Install the old object if enabled.
+ if test "$build_old_libs" = yes; then
+ # Deduce the name of the old-style object file.
+ staticobj=`$echo "X$file" | $Xsed -e "$lo2o"`
+
+ $show "$install_prog $staticobj $staticdest"
+ $run eval "$install_prog \$staticobj \$staticdest" || exit $?
+ fi
+ exit 0
+ ;;
+
+ *)
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ destfile="$destdir/$destfile"
+ fi
+
+ # Do a test to see if this is really a libtool program.
+ if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ link_against_libtool_libs=
+ relink_command=
+
+ # If there is no directory component, then add one.
+ case "$file" in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Check the variables that should have been set.
+ if test -z "$link_against_libtool_libs"; then
+ $echo "$modename: invalid libtool wrapper script \`$file'" 1>&2
+ exit 1
+ fi
+
+ finalize=yes
+ for lib in $link_against_libtool_libs; do
+ # Check to see that each library is installed.
+ libdir=
+ if test -f "$lib"; then
+ # If there is no directory component, then add one.
+ case "$lib" in
+ */* | *\\*) . $lib ;;
+ *) . ./$lib ;;
+ esac
+ fi
+ libfile="$libdir/`$echo "X$lib" | $Xsed -e 's%^.*/%%g'`"
+ if test -n "$libdir" && test ! -f "$libfile"; then
+ $echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
+ finalize=no
+ fi
+ done
+
+ outputname=
+ if test "$fast_install" = no && test -n "$relink_command"; then
+ if test "$finalize" = yes && test -z "$run"; then
+ tmpdir="/tmp"
+ test -n "$TMPDIR" && tmpdir="$TMPDIR"
+ tmpdir="$tmpdir/libtool-$$"
+ if $mkdir -p "$tmpdir" && chmod 700 "$tmpdir"; then :
+ else
+ $echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
+ continue
+ fi
+ outputname="$tmpdir/$file"
+ # Replace the output file specification.
+ relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
+
+ $show "$relink_command"
+ if $run eval "$relink_command"; then :
+ else
+ $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
+ ${rm}r "$tmpdir"
+ continue
+ fi
+ file="$outputname"
+ else
+ $echo "$modename: warning: cannot relink \`$file'" 1>&2
+ fi
+ else
+ # Install the binary that we compiled earlier.
+ file=`$echo "X$file" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
+ fi
+ fi
+
+ $show "$install_prog$stripme $file $destfile"
+ $run eval "$install_prog\$stripme \$file \$destfile" || exit $?
+ test -n "$outputname" && ${rm}r "$tmpdir"
+ ;;
+ esac
+ done
+
+ for file in $staticlibs; do
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+
+ # Set up the ranlib parameters.
+ oldlib="$destdir/$name"
+
+ $show "$install_prog $file $oldlib"
+ $run eval "$install_prog \$file \$oldlib" || exit $?
+
+ # Do each command in the postinstall commands.
+ eval cmds=\"$old_postinstall_cmds\"
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ done
+
+ if test -n "$future_libdirs"; then
+ $echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2
+ fi
+
+ if test -n "$current_libdirs"; then
+ # Maybe just do a dry run.
+ test -n "$run" && current_libdirs=" -n$current_libdirs"
+ exec $SHELL $0 --finish$current_libdirs
+ exit 1
+ fi
+
+ exit 0
+ ;;
+
+ # libtool finish mode
+ finish)
+ modename="$modename: finish"
+ libdirs="$nonopt"
+ admincmds=
+
+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+ for dir
+ do
+ libdirs="$libdirs $dir"
+ done
+
+ for libdir in $libdirs; do
+ if test -n "$finish_cmds"; then
+ # Do each command in the finish commands.
+ eval cmds=\"$finish_cmds\"
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || admincmds="$admincmds
+ $cmd"
+ done
+ IFS="$save_ifs"
+ fi
+ if test -n "$finish_eval"; then
+ # Do the single finish_eval.
+ eval cmds=\"$finish_eval\"
+ $run eval "$cmds" || admincmds="$admincmds
+ $cmds"
+ fi
+ done
+ fi
+
+ # Exit here if they wanted silent mode.
+ test "$show" = : && exit 0
+
+ echo "----------------------------------------------------------------------"
+ echo "Libraries have been installed in:"
+ for libdir in $libdirs; do
+ echo " $libdir"
+ done
+ echo
+ echo "If you ever happen to want to link against installed libraries"
+ echo "in a given directory, LIBDIR, you must either use libtool, and"
+    echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
+ echo "flag during linking and do at least one of the following:"
+ if test -n "$shlibpath_var"; then
+ echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
+ echo " during execution"
+ fi
+ if test -n "$runpath_var"; then
+ echo " - add LIBDIR to the \`$runpath_var' environment variable"
+ echo " during linking"
+ fi
+ if test -n "$hardcode_libdir_flag_spec"; then
+ libdir=LIBDIR
+ eval flag=\"$hardcode_libdir_flag_spec\"
+
+ echo " - use the \`$flag' linker flag"
+ fi
+ if test -n "$admincmds"; then
+ echo " - have your system administrator run these commands:$admincmds"
+ fi
+ if test -f /etc/ld.so.conf; then
+ echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
+ fi
+ echo
+ echo "See any operating system documentation about shared libraries for"
+ echo "more information, such as the ld(1) and ld.so(8) manual pages."
+ echo "----------------------------------------------------------------------"
+ exit 0
+ ;;
+
+ # libtool execute mode
+ execute)
+ modename="$modename: execute"
+
+ # The first argument is the command name.
+ cmd="$nonopt"
+ if test -z "$cmd"; then
+ $echo "$modename: you must specify a COMMAND" 1>&2
+ $echo "$help"
+ exit 1
+ fi
+
+ # Handle -dlopen flags immediately.
+ for file in $execute_dlfiles; do
+ if test ! -f "$file"; then
+ $echo "$modename: \`$file' is not a file" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ dir=
+ case "$file" in
+ *.la)
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Read the libtool library.
+ dlname=
+ library_names=
+
+ # If there is no directory component, then add one.
+ case "$file" in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Skip this library if it cannot be dlopened.
+ if test -z "$dlname"; then
+ # Warn if it was a shared library.
+ test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'"
+ continue
+ fi
+
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$file" && dir=.
+
+ if test -f "$dir/$objdir/$dlname"; then
+ dir="$dir/$objdir"
+ else
+ $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2
+ exit 1
+ fi
+ ;;
+
+ *.lo)
+ # Just add the directory containing the .lo file.
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$file" && dir=.
+ ;;
+
+ *)
+      $echo "$modename: warning: \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2
+ continue
+ ;;
+ esac
+
+ # Get the absolute pathname.
+ absdir=`cd "$dir" && pwd`
+ test -n "$absdir" && dir="$absdir"
+
+ # Now add the directory to shlibpath_var.
+ if eval "test -z \"\$$shlibpath_var\""; then
+ eval "$shlibpath_var=\"\$dir\""
+ else
+ eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
+ fi
+ done
+
+ # This variable tells wrapper scripts just to set shlibpath_var
+ # rather than running their programs.
+ libtool_execute_magic="$magic"
+
+ # Check if any of the arguments is a wrapper script.
+ args=
+ for file
+ do
+ case "$file" in
+ -*) ;;
+ *)
+ # Do a test to see if this is really a libtool program.
+ if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ # If there is no directory component, then add one.
+ case "$file" in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Transform arg to wrapped name.
+ file="$progdir/$program"
+ fi
+ ;;
+ esac
+ # Quote arguments (to preserve shell metacharacters).
+ file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"`
+ args="$args \"$file\""
+ done
+
+ if test -z "$run"; then
+ if test -n "$shlibpath_var"; then
+ # Export the shlibpath_var.
+ eval "export $shlibpath_var"
+ fi
+
+      # Restore saved environment variables
+ if test "${save_LC_ALL+set}" = set; then
+ LC_ALL="$save_LC_ALL"; export LC_ALL
+ fi
+ if test "${save_LANG+set}" = set; then
+ LANG="$save_LANG"; export LANG
+ fi
+
+ # Now actually exec the command.
+ eval "exec \$cmd$args"
+
+ $echo "$modename: cannot exec \$cmd$args"
+ exit 1
+ else
+ # Display what would be done.
+ if test -n "$shlibpath_var"; then
+ eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
+ $echo "export $shlibpath_var"
+ fi
+ $echo "$cmd$args"
+ exit 0
+ fi
+ ;;
+
+ # libtool uninstall mode
+ uninstall)
+ modename="$modename: uninstall"
+ rm="$nonopt"
+ files=
+
+ for arg
+ do
+ case "$arg" in
+ -*) rm="$rm $arg" ;;
+ *) files="$files $arg" ;;
+ esac
+ done
+
+ if test -z "$rm"; then
+ $echo "$modename: you must specify an RM program" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ for file in $files; do
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$file" && dir=.
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+
+ rmfiles="$file"
+
+ case "$name" in
+ *.la)
+ # Possibly a libtool archive, so verify it.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ . $dir/$name
+
+ # Delete the libtool libraries and symlinks.
+ for n in $library_names; do
+ rmfiles="$rmfiles $dir/$n"
+ done
+ test -n "$old_library" && rmfiles="$rmfiles $dir/$old_library"
+
+ $show "$rm $rmfiles"
+ $run $rm $rmfiles
+
+ if test -n "$library_names"; then
+ # Do each command in the postuninstall commands.
+ eval cmds=\"$postuninstall_cmds\"
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd"
+ done
+ IFS="$save_ifs"
+ fi
+
+ if test -n "$old_library"; then
+ # Do each command in the old_postuninstall commands.
+ eval cmds=\"$old_postuninstall_cmds\"
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd"
+ done
+ IFS="$save_ifs"
+ fi
+
+ # FIXME: should reinstall the best remaining shared library.
+ fi
+ ;;
+
+ *.lo)
+ if test "$build_old_libs" = yes; then
+ oldobj=`$echo "X$name" | $Xsed -e "$lo2o"`
+ rmfiles="$rmfiles $dir/$oldobj"
+ fi
+ $show "$rm $rmfiles"
+ $run $rm $rmfiles
+ ;;
+
+ *)
+ $show "$rm $rmfiles"
+ $run $rm $rmfiles
+ ;;
+ esac
+ done
+ exit 0
+ ;;
+
+ "")
+ $echo "$modename: you must specify a MODE" 1>&2
+ $echo "$generic_help" 1>&2
+ exit 1
+ ;;
+ esac
+
+ $echo "$modename: invalid operation mode \`$mode'" 1>&2
+ $echo "$generic_help" 1>&2
+ exit 1
+fi # test -z "$show_help"
+
+# We need to display help for each of the modes.
+case "$mode" in
+"") $echo \
+"Usage: $modename [OPTION]... [MODE-ARG]...
+
+Provide generalized library-building support services.
+
+ --config show all configuration variables
+ --debug enable verbose shell tracing
+-n, --dry-run display commands without modifying any files
+ --features display basic configuration information and exit
+ --finish same as \`--mode=finish'
+ --help display this help message and exit
+ --mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
+ --quiet same as \`--silent'
+ --silent don't print informational messages
+ --version print version information
+
+MODE must be one of the following:
+
+ compile compile a source file into a libtool object
+ execute automatically set library path, then run a program
+ finish complete the installation of libtool libraries
+ install install libraries or executables
+ link create a library or an executable
+ uninstall remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
+a more detailed description of MODE."
+ exit 0
+ ;;
+
+compile)
+ $echo \
+"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+ -o OUTPUT-FILE set the output file name to OUTPUT-FILE
+ -static always build a \`.o' file suitable for static linking
+
+COMPILE-COMMAND is a command to be used in creating a \`standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix \`.c' with the
+library object suffix, \`.lo'."
+ ;;
+
+execute)
+ $echo \
+"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+ -dlopen FILE add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to \`-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binary, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+ ;;
+
+finish)
+ $echo \
+"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges. Use
+the \`--dry-run' option if you just want to see what would be executed."
+ ;;
+
+install)
+ $echo \
+"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command. The first component should be
+either the \`install' or \`cp' program.
+
+The rest of the components are interpreted as arguments to that command (only
+BSD-compatible install options are recognized)."
+ ;;
+
+link)
+ $echo \
+"Usage: $modename [OPTION]... --mode=link LINK-COMMAND...
+
+Link object files or libraries together to form another library, or to
+create an executable program.
+
+LINK-COMMAND is a command using the C compiler that you would use to create
+a program from several object files.
+
+The following components of LINK-COMMAND are treated specially:
+
+ -all-static do not do any dynamic linking at all
+ -avoid-version do not add a version suffix if possible
+ -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
+ -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
+ -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
+ -export-symbols SYMFILE
+ try to export only the symbols listed in SYMFILE
+ -export-symbols-regex REGEX
+ try to export only the symbols matching REGEX
+ -LLIBDIR search LIBDIR for required installed libraries
+ -lNAME OUTPUT-FILE requires the installed library libNAME
+  -module           build a library that can be dlopened
+ -no-undefined declare that a library does not refer to external symbols
+ -o OUTPUT-FILE create OUTPUT-FILE from the specified objects
+ -release RELEASE specify package release information
+ -rpath LIBDIR the created library will eventually be installed in LIBDIR
+ -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
+ -static do not do any dynamic linking of libtool libraries
+ -version-info CURRENT[:REVISION[:AGE]]
+ specify library version info [each variable defaults to 0]
+
+All other options (arguments beginning with \`-') are ignored.
+
+Every other argument is treated as a filename. Files ending in \`.la' are
+treated as uninstalled libtool libraries, other files are standard or library
+object files.
+
+If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
+only library objects (\`.lo' files) may be specified, and \`-rpath' is
+required, except when creating a convenience library.
+
+If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
+using \`ar' and \`ranlib', or on Windows using \`lib'.
+
+If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
+is created, otherwise an executable program is created."
+ ;;
+
+uninstall)
+ $echo \
+"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
+
+Remove libraries from an installation directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, all the files associated with it are deleted.
+Otherwise, only FILE itself is deleted using RM."
+ ;;
+
+*)
+ $echo "$modename: invalid operation mode \`$mode'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+esac
+
+echo
+$echo "Try \`$modename --help' for more information about other modes."
+
+exit 0
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
+#! /bin/bash
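A minimal usage sketch of the modes dispatched and documented above; the configured script is assumed to be invoked as ./libtool, and the library name and install prefix are hypothetical:

    ./libtool --mode=compile cc -c foo.c                         # produces foo.lo
    ./libtool --mode=link cc -o libfoo.la foo.lo -rpath /usr/local/lib
    ./libtool --mode=install cp libfoo.la /usr/local/lib/libfoo.la
    ./libtool --finish /usr/local/lib                            # same as --mode=finish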
diff --git a/bdb/dist/rec_ctemp b/bdb/dist/rec_ctemp
new file mode 100644
index 00000000000..6be6d3166b8
--- /dev/null
+++ b/bdb/dist/rec_ctemp
@@ -0,0 +1,62 @@
+/*
+ * __PREF_FUNC_recover --
+ * Recovery function for FUNC.
+ *
+ * PUBLIC: int __PREF_FUNC_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__PREF_FUNC_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __PREF_FUNC_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__PREF_FUNC_print);
+ REC_INTRO(__PREF_FUNC_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
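The __PREF_ and FUNC tokens above are placeholders, filled in when per-record recovery stubs are generated (presumably by the gen_rec.awk pass that dist/s_recover, shown further below, drives). A hand-run sketch of that substitution, using a hypothetical prefix __bam_ and record name example:

    # Illustration only; the real build derives these names from the *.src files.
    sed -e 's/__PREF_/__bam_/g' -e 's/FUNC/example/g' rec_ctemp > rec_example.c
    # rec_example.c now defines __bam_example_recover() built around __bam_example_args.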
diff --git a/bdb/dist/s_all b/bdb/dist/s_all
new file mode 100644
index 00000000000..dab6c75913b
--- /dev/null
+++ b/bdb/dist/s_all
@@ -0,0 +1,16 @@
+#!/bin/sh -
+# $Id: s_all,v 1.7 2000/12/22 15:35:32 bostic Exp $
+
+sh s_perm # permissions.
+sh s_symlink # symbolic links.
+sh s_config # autoconf.
+sh s_readme # db/README file.
+sh s_recover # logging/recovery files.
+sh s_rpc # RPC files.
+sh s_include # standard include files.
+sh s_win32 # Win32 include files.
+sh s_win32_dsp # Win32 build environment.
+sh s_vxworks # VxWorks include files.
+sh s_java # Java support.
+sh s_tcl # Tcl support.
+sh s_tags # Tags files.
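A usage sketch, assuming the working directory is bdb/dist of the tree in this diff (the scripts rely on ../-relative paths):

    cd bdb/dist
    sh s_all          # rebuild every generated file, in the order listed above
    sh s_include      # or rerun a single step after editing its inputs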
diff --git a/bdb/dist/s_config b/bdb/dist/s_config
new file mode 100755
index 00000000000..870109c38f9
--- /dev/null
+++ b/bdb/dist/s_config
@@ -0,0 +1,37 @@
+#!/bin/sh -
+# $Id: s_config,v 1.3 2000/07/13 18:38:46 bostic Exp $
+#
+# Build the autoconfiguration files.
+
+msgm4="dnl DO NOT EDIT: automatically built by dist/s_config."
+
+. ./RELEASE
+
+echo "Building aclocal.m4"
+rm -f aclocal.m4
+(echo "$msgm4" &&
+ echo "AC_DEFUN(AM_VERSION_SET, [" &&
+ echo "AC_SUBST(DB_VERSION_MAJOR)" &&
+ echo "DB_VERSION_MAJOR=$DB_VERSION_MAJOR" &&
+ echo "AC_SUBST(DB_VERSION_MINOR)" &&
+ echo "DB_VERSION_MINOR=$DB_VERSION_MINOR" &&
+ echo "AC_SUBST(DB_VERSION_PATCH)" &&
+ echo "DB_VERSION_PATCH=$DB_VERSION_PATCH" &&
+ echo "AC_SUBST(DB_VERSION_STRING)" &&
+ echo "DB_VERSION_STRING=\"\\\"$DB_VERSION_STRING\\\"\"" &&
+ echo "])dnl" &&
+ cat aclocal/*.m4) > aclocal.m4
+chmod 444 aclocal.m4
+
+rm -f config.hin
+echo "Building config.hin (autoheader)"
+(autoheader configure.in > config.hin) 2>&1 | \
+ sed '/warning: AC_TRY_RUN called without default/d'
+chmod 444 config.hin
+
+rm -f configure
+echo "Building configure (autoconf)"
+autoconf 2>&1 | sed '/warning: AC_TRY_RUN called without default/d'
+
+chmod 555 configure config.guess config.sub install-sh
+rm -f aclocal.m4
diff --git a/bdb/dist/s_include b/bdb/dist/s_include
new file mode 100755
index 00000000000..fee6e50330f
--- /dev/null
+++ b/bdb/dist/s_include
@@ -0,0 +1,33 @@
+#!/bin/sh -
+# $Id: s_include,v 1.7 2000/07/13 18:38:46 bostic Exp $
+#
+# Build the automatically generated function prototype files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_include. */"
+cxx_if="#if defined(__cplusplus)"
+cxx_head="extern \"C\" {"
+cxx_foot="}"
+cxx_endif="#endif"
+
+tmp=/tmp/__db_inc.$$
+trap 'rm -f $tmp ; exit 0' 0 1 2 3 13 15
+
+for i in db btree clib common env hash \
+ lock log mp mutex os qam rpc_client rpc_server tcl txn xa; do
+ f=../include/${i}_ext.h
+ (echo "$msgc" &&
+ echo "#ifndef _${i}_ext_h_" &&
+ echo "#define _${i}_ext_h_" &&
+ echo "$cxx_if" &&
+ echo "$cxx_head" &&
+ echo "$cxx_endif" &&
+ sed -n "s/^ \* PUBLIC:[ ]\(.*\)/\1/p" ../$i/*.c;
+ [ $i = os ] &&
+ sed -n "s/^ \* PUBLIC:[ ]\(.*\)/\1/p" ../os_win32/*.c;
+ echo "$cxx_if" &&
+ echo "$cxx_foot" &&
+ echo "$cxx_endif" &&
+ echo "#endif /* _${i}_ext_h_ */") > $tmp
+ cmp $tmp $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $tmp $f && chmod 444 $f)
+done
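s_include rebuilds each ../include/<subsystem>_ext.h purely from PUBLIC: comment lines in that subsystem's sources. An illustration with a hypothetical prototype (the function name is made up), plus a preview of one subsystem's extraction using the same sed expression as above:

    # A source comment line such as
    #     * PUBLIC: int __bam_example __P((DB *, DBT *));
    # is copied, minus the comment prefix, into ../include/btree_ext.h.
    sed -n "s/^ \* PUBLIC:[ ]\(.*\)/\1/p" ../btree/*.c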
diff --git a/bdb/dist/s_java b/bdb/dist/s_java
new file mode 100755
index 00000000000..f324678abaa
--- /dev/null
+++ b/bdb/dist/s_java
@@ -0,0 +1,31 @@
+#!/bin/sh -
+# $Id: s_java,v 1.3 2000/07/13 18:38:46 bostic Exp $
+#
+# Build the Java files.
+
+msgcxx="// DO NOT EDIT: automatically built by dist/s_java."
+
+. RELEASE
+
+f=../java/src/com/sleepycat/db/DbConstants.java
+echo "Building $f"
+rm -f $f
+(echo "$msgcxx" && \
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo 'public class DbConstants' &&
+ echo '{' &&
+ egrep '^#define.DB_' ../include/db.src | \
+ sed -e '/"/d' \
+ -e '/@DB_VERSION_/d' \
+ -e '/DB_REDO/d' \
+ -e '/DB_UNDO/d' \
+ -e 's/[()]/ /g' \
+ -e 's/\/\*/ /' | \
+ awk '{ print "\tpublic static final int " $2 " = " $3 ";" }' &&
+ echo " public static final int DB_VERSION_MAJOR = $DB_VERSION_MAJOR;" &&
+ echo " public static final int DB_VERSION_MINOR = $DB_VERSION_MINOR;" &&
+ echo " public static final int DB_VERSION_PATCH = $DB_VERSION_PATCH;" &&
+ echo '}') > $f
+chmod 444 $f
diff --git a/bdb/dist/s_perm b/bdb/dist/s_perm
new file mode 100755
index 00000000000..4b0997f2f55
--- /dev/null
+++ b/bdb/dist/s_perm
@@ -0,0 +1,37 @@
+#!/bin/sh -
+# $Id: s_perm,v 1.9 2001/01/24 15:55:38 bostic Exp $
+
+echo 'Updating Berkeley DB source tree permissions...'
+
+run()
+{
+ echo " $1 ($2)"
+ if [ -f "../$1" ]; then
+ chmod "$2" "../$1"
+ else
+ echo "$1: no such file or directory"
+ exit 1
+ fi
+}
+
+run dist/build/chk.def 0555
+run dist/build/chk.define 0555
+run dist/build/chk.offt 0555
+run dist/build/chk.srcfiles 0555
+run dist/build/chk.tags 0555
+run dist/config.guess 0555
+run dist/config.sub 0555
+run dist/configure 0555
+run dist/install-sh 0555
+run dist/s_config 0555
+run dist/s_include 0555
+run dist/s_java 0555
+run dist/s_perm 0555
+run dist/s_readme 0555
+run dist/s_recover 0555
+run dist/s_symlink 0555
+run dist/s_tags 0555
+run dist/s_tcl 0555
+run dist/s_win32 0555
+run perl.BerkeleyDB/dbinfo 0555
+run perl.BerkeleyDB/mkpod 0555
diff --git a/bdb/dist/s_readme b/bdb/dist/s_readme
new file mode 100755
index 00000000000..9ff8a69bc88
--- /dev/null
+++ b/bdb/dist/s_readme
@@ -0,0 +1,18 @@
+#!/bin/sh -
+# $Id: s_readme,v 1.2 2000/01/27 21:42:18 bostic Exp $
+#
+# Build the README.
+
+. RELEASE
+
+f=../README
+echo "Building $f"
+rm -f $f
+(echo "$DB_VERSION_STRING" &&
+ echo "" &&
+ echo -n "This is version " &&
+ echo -n "$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH" &&
+ echo " of Berkeley DB from Sleepycat Software. To view" &&
+ echo "the release and installation documentation, load the distribution file" &&
+ echo "docs/index.html into your web browser.") > $f
+chmod 444 $f
diff --git a/bdb/dist/s_recover b/bdb/dist/s_recover
new file mode 100755
index 00000000000..0e8b3731c77
--- /dev/null
+++ b/bdb/dist/s_recover
@@ -0,0 +1,56 @@
+#!/bin/sh -
+# $Id: s_recover,v 1.3 2000/03/30 05:24:36 krinsky Exp $
+#
+# Build the automatically generated logging/recovery files.
+
+DIR="db btree hash log qam txn"
+
+t=/tmp/__db_$$
+loglist=../test/logtrack.list
+rm -f $t
+rm -f $loglist
+
+trap 'rm -f $t; exit 1' 1 2 3 13 15
+
+# Check to make sure we haven't duplicated a log record entry, and build
+# the list of log record types that the test suite uses.
+for i in $DIR; do
+ p=none
+ for f in ../$i/*.src; do
+ # Grab the PREFIX; there should only be one per file, and
+ # so it's okay to just take the first.
+ grep '^PREFIX' $f | head -1
+ egrep '^DEPRECATED[ ]|^BEGIN[ ]' $f | \
+ awk '{print $1 "\t" $2 "\t" $3}'
+
+ done
+done > $loglist
+grep -v '^PREFIX' $loglist | awk '{print $2 "\t" $3}' | sort +1 -n | \
+ uniq -d -f 1 > $t
+[ -s $t ] && {
+ echo "DUPLICATE LOG VALUES:"
+ cat $t
+ rm -f $t
+ exit 1
+}
+rm -f $t
+
+for i in db btree hash log qam txn; do
+ for f in ../$i/*.src; do
+ subsystem=`basename $f .src`
+ header_file=../include/${subsystem}_auto.h
+ source_file=../$i/${subsystem}_auto.c
+ template_file=template/rec_${subsystem}
+ template_source=.
+
+ echo "Building $source_file, $header_file, $template_file"
+ rm -f $header_file $source_file $template_file
+ awk -f gen_rec.awk \
+ -v subsystem=$subsystem \
+ -v source_file=$source_file \
+ -v header_file=$header_file \
+ -v template_file=$template_file \
+ -v template_dir=. < $f
+ chmod 444 $header_file $source_file $template_file
+ done
+done
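For a single subsystem the loop above reduces to one gen_rec.awk invocation; a sketch for btree, run from bdb/dist, with the file names taken directly from the loop:

    rm -f ../include/btree_auto.h ../btree/btree_auto.c template/rec_btree
    awk -f gen_rec.awk \
        -v subsystem=btree \
        -v source_file=../btree/btree_auto.c \
        -v header_file=../include/btree_auto.h \
        -v template_file=template/rec_btree \
        -v template_dir=. < ../btree/btree.src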
diff --git a/bdb/dist/s_rpc b/bdb/dist/s_rpc
new file mode 100644
index 00000000000..88c9f47cbf4
--- /dev/null
+++ b/bdb/dist/s_rpc
@@ -0,0 +1,70 @@
+#!/bin/sh -
+# $Id: s_rpc,v 11.6 2000/04/26 19:15:51 sue Exp $
+#
+# Build the automatically generated RPC files
+
+echo "Building RPC client/server files"
+
+client_file=../rpc_client/gen_client.c
+cproto_file=../include/gen_client_ext.h
+ctmpl_file=./template/gen_client_ret
+header_file=../include/db_server.h
+rpcclnt_file=../rpc_client/db_server_clnt.c
+rpcsvc_file=../rpc_server/db_server_svc.c
+rpcxdr_file=../rpc_server/db_server_xdr.c
+sed_file=../rpc_server/db_server_proc.sed
+server_file=../rpc_server/gen_db_server.c
+sproto_file=../include/gen_server_ext.h
+stmpl_file=./template/db_server_proc
+xdr_file=../rpc_server/db_server.x
+
+#
+# NOTE: proc_file must NOT be removed; it is the file to which we
+# apply sed_file.
+#
+proc_file=../rpc_server/db_server_proc.c
+svrsed_file=../rpc_server/db_server_svc.sed
+rpcsed_file=../rpc_server/db_server.sed
+
+rm -f $client_file $cproto_file $ctmpl_file $header_file $rpcsvc_file \
+ $rpcclnt_file $rpcxdr_file $sed_file $server_file $sproto_file \
+ $stmpl_file $xdr_file
+
+#
+# Generate client/server/XDR code
+#
+awk -f gen_rpc.awk \
+ -v client_file=$client_file \
+ -v cproto_file=$cproto_file \
+ -v ctmpl_file=$ctmpl_file \
+ -v sed_file=$sed_file \
+ -v server_file=$server_file \
+ -v sproto_file=$sproto_file \
+ -v stmpl_file=$stmpl_file \
+ -v xdr_file=$xdr_file < ../rpc_server/rpc.src
+chmod 444 $client_file $server_file
+
+#
+# Now run rpcgen to generate all our sources from the XDR file
+#
+rpcgen -h $xdr_file > $header_file
+rpcgen -l $xdr_file > $rpcclnt_file
+rpcgen -s tcp $xdr_file > $rpcsvc_file
+rpcgen -c $xdr_file > $rpcxdr_file
+
+# Run server files through sed
+sed -f $svrsed_file $rpcsvc_file > ${rpcsvc_file}.new
+mv ${rpcsvc_file}.new $rpcsvc_file
+sed -f $sed_file $proc_file > ${proc_file}.new
+mv ${proc_file}.new $proc_file
+
+# Run rpcgen files through sed to add HAVE_RPC ifdef
+sed -f $rpcsed_file $rpcsvc_file > ${rpcsvc_file}.new
+mv ${rpcsvc_file}.new $rpcsvc_file
+sed -f $rpcsed_file $rpcxdr_file > ${rpcxdr_file}.new
+mv ${rpcxdr_file}.new $rpcxdr_file
+sed -f $rpcsed_file $rpcclnt_file > ${rpcclnt_file}.new
+mv ${rpcclnt_file}.new $rpcclnt_file
+
+chmod 444 $header_file $rpcclnt_file $rpcsvc_file $rpcxdr_file
+
diff --git a/bdb/dist/s_symlink b/bdb/dist/s_symlink
new file mode 100755
index 00000000000..70efa445521
--- /dev/null
+++ b/bdb/dist/s_symlink
@@ -0,0 +1,91 @@
+#!/bin/sh -
+# $Id: s_symlink,v 1.9 2000/09/05 21:27:04 bostic Exp $
+
+echo 'Creating Berkeley DB source tree symbolic links...'
+
+build()
+{
+ echo " $1 -> $2"
+ (cd ../`dirname $1` && rm -f `basename $1` && ln -s $2 `basename $1`)
+}
+
+build btree/tags ../dist/tags
+build build_unix/tags ../dist/tags
+build clib/tags ../dist/tags
+build common/tags ../dist/tags
+build cxx/tags ../dist/tags
+build db/tags ../dist/tags
+build db185/tags ../dist/tags
+build db_archive/tags ../dist/tags
+build db_checkpoint/tags ../dist/tags
+build db_deadlock/tags ../dist/tags
+build db_dump/tags ../dist/tags
+build db_dump185/tags ../dist/tags
+build db_load/tags ../dist/tags
+build db_printlog/tags ../dist/tags
+build db_recover/tags ../dist/tags
+build db_stat/tags ../dist/tags
+build db_upgrade/tags ../dist/tags
+build db_verify/tags ../dist/tags
+build dbm/tags ../dist/tags
+build docs_src/api_cxx/Makefile ../api_c/Makefile
+build docs_src/api_cxx/m4.seealso ../api_c/m4.seealso
+build docs_src/api_cxx/spell.ok ../api_c/spell.ok
+build docs_src/api_java/Makefile ../api_c/Makefile
+build docs_src/api_java/java_index.so ../api_cxx/cxx_index.so
+build docs_src/api_java/m4.seealso ../api_c/m4.seealso
+build docs_src/api_java/spell.ok ../api_c/spell.ok
+build docs_src/api_tcl/spell.ok ../api_c/spell.ok
+build docs_src/ref/am/spell.ok ../spell.ok
+build docs_src/ref/am_conf/spell.ok ../spell.ok
+build docs_src/ref/arch/spell.ok ../spell.ok
+build docs_src/ref/build_unix/spell.ok ../spell.ok
+build docs_src/ref/build_vxworks/spell.ok ../spell.ok
+build docs_src/ref/build_win/spell.ok ../spell.ok
+build docs_src/ref/cam/spell.ok ../spell.ok
+build docs_src/ref/debug/spell.ok ../spell.ok
+build docs_src/ref/distrib/spell.ok ../spell.ok
+build docs_src/ref/dumpload/spell.ok ../spell.ok
+build docs_src/ref/env/spell.ok ../spell.ok
+build docs_src/ref/install/spell.ok ../spell.ok
+build docs_src/ref/intro/spell.ok ../spell.ok
+build docs_src/ref/java/spell.ok ../spell.ok
+build docs_src/ref/lock/spell.ok ../spell.ok
+build docs_src/ref/log/spell.ok ../spell.ok
+build docs_src/ref/mp/spell.ok ../spell.ok
+build docs_src/ref/perl/spell.ok ../spell.ok
+build docs_src/ref/program/spell.ok ../spell.ok
+build docs_src/ref/refs/spell.ok ../spell.ok
+build docs_src/ref/rpc/spell.ok ../spell.ok
+build docs_src/ref/sendmail/spell.ok ../spell.ok
+build docs_src/ref/simple_tut/spell.ok ../spell.ok
+build docs_src/ref/tcl/spell.ok ../spell.ok
+build docs_src/ref/test/spell.ok ../spell.ok
+build docs_src/ref/transapp/spell.ok ../spell.ok
+build docs_src/ref/txn/spell.ok ../spell.ok
+build docs_src/ref/upgrade.2.0/spell.ok ../spell.ok
+build docs_src/ref/upgrade.3.0/spell.ok ../spell.ok
+build docs_src/ref/upgrade.3.1/spell.ok ../spell.ok
+build docs_src/ref/upgrade.3.2/spell.ok ../spell.ok
+build docs_src/ref/xa/spell.ok ../spell.ok
+build env/tags ../dist/tags
+build examples_c/tags ../dist/tags
+build examples_cxx/tags ../dist/tags
+build examples_java java/src/com/sleepycat/examples
+build hash/tags ../dist/tags
+build hsearch/tags ../dist/tags
+build include/tags ../dist/tags
+build libdb_java/tags ../dist/tags
+build lock/tags ../dist/tags
+build log/tags ../dist/tags
+build mp/tags ../dist/tags
+build mutex/tags ../dist/tags
+build os/tags ../dist/tags
+build os_vxworks/tags ../dist/tags
+build os_win32/tags ../dist/tags
+build qam/tags ../dist/tags
+build rpc_client/tags ../dist/tags
+build rpc_server/tags ../dist/tags
+build tcl/tags ../dist/tags
+build txn/tags ../dist/tags
+build xa/tags ../dist/tags
diff --git a/bdb/dist/s_tags b/bdb/dist/s_tags
new file mode 100755
index 00000000000..834600b9fb1
--- /dev/null
+++ b/bdb/dist/s_tags
@@ -0,0 +1,47 @@
+#!/bin/sh -
+# $Id: s_tags,v 1.5 2000/09/05 21:27:04 bostic Exp $
+#
+# Build tags files.
+
+files="../include/*.h \
+ ../include/*.src \
+ ../btree/*.[ch] \
+ ../clib/*.[ch] \
+ ../common/*.[ch] \
+ ../db/*.[ch] \
+ ../db185/*.[ch] \
+ ../dbm/*.[ch] \
+ ../env/*.[ch] \
+ ../hash/*.[ch] \
+ ../hsearch/*.[ch] \
+ ../libdb_java/*.[ch] \
+ ../lock/*.[ch] \
+ ../log/*.[ch] \
+ ../mp/*.[ch] \
+ ../mutex/*.[ch] \
+ ../os/*.[ch] \
+ ../qam/*.[ch] \
+ ../rpc_client/*.[ch] \
+ ../rpc_server/*.[ch] \
+ ../tcl/*.[ch] \
+ ../txn/*.[ch] \
+ ../xa/*.[ch]"
+
+f=tags
+echo "Building $f"
+rm -f $f
+
+# Figure out what flags this ctags accepts.
+flags=""
+if ctags -d ../db/db.c 2>/dev/null; then
+ flags="-d $flags"
+fi
+if ctags -t ../db/db.c 2>/dev/null; then
+ flags="-t $flags"
+fi
+if ctags -w ../db/db.c 2>/dev/null; then
+ flags="-w $flags"
+fi
+
+ctags $flags $files 2>/dev/null
+chmod 444 $f
diff --git a/bdb/dist/s_tcl b/bdb/dist/s_tcl
new file mode 100755
index 00000000000..7350e9a49e9
--- /dev/null
+++ b/bdb/dist/s_tcl
@@ -0,0 +1,53 @@
+#!/bin/sh -
+# $Id: s_tcl,v 1.14 2000/11/09 19:24:07 sue Exp $
+#
+# Build the Tcl test files.
+
+msgshb="# DO NOT EDIT BELOW THIS LINE: automatically built by dist/s_tcl."
+
+. RELEASE
+
+f=../test/include.tcl
+echo "Building $f"
+rm -f $f
+(echo "set tclsh_path @TCL_TCLSH@" && \
+ echo "set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@SOSUFFIX@" && \
+ echo "set rpc_server localhost" && \
+ echo "set rpc_path ." && \
+ echo "set test_path @srcdir@/../test" && \
+ echo "" && \
+ echo "set KILL \"@db_cv_path_kill@\"" && \
+ echo "" && \
+ echo "$msgshb" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global testdir" && \
+ echo "global util_path" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "set rpc_testdir \$rpc_path/TESTDIR" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test") > $f
+chmod 444 $f
+
+f=../build_win32/include.tcl
+echo "Building $f"
+rm -f $f
+(echo "set tclsh_path SET_YOUR_TCLSH_PATH" && \
+ echo "set test_path ../test" && \
+ echo "set tcllib ./Debug/libdb_tcl${DB_VERSION_MAJOR}${DB_VERSION_MINOR}d.dll" && \
+ echo "" && \
+ echo "set KILL ./dbkill.exe" && \
+ echo "" && \
+ echo "$msgshb" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global testdir" && \
+ echo "global util_path" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test") > $f
+chmod 444 $f
diff --git a/bdb/dist/s_vxworks b/bdb/dist/s_vxworks
new file mode 100644
index 00000000000..edf058df7ee
--- /dev/null
+++ b/bdb/dist/s_vxworks
@@ -0,0 +1,48 @@
+#!/bin/sh -
+# $Id: s_vxworks,v 1.3 2000/07/13 18:38:46 bostic Exp $
+#
+# Build the VxWorks files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_vxworks. */"
+
+. RELEASE
+
+t=/tmp/__db_$$
+rm -f $t
+
+trap 'rm -f $t ; exit 1' 1 2 3 13 15
+
+f=../build_vxworks/db.h
+echo "Building $f"
+rm -f $f
+cat <<ENDOFSEDTEXT > $t
+s/@u_int8_decl@/typedef unsigned char u_int8_t;/
+s/@u_int16_decl@/typedef unsigned short u_int16_t;/
+s/@u_int32_decl@/typedef unsigned int u_int32_t;/
+/@int16_decl@/d
+/@int32_decl@/d
+/@u_char_decl@/d
+/@u_short_decl@/d
+/@u_int_decl@/d
+/@u_long_decl@/d
+/@ssize_t_decl@/d
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
+s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $t ../include/db.src) > $f
+chmod 444 $f
+
+f=../build_vxworks/db_int.h
+echo "Building $f"
+rm -f $f
+cat <<ENDOFSEDTEXT > $t
+s/\(PATH_SEPARATOR[^"]*"\)\/"/\1\/\\\\\\\\\\"/
+s/@db_align_t_decl@/typedef unsigned long db_align_t;/
+s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $t ../include/db_int.src) > $f
+chmod 444 $f
+
+rm -f $t
diff --git a/bdb/dist/s_win32 b/bdb/dist/s_win32
new file mode 100755
index 00000000000..f989a615e48
--- /dev/null
+++ b/bdb/dist/s_win32
@@ -0,0 +1,67 @@
+#!/bin/sh -
+# $Id: s_win32,v 1.9 2000/09/20 15:29:54 bostic Exp $
+#
+# Build Windows/32 include files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_win32. */"
+
+. RELEASE
+
+t=/tmp/__db_$$
+rm -f $t
+
+trap 'rm -f $t ; exit 1' 1 2 3 13 15
+
+f=../build_win32/db.h
+echo "Building $f"
+rm -f $f
+cat <<ENDOFSEDTEXT > $t
+s/@u_int8_decl@/typedef unsigned char u_int8_t;/
+s/@int16_decl@/typedef short int16_t;/
+s/@u_int16_decl@/typedef unsigned short u_int16_t;/
+s/@int32_decl@/typedef int int32_t;/
+s/@u_int32_decl@/typedef unsigned int u_int32_t;/
+/@u_char_decl@/{
+ i\\
+ #if !defined(_WINSOCKAPI_)
+ s/@u_char_decl@/typedef unsigned char u_char;/
+}
+s/@u_short_decl@/typedef unsigned short u_short;/
+s/@u_int_decl@/typedef unsigned int u_int;/
+/@u_long_decl@/{
+ s/@u_long_decl@/typedef unsigned long u_long;/
+ a\\
+ #endif
+}
+s/@ssize_t_decl@/typedef int ssize_t;/
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
+s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $t ../include/db.src) > $f
+chmod 444 $f
+
+f=../build_win32/db_int.h
+echo "Building $f"
+rm -f $f
+cat <<ENDOFSEDTEXT > $t
+s/\(PATH_SEPARATOR[^"]*"\)\/"/\1\\\\\\\\\\/:\"/
+s/@db_align_t_decl@/typedef unsigned long db_align_t;/
+s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $t ../include/db_int.src) > $f
+chmod 444 $f
+
+f=../build_win32/libdb.rc
+echo "Building $f"
+rm -f $f
+cat <<ENDOFSEDTEXT > $t
+s/%MAJOR%/$DB_VERSION_MAJOR/
+s/%MINOR%/$DB_VERSION_MINOR/
+s/%PATCH%/$DB_VERSION_PATCH/
+ENDOFSEDTEXT
+sed -f $t ../build_win32/libdbrc.src > $f
+chmod 444 $f
+
+rm -f $t
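The db.h generation above is plain token substitution over ../include/db.src; a one-line preview of a single rule from that sed script:

    echo '@u_int16_decl@' | sed 's/@u_int16_decl@/typedef unsigned short u_int16_t;/'
    # prints: typedef unsigned short u_int16_t;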
diff --git a/bdb/dist/s_win32_dsp b/bdb/dist/s_win32_dsp
new file mode 100644
index 00000000000..8abee7c1a61
--- /dev/null
+++ b/bdb/dist/s_win32_dsp
@@ -0,0 +1,98 @@
+#!/bin/sh -
+# $Id: s_win32_dsp,v 1.3 2000/12/02 04:36:47 dda Exp $
+#
+# Build Windows/32 .dsp files.
+
+. RELEASE
+
+BUILDDIR=../build_win32
+SRCFILES=srcfiles.in
+
+create_dsp()
+{
+ projname="$1" # name of the .dsp file
+ match="$2" # the string used to egrep the $sources file
+ sources="$3" # a modified version of $SRCFILES to facilitate matches
+ dsptemplate="$4" # overall template file for the .dsp
+ srctemplate="$5" # template file for the src file fragments
+
+ dspoutput=$BUILDDIR/$projname.dsp
+
+ echo "Building $dspoutput"
+ rm -f $dspoutput.insert
+ for srcpath in `egrep "$match" $sources | sed -e 's/[ ].*//'`
+ do
+ # take the path name and break it up, converting / to \\.
+ # so many backslashes needed because of shell quoting and
+ # sed quoting -- we'll end up with two backslashes for every
+ # forward slash, but we need that when feeding that to the
+ # later sed command.
+ set - `echo $srcpath | sed -e 's;\(.*\)/;../\\1 ;' \
+ -e 's;../build_win32;.;' \
+ -e 's;/;\\\\\\\\;g'`
+ srcdir="$1"
+ srcfile="$2"
+ sed -e "s/@srcdir@/$srcdir/g" \
+ -e "s/@srcfile@/$srcfile/g" \
+ < $srctemplate >> $dspoutput.insert
+ done
+ sed -e "/@SOURCE_FILES@/r$dspoutput.insert" \
+ -e "/@SOURCE_FILES@/d" \
+ -e "s/@project_name@/$projname/g" \
+ -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g" \
+ -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g" \
+ < $dsptemplate > $dspoutput.new
+ rm -f $dspoutput $dspoutput.insert
+ mv $dspoutput.new $dspoutput
+}
+
+TMPA=/tmp/swin32dsp$$a
+trap "rm -f $TMPA; exit 1" 1 2 3 15
+
+# create a copy of the srcfiles with comments and 'skip' lines removed.
+# add a space at the end of each list of modules so that each module
+# can be unambiguously matched e.g. ' dynamic '
+#
+sed -e "s/#.*$//" \
+ -e "/^[ ]*$/d" \
+ -e "s/[ ][ ]*/ /" \
+ -e "s/[ ]*$//" \
+ -e "/ skip$/d" \
+ -e "s/$/ /" < $SRCFILES > $TMPA
+
+# get a list of all modules mentioned
+#
+MODULES="`sed -e 's/^[^ ]* //' < $TMPA \
+ | tr ' ' '\012' | sort | uniq`"
+
+for module in $MODULES
+do
+ case "$module" in
+ dynamic )
+ create_dsp db_dll " $module " $TMPA \
+ $BUILDDIR/dynamic_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ java )
+ create_dsp db_java " $module " $TMPA \
+ $BUILDDIR/java_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ tcl )
+ create_dsp db_tcl " $module " $TMPA \
+ $BUILDDIR/tcl_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ static )
+ create_dsp db_static " $module " $TMPA \
+ $BUILDDIR/static_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ app=* )
+ appname=`echo $module | sed -e 's/^app=//'`
+ create_dsp $appname " $module " $TMPA \
+ $BUILDDIR/app_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ * )
+		echo "s_win32_dsp: module name $module in $SRCFILES is of unknown type"
+ ;;
+ esac
+done
+
+rm -f $TMPA
diff --git a/bdb/dist/srcfiles.in b/bdb/dist/srcfiles.in
new file mode 100644
index 00000000000..bfc564e13bb
--- /dev/null
+++ b/bdb/dist/srcfiles.in
@@ -0,0 +1,269 @@
+# $Id: srcfiles.in,v 1.7 2000/11/30 18:42:21 dda Exp $
+#
+# This is an input file for the s_win32_dsp script. It describes every
+# source file used by the Windows build and, as a completeness sanity
+# check, also lists the files that aren't.
+#
+# Please keep this list sorted alphabetically!
+#
+# Each non-blank, non-comment line is of the form
+# filename module [ module ...]
+#
+# The possible modules, including the name of the project (.dsp) file:
+#
+# app=NAME this is linked into application NAME.exe (db_NAME.dsp)
+# dynamic file appears in the DLL (db_dll.dsp)
+# java file appears in the java DLL (db_java.dsp)
+# skip this file is not used by Windows
+# static file appears in the static library (db_static.dsp)
+# tcl file appears in the tcl DLL (db_tcl.dsp)
+
+btree/bt_compare.c dynamic static
+btree/bt_conv.c dynamic static
+btree/bt_curadj.c dynamic static
+btree/bt_cursor.c dynamic static
+btree/bt_delete.c dynamic static
+btree/bt_method.c dynamic static
+btree/bt_open.c dynamic static
+btree/bt_put.c dynamic static
+btree/bt_rec.c dynamic static
+btree/bt_reclaim.c dynamic static
+btree/bt_recno.c dynamic static
+btree/bt_rsearch.c dynamic static
+btree/bt_search.c dynamic static
+btree/bt_split.c dynamic static
+btree/bt_stat.c dynamic static
+btree/bt_upgrade.c dynamic static
+btree/bt_verify.c dynamic static
+btree/btree_auto.c dynamic static
+build_win32/dbkill.cpp skip
+build_win32/dllmain.c dynamic
+build_win32/libdb.def dynamic
+build_win32/libdb.rc dynamic
+build_win32/libdb_tcl.def tcl
+clib/getcwd.c skip
+clib/getopt.c skip # must be linked into each app
+clib/memcmp.c skip
+clib/memmove.c skip
+clib/raise.c skip
+clib/snprintf.c skip
+clib/strcasecmp.c dynamic static
+clib/strerror.c skip
+clib/vsnprintf.c skip
+common/db_byteorder.c dynamic static
+common/db_err.c dynamic static
+common/db_getlong.c dynamic static
+common/db_log2.c dynamic static
+common/util_log.c dynamic static
+common/util_sig.c dynamic static
+cxx/cxx_app.cpp dynamic static
+cxx/cxx_except.cpp dynamic static
+cxx/cxx_lock.cpp dynamic static
+cxx/cxx_log.cpp dynamic static
+cxx/cxx_mpool.cpp dynamic static
+cxx/cxx_table.cpp dynamic static
+cxx/cxx_txn.cpp dynamic static
+db/crdel_auto.c dynamic static
+db/crdel_rec.c dynamic static
+db/db.c dynamic static
+db/db_am.c dynamic static
+db/db_auto.c dynamic static
+db/db_cam.c dynamic static
+db/db_conv.c dynamic static
+db/db_dispatch.c dynamic static
+db/db_dup.c dynamic static
+db/db_iface.c dynamic static
+db/db_join.c dynamic static
+db/db_meta.c dynamic static
+db/db_method.c dynamic static
+db/db_overflow.c dynamic static
+db/db_pr.c dynamic static
+db/db_rec.c dynamic static
+db/db_reclaim.c dynamic static
+db/db_ret.c dynamic static
+db/db_upg.c dynamic static
+db/db_upg_opd.c dynamic static
+db/db_vrfy.c dynamic static
+db/db_vrfyutil.c dynamic static
+db185/db185.c skip
+db_archive/db_archive.c app=db_archive
+db_checkpoint/db_checkpoint.c app=db_checkpoint
+db_deadlock/db_deadlock.c app=db_deadlock
+db_dump/db_dump.c app=db_dump
+db_dump185/db_dump185.c skip
+db_load/db_load.c app=db_load
+db_printlog/db_printlog.c app=db_printlog
+db_recover/db_recover.c app=db_recover
+db_stat/db_stat.c app=db_stat
+db_upgrade/db_upgrade.c app=db_upgrade
+db_verify/db_verify.c app=db_verify
+dbm/dbm.c dynamic static
+env/db_salloc.c dynamic static
+env/db_shash.c dynamic static
+env/env_method.c dynamic static
+env/env_open.c dynamic static
+env/env_recover.c dynamic static
+env/env_region.c dynamic static
+examples_c/ex_access.c app=ex_access
+examples_c/ex_btrec.c app=ex_btrec
+examples_c/ex_dbclient.c skip
+examples_c/ex_env.c app=ex_env
+examples_c/ex_lock.c app=ex_lock
+examples_c/ex_mpool.c app=ex_mpool
+examples_c/ex_thread.c skip
+examples_c/ex_tpcb.c app=ex_tpcb
+examples_cxx/AccessExample.cpp app=excxx_access
+examples_cxx/BtRecExample.cpp app=excxx_btrec
+examples_cxx/EnvExample.cpp app=excxx_env
+examples_cxx/LockExample.cpp app=excxx_lock
+examples_cxx/MpoolExample.cpp app=excxx_mpool
+examples_cxx/TpcbExample.cpp app=excxx_tpcb
+hash/hash.c dynamic static
+hash/hash_auto.c dynamic static
+hash/hash_conv.c dynamic static
+hash/hash_dup.c dynamic static
+hash/hash_func.c dynamic static
+hash/hash_meta.c dynamic static
+hash/hash_method.c dynamic static
+hash/hash_page.c dynamic static
+hash/hash_rec.c dynamic static
+hash/hash_reclaim.c dynamic static
+hash/hash_stat.c dynamic static
+hash/hash_upgrade.c dynamic static
+hash/hash_verify.c dynamic static
+hsearch/hsearch.c dynamic static
+libdb_java/java_Db.c java
+libdb_java/java_DbEnv.c java
+libdb_java/java_DbLock.c java
+libdb_java/java_DbLsn.c java
+libdb_java/java_DbTxn.c java
+libdb_java/java_Dbc.c java
+libdb_java/java_Dbt.c java
+libdb_java/java_info.c java
+libdb_java/java_locked.c java
+libdb_java/java_util.c java
+lock/lock.c dynamic static
+lock/lock_conflict.c dynamic static
+lock/lock_deadlock.c dynamic static
+lock/lock_method.c dynamic static
+lock/lock_region.c dynamic static
+lock/lock_stat.c dynamic static
+lock/lock_util.c dynamic static
+log/log.c dynamic static
+log/log_archive.c dynamic static
+log/log_auto.c dynamic static
+log/log_compare.c dynamic static
+log/log_findckp.c dynamic static
+log/log_get.c dynamic static
+log/log_method.c dynamic static
+log/log_put.c dynamic static
+log/log_rec.c dynamic static
+log/log_register.c dynamic static
+mp/mp_alloc.c dynamic static
+mp/mp_bh.c dynamic static
+mp/mp_fget.c dynamic static
+mp/mp_fopen.c dynamic static
+mp/mp_fput.c dynamic static
+mp/mp_fset.c dynamic static
+mp/mp_method.c dynamic static
+mp/mp_region.c dynamic static
+mp/mp_register.c dynamic static
+mp/mp_stat.c dynamic static
+mp/mp_sync.c dynamic static
+mp/mp_trickle.c dynamic static
+mutex/mut_fcntl.c skip
+mutex/mut_pthread.c skip
+mutex/mut_tas.c dynamic static
+mutex/mutex.c dynamic static
+os/os_abs.c skip
+os/os_alloc.c dynamic static
+os/os_dir.c skip
+os/os_errno.c skip
+os/os_fid.c skip
+os/os_finit.c skip
+os/os_fsync.c dynamic static
+os/os_handle.c dynamic static
+os/os_map.c skip
+os/os_method.c dynamic static
+os/os_oflags.c dynamic static
+os/os_open.c skip
+os/os_region.c dynamic static
+os/os_rename.c skip
+os/os_root.c dynamic static
+os/os_rpath.c dynamic static
+os/os_rw.c dynamic static
+os/os_seek.c skip
+os/os_sleep.c skip
+os/os_spin.c skip
+os/os_stat.c dynamic static
+os/os_tmpdir.c dynamic static
+os/os_unlink.c dynamic static
+os_vxworks/os_abs.c skip
+os_vxworks/os_finit.c skip
+os_vxworks/os_map.c skip
+os_win32/os_abs.c dynamic static
+os_win32/os_dir.c dynamic static
+os_win32/os_errno.c dynamic static
+os_win32/os_fid.c dynamic static
+os_win32/os_finit.c dynamic static
+os_win32/os_map.c dynamic static
+os_win32/os_open.c dynamic static
+os_win32/os_rename.c dynamic static
+os_win32/os_seek.c dynamic static
+os_win32/os_sleep.c dynamic static
+os_win32/os_spin.c dynamic static
+os_win32/os_type.c dynamic static
+qam/qam.c dynamic static
+qam/qam_auto.c dynamic static
+qam/qam_conv.c dynamic static
+qam/qam_files.c dynamic static
+qam/qam_method.c dynamic static
+qam/qam_open.c dynamic static
+qam/qam_rec.c dynamic static
+qam/qam_stat.c dynamic static
+qam/qam_upgrade.c dynamic static
+qam/qam_verify.c dynamic static
+rpc_client/client.c skip
+rpc_client/db_server_clnt.c skip
+rpc_client/gen_client.c skip
+rpc_client/gen_client_ret.c skip
+rpc_server/db_server_proc.c skip
+rpc_server/db_server_svc.c skip
+rpc_server/db_server_util.c skip
+rpc_server/db_server_xdr.c skip
+rpc_server/gen_db_server.c skip
+tcl/tcl_compat.c tcl
+tcl/tcl_db.c tcl
+tcl/tcl_db_pkg.c tcl
+tcl/tcl_dbcursor.c tcl
+tcl/tcl_env.c tcl
+tcl/tcl_internal.c tcl
+tcl/tcl_lock.c tcl
+tcl/tcl_log.c tcl
+tcl/tcl_mp.c tcl
+tcl/tcl_txn.c tcl
+test_server/dbs.c skip
+test_server/dbs_am.c skip
+test_server/dbs_checkpoint.c skip
+test_server/dbs_debug.c skip
+test_server/dbs_handles.c skip
+test_server/dbs_log.c skip
+test_server/dbs_qam.c skip
+test_server/dbs_spawn.c skip
+test_server/dbs_trickle.c skip
+test_server/dbs_util.c skip
+test_server/dbs_yield.c skip
+test_thread/lock.c skip
+test_thread/log.c skip
+test_thread/mpool.c skip
+test_thread/mutex.c skip
+test_vxworks/vx_mutex.c skip
+test_vxworks/vxtpcb_files.c skip
+test_vxworks/vxtpcb_onefile.c skip
+txn/txn.c dynamic static
+txn/txn_auto.c dynamic static
+txn/txn_rec.c dynamic static
+txn/txn_region.c dynamic static
+xa/xa.c dynamic static
+xa/xa_db.c dynamic static
+xa/xa_map.c dynamic static
diff --git a/bdb/dist/template/db_server_proc b/bdb/dist/template/db_server_proc
new file mode 100644
index 00000000000..3fc73221817
--- /dev/null
+++ b/bdb/dist/template/db_server_proc
@@ -0,0 +1,1057 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <errno.h>
+#include <string.h>
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_server_int.h"
+#include "rpc_server_ext.h"
+
+#include "gen_server_ext.h"
+
+/* BEGIN __env_cachesize_1_proc */
+void
+__env_cachesize_1_proc(dbenvcl_id, gbytes, bytes,
+ ncache, replyp)
+ long dbenvcl_id;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ u_int32_t ncache;
+ __env_cachesize_reply *replyp;
+/* END __env_cachesize_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_1_proc */
+void
+__env_close_1_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+/* END __env_close_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_create_1_proc */
+void
+__env_create_1_proc(timeout, replyp)
+ u_int32_t timeout;
+ __env_create_reply *replyp;
+/* END __env_create_1_proc */
+{
+ int ret;
+
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_1_proc */
+void
+__env_flags_1_proc(dbenvcl_id, flags, onoff, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ u_int32_t onoff;
+ __env_flags_reply *replyp;
+/* END __env_flags_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_open_1_proc */
+void
+__env_open_1_proc(dbenvcl_id, home, flags,
+ mode, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ u_int32_t mode;
+ __env_open_reply *replyp;
+/* END __env_open_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_remove_1_proc */
+void
+__env_remove_1_proc(dbenvcl_id, home, flags, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+/* END __env_remove_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_1_proc */
+void
+__txn_abort_1_proc(txnpcl_id, replyp)
+ long txnpcl_id;
+ __txn_abort_reply *replyp;
+/* END __txn_abort_1_proc */
+{
+ int ret;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_1_proc */
+void
+__txn_begin_1_proc(envpcl_id, parentcl_id,
+ flags, replyp)
+ long envpcl_id;
+ long parentcl_id;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+/* END __txn_begin_1_proc */
+{
+ int ret;
+ DB_ENV * envp;
+ ct_entry *envp_ctp;
+ DB_TXN * parent;
+ ct_entry *parent_ctp;
+
+ ACTIVATE_CTP(envp_ctp, envpcl_id, CT_ENV);
+ envp = (DB_ENV *)envp_ctp->ct_anyp;
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DB_TXN *)parent_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_1_proc */
+void
+__txn_commit_1_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+/* END __txn_commit_1_proc */
+{
+ int ret;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_maxkey_1_proc */
+void
+__db_bt_maxkey_1_proc(dbpcl_id, maxkey, replyp)
+ long dbpcl_id;
+ u_int32_t maxkey;
+ __db_bt_maxkey_reply *replyp;
+/* END __db_bt_maxkey_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_1_proc */
+void
+__db_bt_minkey_1_proc(dbpcl_id, minkey, replyp)
+ long dbpcl_id;
+ u_int32_t minkey;
+ __db_bt_minkey_reply *replyp;
+/* END __db_bt_minkey_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_1_proc */
+void
+__db_close_1_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+/* END __db_close_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_create_1_proc */
+void
+__db_create_1_proc(flags, envpcl_id, replyp)
+ u_int32_t flags;
+ long envpcl_id;
+ __db_create_reply *replyp;
+/* END __db_create_1_proc */
+{
+ int ret;
+ DB_ENV * envp;
+ ct_entry *envp_ctp;
+
+ ACTIVATE_CTP(envp_ctp, envpcl_id, CT_ENV);
+ envp = (DB_ENV *)envp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_del_1_proc */
+void
+__db_del_1_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyflags, keydata, keysize,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_del_reply *replyp;
+/* END __db_del_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_1_proc */
+void
+__db_extentsize_1_proc(dbpcl_id, extentsize, replyp)
+ long dbpcl_id;
+ u_int32_t extentsize;
+ __db_extentsize_reply *replyp;
+/* END __db_extentsize_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_1_proc */
+void
+__db_flags_1_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_flags_reply *replyp;
+/* END __db_flags_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_1_proc */
+void
+__db_get_1_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyflags, keydata, keysize,
+ datadlen, datadoff, dataflags, datadata,
+ datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+ int * freep;
+/* END __db_get_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_ffactor_1_proc */
+void
+__db_h_ffactor_1_proc(dbpcl_id, ffactor, replyp)
+ long dbpcl_id;
+ u_int32_t ffactor;
+ __db_h_ffactor_reply *replyp;
+/* END __db_h_ffactor_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_1_proc */
+void
+__db_h_nelem_1_proc(dbpcl_id, nelem, replyp)
+ long dbpcl_id;
+ u_int32_t nelem;
+ __db_h_nelem_reply *replyp;
+/* END __db_h_nelem_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_1_proc */
+void
+__db_key_range_1_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyflags, keydata, keysize,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+/* END __db_key_range_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_lorder_1_proc */
+void
+__db_lorder_1_proc(dbpcl_id, lorder, replyp)
+ long dbpcl_id;
+ u_int32_t lorder;
+ __db_lorder_reply *replyp;
+/* END __db_lorder_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_1_proc */
+void
+__db_open_1_proc(dbpcl_id, name, subdb,
+ type, flags, mode, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t type;
+ u_int32_t flags;
+ u_int32_t mode;
+ __db_open_reply *replyp;
+/* END __db_open_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pagesize_1_proc */
+void
+__db_pagesize_1_proc(dbpcl_id, pagesize, replyp)
+ long dbpcl_id;
+ u_int32_t pagesize;
+ __db_pagesize_reply *replyp;
+/* END __db_pagesize_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_put_1_proc */
+void
+__db_put_1_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyflags, keydata, keysize,
+ datadlen, datadoff, dataflags, datadata,
+ datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+ int * freep;
+/* END __db_put_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_delim_1_proc */
+void
+__db_re_delim_1_proc(dbpcl_id, delim, replyp)
+ long dbpcl_id;
+ u_int32_t delim;
+ __db_re_delim_reply *replyp;
+/* END __db_re_delim_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_len_1_proc */
+void
+__db_re_len_1_proc(dbpcl_id, len, replyp)
+ long dbpcl_id;
+ u_int32_t len;
+ __db_re_len_reply *replyp;
+/* END __db_re_len_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_pad_1_proc */
+void
+__db_re_pad_1_proc(dbpcl_id, pad, replyp)
+ long dbpcl_id;
+ u_int32_t pad;
+ __db_re_pad_reply *replyp;
+/* END __db_re_pad_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_remove_1_proc */
+void
+__db_remove_1_proc(dbpcl_id, name, subdb,
+ flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+/* END __db_remove_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_rename_1_proc */
+void
+__db_rename_1_proc(dbpcl_id, name, subdb,
+ newname, flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+/* END __db_rename_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_stat_1_proc */
+void
+__db_stat_1_proc(dbpcl_id,
+ flags, replyp, freep)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+ int * freep;
+/* END __db_stat_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_swapped_1_proc */
+void
+__db_swapped_1_proc(dbpcl_id, replyp)
+ long dbpcl_id;
+ __db_swapped_reply *replyp;
+/* END __db_swapped_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_sync_1_proc */
+void
+__db_sync_1_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_sync_reply *replyp;
+/* END __db_sync_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_cursor_1_proc */
+void
+__db_cursor_1_proc(dbpcl_id, txnpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+/* END __db_cursor_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_join_1_proc */
+void
+__db_join_1_proc(dbpcl_id, curslist,
+ flags, replyp)
+ long dbpcl_id;
+ u_int32_t * curslist;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+/* END __db_join_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_close_1_proc */
+void
+__dbc_close_1_proc(dbccl_id, replyp)
+ long dbccl_id;
+ __dbc_close_reply *replyp;
+/* END __dbc_close_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_count_1_proc */
+void
+__dbc_count_1_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+/* END __dbc_count_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_del_1_proc */
+void
+__dbc_del_1_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_del_reply *replyp;
+/* END __dbc_del_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_dup_1_proc */
+void
+__dbc_dup_1_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+/* END __dbc_dup_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_get_1_proc */
+void
+__dbc_get_1_proc(dbccl_id, keydlen, keydoff,
+ keyflags, keydata, keysize, datadlen,
+ datadoff, dataflags, datadata, datasize,
+ flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+ int * freep;
+/* END __dbc_get_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_put_1_proc */
+void
+__dbc_put_1_proc(dbccl_id, keydlen, keydoff,
+ keyflags, keydata, keysize, datadlen,
+ datadoff, dataflags, datadata, datasize,
+ flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+ int * freep;
+/* END __dbc_put_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
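Every stub in this template has the same shape: ACTIVATE_CTP resolves the client-side handle ID to the server's real handle, the "XXX Code goes here" placeholder marks where the actual Berkeley DB call belongs, and the result lands in replyp->status. Note that ret is left unset until that placeholder is filled in. As a hedged illustration only -- the shipped bodies live in rpc_server/db_server_proc.c and differ in detail -- the cachesize stub might be completed with something like:

    /* Possible body for the XXX section of __env_cachesize_1_proc:
     * forward the request to the real environment handle. */
    ret = dbenv->set_cachesize(dbenv, gbytes, bytes, (int)ncache);

The existing replyp->status = ret; assignment then carries the result back to the client.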
diff --git a/bdb/dist/template/gen_client_ret b/bdb/dist/template/gen_client_ret
new file mode 100644
index 00000000000..81e14d8b27a
--- /dev/null
+++ b/bdb/dist/template/gen_client_ret
@@ -0,0 +1,522 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <rpc/rpc.h>
+
+#include <errno.h>
+#include <string.h>
+#endif
+#include "db_server.h"
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_ext.h"
+#include "txn.h"
+
+#include "gen_client_ext.h"
+
+int
+__dbcl_env_close_ret(dbenv, flags, replyp)
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ int mode;
+ __env_open_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_env_remove_ret(dbenv, home, flags, replyp)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_txn_abort_ret(txnp, replyp)
+ DB_TXN * txnp;
+ __txn_abort_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp)
+ DB_ENV * envp;
+ DB_TXN * parent;
+ DB_TXN ** txnpp;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+{
+ int ret;
+ long txnid;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ txnid = replyp->txnidcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_txn_commit_ret(txnp, flags, replyp)
+ DB_TXN * txnp;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_db_close_ret(dbp, flags, replyp)
+ DB * dbp;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+{
+ int ret;
+ /* DBT key; */
+ /* DBT data; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->keydata; */
+ /* Handle replyp->datadata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DB_KEY_RANGE * range;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+{
+ int ret;
+ double less;
+ double equal;
+ double greater;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ less = replyp->less;
+ equal = replyp->equal;
+ greater = replyp->greater;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_db_open_ret(dbp, name, subdb, type, flags, mode, replyp)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+ __db_open_reply *replyp;
+{
+ int ret;
+	DBTYPE reply_type;
+ u_int32_t dbflags;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+	reply_type = (DBTYPE)replyp->type;
+ dbflags = replyp->dbflags;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+{
+ int ret;
+ /* DBT key; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->keydata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_db_remove_ret(dbp, name, subdb, flags, replyp)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ const char * newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int __db_db_stat_statsreplist __P((__db_stat_statsreplist *, u_int32_t **));
+void __db_db_stat_statsfree __P((u_int32_t *));
+
+int
+__dbcl_db_stat_ret(dbp, sp, func0, flags, replyp)
+ DB * dbp;
+ void * sp;
+ void *(*func0) __P((size_t));
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+{
+ int ret;
+ u_int32_t *__db_statslist;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+	if ((ret = __db_db_stat_statsreplist(replyp->statslist, &__db_statslist)) != 0)
+ return (ret);
+
+ /*
+ * XXX Handle list
+ */
+
+ __db_db_stat_statsfree(__db_statslist);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__db_db_stat_statsreplist(locp, ppp)
+ __db_stat_statsreplist *locp;
+ u_int32_t **ppp;
+{
+ u_int32_t *pp;
+ int cnt, ret, size;
+ __db_stat_statsreplist *nl;
+
+ for (cnt = 0, nl = locp; nl != NULL; cnt++, nl = nl->next)
+ ;
+
+ if (cnt == 0) {
+ *ppp = NULL;
+ return (0);
+ }
+ size = sizeof(*pp) * cnt;
+ if ((ret = __os_malloc(NULL, size, NULL, ppp)) != 0)
+ return (ret);
+ memset(*ppp, 0, size);
+ for (pp = *ppp, nl = locp; nl != NULL; nl = nl->next, pp++) {
+ *pp = *(u_int32_t *)nl->ent.ent_val;
+ }
+ return (0);
+out:
+ __db_db_stat_statsfree(*ppp);
+ return (ret);
+}
+
+void
+__db_db_stat_statsfree(pp)
+ u_int32_t *pp;
+{
+ size_t size;
+ u_int32_t *p;
+
+ if (pp == NULL)
+ return;
+ size = sizeof(*p);
+ for (p = pp; *p != 0; p++) {
+ size += sizeof(*p);
+ }
+ __os_free(pp, size);
+}
+
+int
+__dbcl_db_cursor_ret(dbp, txnp, dbcpp, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBC ** dbcpp;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+{
+ int ret;
+ long dbcid;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbcid = replyp->dbcidcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_db_join_ret(dbp, curs, dbcp, flags, replyp)
+ DB * dbp;
+ DBC ** curs;
+ DBC ** dbcp;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+{
+ int ret;
+ long dbcid;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbcid = replyp->dbcidcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_dbc_close_ret(dbc, replyp)
+ DBC * dbc;
+ __dbc_close_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_dbc_count_ret(dbc, countp, flags, replyp)
+ DBC * dbc;
+ db_recno_t * countp;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+{
+ int ret;
+ db_recno_t dupcount;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dupcount = replyp->dupcount;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_dbc_dup_ret(dbc, dbcp, flags, replyp)
+ DBC * dbc;
+ DBC ** dbcp;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+{
+ int ret;
+ long dbcid;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbcid = replyp->dbcidcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_dbc_get_ret(dbc, key, data, flags, replyp)
+ DBC * dbc;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+{
+ int ret;
+ /* DBT key; */
+ /* DBT data; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->keydata; */
+ /* Handle replyp->datadata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+int
+__dbcl_dbc_put_ret(dbc, key, data, flags, replyp)
+ DBC * dbc;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+{
+ int ret;
+ /* DBT key; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->keydata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+#endif /* HAVE_RPC */
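The client-side return stubs are the mirror image: each bails out early on a non-zero reply status and is expected to unpack whatever values the reply carries where its placeholder sits (the real unpacking is done in rpc_client/gen_client_ret.c). As a hedged sketch, __dbcl_dbc_count_ret, which already copies replyp->dupcount into a local, would plausibly finish by handing the count back through the caller's pointer:

    /* Possible body for the XXX section of __dbcl_dbc_count_ret. */
    *countp = dupcount;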
diff --git a/bdb/dist/template/rec_btree b/bdb/dist/template/rec_btree
new file mode 100644
index 00000000000..6c954db1afb
--- /dev/null
+++ b/bdb/dist/template/rec_btree
@@ -0,0 +1,943 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "bam.h"
+#include "log.h"
+
+/*
+ * __bam_pg_alloc_recover --
+ * Recovery function for pg_alloc.
+ *
+ * PUBLIC: int __bam_pg_alloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_pg_alloc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_pg_alloc_print);
+ REC_INTRO(__bam_pg_alloc_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_pg_alloc1_recover --
+ * Recovery function for pg_alloc1.
+ *
+ * PUBLIC: int __bam_pg_alloc1_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_pg_alloc1_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_pg_alloc1_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_pg_alloc1_print);
+ REC_INTRO(__bam_pg_alloc1_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_pg_free_recover --
+ * Recovery function for pg_free.
+ *
+ * PUBLIC: int __bam_pg_free_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_pg_free_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_pg_free_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_pg_free_print);
+ REC_INTRO(__bam_pg_free_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_pg_free1_recover --
+ * Recovery function for pg_free1.
+ *
+ * PUBLIC: int __bam_pg_free1_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_pg_free1_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_pg_free1_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_pg_free1_print);
+ REC_INTRO(__bam_pg_free1_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_split1_recover --
+ * Recovery function for split1.
+ *
+ * PUBLIC: int __bam_split1_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_split1_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_split1_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_split1_print);
+ REC_INTRO(__bam_split1_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_split_recover --
+ * Recovery function for split.
+ *
+ * PUBLIC: int __bam_split_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_split_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_split_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_split_print);
+ REC_INTRO(__bam_split_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_rsplit1_recover --
+ * Recovery function for rsplit1.
+ *
+ * PUBLIC: int __bam_rsplit1_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rsplit1_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rsplit1_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_rsplit1_print);
+ REC_INTRO(__bam_rsplit1_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_rsplit_recover --
+ * Recovery function for rsplit.
+ *
+ * PUBLIC: int __bam_rsplit_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rsplit_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_rsplit_print);
+ REC_INTRO(__bam_rsplit_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_adj_recover --
+ * Recovery function for adj.
+ *
+ * PUBLIC: int __bam_adj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_adj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_adj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_adj_print);
+ REC_INTRO(__bam_adj_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_cadjust_recover --
+ * Recovery function for cadjust.
+ *
+ * PUBLIC: int __bam_cadjust_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cadjust_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_cadjust_print);
+ REC_INTRO(__bam_cadjust_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_cdel_recover --
+ * Recovery function for cdel.
+ *
+ * PUBLIC: int __bam_cdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_cdel_print);
+ REC_INTRO(__bam_cdel_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_repl_recover --
+ * Recovery function for repl.
+ *
+ * PUBLIC: int __bam_repl_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_repl_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_repl_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_repl_print);
+ REC_INTRO(__bam_repl_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_root_recover --
+ * Recovery function for root.
+ *
+ * PUBLIC: int __bam_root_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_root_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_root_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_root_print);
+ REC_INTRO(__bam_root_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_curadj_recover --
+ * Recovery function for curadj.
+ *
+ * PUBLIC: int __bam_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_curadj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_curadj_print);
+ REC_INTRO(__bam_curadj_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_rcuradj_recover --
+ * Recovery function for rcuradj.
+ *
+ * PUBLIC: int __bam_rcuradj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rcuradj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_rcuradj_print);
+ REC_INTRO(__bam_rcuradj_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
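Every skeleton above, and in the templates that follow, leaves cmp_p unassigned on purpose: the block comment in each function marks the spot where the author of the real recovery routine must compare the page's current LSN against the LSN the log record says the page had before the update. A minimal sketch of how that gap is usually filled is shown below; the pagelsn field and the "re-apply" / "roll back" steps are placeholders for illustration, not part of the generated template.

	/*
	 * Sketch of a completed comparison block, assuming the log record
	 * carries the page's pre-update LSN in an embedded DB_LSN field
	 * named "pagelsn" (a placeholder name, not generated above).
	 */
	cmp_n = log_compare(lsnp, &LSN(pagep));
	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
	if (cmp_p == 0 && DB_REDO(op)) {
		/* Page is still in its pre-update state: re-apply the change. */
		/* ... record-specific redo work using argp goes here ... */
		LSN(pagep) = *lsnp;		/* Page now reflects this record. */
		modified = 1;
	} else if (cmp_n == 0 && !DB_REDO(op)) {
		/* Page already reflects this record: roll the change back. */
		/* ... record-specific undo work using argp goes here ... */
		LSN(pagep) = argp->pagelsn;	/* Restore the pre-update LSN. */
		modified = 1;
	}

Restamping LSN(pagep) in both branches is what lets memp_fput() write the page back in a state that a later recovery pass will recognize and skip.
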
diff --git a/bdb/dist/template/rec_crdel b/bdb/dist/template/rec_crdel
new file mode 100644
index 00000000000..352e9ae5f22
--- /dev/null
+++ b/bdb/dist/template/rec_crdel
@@ -0,0 +1,385 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "crdel.h"
+#include "log.h"
+
+/*
+ * __crdel_fileopen_recover --
+ * Recovery function for fileopen.
+ *
+ * PUBLIC: int __crdel_fileopen_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_fileopen_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_fileopen_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__crdel_fileopen_print);
+ REC_INTRO(__crdel_fileopen_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __crdel_metasub_recover --
+ * Recovery function for metasub.
+ *
+ * PUBLIC: int __crdel_metasub_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_metasub_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__crdel_metasub_print);
+ REC_INTRO(__crdel_metasub_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __crdel_metapage_recover --
+ * Recovery function for metapage.
+ *
+ * PUBLIC: int __crdel_metapage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_metapage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_metapage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__crdel_metapage_print);
+ REC_INTRO(__crdel_metapage_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __crdel_old_delete_recover --
+ * Recovery function for old_delete.
+ *
+ * PUBLIC: int __crdel_old_delete_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_old_delete_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_old_delete_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__crdel_old_delete_print);
+ REC_INTRO(__crdel_old_delete_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __crdel_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __crdel_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_rename_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__crdel_rename_print);
+ REC_INTRO(__crdel_rename_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __crdel_delete_recover --
+ * Recovery function for delete.
+ *
+ * PUBLIC: int __crdel_delete_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_delete_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_delete_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__crdel_delete_print);
+ REC_INTRO(__crdel_delete_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/bdb/dist/template/rec_db b/bdb/dist/template/rec_db
new file mode 100644
index 00000000000..69d941351c5
--- /dev/null
+++ b/bdb/dist/template/rec_db
@@ -0,0 +1,509 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db.h"
+#include "log.h"
+
+/*
+ * __db_addrem_recover --
+ * Recovery function for addrem.
+ *
+ * PUBLIC: int __db_addrem_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_addrem_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_addrem_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_addrem_print);
+ REC_INTRO(__db_addrem_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_split_recover --
+ * Recovery function for split.
+ *
+ * PUBLIC: int __db_split_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_split_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_split_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_split_print);
+ REC_INTRO(__db_split_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_big_recover --
+ * Recovery function for big.
+ *
+ * PUBLIC: int __db_big_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_big_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_big_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_big_print);
+ REC_INTRO(__db_big_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_ovref_recover --
+ * Recovery function for ovref.
+ *
+ * PUBLIC: int __db_ovref_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_ovref_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_ovref_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_ovref_print);
+ REC_INTRO(__db_ovref_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_relink_recover --
+ * Recovery function for relink.
+ *
+ * PUBLIC: int __db_relink_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_relink_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_relink_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_relink_print);
+ REC_INTRO(__db_relink_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_addpage_recover --
+ * Recovery function for addpage.
+ *
+ * PUBLIC: int __db_addpage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_addpage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_addpage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_addpage_print);
+ REC_INTRO(__db_addpage_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_debug_recover --
+ * Recovery function for debug.
+ *
+ * PUBLIC: int __db_debug_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_debug_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_debug_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_debug_print);
+ REC_INTRO(__db_debug_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_noop_recover --
+ * Recovery function for noop.
+ *
+ * PUBLIC: int __db_noop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_noop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_noop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_noop_print);
+ REC_INTRO(__db_noop_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
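The compare-then-act shape repeated in every function above is what makes recovery idempotent: a change is redone only when the page still carries the LSN it had before the update, and undone only when the page carries this record's own LSN, so replaying the same log range twice cannot apply an update twice. The toy program below illustrates that property; lsn_t, lsn_compare and redo are stand-ins invented for illustration, not Berkeley DB types.

	#include <stdio.h>

	/* Mock stand-ins, for illustration only, not Berkeley DB's types. */
	typedef struct {
		unsigned int file;		/* log file number */
		unsigned int offset;		/* offset within that file */
	} lsn_t;

	static int
	lsn_compare(const lsn_t *a, const lsn_t *b)
	{
		if (a->file != b->file)
			return (a->file < b->file ? -1 : 1);
		if (a->offset != b->offset)
			return (a->offset < b->offset ? -1 : 1);
		return (0);
	}

	/*
	 * Redo one logged update against a page: apply it only if the page
	 * still carries the LSN it had before the update (the "cmp_p == 0"
	 * test above), then stamp the page with the record's own LSN.
	 * Returns 1 if the page was modified.
	 */
	static int
	redo(lsn_t *page_lsn, const lsn_t *rec_lsn, const lsn_t *prev_page_lsn)
	{
		if (lsn_compare(page_lsn, prev_page_lsn) != 0)
			return (0);		/* already applied: skip */
		*page_lsn = *rec_lsn;		/* apply and restamp */
		return (1);
	}

	int
	main()
	{
		lsn_t page = { 1, 100 };	/* page as last written */
		lsn_t before = { 1, 100 };	/* LSN logged before the update */
		lsn_t record = { 1, 200 };	/* LSN of the update itself */

		printf("first pass:  modified = %d\n", redo(&page, &record, &before));
		printf("second pass: modified = %d\n", redo(&page, &record, &before));
		return (0);
	}

Running it prints modified = 1 for the first pass and modified = 0 for the second, which is exactly the behavior the cmp_p test buys the real recovery functions.
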
diff --git a/bdb/dist/template/rec_hash b/bdb/dist/template/rec_hash
new file mode 100644
index 00000000000..bcee2131cdc
--- /dev/null
+++ b/bdb/dist/template/rec_hash
@@ -0,0 +1,881 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "ham.h"
+#include "log.h"
+
+/*
+ * __ham_insdel_recover --
+ * Recovery function for insdel.
+ *
+ * PUBLIC: int __ham_insdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_insdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_insdel_print);
+ REC_INTRO(__ham_insdel_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_newpage_recover --
+ * Recovery function for newpage.
+ *
+ * PUBLIC: int __ham_newpage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_newpage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_newpage_print);
+ REC_INTRO(__ham_newpage_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_splitmeta_recover --
+ * Recovery function for splitmeta.
+ *
+ * PUBLIC: int __ham_splitmeta_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_splitmeta_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_splitmeta_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_splitmeta_print);
+ REC_INTRO(__ham_splitmeta_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_splitdata_recover --
+ * Recovery function for splitdata.
+ *
+ * PUBLIC: int __ham_splitdata_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_splitdata_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_splitdata_print);
+ REC_INTRO(__ham_splitdata_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_replace_recover --
+ * Recovery function for replace.
+ *
+ * PUBLIC: int __ham_replace_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_replace_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_replace_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_replace_print);
+ REC_INTRO(__ham_replace_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_newpgno_recover --
+ * Recovery function for newpgno.
+ *
+ * PUBLIC: int __ham_newpgno_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_newpgno_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_newpgno_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_newpgno_print);
+ REC_INTRO(__ham_newpgno_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_ovfl_recover --
+ * Recovery function for ovfl.
+ *
+ * PUBLIC: int __ham_ovfl_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_ovfl_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_ovfl_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_ovfl_print);
+ REC_INTRO(__ham_ovfl_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_copypage_recover --
+ * Recovery function for copypage.
+ *
+ * PUBLIC: int __ham_copypage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_copypage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_copypage_print);
+ REC_INTRO(__ham_copypage_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_metagroup_recover --
+ * Recovery function for metagroup.
+ *
+ * PUBLIC: int __ham_metagroup_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_metagroup_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_metagroup_print);
+ REC_INTRO(__ham_metagroup_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_groupalloc1_recover --
+ * Recovery function for groupalloc1.
+ *
+ * PUBLIC: int __ham_groupalloc1_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_groupalloc1_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_groupalloc1_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_groupalloc1_print);
+ REC_INTRO(__ham_groupalloc1_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_groupalloc2_recover --
+ * Recovery function for groupalloc2.
+ *
+ * PUBLIC: int __ham_groupalloc2_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_groupalloc2_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_groupalloc2_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_groupalloc2_print);
+ REC_INTRO(__ham_groupalloc2_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_groupalloc_recover --
+ * Recovery function for groupalloc.
+ *
+ * PUBLIC: int __ham_groupalloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_groupalloc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_groupalloc_print);
+ REC_INTRO(__ham_groupalloc_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_curadj_recover --
+ * Recovery function for curadj.
+ *
+ * PUBLIC: int __ham_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_curadj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_curadj_print);
+ REC_INTRO(__ham_curadj_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_chgpg_recover --
+ * Recovery function for chgpg.
+ *
+ * PUBLIC: int __ham_chgpg_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_chgpg_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_chgpg_print);
+ REC_INTRO(__ham_chgpg_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/bdb/dist/template/rec_log b/bdb/dist/template/rec_log
new file mode 100644
index 00000000000..1ce40b37804
--- /dev/null
+++ b/bdb/dist/template/rec_log
@@ -0,0 +1,137 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "log.h"
+#include "log.h"
+
+/*
+ * __log_register1_recover --
+ * Recovery function for register1.
+ *
+ * PUBLIC: int __log_register1_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__log_register1_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __log_register1_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__log_register1_print);
+ REC_INTRO(__log_register1_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __log_register_recover --
+ * Recovery function for register.
+ *
+ * PUBLIC: int __log_register_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__log_register_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __log_register_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__log_register_print);
+ REC_INTRO(__log_register_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
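Not every record type covered by these templates really describes a page update: the register records in the rec_log template above, and the checkpoint and transaction records in the rec_txn template further down, describe file- and transaction-level events. When the real recovery functions for those records are written, the memp_fget()/memp_fput() scaffolding is normally deleted rather than filled in, leaving file- or transaction-level bookkeeping around the final assignment of argp->prev_lsn to *lsnp.
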
diff --git a/bdb/dist/template/rec_qam b/bdb/dist/template/rec_qam
new file mode 100644
index 00000000000..fcd24d6a28e
--- /dev/null
+++ b/bdb/dist/template/rec_qam
@@ -0,0 +1,509 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "qam.h"
+#include "log.h"
+
+/*
+ * __qam_inc_recover --
+ * Recovery function for inc.
+ *
+ * PUBLIC: int __qam_inc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_inc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_inc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_inc_print);
+ REC_INTRO(__qam_inc_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_incfirst_recover --
+ * Recovery function for incfirst.
+ *
+ * PUBLIC: int __qam_incfirst_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_incfirst_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_incfirst_print);
+ REC_INTRO(__qam_incfirst_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_mvptr_recover --
+ * Recovery function for mvptr.
+ *
+ * PUBLIC: int __qam_mvptr_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_mvptr_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_mvptr_print);
+ REC_INTRO(__qam_mvptr_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_del_recover --
+ * Recovery function for del.
+ *
+ * PUBLIC: int __qam_del_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_del_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_del_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_del_print);
+ REC_INTRO(__qam_del_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_add_recover --
+ * Recovery function for add.
+ *
+ * PUBLIC: int __qam_add_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_add_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_add_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_add_print);
+ REC_INTRO(__qam_add_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_delete_recover --
+ * Recovery function for delete.
+ *
+ * PUBLIC: int __qam_delete_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_delete_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_delete_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_delete_print);
+ REC_INTRO(__qam_delete_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __qam_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_rename_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_rename_print);
+ REC_INTRO(__qam_rename_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_delext_recover --
+ * Recovery function for delext.
+ *
+ * PUBLIC: int __qam_delext_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_delext_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_delext_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_delext_print);
+ REC_INTRO(__qam_delext_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/bdb/dist/template/rec_txn b/bdb/dist/template/rec_txn
new file mode 100644
index 00000000000..c66d604f578
--- /dev/null
+++ b/bdb/dist/template/rec_txn
@@ -0,0 +1,509 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "txn.h"
+#include "log.h"
+
+/*
+ * __txn_old_regop_recover --
+ * Recovery function for old_regop.
+ *
+ * PUBLIC: int __txn_old_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_old_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_old_regop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_old_regop_print);
+ REC_INTRO(__txn_old_regop_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_regop_recover --
+ * Recovery function for regop.
+ *
+ * PUBLIC: int __txn_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_regop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_regop_print);
+ REC_INTRO(__txn_regop_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_old_ckp_recover --
+ * Recovery function for old_ckp.
+ *
+ * PUBLIC: int __txn_old_ckp_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_old_ckp_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_old_ckp_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_old_ckp_print);
+ REC_INTRO(__txn_old_ckp_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_ckp_recover --
+ * Recovery function for ckp.
+ *
+ * PUBLIC: int __txn_ckp_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_ckp_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_ckp_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_ckp_print);
+ REC_INTRO(__txn_ckp_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_xa_regop_old_recover --
+ * Recovery function for xa_regop_old.
+ *
+ * PUBLIC: int __txn_xa_regop_old_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_xa_regop_old_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_xa_regop_old_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_xa_regop_old_print);
+ REC_INTRO(__txn_xa_regop_old_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_xa_regop_recover --
+ * Recovery function for xa_regop.
+ *
+ * PUBLIC: int __txn_xa_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_xa_regop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_xa_regop_print);
+ REC_INTRO(__txn_xa_regop_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_child_old_recover --
+ * Recovery function for child_old.
+ *
+ * PUBLIC: int __txn_child_old_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_child_old_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_child_old_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_child_old_print);
+ REC_INTRO(__txn_child_old_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_child_recover --
+ * Recovery function for child.
+ *
+ * PUBLIC: int __txn_child_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_child_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_child_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_child_print);
+ REC_INTRO(__txn_child_read);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
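Editor's note: the generated stubs in rec_txn above all share one skeleton -- fetch the page named in the log record, compare LSNs, mark the page modified on redo or undo, and return it to the buffer pool. The template deliberately leaves cmp_p unset and the redo/undo bodies empty for the developer to fill in. The following is a minimal sketch of how that block is usually completed; it assumes the log record carries a "pagelsn" field (as the in-line comment suggests -- substitute the real field name from the generated argp structure), and the update itself is elided:

	/*
	 * Sketch only: "pagelsn" stands for whatever pre-update LSN was
	 * written into the log record; it is not part of the generated code.
	 */
	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);

	if (cmp_p == 0 && DB_REDO(op)) {
		/* Page is still in its pre-update state: reapply the change. */
		/* ... redo the update described by argp ... */
		LSN(pagep) = *lsnp;		/* Page now reflects this record. */
		modified = 1;
	} else if (cmp_n == 0 && !DB_REDO(op)) {
		/* Page reflects this record: roll the change back. */
		/* ... undo the update described by argp ... */
		LSN(pagep) = argp->pagelsn;	/* Restore the pre-update LSN. */
		modified = 1;
	}

Only when one of the two branches fires is the page written back dirty; otherwise recovery leaves it untouched and simply moves on to argp->prev_lsn.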
diff --git a/bdb/docs/api_c/c_index.html b/bdb/docs/api_c/c_index.html
new file mode 100644
index 00000000000..4b6023c8057
--- /dev/null
+++ b/bdb/docs/api_c/c_index.html
@@ -0,0 +1,172 @@
+<!--$Id: c_index.so,v 10.53 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: C Interface by Function/Structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>C Interface by Function/Structure</h1>
+<p><table border=1 align=center>
+<tr><th>Database Environment</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_create.html">db_env_create</a></td><td>Create an environment handle</td></tr>
+<tr><td><a href="../api_c/env_close.html">DBENV-&gt;close</a></td><td>Close an environment</td></tr>
+<tr><td><a href="../api_c/db_err.html">DBENV-&gt;err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../api_c/db_err.html">DBENV-&gt;errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../api_c/env_open.html">DBENV-&gt;open</a></td><td>Open an environment</td></tr>
+<tr><td><a href="../api_c/env_remove.html">DBENV-&gt;remove</a></td><td>Remove an environment</td></tr>
+<tr><td><a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../api_c/env_set_data_dir.html">DBENV-&gt;set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../api_c/env_set_feedback.html">DBENV-&gt;set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a></td><td>Turn off mutual exclusion locking</td></tr>
+<tr><td><a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><a href="../api_c/env_set_rec_init.html">DBENV-&gt;set_recovery_init</a></td><td>Set recovery initialization callback</td></tr>
+<tr><td><a href="../api_c/env_set_server.html">DBENV-&gt;set_server</a></td><td>Establish server connection</td></tr>
+<tr><td><a href="../api_c/env_set_shm_key.html">DBENV-&gt;set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><a href="../api_c/env_set_tmp_dir.html">DBENV-&gt;set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><a href="../api_c/env_strerror.html">db_strerror</a></td><td>Error strings</td></tr>
+<tr><td><a href="../api_c/env_version.html">db_version</a></td><td>Return version information</td></tr>
+<tr><th>Database Operations</th><th>Description</th></tr>
+<tr><td><a href="../api_c/db_create.html">db_create</a></td><td>Create a database handle</td></tr>
+<tr><td><a href="../api_c/db_close.html">DB-&gt;close</a></td><td>Close a database</td></tr>
+<tr><td><a href="../api_c/db_del.html">DB-&gt;del</a></td><td>Delete items from a database</td></tr>
+<tr><td><a href="../api_c/db_err.html">DB-&gt;err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../api_c/db_err.html">DB-&gt;errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../api_c/db_fd.html">DB-&gt;fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><a href="../api_c/db_get.html">DB-&gt;get</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><a href="../api_c/db_get_type.html">DB-&gt;get_type</a></td><td>Return the database type</td></tr>
+<tr><td><a href="../api_c/db_join.html">DB-&gt;join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><a href="../api_c/db_key_range.html">DB-&gt;key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><a href="../api_c/db_open.html">DB-&gt;open</a></td><td>Open a database</td></tr>
+<tr><td><a href="../api_c/db_put.html">DB-&gt;put</a></td><td>Store items into a database</td></tr>
+<tr><td><a href="../api_c/db_remove.html">DB-&gt;remove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_c/db_rename.html">DB-&gt;rename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_c/db_set_append_recno.html">DB-&gt;set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../api_c/db_set_feedback.html">DB-&gt;set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a></td><td>Set a local space allocation function</td></tr>
+<tr><td><a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a></td><td>Set a local space allocation function</td></tr>
+<tr><td><a href="../api_c/db_stat.html">DB-&gt;stat</a></td><td>Return database statistics</td></tr>
+<tr><td><a href="../api_c/db_sync.html">DB-&gt;sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><a href="../api_c/db_verify.html">DB-&gt;verify</a></td><td>Verify/salvage a database</td></tr>
+<tr><th>Database Cursors</th><th>Description</th></tr>
+<tr><td><a href="../api_c/db_cursor.html">DB-&gt;cursor</a></td><td>Open a cursor into a database</td></tr>
+<tr><td><a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a></td><td>Close a cursor</td></tr>
+<tr><td><a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a></td><td>Delete by cursor</td></tr>
+<tr><td><a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a></td><td>Store by cursor</td></tr>
+<tr><th>Lock Manager</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a></td><td>Set maximum number of locks (<b>Deprecated</b>)</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../api_c/lock_detect.html">lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../api_c/lock_get.html">lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../api_c/lock_id.html">lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../api_c/lock_put.html">lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_c/lock_stat.html">lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../api_c/lock_vec.html">lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><th>Log Manager</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../api_c/env_set_lg_dir.html">DBENV-&gt;set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../api_c/log_archive.html">log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../api_c/log_compare.html">log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><a href="../api_c/log_file.html">log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../api_c/log_flush.html">log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../api_c/log_get.html">log_get</a></td><td>Get a log record</td></tr>
+<tr><td><a href="../api_c/log_put.html">log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../api_c/log_register.html">log_register</a></td><td>Register a file name with the log manager</td></tr>
+<tr><td><a href="../api_c/log_stat.html">log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../api_c/log_unregister.html">log_unregister</a></td><td>Unregister a file name with the log manager</td></tr>
+<tr><th>Buffer Pool</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../api_c/memp_fclose.html">memp_fclose</a></td><td>Close a file in a buffer pool</td></tr>
+<tr><td><a href="../api_c/memp_fget.html">memp_fget</a></td><td>Get a page from a file in a buffer pool</td></tr>
+<tr><td><a href="../api_c/memp_fopen.html">memp_fopen</a></td><td>Open a file in a buffer pool</td></tr>
+<tr><td><a href="../api_c/memp_fput.html">memp_fput</a></td><td>Return a page to a buffer pool</td></tr>
+<tr><td><a href="../api_c/memp_fset.html">memp_fset</a></td><td>Modify meta information for buffer pool page</td></tr>
+<tr><td><a href="../api_c/memp_fsync.html">memp_fsync</a></td><td>Flush pages from a file in a buffer pool</td></tr>
+<tr><td><a href="../api_c/memp_register.html">memp_register</a></td><td>Register input/output functions for a file in a buffer pool</td></tr>
+<tr><td><a href="../api_c/memp_stat.html">memp_stat</a></td><td>Return buffer pool statistics</td></tr>
+<tr><td><a href="../api_c/memp_sync.html">memp_sync</a></td><td>Flush pages from a buffer pool</td></tr>
+<tr><td><a href="../api_c/memp_trickle.html">memp_trickle</a></td><td>Trickle flush pages from a buffer pool</td></tr>
+<tr><th>Transaction Manager</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a></td><td>Set transaction abort recover function</td></tr>
+<tr><td><a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../api_c/txn_abort.html">txn_abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><a href="../api_c/txn_begin.html">txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../api_c/txn_checkpoint.html">txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../api_c/txn_commit.html">txn_commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><a href="../api_c/txn_id.html">txn_id</a></td><td>Return a transaction ID</td></tr>
+<tr><td><a href="../api_c/txn_prepare.html">txn_prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><a href="../api_c/txn_stat.html">txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><th>Historic Interfaces</th><th>Description</th></tr>
+<tr><td><a href="../api_c/dbm.html">dbm</a></td><td>UNIX Dbm/Ndbm Interfaces</td></tr>
+<tr><td><a href="../api_c/hsearch.html">hsearch</a></td><td>UNIX Hsearch Interfaces</td></tr>
+<tr><th>Data Structures</th><th>Description</th></tr>
+<tr><td><a href="../api_c/dbt.html">DBT</a></td><td>DBT structures</td></tr>
+<tr><td><a href="../api_c/db_lsn.html">DB_LSN</a></td><td>DB_LSN structures</td></tr>
+<tr><th>DB Library Configuration</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_set_pageyield.html">db_env_set_pageyield</a></td><td>Yield the processor on each page access</td></tr>
+<tr><td><a href="../api_c/env_set_panicstate.html">db_env_set_panicstate</a></td><td>Reset panic state</td></tr>
+<tr><td><a href="../api_c/env_set_region_init.html">db_env_set_region_init</a></td><td>Fault in shared regions on initial access</td></tr>
+<tr><td><a href="../api_c/env_set_tas_spins.html">db_env_set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><th>DB System Call Configuration</th><th>Description</th></tr>
+<tr><td><a href="../api_c/set_func_close.html">db_env_set_func_close</a></td><td>Replace underlying Berkeley DB system interfaces</td></tr>
+<tr><td><a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_exists.html">db_env_set_func_exists</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_free.html">db_env_set_func_free</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_map.html">db_env_set_func_map</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_open.html">db_env_set_func_open</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_read.html">db_env_set_func_read</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_rename.html">db_env_set_func_rename</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_seek.html">db_env_set_func_seek</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_write.html">db_env_set_func_write</a></td><td><br></td></tr>
+<tr><td><a href="../api_c/set_func_yield.html">db_env_set_func_yield</a></td><td><br></td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
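Editor's note: the grouped index above covers the whole 3.x C interface. For orientation, here is a minimal sketch that strings a few of the listed calls together (db_env_create, DBENV->open, db_create, DB->open, DB->put). The environment directory, database name, and key/data values are placeholders, and error handling is reduced to the bare minimum:

/* Sketch only: a tiny store using the C API indexed above. */
#include <stdio.h>
#include <string.h>

#include <db.h>

int
store_one(void)
{
	DB_ENV *dbenv;
	DB *dbp;
	DBT key, data;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	if ((ret = dbenv->open(dbenv,
	    "/tmp/db_home", DB_CREATE | DB_INIT_MPOOL, 0)) != 0)
		goto env_err;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		goto env_err;
	if ((ret = dbp->open(dbp,
	    "example.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
		goto db_err;

	/* DBTs must be zeroed before use. */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "fruit";
	key.size = sizeof("fruit");
	data.data = "apple";
	data.size = sizeof("apple");

	if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
		dbenv->err(dbenv, ret, "DB->put");

db_err:	(void)dbp->close(dbp, 0);
env_err: (void)dbenv->close(dbenv, 0);
	return (ret);
}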
diff --git a/bdb/docs/api_c/c_pindex.html b/bdb/docs/api_c/c_pindex.html
new file mode 100644
index 00000000000..725bf0068a9
--- /dev/null
+++ b/bdb/docs/api_c/c_pindex.html
@@ -0,0 +1,530 @@
+<html>
+<head>
+<title>Berkeley DB: C Interface Index</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>C Interface Index</h1>
+<center>
+<table cellspacing=0 cellpadding=0>
+<tr><td align=right> configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#5">1.85</a> API compatibility</td></tr>
+<tr><td align=right> building a utility to dump Berkeley DB </td><td><a href="../ref/build_unix/conf.html#7">1.85</a> databases</td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.2.0/intro.html#2">2.0</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.0/intro.html#2">3.0</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.1/intro.html#2">3.1</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.2/intro.html#2">3.2</a></td></tr>
+<tr><td align=right> selecting an </td><td><a href="../ref/am_conf/select.html#2">access</a> method</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/intro.html#2">access</a> methods</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/aix.html#2">AIX</a></td></tr>
+<tr><td align=right> data </td><td><a href="../api_c/dbt.html#5">alignment</a></td></tr>
+<tr><td align=right> programmatic </td><td><a href="../ref/arch/apis.html#2">APIs</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_archive.html#3">archive</a> log files</td></tr>
+<tr><td align=right> </td><td><a href="../utility/berkeley_db_svc.html#2">berkeley_db_svc</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/intro.html#2">building</a> for UNIX</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/notes.html#2">building</a> for UNIX FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/intro.html#2">building</a> for VxWorks</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/faq.html#2">building</a> for VxWorks FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/intro.html#2">building</a> for Win32</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/faq.html#2">building</a> for Windows FAQ</td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/byteorder.html#2">byte</a> order</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/byteorder.html#2">byte</a> ordering</td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#6">C++</a> API</td></tr>
+<tr><td align=right> flushing the database </td><td><a href="../ref/am/sync.html#2">cache</a></td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/cachesize.html#2">cache</a> size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/archival.html#3">catastrophic</a> recovery</td></tr>
+<tr><td align=right>Patches, Updates and </td><td><a href="http://www.sleepycat.com/update/index.html">Change</a> logs</td></tr>
+<tr><td align=right> utility to take </td><td><a href="../utility/db_checkpoint.html#3">checkpoints</a></td></tr>
+<tr><td align=right>memp_fopen</td><td><a href="../api_c/memp_fopen.html#clear_len">clear_len</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curclose.html#2">closing</a> a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/close.html#2">closing</a> a database</td></tr>
+<tr><td align=right> specifying a Btree </td><td><a href="../ref/am_conf/bt_compare.html#2">comparison</a> function</td></tr>
+<tr><td align=right> changing </td><td><a href="../ref/build_unix/flags.html#2">compile</a> or load options</td></tr>
+<tr><td align=right> </td><td><a href="../ref/cam/intro.html#2">Concurrent</a> Data Store</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/conf.html#2">configuring</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right> recovering </td><td><a href="../ref/am/verify.html#4">corrupted</a> databases</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/count.html#2">counting</a> data items for a key</td></tr>
+<tr><td align=right> closing a </td><td><a href="../ref/am/curclose.html#3">cursor</a></td></tr>
+<tr><td align=right> deleting records with a </td><td><a href="../ref/am/curdel.html#3">cursor</a></td></tr>
+<tr><td align=right> duplicating a </td><td><a href="../ref/am/curdup.html#3">cursor</a></td></tr>
+<tr><td align=right> retrieving records with a </td><td><a href="../ref/am/curget.html#3">cursor</a></td></tr>
+<tr><td align=right> storing records with a </td><td><a href="../ref/am/curput.html#3">cursor</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/stability.html#2">cursor</a> stability</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/cursor.html#2">cursors</a></td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#data">data</a></td></tr>
+<tr><td align=right> utility to upgrade </td><td><a href="../utility/db_upgrade.html#3">database</a> files</td></tr>
+<tr><td align=right> utility to verify </td><td><a href="../utility/db_verify.html#3">database</a> files</td></tr>
+<tr><td align=right>DBcursor-&gt;c_put</td><td><a href="../api_c/dbc_put.html#DB_AFTER">DB_AFTER</a></td></tr>
+<tr><td align=right>DB-&gt;verify</td><td><a href="../api_c/db_verify.html#DB_AGGRESSIVE">DB_AGGRESSIVE</a></td></tr>
+<tr><td align=right>DB-&gt;put</td><td><a href="../api_c/db_put.html#DB_APPEND">DB_APPEND</a></td></tr>
+<tr><td align=right>log_archive</td><td><a href="../api_c/log_archive.html#DB_ARCH_ABS">DB_ARCH_ABS</a></td></tr>
+<tr><td align=right>log_archive</td><td><a href="../api_c/log_archive.html#DB_ARCH_DATA">DB_ARCH_DATA</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_archive.html#2">db_archive</a></td></tr>
+<tr><td align=right>log_archive</td><td><a href="../api_c/log_archive.html#DB_ARCH_LOG">DB_ARCH_LOG</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put</td><td><a href="../api_c/dbc_put.html#DB_BEFORE">DB_BEFORE</a></td></tr>
+<tr><td align=right>DB-&gt;stat</td><td><a href="../api_c/db_stat.html#DB_CACHED_COUNTS">DB_CACHED_COUNTS</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_flags</td><td><a href="../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a></td></tr>
+<tr><td align=right>log_get</td><td><a href="../api_c/log_get.html#DB_CHECKPOINT">DB_CHECKPOINT</a></td></tr>
+<tr><td align=right>log_put</td><td><a href="../api_c/log_put.html#DB_CHECKPOINT">DB_CHECKPOINT</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_checkpoint.html#2">db_checkpoint</a></td></tr>
+<tr><td align=right>db_env_create</td><td><a href="../api_c/env_create.html#DB_CLIENT">DB_CLIENT</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#DB_CONFIG">DB_CONFIG</a></td></tr>
+<tr><td align=right>DB-&gt;get</td><td><a href="../api_c/db_get.html#DB_CONSUME">DB_CONSUME</a></td></tr>
+<tr><td align=right>DB-&gt;get</td><td><a href="../api_c/db_get.html#DB_CONSUME_WAIT">DB_CONSUME_WAIT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_create.html#2">db_create</a></td></tr>
+<tr><td align=right>DB-&gt;open</td><td><a href="../api_c/db_open.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>memp_fopen</td><td><a href="../api_c/memp_fopen.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>log_put</td><td><a href="../api_c/log_put.html#DB_CURLSN">DB_CURLSN</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put</td><td><a href="../api_c/dbc_put.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right>log_get</td><td><a href="../api_c/log_get.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/dbc_close.html#2">DBcursor-&gt;c_close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/dbc_count.html#2">DBcursor-&gt;c_count</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/dbc_del.html#2">DBcursor-&gt;c_del</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/dbc_dup.html#2">DBcursor-&gt;c_dup</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/dbc_get.html#2">DBcursor-&gt;c_get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/dbc_put.html#2">DBcursor-&gt;c_put</a></td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a></td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a></td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a></td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_deadlock.html#2">db_deadlock</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_dump.html#2">db_dump</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags</td><td><a href="../api_c/db_set_flags.html#DB_DUP">DB_DUP</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags</td><td><a href="../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a></td></tr>
+<tr><td align=right>DB-&gt;upgrade</td><td><a href="../api_c/db_upgrade.html#DB_DUPSORT">DB_DUPSORT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_create.html#2">db_env_create</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_close.html#2">DBENV-&gt;close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_err.html#2">DBENV-&gt;err</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_open.html#2">DBENV-&gt;open</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_remove.html#2">DBENV-&gt;remove</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_cachesize.html#2">DBENV-&gt;set_cachesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_data_dir.html#2">DBENV-&gt;set_data_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_errcall.html#2">DBENV-&gt;set_errcall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_errfile.html#2">DBENV-&gt;set_errfile</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_errpfx.html#2">DBENV-&gt;set_errpfx</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_feedback.html#2">DBENV-&gt;set_feedback</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_flags.html#2">DBENV-&gt;set_flags</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lg_bsize.html#2">DBENV-&gt;set_lg_bsize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lg_dir.html#2">DBENV-&gt;set_lg_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lg_max.html#2">DBENV-&gt;set_lg_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lk_conflicts.html#2">DBENV-&gt;set_lk_conflicts</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lk_detect.html#2">DBENV-&gt;set_lk_detect</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lk_max.html#2">DBENV-&gt;set_lk_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lk_max_lockers.html#2">DBENV-&gt;set_lk_max_lockers</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lk_max_locks.html#2">DBENV-&gt;set_lk_max_locks</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_lk_max_objects.html#2">DBENV-&gt;set_lk_max_objects</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_mp_mmapsize.html#2">DBENV-&gt;set_mp_mmapsize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_mutexlocks.html#2">DBENV-&gt;set_mutexlocks</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_paniccall.html#2">DBENV-&gt;set_paniccall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_rec_init.html#2">DBENV-&gt;set_recovery_init</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_server.html#2">DBENV-&gt;set_server</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_shm_key.html#2">DBENV-&gt;set_shm_key</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_tmp_dir.html#2">DBENV-&gt;set_tmp_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_tx_max.html#2">DBENV-&gt;set_tx_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_tx_recover.html#2">DBENV-&gt;set_tx_recover</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_tx_timestamp.html#2">DBENV-&gt;set_tx_timestamp</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_verbose.html#2">DBENV-&gt;set_verbose</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_close.html#2">db_env_set_func_close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_dirfree.html#2">db_env_set_func_dirfree</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_dirlist.html#2">db_env_set_func_dirlist</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_exists.html#2">db_env_set_func_exists</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_free.html#2">db_env_set_func_free</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_fsync.html#2">db_env_set_func_fsync</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_ioinfo.html#2">db_env_set_func_ioinfo</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_malloc.html#2">db_env_set_func_malloc</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_map.html#2">db_env_set_func_map</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_open.html#2">db_env_set_func_open</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_read.html#2">db_env_set_func_read</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_realloc.html#2">db_env_set_func_realloc</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_rename.html#2">db_env_set_func_rename</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_seek.html#2">db_env_set_func_seek</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_sleep.html#2">db_env_set_func_sleep</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_unlink.html#2">db_env_set_func_unlink</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_unmap.html#2">db_env_set_func_unmap</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_write.html#2">db_env_set_func_write</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/set_func_yield.html#2">db_env_set_func_yield</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_pageyield.html#2">db_env_set_pageyield</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_panicstate.html#2">db_env_set_panicstate</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_region_init.html#2">db_env_set_region_init</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_set_tas_spins.html#2">db_env_set_tas_spins</a></td></tr>
+<tr><td align=right>DB-&gt;open</td><td><a href="../api_c/db_open.html#DB_EXCL">DB_EXCL</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>log_get</td><td><a href="../api_c/log_get.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>log_put</td><td><a href="../api_c/log_put.html#DB_FLUSH">DB_FLUSH</a></td></tr>
+<tr><td align=right>DBENV-&gt;remove</td><td><a href="../api_c/env_remove.html#DB_FORCE">DB_FORCE</a></td></tr>
+<tr><td align=right>txn_checkpoint</td><td><a href="../api_c/txn_checkpoint.html#DB_FORCE">DB_FORCE</a></td></tr>
+<tr><td align=right>DB-&gt;get</td><td><a href="../api_c/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_GET_BOTH">DB_GET_BOTH</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_GET_RECNO">DB_GET_RECNO</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_close.html#2">DB-&gt;close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_cursor.html#2">DB-&gt;cursor</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_del.html#2">DB-&gt;del</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_fd.html#2">DB-&gt;fd</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_get.html#2">DB-&gt;get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_get_byteswapped.html#2">DB-&gt;get_byteswapped</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_get_type.html#2">DB-&gt;get_type</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_join.html#2">DB-&gt;join</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_key_range.html#2">DB-&gt;key_range</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_open.html#2">DB-&gt;open</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_put.html#2">DB-&gt;put</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_remove.html#2">DB-&gt;remove</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_rename.html#2">DB-&gt;rename</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_append_recno.html#2">DB-&gt;set_append_recno</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_bt_compare.html#2">DB-&gt;set_bt_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_bt_minkey.html#2">DB-&gt;set_bt_minkey</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_bt_prefix.html#2">DB-&gt;set_bt_prefix</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_cachesize.html#2">DB-&gt;set_cachesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_dup_compare.html#2">DB-&gt;set_dup_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_errcall.html#2">DB-&gt;set_errcall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_errfile.html#2">DB-&gt;set_errfile</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_errpfx.html#2">DB-&gt;set_errpfx</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_feedback.html#2">DB-&gt;set_feedback</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_flags.html#2">DB-&gt;set_flags</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_h_ffactor.html#2">DB-&gt;set_h_ffactor</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_h_hash.html#2">DB-&gt;set_h_hash</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_h_nelem.html#2">DB-&gt;set_h_nelem</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_lorder.html#2">DB-&gt;set_lorder</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_malloc.html#2">DB-&gt;set_malloc</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_pagesize.html#2">DB-&gt;set_pagesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_paniccall.html#2">DB-&gt;set_paniccall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_q_extentsize.html#2">DB-&gt;set_q_extentsize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_realloc.html#2">DB-&gt;set_realloc</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_re_delim.html#2">DB-&gt;set_re_delim</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_re_len.html#2">DB-&gt;set_re_len</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_re_pad.html#2">DB-&gt;set_re_pad</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_set_re_source.html#2">DB-&gt;set_re_source</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_stat.html#2">DB-&gt;stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_sync.html#2">DB-&gt;sync</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_upgrade.html#2">DB-&gt;upgrade</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_verify.html#2">DB-&gt;verify</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#DB_HOME">DB_HOME</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#db_home">db_home</a></td></tr>
+<tr><td align=right> DB-&gt;close </td><td><a href="../api_c/db_close.html#3">DB_INCOMPLETE</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_JOINENV">DB_JOINENV</a></td></tr>
+<tr><td align=right>DB-&gt;join</td><td><a href="../api_c/db_join.html#DB_JOIN_ITEM">DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_JOIN_ITEM">DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>DB-&gt;join</td><td><a href="../api_c/db_join.html#DB_JOIN_NOSORT">DB_JOIN_NOSORT</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put</td><td><a href="../api_c/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put</td><td><a href="../api_c/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_LAST">DB_LAST</a></td></tr>
+<tr><td align=right>log_get</td><td><a href="../api_c/log_get.html#DB_LAST">DB_LAST</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_load.html#2">db_load</a></td></tr>
+<tr><td align=right>lock_detect</td><td><a href="../api_c/lock_detect.html#DB_LOCK_CONFLICT">DB_LOCK_CONFLICT</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_lk_detect</td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_LOCKDOWN">DB_LOCKDOWN</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#DB_LOCK_GET">DB_LOCK_GET</a></td></tr>
+<tr><td align=right>lock_get</td><td><a href="../api_c/lock_get.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>lock_get</td><td><a href="../api_c/lock_get.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_lk_detect</td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_OLDEST">DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#DB_LOCK_PUT">DB_LOCK_PUT</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#DB_LOCK_PUT_ALL">DB_LOCK_PUT_ALL</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#DB_LOCK_PUT_OBJ">DB_LOCK_PUT_OBJ</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_lk_detect</td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_RANDOM">DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_lk_detect</td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/db_lsn.html#2">DB_LSN</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/dbm.html#2">dbm/ndbm</a></td></tr>
+<tr><td align=right>memp_fput</td><td><a href="../api_c/memp_fput.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a></td></tr>
+<tr><td align=right>memp_fset</td><td><a href="../api_c/memp_fset.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a></td></tr>
+<tr><td align=right>memp_fget</td><td><a href="../api_c/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a></td></tr>
+<tr><td align=right>memp_fput</td><td><a href="../api_c/memp_fput.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a></td></tr>
+<tr><td align=right>memp_fset</td><td><a href="../api_c/memp_fset.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a></td></tr>
+<tr><td align=right>memp_fput</td><td><a href="../api_c/memp_fput.html#DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a></td></tr>
+<tr><td align=right>memp_fset</td><td><a href="../api_c/memp_fset.html#DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a></td></tr>
+<tr><td align=right>memp_fget</td><td><a href="../api_c/memp_fget.html#DB_MPOOL_LAST">DB_MPOOL_LAST</a></td></tr>
+<tr><td align=right>memp_fget</td><td><a href="../api_c/memp_fget.html#DB_MPOOL_NEW">DB_MPOOL_NEW</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>log_get</td><td><a href="../api_c/log_get.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_NEXT_DUP">DB_NEXT_DUP</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_NEXT_NODUP">DB_NEXT_NODUP</a></td></tr>
+<tr><td align=right>DB-&gt;put</td><td><a href="../api_c/db_put.html#DB_NODUPDATA">DB_NODUPDATA</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put</td><td><a href="../api_c/dbc_put.html#DB_NODUPDATA">DB_NODUPDATA</a></td></tr>
+<tr><td align=right>DB-&gt;open</td><td><a href="../api_c/db_open.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_flags</td><td><a href="../api_c/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>memp_fopen</td><td><a href="../api_c/memp_fopen.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DB-&gt;verify</td><td><a href="../api_c/db_verify.html#DB_NOORDERCHK">DB_NOORDERCHK</a></td></tr>
+<tr><td align=right>DB-&gt;put</td><td><a href="../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_server</td><td><a href="../api_c/env_set_server.html#DB_NOSERVER">DB_NOSERVER</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_server</td><td><a href="../api_c/env_set_server.html#DB_NOSERVER_ID">DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>DB-&gt;close</td><td><a href="../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a></td></tr>
+<tr><td align=right>DB-&gt;open</td><td><a href="../api_c/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>DB-&gt;upgrade</td><td><a href="../api_c/db_upgrade.html#DB_OLD_VERSION">DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>DB-&gt;verify</td><td><a href="../api_c/db_verify.html#DB_ORDERCHKONLY">DB_ORDERCHKONLY</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_dup</td><td><a href="../api_c/dbc_dup.html#DB_POSITION">DB_POSITION</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_PREV">DB_PREV</a></td></tr>
+<tr><td align=right>log_get</td><td><a href="../api_c/log_get.html#DB_PREV">DB_PREV</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_PREV_NODUP">DB_PREV_NODUP</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_printlog.html#2">db_printlog</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a></td></tr>
+<tr><td align=right>DB-&gt;open</td><td><a href="../api_c/db_open.html#DB_RDONLY">DB_RDONLY</a></td></tr>
+<tr><td align=right>memp_fopen</td><td><a href="../api_c/memp_fopen.html#DB_RDONLY">DB_RDONLY</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags</td><td><a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a></td></tr>
+<tr><td align=right>DB-&gt;stat</td><td><a href="../api_c/db_stat.html#DB_RECORDCOUNT">DB_RECORDCOUNT</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_feedback</td><td><a href="../api_c/env_set_feedback.html#DB_RECOVER">DB_RECOVER</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_recover.html#2">db_recover</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags</td><td><a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags</td><td><a href="../api_c/db_set_flags.html#DB_REVSPLITOFF">DB_REVSPLITOFF</a></td></tr>
+<tr><td align=right>DB-&gt;get</td><td><a href="../api_c/db_get.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>DB-&gt;join</td><td><a href="../api_c/db_join.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a></td></tr>
+<tr><td align=right>DB-&gt;verify</td><td><a href="../api_c/db_verify.html#DB_SALVAGE">DB_SALVAGE</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_SET">DB_SET</a></td></tr>
+<tr><td align=right>log_get</td><td><a href="../api_c/log_get.html#DB_SET">DB_SET</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a></td></tr>
+<tr><td align=right>DB-&gt;get</td><td><a href="../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get</td><td><a href="../api_c/dbc_get.html#DB_SET_RECNO">DB_SET_RECNO</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags</td><td><a href="../api_c/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_stat.html#2">db_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_strerror.html#2">db_strerror</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a></td></tr>
+<tr><td align=right>DB-&gt;open</td><td><a href="../api_c/db_open.html#DB_THREAD">DB_THREAD</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a></td></tr>
+<tr><td align=right>DB-&gt;open</td><td><a href="../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_tx_recover</td><td><a href="../api_c/env_set_tx_recover.html#DB_TXN_ABORT">DB_TXN_ABORT</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_tx_recover</td><td><a href="../api_c/env_set_tx_recover.html#DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_tx_recover</td><td><a href="../api_c/env_set_tx_recover.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_flags</td><td><a href="../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>txn_begin</td><td><a href="../api_c/txn_begin.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>txn_commit</td><td><a href="../api_c/txn_commit.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>txn_begin</td><td><a href="../api_c/txn_begin.html#DB_TXN_NOWAIT">DB_TXN_NOWAIT</a></td></tr>
+<tr><td align=right>txn_begin</td><td><a href="../api_c/txn_begin.html#DB_TXN_SYNC">DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>txn_commit</td><td><a href="../api_c/txn_commit.html#DB_TXN_SYNC">DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DB-&gt;set_feedback</td><td><a href="../api_c/db_set_feedback.html#DB_UPGRADE">DB_UPGRADE</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_upgrade.html#2">db_upgrade</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DBENV-&gt;remove</td><td><a href="../api_c/env_remove.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DBENV-&gt;open</td><td><a href="../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DBENV-&gt;remove</td><td><a href="../api_c/env_remove.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_verbose</td><td><a href="../api_c/env_set_verbose.html#DB_VERB_CHKPOINT">DB_VERB_CHKPOINT</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_verbose</td><td><a href="../api_c/env_set_verbose.html#DB_VERB_DEADLOCK">DB_VERB_DEADLOCK</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_verbose</td><td><a href="../api_c/env_set_verbose.html#DB_VERB_RECOVERY">DB_VERB_RECOVERY</a></td></tr>
+<tr><td align=right>DBENV-&gt;set_verbose</td><td><a href="../api_c/env_set_verbose.html#DB_VERB_WAITSFOR">DB_VERB_WAITSFOR</a></td></tr>
+<tr><td align=right>DB-&gt;set_feedback</td><td><a href="../api_c/db_set_feedback.html#DB_VERIFY">DB_VERIFY</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_verify.html#2">db_verify</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/env_version.html#2">db_version</a></td></tr>
+<tr><td align=right>DB-&gt;cursor</td><td><a href="../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a></td></tr>
+<tr><td align=right>db_create</td><td><a href="../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/dead.html#2">deadlocks</a></td></tr>
+<tr><td align=right> utility to detect </td><td><a href="../utility/db_deadlock.html#3">deadlocks</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/debug/common.html#2">debugging</a> applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/delete.html#2">deleting</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curdel.html#2">deleting</a> records with a cursor</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--disable-bigfile">--disable-bigfile</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/diskspace.html#2">disk</a> space requirements</td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#dlen">dlen</a></td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#doff">doff</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_dump.html#3">dump</a> databases as text files</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/dup.html#2">duplicate</a> data items</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curdup.html#2">duplicating</a> a cursor</td></tr>
+<tr><td align=right> configuring </td><td><a href="../ref/build_unix/conf.html#9">dynamic</a> shared libraries</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-compat185">--enable-compat185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-cxx">--enable-cxx</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug">--enable-debug</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug_rop">--enable-debug_rop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug_wop">--enable-debug_wop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-dump185">--enable-dump185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-dynamic">--enable-dynamic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-java">--enable-java</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-rpc">--enable-rpc</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-shared">--enable-shared</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-tcl">--enable-tcl</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-test">--enable-test</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a></td></tr>
+<tr><td align=right> byte </td><td><a href="../ref/program/byteorder.html#3">endian</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/env/create.html#2">environment</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/environ.html#2">environment</a> variables</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/error.html#2">error</a> handling</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/errorret.html#3">error</a> name space</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/errorret.html#2">error</a> returns</td></tr>
+<tr><td align=right> </td><td><a href="../ref/install/file.html#2">/etc/magic</a></td></tr>
+<tr><td align=right> selecting a Queue </td><td><a href="../ref/am_conf/extentsize.html#2">extent</a> size</td></tr>
+<tr><td align=right> Java </td><td><a href="../ref/java/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right> Tcl </td><td><a href="../ref/tcl/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right> configuring without large </td><td><a href="../ref/build_unix/conf.html#4">file</a> support</td></tr>
+<tr><td align=right> </td><td><a href="../ref/install/file.html#3">file</a> utility</td></tr>
+<tr><td align=right>memp_fopen</td><td><a href="../api_c/memp_fopen.html#fileid">fileid</a></td></tr>
+<tr><td align=right> recovery and </td><td><a href="../ref/transapp/filesys.html#2">filesystem</a> operations</td></tr>
+<tr><td align=right> remote </td><td><a href="../ref/env/remote.html#2">filesystems</a></td></tr>
+<tr><td align=right> page </td><td><a href="../ref/am_conf/h_ffactor.html#2">fill</a> factor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/freebsd.html#2">FreeBSD</a></td></tr>
+<tr><td align=right> Berkeley DB </td><td><a href="../ref/program/scope.html#3">free-threaded</a> handles</td></tr>
+<tr><td align=right>memp_fopen</td><td><a href="../api_c/memp_fopen.html#ftype">ftype</a></td></tr>
+<tr><td align=right> specifying a database </td><td><a href="../ref/am_conf/h_hash.html#2">hash</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/h_nelem.html#2">hash</a> table size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/hpux.html#2">HP-UX</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/hsearch.html#2">hsearch</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/install.html#2">installing</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/compatible.html#2">interface</a> compatibility</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/irix.html#2">IRIX</a></td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#10">Java</a> API</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/compat.html#2">Java</a> compatibility</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/conf.html#2">Java</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/faq.html#3">Java</a> FAQ</td></tr>
+<tr><td align=right> logical </td><td><a href="../ref/am/join.html#2">join</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/dbt.html#2">key/data</a> pairs</td></tr>
+<tr><td align=right> retrieved </td><td><a href="../api_c/dbt.html#4">key/data</a> permanence</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/program/dbsizes.html#2">limits</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/linux.html#2">Linux</a></td></tr>
+<tr><td align=right> changing compile or </td><td><a href="../ref/build_unix/flags.html#3">load</a> options</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_load.html#3">load</a> text files into databases</td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#lock">lock</a></td></tr>
+<tr><td align=right> standard </td><td><a href="../ref/lock/stdmode.html#2">lock</a> modes</td></tr>
+<tr><td align=right> </td><td><a href="../api_c/lock_detect.html#2">lock_detect</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/lock_get.html#2">lock_get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/lock_id.html#2">lock_id</a></td></tr>
+<tr><td align=right> page-level </td><td><a href="../ref/lock/page.html#2">locking</a></td></tr>
+<tr><td align=right> two-phase </td><td><a href="../ref/lock/twopl.html#2">locking</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/nondb.html#2">locking</a> and non-Berkeley DB applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/config.html#2">locking</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/am_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right> Berkeley DB Concurrent Data Store </td><td><a href="../ref/lock/cam_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/intro.html#2">locking</a> introduction</td></tr>
+<tr><td align=right> sizing the </td><td><a href="../ref/lock/max.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/notxn.html#2">locking</a> without transactions</td></tr>
+<tr><td align=right> </td><td><a href="../api_c/lock_put.html#2">lock_put</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/lock_stat.html#2">lock_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/lock_vec.html#2">lock_vec</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/limits.html#2">log</a> file limits</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/logfile.html#2">log</a> file removal</td></tr>
+<tr><td align=right> utility to display </td><td><a href="../utility/db_printlog.html#3">log</a> files as text</td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_archive.html#2">log_archive</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_compare.html#2">log_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_file.html#2">log_file</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_flush.html#2">log_flush</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_get.html#2">log_get</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/config.html#2">logging</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/intro.html#2">logging</a> introduction</td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_put.html#2">log_put</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_register.html#2">log_register</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_stat.html#2">log_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/log_unregister.html#2">log_unregister</a></td></tr>
+<tr><td align=right>memp_fopen</td><td><a href="../api_c/memp_fopen.html#lsn_offset">lsn_offset</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/mp/config.html#2">memory</a> pool configuration</td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_fclose.html#2">memp_fclose</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_fget.html#2">memp_fget</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_fopen.html#2">memp_fopen</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_fput.html#2">memp_fput</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_fset.html#2">memp_fset</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_fsync.html#2">memp_fsync</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_register.html#2">memp_register</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_stat.html#2">memp_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_sync.html#2">memp_sync</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/memp_trickle.html#2">memp_trickle</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#mode">mode</a></td></tr>
+<tr><td align=right> Berkeley DB library </td><td><a href="../ref/program/namespace.html#2">name</a> spaces</td></tr>
+<tr><td align=right> file </td><td><a href="../ref/env/naming.html#2">naming</a></td></tr>
+<tr><td align=right> retrieving Btree records by </td><td><a href="../ref/am_conf/bt_recnum.html#2">number</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#obj">obj</a></td></tr>
+<tr><td align=right>lock_vec</td><td><a href="../api_c/lock_vec.html#op">op</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/open.html#2">opening</a> a database</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/osf1.html#2">OSF/1</a></td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/pagesize.html#2">page</a> size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/partial.html#2">partial</a> record storage and retrieval</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/update/index.html">Patches,</a> Updates and Change logs</td></tr>
+<tr><td align=right> </td><td><a href="../ref/perl/intro.html#2">Perl</a></td></tr>
+<tr><td align=right> retrieved key/data </td><td><a href="../api_c/dbt.html#3">permanence</a></td></tr>
+<tr><td align=right>memp_fopen</td><td><a href="../api_c/memp_fopen.html#pgcookie">pgcookie</a></td></tr>
+<tr><td align=right> Sleepycat Software's Berkeley DB </td><td><a href="../ref/intro/products.html#2">products</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/qnx.html#2">QNX</a></td></tr>
+<tr><td align=right> logical </td><td><a href="../api_c/dbt.html#6">record</a> number format</td></tr>
+<tr><td align=right> logical </td><td><a href="../ref/am_conf/logrec.html#2">record</a> numbers</td></tr>
+<tr><td align=right> managing </td><td><a href="../ref/am_conf/recno.html#2">record-based</a> databases</td></tr>
+<tr><td align=right> logically renumbering </td><td><a href="../ref/am_conf/renumber.html#2">records</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_recover.html#3">recover</a> database environments</td></tr>
+<tr><td align=right> Berkeley DB </td><td><a href="../ref/transapp/reclimit.html#2">recoverability</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/get.html#2">retrieving</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curget.html#2">retrieving</a> records with a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/rpc/client.html#2">RPC</a> client</td></tr>
+<tr><td align=right> configuring an </td><td><a href="../ref/build_unix/conf.html#11">RPC</a> client/server</td></tr>
+<tr><td align=right> utility to support </td><td><a href="../utility/berkeley_db_svc.html#3">RPC</a> client/server</td></tr>
+<tr><td align=right> </td><td><a href="../ref/rpc/server.html#2">RPC</a> server</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/verify.html#3">salvage</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/sco.html#2">SCO</a></td></tr>
+<tr><td align=right> Berkeley DB handle </td><td><a href="../ref/program/scope.html#2">scope</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/env/security.html#2">security</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/sendmail/intro.html#2">Sendmail</a></td></tr>
+<tr><td align=right> configuring </td><td><a href="../ref/build_unix/conf.html#8">shared</a> libraries</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/shlib.html#2">shared</a> libraries</td></tr>
+<tr><td align=right> application </td><td><a href="../ref/program/appsignals.html#2">signal</a> handling</td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#size">size</a></td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/">Sleepycat</a> Software</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/solaris.html#2">Solaris</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/distrib/layout.html#2">source</a> code layout</td></tr>
+<tr><td align=right> cursor </td><td><a href="../ref/am/stability.html#3">stability</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/stat.html#2">statistics</a></td></tr>
+<tr><td align=right> utility to display database and environment </td><td><a href="../utility/db_stat.html#3">statistics</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/put.html#2">storing</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curput.html#2">storing</a> records with a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/sunos.html#2">SunOS</a></td></tr>
+<tr><td align=right> loading Berkeley DB with </td><td><a href="../ref/tcl/intro.html#2">Tcl</a></td></tr>
+<tr><td align=right> using Berkeley DB with </td><td><a href="../ref/tcl/using.html#2">Tcl</a></td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#12">Tcl</a> API</td></tr>
+<tr><td align=right> </td><td><a href="../ref/tcl/program.html#2">Tcl</a> API programming notes</td></tr>
+<tr><td align=right> </td><td><a href="../ref/tcl/faq.html#3">Tcl</a> FAQ</td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#13">test</a> suite</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/test/run.html#2">test</a> suite</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/build_unix/test.html#2">test</a> suite under UNIX</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/build_win/test.html#2">test</a> suite under Windows</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/re_source.html#2">text</a> backing files</td></tr>
+<tr><td align=right> loading </td><td><a href="../ref/dumpload/text.html#2">text</a> into databases</td></tr>
+<tr><td align=right> dumping/loading </td><td><a href="../ref/dumpload/utility.html#2">text</a> to/from databases</td></tr>
+<tr><td align=right> building </td><td><a href="../ref/program/mt.html#2">threaded</a> applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/txn/config.html#2">transaction</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/txn/limits.html#2">transaction</a> limits</td></tr>
+<tr><td align=right> administering </td><td><a href="../ref/transapp/admin.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> archival in </td><td><a href="../ref/transapp/archival.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> checkpoints in </td><td><a href="../ref/transapp/checkpoint.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> deadlock detection in </td><td><a href="../ref/transapp/deadlock.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> recovery in </td><td><a href="../ref/transapp/recovery.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/throughput.html#2">transaction</a> throughput</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/intro.html#2">Transactional</a> Data Store</td></tr>
+<tr><td align=right> Berkeley DB and </td><td><a href="../ref/txn/intro.html#2">transactions</a></td></tr>
+<tr><td align=right> nested </td><td><a href="../ref/txn/nested.html#2">transactions</a></td></tr>
+<tr><td align=right> configuring Berkeley DB with the </td><td><a href="../ref/xa/config.html#2">Tuxedo</a> System</td></tr>
+<tr><td align=right> </td><td><a href="../api_c/txn_abort.html#2">txn_abort</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/txn_begin.html#2">txn_begin</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/txn_checkpoint.html#2">txn_checkpoint</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/txn_commit.html#2">txn_commit</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/txn_id.html#2">txn_id</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/txn_prepare.html#2">txn_prepare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_c/txn_stat.html#2">txn_stat</a></td></tr>
+<tr><td align=right>DBT</td><td><a href="../api_c/dbt.html#ulen">ulen</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/ultrix.html#2">Ultrix</a></td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_unix/notes.html#3">UNIX</a> FAQ</td></tr>
+<tr><td align=right> configuring Berkeley DB for </td><td><a href="../ref/build_unix/conf.html#3">UNIX</a> systems</td></tr>
+<tr><td align=right>Patches, </td><td><a href="http://www.sleepycat.com/update/index.html">Updates</a> and Change logs</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_upgrade.html#4">upgrade</a> database files</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/upgrade.html#2">upgrading</a> databases</td></tr>
+<tr><td align=right> </td><td><a href="../ref/arch/utilities.html#2">utilities</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/verify.html#2">verification</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_verify.html#4">verify</a> database files</td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_vxworks/faq.html#3">VxWorks</a> FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/notes.html#2">VxWorks</a> notes</td></tr>
+<tr><td align=right> running the test suite under </td><td><a href="../ref/build_win/test.html#3">Windows</a></td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_win/faq.html#3">Windows</a> FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/notes.html#2">Windows</a> notes</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--with-tcl=DIR">--with-tcl=DIR</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/xa/intro.html#2">XA</a> Resource Manager</td></tr>
+</table>
+</center>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_close.html b/bdb/docs/api_c/db_close.html
new file mode 100644
index 00000000000..f6c763c5b12
--- /dev/null
+++ b/bdb/docs/api_c/db_close.html
@@ -0,0 +1,119 @@
+<!--$Id: db_close.so,v 10.27 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;close</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;close(DB *db, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;close function flushes any cached database information to disk,
+closes any open cursors, frees any allocated resources, and closes any
+underlying files. Since key/data pairs are cached in memory, failing to
+sync the file with the DB-&gt;close or <a href="../api_c/db_sync.html">DB-&gt;sync</a> function may result
+in inconsistent or lost information.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_NOSYNC">DB_NOSYNC</a><dd>Do not flush cached information to disk.
+<p>The <a href="../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag is a dangerous option. It should only be set
+if the application is doing logging (with transactions) so that the
+database is recoverable after a system or application crash, or if the
+database is always generated from scratch after any system or application
+crash.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b>
+While unlikely, it is possible for database corruption to happen if a
+system or application crash occurs while writing data to the database.
+To ensure that database corruption never occurs, applications must do one of
+the following: use transactions and logging with automatic recovery; use
+logging and application-specific recovery; or edit a copy of the database
+and, once all applications using the database have successfully called
+DB-&gt;close, atomically replace the original database with the
+updated copy.
+</dl>
+<p>When multiple threads are using the Berkeley DB handle concurrently, only a single
+thread may call the DB-&gt;close function.
+<p>Once DB-&gt;close has been called, regardless of its return, the
+DB handle may not be accessed again.
+ <a name="3"><!--meow--></a>
+<p>The DB-&gt;close function returns a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if the underlying database still has
+dirty pages in the cache. (The only reason to return
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> is if another thread of control was writing pages
+in the underlying database file at the same time as the
+DB-&gt;close function was called. For this reason, a return of
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> can normally be ignored, or, in cases where it is
+a possible return value, the <a href="../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a> option should probably
+have been specified.)
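+<p>As a minimal sketch, an application holding an open handle might close it
+and report any error as follows (the helper name close_db is purely
+illustrative, and &lt;stdio.h&gt; is included only for the error report):
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+/*
+ * Close a previously opened database handle, flushing cached pages,
+ * closing any open cursors and releasing the handle's resources.
+ * Returns 0 on success, a Berkeley DB error value on failure.
+ */
+int
+close_db(DB *db)
+{
+	int ret;
+
+	if ((ret = db-&gt;close(db, 0)) != 0)
+		fprintf(stderr, "DB-&gt;close: %s\n", db_strerror(ret));
+	/* The handle may not be accessed again, regardless of the return. */
+	return (ret);
+}
+</pre>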
+<h1>Errors</h1>
+<p>The DB-&gt;close function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;close function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_create.html b/bdb/docs/api_c/db_create.html
new file mode 100644
index 00000000000..c1fd3fca10b
--- /dev/null
+++ b/bdb/docs/api_c/db_create.html
@@ -0,0 +1,107 @@
+<!--$Id: db_create.so,v 10.12 2000/10/25 18:51:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_create</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_create</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_create(DB **dbp, DB_ENV *dbenv, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The db_create function creates a DB structure which is the
+handle for a Berkeley DB database. A pointer to this structure is returned
+in the memory referenced by <b>dbp</b>.
+<p>If the <b>dbenv</b> argument is NULL, the database is standalone, i.e.,
+it is not part of any Berkeley DB environment.
+<p>If the <b>dbenv</b> argument is not NULL, the database is created within
+the specified Berkeley DB environment. The database access methods
+automatically make calls to the other subsystems in Berkeley DB based on the
+enclosing environment. For example, if the environment has been
+configured to use locking, then the access methods will automatically
+acquire the correct locks when reading and writing pages of the database.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_XA_CREATE">DB_XA_CREATE</a><dd>Instead of creating a standalone database, create a database intended to
+be accessed by applications running under an X/Open-conformant Transaction
+Manager. The database will be opened in the environment specified by the
+OPENINFO parameter of the GROUPS section of the ubbconfig file. See the
+<a href="../ref/xa/intro.html">XA Resource Manager</a> chapter in the
+Reference Guide for more information.
+</dl>
+<p>The DB handle contains a special field, "app_private", which
+is declared as type "void *". This field is provided for the use of
+the application program. It is initialized to NULL and is not further
+used by Berkeley DB in any way.
+<p>The db_create function returns a non-zero error value on failure and 0 on success.
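+<p>As a minimal sketch, a standalone handle might be created and opened as
+follows (the helper name open_db and the file name access.db are purely
+illustrative):
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+int
+open_db(DB **dbpp)
+{
+	DB *dbp;
+	int ret;
+
+	/* Create a standalone handle: no enclosing environment. */
+	if ((ret = db_create(&amp;dbp, NULL, 0)) != 0) {
+		fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+		return (ret);
+	}
+	/* Open (or create) the underlying Btree database file. */
+	if ((ret = dbp-&gt;open(dbp,
+	    "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+		dbp-&gt;err(dbp, ret, "open: access.db");
+		(void)dbp-&gt;close(dbp, 0);
+		return (ret);
+	}
+	*dbpp = dbp;
+	return (0);
+}
+</pre>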
+<h1>Errors</h1>
+<p>The db_create function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_create function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_cursor.html b/bdb/docs/api_c/db_cursor.html
new file mode 100644
index 00000000000..1fb6616ab63
--- /dev/null
+++ b/bdb/docs/api_c/db_cursor.html
@@ -0,0 +1,103 @@
+<!--$Id: db_cursor.so,v 10.25 2000/07/11 19:11:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;cursor</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;cursor(DB *db,
+ DB_TXN *txnid, DBC **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;cursor function
+creates a cursor and copies a pointer to it into the memory referenced
+by <b>cursorp</b>.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_c/txn_begin.html">txn_begin</a>, otherwise, NULL.
+<p>If transaction protection is enabled, cursors must be opened and closed
+within the context of a transaction, and the <b>txnid</b> parameter
+specifies the transaction context in which the cursor may be used.
+<p>The <b>flags</b> value must be set to 0 or to one or more of the following
+values, bitwise inclusively <b>OR</b>'d together.
+<p><dl compact>
+<p><dt><a name="DB_WRITECURSOR">DB_WRITECURSOR</a><dd>Specify that the cursor will be used to update the database. This
+flag should <b>only</b> be set when the <a href="../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag
+was specified to <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<p>The DB-&gt;cursor function returns a non-zero error value on failure and 0 on success.
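+<p>As a minimal sketch, a non-transactional application might open a cursor
+and walk the database, printing each key as a string (the helper name
+dump_keys is purely illustrative):
+<p><pre>
+#include &lt;string.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+int
+dump_keys(DB *db)
+{
+	DBC *dbc;
+	DBT key, data;
+	int ret, t_ret;
+
+	/* No transaction, no special flags. */
+	if ((ret = db-&gt;cursor(db, NULL, &amp;dbc, 0)) != 0)
+		return (ret);
+
+	memset(&amp;key, 0, sizeof(key));
+	memset(&amp;data, 0, sizeof(data));
+	while ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_NEXT)) == 0)
+		printf("%.*s\n", (int)key.size, (char *)key.data);
+	if (ret == DB_NOTFOUND)		/* Normal end of the database. */
+		ret = 0;
+
+	/* Always close the cursor, preserving the first error seen. */
+	if ((t_ret = dbc-&gt;c_close(dbc)) != 0 &amp;&amp; ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+</pre>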
+<h1>Errors</h1>
+<p>The DB-&gt;cursor function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;cursor function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;cursor function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_del.html b/bdb/docs/api_c/db_del.html
new file mode 100644
index 00000000000..b273d29fd14
--- /dev/null
+++ b/bdb/docs/api_c/db_del.html
@@ -0,0 +1,101 @@
+<!--$Id: db_del.so,v 10.23 2000/09/05 19:35:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;del</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;del(DB *db, DB_TXN *txnid, DBT *key, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;del function removes key/data pairs from the database. The
+key/data pair associated with the specified <b>key</b> is discarded from
+the database. In the presence of duplicate key values, all records
+associated with the designated key will be discarded.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_c/txn_begin.html">txn_begin</a>, otherwise, NULL.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB-&gt;del function returns a non-zero error value on failure, 0 on success, and <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a> if the specified <b>key</b> did not exist in
+the file.
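+<p>As a minimal sketch, a non-transactional application might delete all
+records stored under a NUL-terminated string key (the helper name
+delete_by_string_key is purely illustrative):
+<p><pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+delete_by_string_key(DB *db, const char *str)
+{
+	DBT key;
+	int ret;
+
+	memset(&amp;key, 0, sizeof(key));
+	key.data = (void *)str;
+	key.size = (u_int32_t)strlen(str) + 1;	/* Include the trailing NUL. */
+
+	/* DB_NOTFOUND simply means there was nothing to delete. */
+	if ((ret = db-&gt;del(db, NULL, &amp;key, 0)) == DB_NOTFOUND)
+		ret = 0;
+	return (ret);
+}
+</pre>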
+<h1>Errors</h1>
+<p>The DB-&gt;del function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;del function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;del function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_err.html b/bdb/docs/api_c/db_err.html
new file mode 100644
index 00000000000..1eae12fabf0
--- /dev/null
+++ b/bdb/docs/api_c/db_err.html
@@ -0,0 +1,93 @@
+<!--$Id: db_err.so,v 10.11 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;err</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;err</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DBENV-&gt;err(DB_ENV *dbenv, int error, const char *fmt, ...);
+<p>
+void
+DBENV-&gt;errx(DB_ENV *dbenv, const char *fmt, ...);
+<p>
+void
+DB-&gt;err(DB *db, int error, const char *fmt, ...);
+<p>
+void
+DB-&gt;errx(DB *db, const char *fmt, ...);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBENV-&gt;err, DBENV-&gt;errx, DB-&gt;err and
+DB-&gt;errx functions provide error messaging functionality for
+applications written using the Berkeley DB library.
+<p>The DBENV-&gt;err function constructs an error message consisting of the
+following elements:
+<p><blockquote><p><dl compact>
+<p><dt>An optional prefix string<dd>If no error callback function has been set using the
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a> function, any prefix string specified using the
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a> function, followed by two separating characters: a colon
+and a &lt;space&gt; character.
+<p><dt>An optional printf-style message<dd>The supplied message <b>fmt</b>, if non-NULL, where the ANSI C X3.159-1989 (ANSI C)
+printf function specifies how subsequent arguments are converted for
+output.
+<p><dt>A separator<dd>Two separating characters: a colon and a &lt;space&gt; character.
+<p><dt>A standard error string<dd>The standard system or Berkeley DB library error string associated with the
+<b>error</b> value, as returned by the <a href="../api_c/env_strerror.html">db_strerror</a> function.
+</dl>
+</blockquote>
+<p>This constructed error message is then handled as follows:
+<p><blockquote>
+<p>If an error callback function has been set (see <a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>
+and <a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>), that function is called with two
+arguments: any prefix string specified (see <a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> and
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>), and the error message.
+<p>If a C library FILE * has been set (see <a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> and
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>), the error message is written to that output
+stream.
+<p>If none of these output options has been configured, the error message
+is written to stderr, the standard error output stream.</blockquote>
+<p>The DBENV-&gt;errx and DB-&gt;errx functions perform identically to the
+DBENV-&gt;err and DB-&gt;err functions except that they do not append
+the final separator characters and standard error string to the error
+message.
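+<p>As a minimal sketch, an application might report a failed environment open
+both with and without the standard error string appended (the helper name
+report_open_failure is purely illustrative):
+<p><pre>
+#include &lt;db.h&gt;
+
+void
+report_open_failure(DB_ENV *dbenv, int ret, const char *home)
+{
+	/* Prefix (if any), formatted message, then the db_strerror text. */
+	dbenv-&gt;err(dbenv, ret, "DBENV-&gt;open: %s", home);
+
+	/* Prefix (if any) and formatted message only. */
+	dbenv-&gt;errx(dbenv, "giving up on environment %s", home);
+}
+</pre>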
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_fd.html b/bdb/docs/api_c/db_fd.html
new file mode 100644
index 00000000000..2a385c1b3d1
--- /dev/null
+++ b/bdb/docs/api_c/db_fd.html
@@ -0,0 +1,92 @@
+<!--$Id: db_fd.so,v 10.21 2000/03/01 21:41:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;fd</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;fd</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;fd(DB *db, int *fdp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;fd function
+copies a file descriptor representative of the underlying database into
+the memory referenced by <b>fdp</b>. A file descriptor referencing the
+same file will be returned to all processes that call <a href="../api_c/db_open.html">DB-&gt;open</a> with
+the same <b>file</b> argument. This file descriptor may be safely used
+as an argument to the <b>fcntl</b>(2) and <b>flock</b>(2) locking
+functions. The file descriptor is not necessarily associated with any of
+the underlying files actually used by the access method.
+<p>The DB-&gt;fd function only supports a coarse-grained form of locking.
+Applications should use the lock manager where possible.
+<p>The DB-&gt;fd function returns a non-zero error value on failure and 0 on success.
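+<p>As a minimal sketch, an application might retrieve the descriptor and take
+a whole-file advisory lock with flock(2) (the helper name lock_db_file is
+purely illustrative):
+<p><pre>
+#include &lt;sys/file.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;db.h&gt;
+
+int
+lock_db_file(DB *db)
+{
+	int fd, ret;
+
+	if ((ret = db-&gt;fd(db, &amp;fd)) != 0)
+		return (ret);
+
+	/* Coarse-grained, whole-file advisory lock. */
+	if (flock(fd, LOCK_EX) != 0)
+		return (errno);
+	return (0);
+}
+</pre>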
+<h1>Errors</h1>
+<p>The DB-&gt;fd function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;fd function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_get.html b/bdb/docs/api_c/db_get.html
new file mode 100644
index 00000000000..c6cc3fcce43
--- /dev/null
+++ b/bdb/docs/api_c/db_get.html
@@ -0,0 +1,156 @@
+<!--$Id: db_get.so,v 10.31 2000/11/28 20:12:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;get</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;get(DB *db,
+ DB_TXN *txnid, DBT *key, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;get function retrieves key/data pairs from the database. The
+address
+and length of the data associated with the specified <b>key</b> are
+returned in the structure referenced by <b>data</b>.
+<p>In the presence of duplicate key values, DB-&gt;get will return the
+first data item for the designated key. Duplicates are sorted by insert
+order except where this order has been overridden by cursor operations.
+<b>Retrieval of duplicates requires the use of cursor operations.</b>
+See <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> for details.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_c/txn_begin.html">txn_begin</a>, otherwise, NULL.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_CONSUME">DB_CONSUME</a><dd>Return the record number and data from the available record closest to
+the head of the queue and delete the record. The cursor will be
+positioned on the deleted record. The record number will be returned
+in <b>key</b> as described in <a href="../api_c/dbt.html">DBT</a>. The data will be returned
+in the <b>data</b> parameter. A record is available if it is not
+deleted and is not currently locked. The underlying database must be
+of type Queue for DB_CONSUME to be specified.
+<p><dt><a name="DB_CONSUME_WAIT">DB_CONSUME_WAIT</a><dd>The DB_CONSUME_WAIT flag is the same as the DB_CONSUME
+flag except that if the Queue database is empty, the thread of control
+will wait until there is data in the queue before returning. The
+underlying database must be of type Queue for DB_CONSUME_WAIT
+to be specified.
+<p><dt><a name="DB_GET_BOTH">DB_GET_BOTH</a><dd>Retrieve the key/data pair only if both the key and data match the
+arguments.
+<p><dt><a name="DB_SET_RECNO">DB_SET_RECNO</a><dd>Retrieve the specified numbered key/data pair from a database.
+Upon return, both the <b>key</b> and <b>data</b> items will have been
+filled in, not just the data item as is done for all other uses of the
+DB-&gt;get function.
+<p>The <b>data</b> field of the specified <b>key</b>
+must be a pointer to a logical record number (i.e., a <b>db_recno_t</b>).
+This record number determines the record to be retrieved.
+<p>For DB_SET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the DB_RECNUM flag.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+<p>As the DB-&gt;get interface will not hold locks across
+Berkeley DB interface calls in non-transactional environments, the
+<a href="../api_c/dbc_get.html#DB_RMW">DB_RMW</a> flag to the DB-&gt;get call is only meaningful in
+the presence of transactions.
+</dl>
+<p>If the database is a Queue or Recno database and the requested key exists,
+but was never explicitly created by the application or was later deleted,
+the DB-&gt;get function returns <a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>Otherwise, if the requested key is not in the database, the
+DB-&gt;get function returns <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>Otherwise, the DB-&gt;get function returns a non-zero error value on failure and 0 on success.
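+<p>For example, a minimal sketch of a single-key lookup, assuming an
+already-open handle and NUL-terminated string keys (the function name
+is illustrative only and error handling is abbreviated):
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+fetch(DB *dbp, char *keystr)
+{
+        DBT key, data;
+        int ret;
+
+        /* The DBTs must be cleared before use. */
+        memset(&amp;key, 0, sizeof(key));
+        memset(&amp;data, 0, sizeof(data));
+        key.data = keystr;
+        key.size = strlen(keystr) + 1;
+
+        /*
+         * With the default flags, the returned data points into memory
+         * managed by Berkeley DB and is valid only until the next call
+         * using this handle.
+         */
+        switch (ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0)) {
+        case 0:
+                printf("%s: %.*s\n",
+                    keystr, (int)data.size, (char *)data.data);
+                return (0);
+        case DB_NOTFOUND:
+                printf("%s: not found\n", keystr);
+                return (0);
+        default:
+                dbp-&gt;err(dbp, ret, "DB-&gt;get");
+                return (ret);
+        }
+}</pre></blockquote>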
+<h1>Errors</h1>
+<p>The DB-&gt;get function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>There was insufficient memory to return the requested item.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>The <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to the
+<a href="../api_c/db_open.html">DB-&gt;open</a> function and none of the <a href="../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>,
+<a href="../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or <a href="../api_c/dbt.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags were set in the
+<a href="../api_c/dbt.html">DBT</a>.
+</dl>
+<p>The DB-&gt;get function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;get function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_get_byteswapped.html b/bdb/docs/api_c/db_get_byteswapped.html
new file mode 100644
index 00000000000..205ddb79467
--- /dev/null
+++ b/bdb/docs/api_c/db_get_byteswapped.html
@@ -0,0 +1,84 @@
+<!--$Id: db_get_byteswapped.so,v 10.7 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;get_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;get_byteswapped</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;get_byteswapped(DB *db);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;get_byteswapped function returns
+0
+if the underlying database files were created on an architecture
+of the same byte order as the current one, and
+1
+if they were not (i.e., big-endian on a little-endian machine or
+vice versa). This value may be used to determine whether application
+data needs to be adjusted for this architecture.
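+<p>For example, a sketch of adjusting a 32-bit integer read from the
+database to the host byte order (the helper name is illustrative only):
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+u_int32_t
+db_to_host_u32(DB *dbp, u_int32_t v)
+{
+        /* Swap the four bytes only if the database's order differs. */
+        if (dbp-&gt;get_byteswapped(dbp))
+                v = ((v &amp; 0xff) &lt;&lt; 24) | ((v &amp; 0xff00) &lt;&lt; 8) |
+                    ((v &gt;&gt; 8) &amp; 0xff00) | (v &gt;&gt; 24);
+        return (v);
+}</pre></blockquote>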
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_get_type.html b/bdb/docs/api_c/db_get_type.html
new file mode 100644
index 00000000000..a1905c782e4
--- /dev/null
+++ b/bdb/docs/api_c/db_get_type.html
@@ -0,0 +1,81 @@
+<!--$Id: db_get_type.so,v 10.10 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;get_type</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+DBTYPE
+DB-&gt;get_type(DB *db);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;get_type function returns the type of the underlying access method
+(and file format). It returns one of DB_BTREE,
+DB_HASH or DB_RECNO. This value may be used to
+determine the type of the database after a return from <a href="../api_c/db_open.html">DB-&gt;open</a>
+with the <b>type</b> argument set to DB_UNKNOWN.
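+<p>For example, a sketch that maps the returned type to a printable
+name after opening a database with type DB_UNKNOWN (the helper
+name is illustrative only):
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+const char *
+type_name(DB *dbp)
+{
+        switch (dbp-&gt;get_type(dbp)) {
+        case DB_BTREE:
+                return ("Btree");
+        case DB_HASH:
+                return ("Hash");
+        case DB_QUEUE:
+                return ("Queue");
+        case DB_RECNO:
+                return ("Recno");
+        default:
+                return ("unknown");
+        }
+}</pre></blockquote>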
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_join.html b/bdb/docs/api_c/db_join.html
new file mode 100644
index 00000000000..13fe95d84d7
--- /dev/null
+++ b/bdb/docs/api_c/db_join.html
@@ -0,0 +1,151 @@
+<!--$Id: db_join.so,v 10.30 2000/12/20 15:34:50 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;join</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;join(DB *primary,
+ DBC **curslist, DBC **dbcp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;join function creates a specialized cursor for use in performing
+joins on secondary indexes. For information on how to organize your data
+to use this functionality, see <a href="../ref/am/join.html">Logical
+join</a>.
+<p>The <b>primary</b> argument contains the DB handle of the primary
+database, which is keyed by the data values found in entries in the
+<b>curslist</b>.
+<p>The <b>curslist</b> argument contains a NULL terminated array of cursors.
+Each cursor must have been initialized to reference the key on which the
+underlying database should be joined. Typically, this initialization is done
+by a <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> call with the <a href="../api_c/dbc_get.html#DB_SET">DB_SET</a> flag specified. Once the
+cursors have been passed as part of a <b>curslist</b>, they should not
+be accessed or modified until the newly created join cursor has been closed,
+or else inconsistent results may be returned.
+<p>Joined values are retrieved by doing a sequential iteration over the first
+cursor in the <b>curslist</b> argument, and a nested iteration over each
+secondary cursor in the order they are specified in the <b>curslist</b>
+argument. This requires database traversals to search for the current
+datum in all the cursors after the first. For this reason, the best join
+performance normally results from sorting the cursors from the one that
+references the least number of data items to the one that references the
+most. By default, DB-&gt;join does this sort on behalf of its caller.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_JOIN_NOSORT">DB_JOIN_NOSORT</a><dd>Do not sort the cursors based on the number of data items they reference.
+If the data are structured such that cursors with many data items also
+share many common elements, higher performance will result from listing
+those cursors before cursors with fewer data items, that is, a sort order
+other than the default. The DB_JOIN_NOSORT flag permits
+applications to perform join optimization prior to calling DB-&gt;join.
+</dl>
+<p>A newly created cursor is returned in the memory location referenced by
+<b>dbcp</b> and has the standard cursor functions:
+<p><dl compact>
+<p><dt><a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a><dd>Iterates over the values associated with the keys to which each item in
+<b>curslist</b> has been initialized. Any data value which appears in
+all items specified by the <b>curslist</b> argument is then used as a
+key into the <b>primary</b>, and the key/data pair found in the
+<b>primary</b> is returned.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_JOIN_ITEM">DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup
+key for the <b>primary</b>, but simply return it in the key parameter
+instead. The data parameter is left unchanged.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+</dl>
+<p><dt><a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a><dd>Returns EINVAL.
+<p><dt><a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a><dd>Returns EINVAL.
+<p><dt><a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a><dd>Close the returned cursor and release all resources. (Closing the cursors
+in <b>curslist</b> is the responsibility of the caller.)
+</dl>
+<p>For the returned join cursor to be used in a transaction protected manner,
+the cursors listed in <b>curslist</b> must have been created within the
+context of the same transaction.
+<p>The DB-&gt;join function returns a non-zero error value on failure and 0 on success.
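+<p>For example, a sketch of a two-way join, assuming the secondary
+databases and the keys on which to position their cursors have been
+prepared by the caller (the function name is illustrative only and
+error handling is abbreviated):
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+join_example(DB *primary, DB *sec1, DB *sec2, DBT *key1, DBT *key2)
+{
+        DBC *curs[3], *join_curs;
+        DBT key, data;
+        int ret;
+
+        /* Position a cursor on the desired key in each secondary. */
+        memset(&amp;data, 0, sizeof(data));
+        if ((ret = sec1-&gt;cursor(sec1, NULL, &amp;curs[0], 0)) != 0)
+                return (ret);
+        if ((ret = curs[0]-&gt;c_get(curs[0], key1, &amp;data, DB_SET)) != 0)
+                return (ret);
+        if ((ret = sec2-&gt;cursor(sec2, NULL, &amp;curs[1], 0)) != 0)
+                return (ret);
+        if ((ret = curs[1]-&gt;c_get(curs[1], key2, &amp;data, DB_SET)) != 0)
+                return (ret);
+        curs[2] = NULL;                 /* NULL-terminate the list. */
+
+        if ((ret = primary-&gt;join(primary, curs, &amp;join_curs, 0)) != 0)
+                return (ret);
+
+        /* Each successful c_get returns a matching primary record. */
+        memset(&amp;key, 0, sizeof(key));
+        memset(&amp;data, 0, sizeof(data));
+        while ((ret = join_curs-&gt;c_get(join_curs, &amp;key, &amp;data, 0)) == 0)
+                printf("matched a record of %lu bytes\n",
+                    (unsigned long)data.size);
+
+        (void)join_curs-&gt;c_close(join_curs);
+        (void)curs[0]-&gt;c_close(curs[0]);
+        (void)curs[1]-&gt;c_close(curs[1]);
+        return (ret == DB_NOTFOUND ? 0 : ret);
+}</pre></blockquote>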
+<h1>Errors</h1>
+<p>The DB-&gt;join function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a> or <a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a> functions were called.
+</dl>
+<p>The DB-&gt;join function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;join function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_key_range.html b/bdb/docs/api_c/db_key_range.html
new file mode 100644
index 00000000000..1e3c4c91f99
--- /dev/null
+++ b/bdb/docs/api_c/db_key_range.html
@@ -0,0 +1,106 @@
+<!--$Id: db_key_range.so,v 10.5 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;key_range</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;key_range</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;key_range(DB *db, DB_TXN *txnid,
+ DBT *key, DB_KEY_RANGE *key_range, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;key_range function returns an estimate of the proportion of keys
+that are less than, equal to and greater than the specified key. The
+underlying database must be of type Btree.
+<p>The information is returned in the <b>key_range</b> argument, which
+contains three elements of type double, <b>less</b>, <b>equal</b> and
+<b>greater</b>. Values are in the range of 0 to 1, e.g., if the field
+<b>less</b> is 0.05, that indicates that 5% of the keys in the database
+are less than the key argument. The value for <b>equal</b> will be zero
+if there is no matching key and non-zero otherwise.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_c/txn_begin.html">txn_begin</a>, otherwise, NULL.
+The DB-&gt;key_range function does not retain the locks it acquires for the
+life of the transaction, so estimates may not be repeatable.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB-&gt;key_range function returns a non-zero error value on failure and 0 on success.
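+<p>For example, a sketch that reports the estimated position of a
+NUL-terminated string key in a Btree database (the function name is
+illustrative only and error handling is abbreviated):
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+where_is(DB *dbp, char *keystr)
+{
+        DB_KEY_RANGE range;
+        DBT key;
+        int ret;
+
+        memset(&amp;key, 0, sizeof(key));
+        key.data = keystr;
+        key.size = strlen(keystr) + 1;
+
+        if ((ret = dbp-&gt;key_range(dbp, NULL, &amp;key, &amp;range, 0)) != 0)
+                return (ret);
+
+        /* The three estimates sum to approximately 1. */
+        printf("less %.2f, equal %.2f, greater %.2f\n",
+            range.less, range.equal, range.greater);
+        return (0);
+}</pre></blockquote>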
+<h1>Errors</h1>
+<p>The DB-&gt;key_range function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The underlying database was not of type Btree.
+</dl>
+<p>The DB-&gt;key_range function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;key_range function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_lsn.html b/bdb/docs/api_c/db_lsn.html
new file mode 100644
index 00000000000..1fc62e5e688
--- /dev/null
+++ b/bdb/docs/api_c/db_lsn.html
@@ -0,0 +1,36 @@
+<!--$Id: db_lsn.so,v 10.7 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_LSN</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB_LSN</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+</pre></h3>
+<h1>Description</h1>
+<p>A <a href="../api_c/db_lsn.html">DB_LSN</a> is a <b>log sequence number</b>, which indicates a
+unique position in the log. The <a href="../api_c/db_lsn.html">DB_LSN</a> structure is completely
+opaque, and no application should ever need to look inside.
+<a href="../api_c/db_lsn.html">DB_LSN</a> structures are used by the logging and memory pool
+subsystems.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_open.html b/bdb/docs/api_c/db_open.html
new file mode 100644
index 00000000000..afd410223a6
--- /dev/null
+++ b/bdb/docs/api_c/db_open.html
@@ -0,0 +1,182 @@
+<!--$Id: db_open.so,v 10.61 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;open</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;open(DB *db, const char *file,
+ const char *database, DBTYPE type, u_int32_t flags, int mode);
+</pre></h3>
+<h1>Description</h1>
+<p>The currently supported Berkeley DB file formats (or <i>access methods</i>)
+are Btree, Hash, Queue and Recno. The Btree format is a representation
+of a sorted, balanced tree structure. The Hash format is an extensible,
+dynamic hashing scheme. The Queue format supports fast access to
+fixed-length records accessed sequentially or by logical record number.
+The Recno format supports fixed- or variable-length records, accessed
+sequentially or by logical record number, and optionally retrieved from
+a flat text file.
+<p>Storage and retrieval for the Berkeley DB access methods are based on key/data
+pairs, see <a href="../api_c/dbt.html">DBT</a> for more information.
+<p>The DB-&gt;open interface opens the database represented by the
+<b>file</b> and <b>database</b> arguments for both reading and writing.
+The <b>file</b> argument is used as the name of a physical file on disk
+that will be used to back the database. The <b>database</b> argument is
+optional and allows applications to have multiple logical databases in a
+single physical file. While no <b>database</b> argument needs to be
+specified, it is an error to attempt to open a second database in a
+<b>file</b> that was not initially created using a <b>database</b> name.
+In-memory databases never intended to be preserved on disk may
+be created by setting both the <b>file</b> and <b>database</b> arguments
+to NULL. Note that in-memory databases can only ever be shared by
+sharing the single database handle that created them, in circumstances
+where doing so is safe.
+<p>The <b>type</b> argument is of type DBTYPE
+and must be set to one of DB_BTREE, DB_HASH,
+DB_QUEUE, DB_RECNO or DB_UNKNOWN, except
+that databases of type DB_QUEUE are restricted to one per
+<b>file</b>. If <b>type</b> is DB_UNKNOWN, the database must
+already exist and DB-&gt;open will automatically determine its type.
+The <a href="../api_c/db_get_type.html">DB-&gt;get_type</a> function may be used to determine the underlying type of
+databases opened using DB_UNKNOWN.
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Create any underlying files, as necessary. If the files do not already
+exist and the DB_CREATE flag is not specified, the call will fail.
+<p><dt><a name="DB_EXCL">DB_EXCL</a><dd>Return an error if the file already exists. Underlying filesystem
+primitives are used to implement this flag. For this reason it is only
+applicable to the physical file and cannot be used to test if a database
+in a file already exists.
+<p>The DB_EXCL flag is only meaningful when specified with the
+DB_CREATE flag.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Do not map this database into process memory (see the description of the
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a> function for further information).
+<p><dt><a name="DB_RDONLY">DB_RDONLY</a><dd>Open the database for reading only. Any attempt to modify items in the
+database will fail regardless of the actual permissions of any underlying
+files.
+<p><dt><a name="DB_THREAD">DB_THREAD</a><dd>Cause the DB handle returned by DB-&gt;open to be
+<i>free-threaded</i>, that is, useable by multiple threads within a
+single address space.
+<p><dt><a name="DB_TRUNCATE">DB_TRUNCATE</a><dd>Physically truncate the underlying file, discarding all previous databases
+it might have held. Underlying filesystem primitives are used to
+implement this flag. For this reason it is only applicable to the
+physical file and cannot be used to discard databases within a file.
+<p>The DB_TRUNCATE flag cannot be transaction protected, and it is
+an error to specify it in a transaction protected environment.
+</dl>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the access methods
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p>Calling DB-&gt;open is a reasonably expensive operation, and
+maintaining a set of open databases will normally be preferable to
+repeatedly opening and closing the database for each new query.
+<p>The DB-&gt;open function returns a non-zero error value on failure and 0 on success.
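+<p>For example, a sketch that creates a handle and opens a Btree
+database, creating it if necessary (the file name and mode are
+illustrative only and error handling is abbreviated):
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+open_db(DB_ENV *dbenv, DB **dbpp)
+{
+        DB *dbp;
+        int ret;
+
+        if ((ret = db_create(&amp;dbp, dbenv, 0)) != 0)
+                return (ret);
+        if ((ret = dbp-&gt;open(dbp,
+            "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+                dbp-&gt;err(dbp, ret, "DB-&gt;open: access.db");
+                (void)dbp-&gt;close(dbp, 0);
+                return (ret);
+        }
+        *dbpp = dbp;
+        return (0);
+}</pre></blockquote>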
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_c/env_open.html">DBENV-&gt;open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, DB-&gt;open
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<p><dl compact>
+<p><dt>TMPDIR<dd>If the <b>file</b> and <b>dbenv</b> arguments to DB-&gt;open are
+NULL, the environment variable <b>TMPDIR</b> may be used as a
+directory in which to create a temporary backing file.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;open function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt><a name="DB_OLD_VERSION">DB_OLD_VERSION</a><dd>The database cannot be opened without being first upgraded.
+<p><dt>EEXIST<dd>DB_CREATE and DB_EXCL were specified and the file exists.
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified (e.g., unknown database
+type, page size, hash function, pad byte, byte order) or a flag value
+or parameter that is incompatible with the specified database.
+<p>
+The <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified and spinlocks are not
+implemented for this architecture.
+<p>The <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to DB-&gt;open, but was not
+specified to the <a href="../api_c/env_open.html">DBENV-&gt;open</a> call for the environment in which the
+DB handle was created.
+<p>A <b>re_source</b> file was specified with either the <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a>
+flag or the provided database environment supports transaction
+processing.
+<p><dt>ENOENT<dd>A non-existent <b>re_source</b> file was specified.
+</dl>
+<p>The DB-&gt;open function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;open function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_put.html b/bdb/docs/api_c/db_put.html
new file mode 100644
index 00000000000..85c63b7cc7e
--- /dev/null
+++ b/bdb/docs/api_c/db_put.html
@@ -0,0 +1,136 @@
+<!--$Id: db_put.so,v 10.34 2000/09/16 22:27:56 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;put</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;put(DB *db,
+ DB_TXN *txnid, DBT *key, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;put function stores key/data pairs in the database. The default
+behavior of the DB-&gt;put function is to enter the new key/data
+pair, replacing any previously existing key if duplicates are disallowed,
+or adding a duplicate data item if duplicates are allowed. If the database
+supports duplicates, the DB-&gt;put function adds the new data value at the
+end of the duplicate set. If the database supports sorted duplicates,
+the new data value is inserted at the correct sorted location.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_c/txn_begin.html">txn_begin</a>, otherwise, NULL.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_APPEND">DB_APPEND</a><dd>Append the key/data pair to the end of the database. For the
+DB_APPEND flag to be specified, the underlying database must be
+a Queue or Recno database. The record number allocated to the record is
+returned in the specified <b>key</b>.
+<p>There is a minor behavioral difference between the Recno and Queue access
+methods for the DB_APPEND flag. If a transaction enclosing a
+DB-&gt;put operation with the DB_APPEND flag aborts, the
+record number may be decremented (and later re-allocated by a subsequent
+DB_APPEND operation) by the Recno access method, but will not be
+decremented or re-allocated by the Queue access method.
+<p><dt><a name="DB_NODUPDATA">DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, enter the new key/data
+pair only if it does not already appear in the database. If the
+key/data pair already appears in the database, <a href="../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> is
+returned. The DB_NODUPDATA flag may only be specified if the
+underlying database has been configured to support sorted duplicates.
+<p>The DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_NOOVERWRITE">DB_NOOVERWRITE</a><dd>Enter the new key/data pair only if the key does not already appear in
+the database. If the key already appears in the database,
+<a href="../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> is returned. Even if the database allows duplicates,
+a call to DB-&gt;put with the DB_NOOVERWRITE flag set will
+fail if the key already exists in the database.
+</dl>
+<p>Otherwise, the DB-&gt;put function returns a non-zero error value on failure and 0 on success.
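+<p>For example, a sketch that stores a NUL-terminated string pair
+without overwriting an existing key (the function name is illustrative
+only and error handling is abbreviated):
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+store(DB *dbp, char *keystr, char *datastr)
+{
+        DBT key, data;
+        int ret;
+
+        memset(&amp;key, 0, sizeof(key));
+        memset(&amp;data, 0, sizeof(data));
+        key.data = keystr;
+        key.size = strlen(keystr) + 1;
+        data.data = datastr;
+        data.size = strlen(datastr) + 1;
+
+        switch (ret = dbp-&gt;put(dbp, NULL, &amp;key, &amp;data, DB_NOOVERWRITE)) {
+        case 0:
+        case DB_KEYEXIST:       /* The key was already present. */
+                return (0);
+        default:
+                dbp-&gt;err(dbp, ret, "DB-&gt;put");
+                return (ret);
+        }
+}</pre></blockquote>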
+<h1>Errors</h1>
+<p>The DB-&gt;put function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to do a partial put.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>A btree exceeded the maximum btree depth (255).
+</dl>
+<p>The DB-&gt;put function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;put function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_remove.html b/bdb/docs/api_c/db_remove.html
new file mode 100644
index 00000000000..e8dae864538
--- /dev/null
+++ b/bdb/docs/api_c/db_remove.html
@@ -0,0 +1,108 @@
+<!--$Id: db_remove.so,v 10.20 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;remove</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;remove(DB *db,
+ const char *file, const char *database, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;remove interface removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the physical file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>If a physical file is being removed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+DB-&gt;remove function is called. Otherwise, no reference count of database
+use is maintained by Berkeley DB. Applications should not remove databases that
+are currently in use. In particular, some architectures do not permit
+the removal of files with open handles. On these architectures, attempts
+to remove databases that are currently in use will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Once DB-&gt;remove has been called, regardless of its return, the
+DB handle may not be accessed again.
+<p>The DB-&gt;remove function returns a non-zero error value on failure and 0 on success.
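+<p>For example, a minimal sketch of removing a single database from a
+file using a stand-alone handle (the function name <b>remove_database</b>
+and its error handling are illustrative only):
+<p><pre>
+#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+int
+remove_database(const char *file, const char *database)
+{
+    DB *dbp;
+    int ret;
+
+    /* Create a handle outside of any database environment. */
+    if ((ret = db_create(&amp;dbp, NULL, 0)) != 0) {
+        fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+        return (ret);
+    }
+
+    /*
+     * Remove the named database; the handle may not be accessed
+     * again, regardless of the return value.
+     */
+    if ((ret = dbp-&gt;remove(dbp, file, database, 0)) != 0)
+        fprintf(stderr, "DB-&gt;remove: %s\n", db_strerror(ret));
+    return (ret);
+}
+</pre>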
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_c/env_open.html">DBENV-&gt;open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, DB-&gt;remove
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;remove function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>A database in the file is currently open.
+</dl>
+<p>The DB-&gt;remove function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;remove function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_rename.html b/bdb/docs/api_c/db_rename.html
new file mode 100644
index 00000000000..ff90836c6b2
--- /dev/null
+++ b/bdb/docs/api_c/db_rename.html
@@ -0,0 +1,109 @@
+<!--$Id: db_rename.so,v 10.7 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;rename</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;rename(DB *db, const char *file,
+ const char *database, const char *newname, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;rename interface renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the physical file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>If a physical file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+DB-&gt;rename function is called. Otherwise, no reference count of database
+use is maintained by Berkeley DB. Applications should not rename databases that
+are currently in use. In particular, some architectures do not permit
+renaming files with open handles. On these architectures, attempts to
+rename databases that are currently in use will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Once DB-&gt;rename has been called, regardless of its return, the
+DB handle may not be accessed again.
+<p>The DB-&gt;rename function returns a non-zero error value on failure and 0 on success.
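+<p>For example, a minimal sketch that renames a physical file and, with
+it, any databases it contains (the file names and the function name
+<b>rename_file</b> are illustrative only):
+<p><pre>
+#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+int
+rename_file(void)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+
+    /* The handle may not be accessed again after this call. */
+    if ((ret = dbp-&gt;rename(dbp, "old.db", NULL, "new.db", 0)) != 0)
+        fprintf(stderr, "DB-&gt;rename: %s\n", db_strerror(ret));
+    return (ret);
+}
+</pre>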
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_c/env_open.html">DBENV-&gt;open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, DB-&gt;rename
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;rename function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>A database in the file is currently open.
+</dl>
+<p>The DB-&gt;rename function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;rename function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_append_recno.html b/bdb/docs/api_c/db_set_append_recno.html
new file mode 100644
index 00000000000..4b90190ffbd
--- /dev/null
+++ b/bdb/docs/api_c/db_set_append_recno.html
@@ -0,0 +1,66 @@
+<!--$Id: db_set_append_recno.so,v 1.3 2000/07/18 16:19:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_append_recno</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_append_recno</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_append_recno(DB *,
+ int (*db_append_recno_fcn)(DB *dbp, DBT *data, db_recno_t recno));
+</pre></h3>
+<h1>Description</h1>
+<p>When using the <a href="../api_c/db_put.html#DB_APPEND">DB_APPEND</a> option of the <a href="../api_c/db_put.html">DB-&gt;put</a> method,
+it may be useful to modify the stored data based on the generated key.
+If a callback function is specified using the
+DB-&gt;set_append_recno function, it will be called after the record number
+has been selected but before the data has been stored.
+The callback function must return 0 on success, and either <b>errno</b> or
+a value outside of the Berkeley DB error name space on failure.
+<p>The called function must take three arguments: a reference to the
+enclosing database handle, the data <a href="../api_c/dbt.html">DBT</a> to be stored and the
+selected record number. The called function may then modify the data
+<a href="../api_c/dbt.html">DBT</a>.
+<p>The DB-&gt;set_append_recno interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_append_recno function returns a non-zero error value on failure and 0 on success.
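+<p>As an illustration, the following sketch stamps the selected record
+number into the first bytes of each appended data item (the function
+name <b>stamp_recno</b> and the record layout are assumptions made for
+the example):
+<p><pre>
+#include &lt;db.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;string.h&gt;
+
+/* Copy the selected record number into the start of the data item. */
+int
+stamp_recno(DB *dbp, DBT *data, db_recno_t recno)
+{
+    if (data-&gt;size &lt; sizeof(recno))
+        return (EINVAL);    /* Reject items that are too small. */
+    memcpy(data-&gt;data, &amp;recno, sizeof(recno));
+    return (0);
+}
+</pre>
+<p>The callback would be registered by calling
+dbp-&gt;set_append_recno(dbp, stamp_recno) before
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called.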
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_bt_compare.html b/bdb/docs/api_c/db_set_bt_compare.html
new file mode 100644
index 00000000000..bf38ee51d94
--- /dev/null
+++ b/bdb/docs/api_c/db_set_bt_compare.html
@@ -0,0 +1,105 @@
+<!--$Id: db_set_bt_compare.so,v 10.24 2000/10/26 15:20:40 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_bt_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_bt_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_bt_compare(DB *db,
+ int (*bt_compare_fcn)(DB *, const DBT *, const DBT *));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree key comparison function. The comparison function is
+called when it is necessary to compare a key specified by the
+application with a key currently stored in the tree. The first argument
+to the comparison function is the <a href="../api_c/dbt.html">DBT</a> representing the
+application supplied key, the second is the current tree's key.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first key argument is considered to be
+respectively less than, equal to, or greater than the second key
+argument. In addition, the comparison function must cause the keys in
+the database to be <i>well-ordered</i>. The comparison function
+must correctly handle any key values used by the application (possibly
+including zero-length keys). In addition, when Btree key prefix
+comparison is being performed (see <a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a> for more
+information), the comparison routine may be passed a prefix of any
+database key. The <b>data</b> and <b>size</b> fields of the
+<a href="../api_c/dbt.html">DBT</a> are the only fields that may be used for the purposes of
+this comparison.
+<p>If no comparison function is specified, the keys are compared lexically,
+with shorter keys collating before longer keys. The same comparison
+method must be used each time a particular Btree is opened.
+<p>The DB-&gt;set_bt_compare interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_bt_compare function returns a non-zero error value on failure and 0 on success.
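+<p>For example, a sketch of a comparison function for keys that each
+hold a u_int32_t in native byte order (the function name
+<b>compare_int</b> and the fixed 4-byte key layout are assumptions made
+for the example):
+<p><pre>
+#include &lt;db.h&gt;
+#include &lt;string.h&gt;
+
+/* Order keys as native-byte-order unsigned 32-bit integers. */
+int
+compare_int(DB *dbp, const DBT *a, const DBT *b)
+{
+    u_int32_t ai, bi;
+
+    /* Only the data and size fields of the DBTs are examined. */
+    memcpy(&amp;ai, a-&gt;data, sizeof(u_int32_t));
+    memcpy(&amp;bi, b-&gt;data, sizeof(u_int32_t));
+    return (ai &lt; bi ? -1 : (ai &gt; bi ? 1 : 0));
+}
+</pre>
+<p>The function would be passed to DB-&gt;set_bt_compare before the
+database is created, and the same function used each time the database
+is subsequently opened.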
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_bt_minkey.html b/bdb/docs/api_c/db_set_bt_minkey.html
new file mode 100644
index 00000000000..c36f3637ba5
--- /dev/null
+++ b/bdb/docs/api_c/db_set_bt_minkey.html
@@ -0,0 +1,92 @@
+<!--$Id: db_set_bt_minkey.so,v 10.14 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_bt_minkey</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_bt_minkey</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_bt_minkey(DB *db, u_int32_t bt_minkey);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the minimum number of keys that will be stored on any single
+Btree page.
+<p>This value is used to determine which keys will be stored on overflow
+pages, i.e. if a key or data item is larger than the underlying database
+page size divided by the <b>bt_minkey</b> value, it will be stored on
+overflow pages instead of within the page itself. The <b>bt_minkey</b>
+value specified must be at least 2; if <b>bt_minkey</b> is not explicitly
+set, a value of 2 is used.
+<p>The DB-&gt;set_bt_minkey interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_bt_minkey function returns a non-zero error value on failure and 0 on success.
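+<p>For example, a sketch that requests at least 4 keys per page on a
+handle that has not yet been opened (the value 4 and the function name
+<b>configure_minkey</b> are illustrative only):
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+configure_minkey(DB *dbp)
+{
+    int ret;
+
+    /* Items larger than roughly pagesize / 4 go to overflow pages. */
+    if ((ret = dbp-&gt;set_bt_minkey(dbp, 4)) != 0)
+        dbp-&gt;err(dbp, ret, "DB-&gt;set_bt_minkey");
+    return (ret);
+}
+</pre>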
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_bt_prefix.html b/bdb/docs/api_c/db_set_bt_prefix.html
new file mode 100644
index 00000000000..88bf3157f97
--- /dev/null
+++ b/bdb/docs/api_c/db_set_bt_prefix.html
@@ -0,0 +1,106 @@
+<!--$Id: db_set_bt_prefix.so,v 10.25 2000/09/08 21:35:26 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_bt_prefix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_bt_prefix</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_bt_prefix(DB *db,
+ size_t (*bt_prefix_fcn)(DB *, const DBT *, const DBT *));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree prefix function. The prefix function must return the
+number of bytes of the second key argument that would be required by
+the Btree key comparison function to determine the second key argument's
+ordering relationship with respect to the first key argument. If the
+two keys are equal, the key length should be returned. The prefix
+function must correctly handle any key values used by the application
+(possibly including zero-length keys). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_c/dbt.html">DBT</a> are the only fields that may be
+used for the purposes of this determination.
+<p>The prefix function is used to determine the amount by which keys stored
+on the Btree internal pages can be safely truncated without losing their
+uniqueness. See the <a href="../ref/am_conf/bt_prefix.html">Btree
+prefix comparison</a> section of the Reference Guide for more details about
+how this works. The usefulness of this is data dependent, but in some
+data sets can produce significantly reduced tree sizes and search times.
+<p>If no prefix function or key comparison function is specified by the
+application, a default lexical comparison function is used as the prefix
+function. If no prefix function is specified and a key comparison
+function is specified, no prefix function is used. It is an error to
+specify a prefix function without also specifying a key comparison
+function.
+<p>The DB-&gt;set_bt_prefix interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_bt_prefix function returns a non-zero error value on failure and 0 on success.
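+<p>As an illustration, a sketch of a prefix function matched to a
+byte-wise (memcmp-style) key comparison function; as described above,
+an application registering it must also register that comparison
+function using <a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>:
+<p><pre>
+#include &lt;sys/types.h&gt;
+#include &lt;db.h&gt;
+
+/*
+ * Return the number of bytes of the second key needed to determine
+ * its ordering relative to the first, for byte-wise compared keys.
+ */
+size_t
+prefix_bytes(DB *dbp, const DBT *a, const DBT *b)
+{
+    size_t cnt, len;
+    u_int8_t *p1, *p2;
+
+    cnt = 1;
+    len = a-&gt;size &gt; b-&gt;size ? b-&gt;size : a-&gt;size;
+    for (p1 = a-&gt;data, p2 = b-&gt;data; len--; ++p1, ++p2, ++cnt)
+        if (*p1 != *p2)
+            return (cnt);
+
+    /* The keys match up to the shorter length. */
+    if (a-&gt;size &lt; b-&gt;size)
+        return (a-&gt;size + 1);
+    if (b-&gt;size &lt; a-&gt;size)
+        return (b-&gt;size + 1);
+    return (b-&gt;size);
+}
+</pre>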
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_cachesize.html b/bdb/docs/api_c/db_set_cachesize.html
new file mode 100644
index 00000000000..fd7176cbf7d
--- /dev/null
+++ b/bdb/docs/api_c/db_set_cachesize.html
@@ -0,0 +1,107 @@
+<!--$Id: db_set_cachesize.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--$Id: m4.cachesize,v 10.7 2000/02/11 18:54:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_cachesize</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_cachesize(DB *db,
+ u_int32_t gbytes, u_int32_t bytes, int ncache);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the database's shared memory buffer pool, i.e., the cache,
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be the
+size of the normal working data set of the application, with some small
+amount of additional memory for unusual situations. (Note, the working
+set is not the same as the number of simultaneously referenced pages, and
+should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures, e.g., some
+releases of Solaris limit the amount of memory that may be allocated
+contiguously by a process. If <b>ncache</b> is 0 or 1, the cache will
+be allocated contiguously in memory. If it is greater than 1, the cache
+will be broken up into <b>ncache</b> equally sized separate pieces of
+memory.
+<p>As databases opened within Berkeley DB environments use the cache specified to
+the environment, it is an error to attempt to set a cache in a database
+created within an environment.
+<p>The DB-&gt;set_cachesize interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_cachesize function returns a non-zero error value on failure and 0 on success.
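+<p>For example, a sketch that gives a stand-alone database a 64MB cache
+allocated as a single contiguous region (the size and the function name
+<b>configure_cache</b> are illustrative; databases opened within an
+environment use the environment's cache instead):
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+configure_cache(DB *dbp)
+{
+    int ret;
+
+    /* 0 gigabytes plus 64MB, in one contiguous piece (ncache == 1). */
+    if ((ret = dbp-&gt;set_cachesize(dbp, 0, 64 * 1024 * 1024, 1)) != 0)
+        dbp-&gt;err(dbp, ret, "DB-&gt;set_cachesize");
+    return (ret);
+}
+</pre>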
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_dup_compare.html b/bdb/docs/api_c/db_set_dup_compare.html
new file mode 100644
index 00000000000..2ac6ed9dec2
--- /dev/null
+++ b/bdb/docs/api_c/db_set_dup_compare.html
@@ -0,0 +1,102 @@
+<!--$Id: db_set_dup_compare.so,v 10.21 2000/10/26 15:20:40 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_dup_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_dup_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_dup_compare(DB *db,
+ int (*dup_compare_fcn)(DB *, const DBT *, const DBT *));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the duplicate data item comparison function. The comparison function
+is called when it is necessary to compare a data item specified by the
+application with a data item currently stored in the tree. The first
+argument to the comparison function is the <a href="../api_c/dbt.html">DBT</a> representing the
+application's data item, the second is the current tree's data item.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first data item argument is considered
+to be respectively less than, equal to, or greater than the second data
+item argument. In addition, the comparison function must cause the data
+items in the set to be <i>well-ordered</i>. The comparison function
+must correctly handle any data item values used by the application
+(possibly including zero-length data items). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_c/dbt.html">DBT</a> are the only fields that may be
+used for the purposes of this comparison.
+<p>If no comparison function is specified, the data items are compared
+lexically, with shorter data items collating before longer data items.
+The same duplicate data item comparison method must be used each time
+a particular Btree is opened.
+<p>The DB-&gt;set_dup_compare interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_dup_compare function returns a non-zero error value on failure and 0 on success.
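+<p>As an illustration, a sketch of a duplicate comparison function for
+data items that begin with a u_int32_t timestamp in native byte order
+(the function name <b>compare_dup</b> and the item layout are
+assumptions made for the example); such a function would normally be
+paired with the DB_DUPSORT flag:
+<p><pre>
+#include &lt;db.h&gt;
+#include &lt;string.h&gt;
+
+/* Sort duplicate data items by a leading 32-bit timestamp. */
+int
+compare_dup(DB *dbp, const DBT *a, const DBT *b)
+{
+    u_int32_t ta, tb;
+
+    memcpy(&amp;ta, a-&gt;data, sizeof(u_int32_t));
+    memcpy(&amp;tb, b-&gt;data, sizeof(u_int32_t));
+    return (ta &lt; tb ? -1 : (ta &gt; tb ? 1 : 0));
+}
+</pre>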
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_errcall.html b/bdb/docs/api_c/db_set_errcall.html
new file mode 100644
index 00000000000..97d8d9a3aca
--- /dev/null
+++ b/bdb/docs/api_c/db_set_errcall.html
@@ -0,0 +1,76 @@
+<!--$Id: db_set_errcall.so,v 10.7 1999/12/20 08:52:28 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_errcall</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB-&gt;set_errcall(DB *,
+ void (*db_errcall_fcn)(const char *errpfx, char *msg));
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however,
+the <b>errno</b> value may be insufficient to completely describe
+the cause of the error, especially during initial application debugging.
+<p>The DB-&gt;set_errcall function is used to enhance the mechanism for reporting error
+messages to the application. In some cases, when an error occurs, Berkeley DB
+will call <b>db_errcall_fcn</b> with additional error information. The
+function must be declared with two arguments; the first will be the prefix
+string (as previously set by <a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> or
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>), the second will be the error message string.
+It is up to the <b>db_errcall_fcn</b> function to display the error
+message in an appropriate manner.
+<p>Alternatively, you can use the <a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> or
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a> functions to display the additional information
+via a C library FILE *.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For DB handles opened inside of Berkeley DB environments, calling the
+DB-&gt;set_errcall function affects the entire environment and is equivalent to calling
+the <a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a> function.
+<p>The DB-&gt;set_errcall interface may be used to configure Berkeley DB at any time
+during the life of the application.
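+<p>For example, a minimal sketch of a callback that writes messages to
+the standard error output (the function name <b>report_db_error</b> is
+illustrative only):
+<p><pre>
+#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+/* Display a Berkeley DB error message, with its prefix if one is set. */
+void
+report_db_error(const char *errpfx, char *msg)
+{
+    if (errpfx != NULL)
+        fprintf(stderr, "%s: ", errpfx);
+    fprintf(stderr, "%s\n", msg);
+}
+</pre>
+<p>The callback would be registered by calling
+dbp-&gt;set_errcall(dbp, report_db_error) on the handle.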
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_errfile.html b/bdb/docs/api_c/db_set_errfile.html
new file mode 100644
index 00000000000..5d160ed5cf2
--- /dev/null
+++ b/bdb/docs/api_c/db_set_errfile.html
@@ -0,0 +1,73 @@
+<!--$Id: db_set_errfile.so,v 10.7 1999/12/20 08:52:28 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_errfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_errfile</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB-&gt;set_errfile(DB *db, FILE *errfile);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however,
+the <b>errno</b> value may be insufficient to completely describe
+the cause of the error, especially during initial application debugging.
+<p>The DB-&gt;set_errfile function is used to enhance the mechanism for reporting error
+messages to the application by setting a C library FILE * to be used for
+displaying additional Berkeley DB error messages. In some cases, when an error
+occurs, Berkeley DB will output an additional error message to the specified
+file reference.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> or <a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>), an error string, and
+a trailing &lt;newline&gt; character.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For DB handles opened inside of Berkeley DB environments, calling the
+DB-&gt;set_errfile function affects the entire environment and is equivalent to calling
+the <a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a> function.
+<p>The DB-&gt;set_errfile interface may be used to configure Berkeley DB at any time
+during the life of the application.
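+<p>For example, a sketch that appends additional error messages to an
+application log file (the function name <b>enable_error_log</b> and the
+prefix string are illustrative only):
+<p><pre>
+#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+int
+enable_error_log(DB *dbp, const char *path)
+{
+    FILE *errfp;
+
+    if ((errfp = fopen(path, "a")) == NULL)
+        return (-1);
+
+    /* The FILE * must remain open as long as the handle is in use. */
+    dbp-&gt;set_errfile(dbp, errfp);
+    dbp-&gt;set_errpfx(dbp, "myapp");
+    return (0);
+}
+</pre>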
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_errpfx.html b/bdb/docs/api_c/db_set_errpfx.html
new file mode 100644
index 00000000000..baf8f61fef7
--- /dev/null
+++ b/bdb/docs/api_c/db_set_errpfx.html
@@ -0,0 +1,62 @@
+<!--$Id: db_set_errpfx.so,v 10.6 1999/12/20 08:52:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_errpfx</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB-&gt;set_errpfx(DB *db, const char *errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The DB-&gt;set_errpfx function does not copy the memory referenced by the
+<b>errpfx</b> argument; rather, it maintains a reference to it. This
+allows applications to modify the error message prefix at any time,
+without repeatedly calling DB-&gt;set_errpfx, but means that the
+memory must be maintained until the handle is closed.
+<p>For DB handles opened inside of Berkeley DB environments, calling the
+DB-&gt;set_errpfx function affects the entire environment and is equivalent to calling
+the <a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a> function.
+<p>The DB-&gt;set_errpfx interface may be used to configure Berkeley DB at any time
+during the life of the application.
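+<p>For example, a minimal sketch (the prefix string and function name
+are illustrative only); because the prefix is not copied, a string
+literal or other storage that outlives the handle should be used:
+<p><pre>
+#include &lt;db.h&gt;
+
+static const char *progname = "myapp";
+
+void
+set_prefix(DB *dbp)
+{
+    /* progname must remain valid until the handle is closed. */
+    dbp-&gt;set_errpfx(dbp, progname);
+}
+</pre>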
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_feedback.html b/bdb/docs/api_c/db_set_feedback.html
new file mode 100644
index 00000000000..213060ee765
--- /dev/null
+++ b/bdb/docs/api_c/db_set_feedback.html
@@ -0,0 +1,95 @@
+<!--$Id: db_set_feedback.so,v 10.16 2000/07/09 19:11:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_feedback</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_feedback(DB *,
+ void (*db_feedback_fcn)(DB *dbp, int opcode, int pct));
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The DB-&gt;set_feedback function can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback function. This function must be declared with
+three arguments: the first will be a reference to the enclosing database
+handle, the second a flag value, and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback function to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="DB_UPGRADE">DB_UPGRADE</a><dd>The underlying database is being upgraded.
+<p><dt><a name="DB_VERIFY">DB_VERIFY</a><dd>The underlying database is being verified.
+</dl>
+<p>The DB-&gt;set_feedback interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DB-&gt;set_feedback function returns a non-zero error value on failure and 0 on success.
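+<p>For example, a sketch of a callback that reports progress on the
+standard output (the function name <b>show_progress</b> is illustrative
+only):
+<p><pre>
+#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+/* Report progress of long-running upgrade or verify operations. */
+void
+show_progress(DB *dbp, int opcode, int pct)
+{
+    const char *op;
+
+    op = opcode == DB_UPGRADE ? "upgrade" :
+        (opcode == DB_VERIFY ? "verify" : "operation");
+    printf("%s: %d%% complete\n", op, pct);
+    fflush(stdout);
+}
+</pre>
+<p>The callback would be registered by calling
+dbp-&gt;set_feedback(dbp, show_progress) on the handle.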
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_flags.html b/bdb/docs/api_c/db_set_flags.html
new file mode 100644
index 00000000000..f1823381776
--- /dev/null
+++ b/bdb/docs/api_c/db_set_flags.html
@@ -0,0 +1,181 @@
+<!--$Id: db_set_flags.so,v 10.26 2000/03/17 01:53:58 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_flags</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_flags(DB *db, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Calling DB-&gt;set_flags is additive; there is no way to clear flags.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<h3>Btree</h3>
+<p>The following flags may be specified for the Btree access method:
+<p><dl compact>
+<p><dt><a name="DB_DUP">DB_DUP</a><dd>Permit duplicate data items in the tree; that is, insertion of a key/data
+pair whose key already exists in the tree will be successful. The ordering
+of duplicates in the tree is determined by the order of insertion, unless
+the ordering is otherwise specified by use of a cursor operation. It is an
+error to specify both DB_DUP and DB_RECNUM.
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd>Permit duplicate data items in the tree; that is, insertion of a key/data
+pair whose key already exists in the tree will be successful. The ordering
+of duplicates in the tree is determined by the duplicate comparison function.
+If the application does not specify a comparison function using the
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> function, a default, lexical comparison will be
+used.
+It is an error to specify both DB_DUPSORT and DB_RECNUM.
+<p><dt><a name="DB_RECNUM">DB_RECNUM</a><dd>Support retrieval from the Btree using record numbers. For more
+information, see the DB_GET_RECNO flag to the <a href="../api_c/db_get.html">DB-&gt;get</a> and
+<a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> methods.
+<p>Logical record numbers in Btree databases are mutable in the face of
+record insertion or deletion. See the DB_RENUMBER flag in the Recno
+access method information for further discussion.
+<p>Maintaining record counts within a Btree introduces a serious point of
+contention, namely the page locations where the record counts are stored.
+In addition, the entire tree must be locked during both insertions and
+deletions, effectively single-threading the tree for those operations.
+Specifying DB_RECNUM can result in serious performance degradation for
+some applications and data sets.
+<p>It is an error to specify both DB_DUP and DB_RECNUM.
+<p><dt><a name="DB_REVSPLITOFF">DB_REVSPLITOFF</a><dd>Turn off reverse splitting in the Btree. As pages are emptied in a
+database, the Berkeley DB Btree implementation attempts to coalesce empty pages
+into higher-level pages in order to keep the tree as small as possible
+and minimize tree search time. This can hurt performance in applications
+with cyclical data demands, that is, applications where the database grows
+and shrinks repeatedly. For example, because Berkeley DB does page-level
+locking, the maximum level of concurrency in a database of 2 pages is far
+smaller than that in a database of 100 pages, and so a database that has
+shrunk to a minimal size can cause severe deadlocking when a new cycle of
+data insertion begins.
+</dl>
+<h3>Hash</h3>
+<p>The following flags may be specified for the Hash access method:
+<p><dl compact>
+<p><dt><a name="DB_DUP">DB_DUP</a><dd>Permit duplicate data items in the database; that is, insertion of a
+key/data pair whose key already exists in the database will be successful.
+The ordering of duplicates is determined by the order of insertion, unless
+the ordering is otherwise specified by use of a cursor operation. It is an
+error to specify both DB_DUP and DB_RECNUM.
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd>Permit duplicate data items in the database; that is, insertion of a
+key/data pair whose key already exists in the database will be successful.
+The ordering of duplicates is determined by the duplicate comparison function.
+If the application does not specify a comparison function using the
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> function, a default, lexical comparison will be
+used.
+It is an error to specify both DB_DUPSORT and DB_RECNUM.
+</dl>
+<h3>Queue</h3>
+<p>There are no additional flags that may be specified for the Queue access
+method.
+<h3>Recno</h3>
+<p>The following flags may be specified for the Recno access method:
+<p><dl compact>
+<p><dt><a name="DB_RENUMBER">DB_RENUMBER</a><dd>Specifying the DB_RENUMBER flag causes the logical record numbers to be
+mutable, and change as records are added to and deleted from the database.
+For example, the deletion of record number 4 causes records numbered 5
+and greater to be renumbered downward by 1. If a cursor was positioned
+to record number 4 before the deletion, it will reference the new record
+number 4, if any such record exists, after the deletion. If a cursor was
+positioned after record number 4 before the deletion, it will be shifted
+downward 1 logical record, continuing to reference the same record as it
+did before.
+<p>Using the <a href="../api_c/db_put.html">DB-&gt;put</a> or <a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a> interfaces to create new
+records will cause the creation of multiple records if the record number
+is more than one greater than the largest record currently in the
+database. For example, creating record 28, when record 25 was previously
+the last record in the database, will create records 26 and 27 as well as
+28. Attempts to retrieve records that were created in this manner will
+result in an error return of <a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If a created record is not at the end of the database, all records
+following the new record will be automatically renumbered upward by 1.
+For example, the creation of a new record numbered 8 causes records
+numbered 8 and greater to be renumbered upward by 1. If a cursor was
+positioned to record number 8 or greater before the insertion, it will be
+shifted upward 1 logical record, continuing to reference the same record
+as it did before.
+<p>For these reasons, concurrent access to a Recno database with the
+DB_RENUMBER flag specified may be largely meaningless, although
+it is supported.
+<p><dt><a name="DB_SNAPSHOT">DB_SNAPSHOT</a><dd>This flag specifies that any specified <b>re_source</b> file be read in
+its entirety when <a href="../api_c/db_open.html">DB-&gt;open</a> is called. If this flag is not
+specified, the <b>re_source</b> file may be read lazily.
+</dl>
+<p>The DB-&gt;set_flags interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_flags function returns a non-zero error value on failure and 0 on success.
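+<p>As an illustrative sketch (the helper name and the choice of DB_DUPSORT are
+assumptions for a Btree database that should keep sorted duplicates; the
+<b>dbp</b> handle is assumed to come from <a href="../api_c/db_create.html">db_create</a>):
+<p><blockquote><pre>int
+configure_sorted_dups(DB *dbp)
+{
+        /* Permit duplicates, sorted by the default lexical comparison. */
+        return (dbp-&gt;set_flags(dbp, DB_DUPSORT));
+}</pre></blockquote>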
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_h_ffactor.html b/bdb/docs/api_c/db_set_h_ffactor.html
new file mode 100644
index 00000000000..c3bbb607ea5
--- /dev/null
+++ b/bdb/docs/api_c/db_set_h_ffactor.html
@@ -0,0 +1,93 @@
+<!--$Id: db_set_h_ffactor.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_h_ffactor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_h_ffactor</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_h_ffactor(DB *db, u_int32_t h_ffactor);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the desired density within the hash table.
+<p>The density is an approximation of the number of keys allowed to
+accumulate in any one bucket, determining when the hash table grows or
+shrinks. If you know the average sizes of the keys and data in your
+dataset, setting the fill factor can enhance performance. A reasonable
+rule for computing the fill factor is to set it to:
+<p><blockquote><pre>(pagesize - 32) / (average_key_size + average_data_size + 8)</pre></blockquote>
+<p>If no value is specified, the fill factor will be selected dynamically as
+pages are filled.
+<p>The DB-&gt;set_h_ffactor interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_h_ffactor function returns a non-zero error value on failure and 0 on success.
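+<p>As an illustrative sketch (the page size and average key/data sizes below
+are assumed values; the helper name and <b>dbp</b> handle are likewise
+assumptions), the rule above might be applied as follows:
+<p><blockquote><pre>int
+configure_ffactor(DB *dbp)
+{
+        /*
+         * Assumed workload: 8192-byte pages, 16-byte keys, 100-byte data.
+         * (8192 - 32) / (16 + 100 + 8) = 65, rounding down.
+         */
+        return (dbp-&gt;set_h_ffactor(dbp, 65));
+}</pre></blockquote>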
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_h_hash.html b/bdb/docs/api_c/db_set_h_hash.html
new file mode 100644
index 00000000000..cae03fa72b3
--- /dev/null
+++ b/bdb/docs/api_c/db_set_h_hash.html
@@ -0,0 +1,97 @@
+<!--$Id: db_set_h_hash.so,v 10.18 2000/07/04 18:28:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_h_hash</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_h_hash</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_h_hash(DB *db,
+ u_int32_t (*h_hash_fcn)(DB *, const void *bytes, u_int32_t length));
+</pre></h3>
+<h1>Description</h1>
+<p>Set a user-defined hash method; if no hash method is specified, a default
+hash method is used. Since no hash method performs equally well on all
+possible data, the user may find that the built-in hash method performs
+poorly with a particular data set. User-specified hash functions must
+take a pointer to a byte string and a length as arguments and return a
+value of type
+<b>u_int32_t</b>.
+The hash function must handle any key values used by the application
+(possibly including zero-length keys).
+<p>If a hash method is specified, <a href="../api_c/db_open.html">DB-&gt;open</a> will attempt to determine
+if the hash method specified is the same as the one with which the database
+was created, and will fail if it detects that it is not.
+<p>The DB-&gt;set_h_hash interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_h_hash function returns a non-zero error value on failure and 0 on success.
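+<p>The following sketch shows the required callback signature (the hash
+computation itself is an arbitrary, illustrative byte-at-a-time hash, not
+the Berkeley DB default; the function and handle names are assumptions):
+<p><blockquote><pre>u_int32_t
+my_hash(DB *dbp, const void *bytes, u_int32_t length)
+{
+        const unsigned char *p;
+        u_int32_t hash;
+
+        /* Byte-at-a-time hash; zero-length keys are handled (returns 5381). */
+        for (hash = 5381, p = bytes; length &gt; 0; --length, ++p)
+                hash = hash * 33 + *p;
+        return (hash);
+}
+<p>
+int
+configure_hash(DB *dbp)
+{
+        return (dbp-&gt;set_h_hash(dbp, my_hash));
+}</pre></blockquote>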
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_h_nelem.html b/bdb/docs/api_c/db_set_h_nelem.html
new file mode 100644
index 00000000000..d052afff7dc
--- /dev/null
+++ b/bdb/docs/api_c/db_set_h_nelem.html
@@ -0,0 +1,88 @@
+<!--$Id: db_set_h_nelem.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_h_nelem</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_h_nelem</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_h_nelem(DB *db, u_int32_t h_nelem);
+</pre></h3>
+<h1>Description</h1>
+<p>Set an estimate of the final size of the hash table.
+<p>If not set or set too low, hash tables will still expand gracefully
+as keys are entered, although a slight performance degradation may be
+noticed.
+<p>The DB-&gt;set_h_nelem interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_h_nelem function returns a non-zero error value on failure and 0 on success.
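+<p>A minimal sketch (the element count is an assumed estimate for the
+application's data set; the helper name and <b>dbp</b> handle are assumptions):
+<p><blockquote><pre>int
+configure_nelem(DB *dbp)
+{
+        /* The table is expected to hold roughly 1,000,000 keys. */
+        return (dbp-&gt;set_h_nelem(dbp, 1000000));
+}</pre></blockquote>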
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_lorder.html b/bdb/docs/api_c/db_set_lorder.html
new file mode 100644
index 00000000000..a9a3c923037
--- /dev/null
+++ b/bdb/docs/api_c/db_set_lorder.html
@@ -0,0 +1,94 @@
+<!--$Id: db_set_lorder.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_lorder</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_lorder</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_lorder(DB *db, int lorder);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the byte order for integers in the stored database metadata. The
+number should represent the order as an integer; for example, big endian
+order is the number 4,321, and little endian order is the number 1,234.
+If <b>lorder</b> is not explicitly set, the host order of the machine
+where the Berkeley DB library was compiled is used.
+<p>The value of <b>lorder</b> is ignored except when databases are being
+created. If a database already exists, the byte order it uses is
+determined when the database is opened.
+<p><b>The access methods provide no guarantees about the byte ordering of the
+application data stored in the database, and applications are responsible
+for maintaining any necessary ordering.</b>
+<p>The DB-&gt;set_lorder interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_lorder function returns a non-zero error value on failure and 0 on success.
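+<p>A minimal sketch (the helper name and <b>dbp</b> handle are assumptions)
+forcing big endian metadata regardless of the host byte order:
+<p><blockquote><pre>int
+configure_lorder(DB *dbp)
+{
+        /* Store database metadata in big endian (4,321) byte order. */
+        return (dbp-&gt;set_lorder(dbp, 4321));
+}</pre></blockquote>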
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_malloc.html b/bdb/docs/api_c/db_set_malloc.html
new file mode 100644
index 00000000000..2d13196a3ad
--- /dev/null
+++ b/bdb/docs/api_c/db_set_malloc.html
@@ -0,0 +1,98 @@
+<!--$Id: db_set_malloc.so,v 10.18 2000/05/25 13:47:07 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_malloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_malloc</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_malloc(DB *db, void *(*db_malloc)(size_t size));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the allocation function used by the DB methods to allocate
+memory in which to return key/data items to the application.
+<p>The <a href="../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag, when specified in the <a href="../api_c/dbt.html">DBT</a> object,
+will cause the DB methods to allocate and re-allocate memory which
+then becomes the responsibility of the calling application. See <a href="../api_c/dbt.html">DBT</a>
+for more information.
+<p>On systems where there may be multiple library versions of malloc (notably
+Windows NT), specifying the <a href="../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag will fail because
+the DB library will allocate memory from a different heap than
+the application will use to free it. To avoid this problem, the
+DB-&gt;set_malloc function can be used to pass Berkeley DB a reference to the
+application's allocation routine, in which case it will be used to
+allocate the memory returned when the <a href="../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag is set.
+<p>The function specified must match the calling conventions of the
+ANSI C X3.159-1989 (ANSI C) library routine of the same name.
+<p>The DB-&gt;set_malloc interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_malloc function returns a non-zero error value on failure and 0 on success.
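+<p>A minimal sketch (the helper name and <b>dbp</b> handle are assumptions;
+&lt;stdlib.h&gt; is assumed to be included) that directs Berkeley DB to the
+application's own allocator:
+<p><blockquote><pre>int
+configure_malloc(DB *dbp)
+{
+        /*
+         * Memory returned for DBTs flagged DB_DBT_MALLOC will now come
+         * from the application's heap, so it can safely be free()'d.
+         */
+        return (dbp-&gt;set_malloc(dbp, malloc));
+}</pre></blockquote>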
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_pagesize.html b/bdb/docs/api_c/db_set_pagesize.html
new file mode 100644
index 00000000000..7fa4af53dbc
--- /dev/null
+++ b/bdb/docs/api_c/db_set_pagesize.html
@@ -0,0 +1,90 @@
+<!--$Id: db_set_pagesize.so,v 10.16 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_pagesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_pagesize</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_pagesize(DB *db, u_int32_t pagesize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the pages used to hold items in the database, in bytes.
+The minimum page size is 512 bytes and the maximum page size is 64K bytes.
+If the page size is not explicitly set, one is selected based on the
+underlying filesystem I/O block size. The automatically selected size
+has a lower limit of 512 bytes and an upper limit of 16K bytes.
+<p>For information on tuning the Berkeley DB page size, see
+<a href="../ref/am_conf/pagesize.html">Selecting a page size</a>.
+<p>The DB-&gt;set_pagesize interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_pagesize function returns a non-zero error value on failure and 0 on success.
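+<p>A minimal sketch (the 32KB value is an assumed tuning choice; the helper
+name and <b>dbp</b> handle are assumptions):
+<p><blockquote><pre>int
+configure_pagesize(DB *dbp)
+{
+        /* Use 32KB pages, within the documented 512-byte to 64KB range. */
+        return (dbp-&gt;set_pagesize(dbp, 32 * 1024));
+}</pre></blockquote>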
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_paniccall.html b/bdb/docs/api_c/db_set_paniccall.html
new file mode 100644
index 00000000000..506272c9630
--- /dev/null
+++ b/bdb/docs/api_c/db_set_paniccall.html
@@ -0,0 +1,70 @@
+<!--$Id: db_set_paniccall.so,v 10.11 2000/07/09 19:12:16 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_paniccall</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_paniccall(DB *db,
+ void (*paniccall)(DB_ENV *, int errval));
+</pre></h3>
+<h1>Description</h1>
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery. (For example, if Berkeley DB is unable
+to write log records to disk because there is insufficient disk space.)
+In these cases, the value <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is returned by Berkeley DB.
+<p>When such errors occur, it is often simpler to shut down the application
+rather than attempting to gracefully return up the stack.
+The DB-&gt;set_paniccall function is used to specify a function to be called when
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is about to be returned from a Berkeley DB method. When
+called, the <b>dbenv</b> argument will be a reference to the current
+environment, and the <b>errval</b> argument is the error value that would
+have been returned to the calling function.
+<p>For DB handles opened inside of Berkeley DB environments, calling the
+DB-&gt;set_paniccall function affects the entire environment and is equivalent to calling
+the <a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a> function.
+<p>The DB-&gt;set_paniccall interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DB-&gt;set_paniccall function returns a non-zero error value on failure and 0 on success.
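+<p>As an illustrative sketch (the callback and helper names, the message text
+and the decision to call exit are assumptions; &lt;stdio.h&gt; and
+&lt;stdlib.h&gt; are assumed to be included):
+<p><blockquote><pre>void
+panic_handler(DB_ENV *dbenv, int errval)
+{
+        /* A fatal error occurred; recovery must be run before continuing. */
+        fprintf(stderr, "Berkeley DB panic: error %d, shutting down\n", errval);
+        exit(1);
+}
+<p>
+int
+configure_paniccall(DB *dbp)
+{
+        return (dbp-&gt;set_paniccall(dbp, panic_handler));
+}</pre></blockquote>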
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_q_extentsize.html b/bdb/docs/api_c/db_set_q_extentsize.html
new file mode 100644
index 00000000000..7ab89bdba5d
--- /dev/null
+++ b/bdb/docs/api_c/db_set_q_extentsize.html
@@ -0,0 +1,90 @@
+<!--$Id: db_set_q_extentsize.so,v 1.3 2000/11/21 19:25:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_q_extentsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_q_extentsize</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_q_extentsize(DB *db, u_int32_t extentsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the extents used to hold pages in a Queue database,
+specified as a number of pages. Each extent is created as a separate
+physical file. If no extent size is set, the default behavior is to
+create only a single underlying database file.
+<p>For information on tuning the extent size, see
+<a href="../ref/am_conf/extentsize.html">Selecting an extent size</a>.
+<p>The DB-&gt;set_q_extentsize interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_q_extentsize function returns a non-zero error value on failure and 0 on success.
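+<p>A minimal sketch (the 128-page extent is an assumed tuning choice; the
+helper name and <b>dbp</b> handle are assumptions):
+<p><blockquote><pre>int
+configure_extentsize(DB *dbp)
+{
+        /* Store the Queue database in separate files of 128 pages each. */
+        return (dbp-&gt;set_q_extentsize(dbp, 128));
+}</pre></blockquote>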
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_re_delim.html b/bdb/docs/api_c/db_set_re_delim.html
new file mode 100644
index 00000000000..6101130a5e5
--- /dev/null
+++ b/bdb/docs/api_c/db_set_re_delim.html
@@ -0,0 +1,90 @@
+<!--$Id: db_set_re_delim.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_re_delim</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_re_delim</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_re_delim(DB *db, int re_delim);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the delimiting byte used to mark the end of a record in the backing
+source file for the Recno access method.
+<p>This byte is used for variable-length records if the <b>re_source</b>
+file is specified. If the <b>re_source</b> file is specified and no
+delimiting byte was specified, &lt;newline&gt; characters (i.e.
+ASCII 0x0a) are interpreted as end-of-record markers.
+<p>The DB-&gt;set_re_delim interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_re_delim function returns a non-zero error value on failure and 0 on success.
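+<p>A minimal sketch (the tab delimiter is an assumed choice for a
+tab-separated backing file; the helper name and <b>dbp</b> handle are
+assumptions):
+<p><blockquote><pre>int
+configure_delim(DB *dbp)
+{
+        /* Records in the re_source file end with a tab, not a newline. */
+        return (dbp-&gt;set_re_delim(dbp, '\t'));
+}</pre></blockquote>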
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_re_len.html b/bdb/docs/api_c/db_set_re_len.html
new file mode 100644
index 00000000000..67b67ddfc0a
--- /dev/null
+++ b/bdb/docs/api_c/db_set_re_len.html
@@ -0,0 +1,94 @@
+<!--$Id: db_set_re_len.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_re_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_re_len</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_re_len(DB *db, u_int32_t re_len);
+</pre></h3>
+<h1>Description</h1>
+<p>For the Queue access method, specify that the records are of length
+<b>re_len</b>.
+<p>For the Recno access method, specify that the records are fixed-length,
+not byte delimited, and are of length <b>re_len</b>.
+<p>Any records added to the database that are less than <b>re_len</b> bytes
+long are automatically padded (see <a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a> for more
+information).
+<p>Any attempt to insert records into the database that are greater than
+<b>re_len</b> bytes long will cause the call to fail immediately and
+return an error.
+<p>The DB-&gt;set_re_len interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_re_len function returns a non-zero error value on failure and 0 on success.
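+<p>As an illustration only (the file name and record length below are
+hypothetical, and the six-argument DB-&gt;open signature documented in this
+release is assumed), a minimal sketch of configuring a Recno database for
+64-byte fixed-length records might look as follows:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+open_fixed_recno(DB **dbpp)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+    /* DB-&gt;set_re_len must be called before DB-&gt;open. */
+    if ((ret = dbp-&gt;set_re_len(dbp, 64)) != 0 ||
+        (ret = dbp-&gt;open(dbp,
+        "records.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+    }
+    *dbpp = dbp;
+    return (0);
+}
+</pre>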
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_re_pad.html b/bdb/docs/api_c/db_set_re_pad.html
new file mode 100644
index 00000000000..43a6f947f5d
--- /dev/null
+++ b/bdb/docs/api_c/db_set_re_pad.html
@@ -0,0 +1,88 @@
+<!--$Id: db_set_re_pad.so,v 10.16 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_re_pad</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_re_pad</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_re_pad(DB *db, int re_pad);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the padding character for short, fixed-length records for the Queue
+and Recno access methods.
+<p>If no pad character is specified, &lt;space&gt; characters (i.e.,
+ASCII 0x20) are used for padding.
+<p>The DB-&gt;set_re_pad interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_re_pad function returns a non-zero error value on failure and 0 on success.
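+<p>As a brief sketch (the pad character, record length and file name below
+are arbitrary choices for illustration, not defaults), the pad byte is
+configured together with the record length before the database is opened:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+open_padded_queue(DB **dbpp)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+    /* 80-byte records, padded with '.' instead of &lt;space&gt;. */
+    if ((ret = dbp-&gt;set_re_len(dbp, 80)) != 0 ||
+        (ret = dbp-&gt;set_re_pad(dbp, '.')) != 0 ||
+        (ret = dbp-&gt;open(dbp,
+        "queue.db", NULL, DB_QUEUE, DB_CREATE, 0664)) != 0) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+    }
+    *dbpp = dbp;
+    return (0);
+}
+</pre>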
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_re_source.html b/bdb/docs/api_c/db_set_re_source.html
new file mode 100644
index 00000000000..1a57cfea339
--- /dev/null
+++ b/bdb/docs/api_c/db_set_re_source.html
@@ -0,0 +1,130 @@
+<!--$Id: db_set_re_source.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_re_source</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_re_source</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_re_source(DB *db, char *re_source);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the underlying source file for the Recno access method. The purpose
+of the <b>re_source</b> value is to provide fast access and modification
+to databases that are normally stored as flat text files.
+<p>If the <b>re_source</b> field is set, it specifies an underlying flat
+text database file that is read to initialize a transient record number
+index. In the case of variable length records, the records are separated
+as specified by <a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>. For example, standard UNIX
+byte stream files can be interpreted as a sequence of variable length
+records separated by &lt;newline&gt; characters.
+<p>In addition, when cached data would normally be written back to the
+underlying database file (e.g., the <a href="../api_c/db_close.html">DB-&gt;close</a> or <a href="../api_c/db_sync.html">DB-&gt;sync</a>
+methods are called), the in-memory copy of the database will be written
+back to the <b>re_source</b> file.
+<p>By default, the backing source file is read lazily, i.e., records are not
+read from the file until they are requested by the application.
+<b>If multiple processes (not threads) are accessing a Recno database
+concurrently and either inserting or deleting records, the backing source
+file must be read in its entirety before more than a single process
+accesses the database, and only that process should specify the backing
+source file as part of the <a href="../api_c/db_open.html">DB-&gt;open</a> call. See the <a href="../api_c/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a>
+flag for more information.</b>
+<p><b>Reading and writing the backing source file specified by <b>re_source</b>
+cannot be transactionally protected because it involves filesystem
+operations that are not part of the Berkeley DB transaction methodology.</b>
+For this reason, if a temporary database is used to hold the records,
+i.e., a NULL was specified as the <b>file</b> argument to <a href="../api_c/db_open.html">DB-&gt;open</a>,
+it is possible to lose the contents of the <b>re_source</b> file, e.g.,
+if the system crashes at the right instant.
+If a file is used to hold the database, i.e., a file name was specified
+as the <b>file</b> argument to <a href="../api_c/db_open.html">DB-&gt;open</a>, normal database
+recovery on that file can be used to prevent information loss,
+although it is still possible that the contents of <b>re_source</b>
+will be lost if the system crashes.
+<p>The <b>re_source</b> file must already exist (but may be zero-length) when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called.
+<p>It is not an error to specify a read-only <b>re_source</b> file when
+creating a database, nor is it an error to modify the resulting database.
+However, any attempt to write the changes to the backing source file using
+either the <a href="../api_c/db_sync.html">DB-&gt;sync</a> or <a href="../api_c/db_close.html">DB-&gt;close</a> functions will fail, of course.
+Specify the <a href="../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag to the <a href="../api_c/db_close.html">DB-&gt;close</a> function to stop it
+from attempting to write the changes to the backing file; instead, the
+changes will be silently discarded.
+<p>For all of the above reasons, the <b>re_source</b> field is generally
+used to specify databases that are read-only for DB applications,
+and that are either generated on the fly by software tools, or modified
+using a different mechanism, e.g., a text editor.
+<p>The DB-&gt;set_re_source interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_re_source function returns a non-zero error value on failure and 0 on success.
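+<p>A minimal sketch, assuming a hypothetical newline-delimited text file
+named "accounts.txt" that already exists, might associate that file with a
+Recno database as follows:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+open_text_backed_recno(DB **dbpp)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+    /* The backing source must be configured before DB-&gt;open. */
+    if ((ret = dbp-&gt;set_re_source(dbp, "accounts.txt")) != 0 ||
+        (ret = dbp-&gt;open(dbp,
+        "accounts.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+    }
+    *dbpp = dbp;
+    return (0);
+}
+</pre>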
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_set_realloc.html b/bdb/docs/api_c/db_set_realloc.html
new file mode 100644
index 00000000000..b3d8a05f771
--- /dev/null
+++ b/bdb/docs/api_c/db_set_realloc.html
@@ -0,0 +1,99 @@
+<!--$Id: db_set_realloc.so,v 10.8 2000/05/25 13:47:07 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_realloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;set_realloc</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_realloc(DB *db,
+ void *(*db_realloc_fcn)(void *ptr, size_t size));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the realloc function used by the DB methods to allocate memory
+in which to return key/data items to the application.
+<p>The <a href="../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> flag, when specified in the <a href="../api_c/dbt.html">DBT</a> object,
+will cause the DB methods to allocate and re-allocate memory which
+then becomes the responsibility of the calling application. See <a href="../api_c/dbt.html">DBT</a>
+for more information.
+<p>On systems where there may be multiple library versions of realloc (notably
+Windows NT), specifying the <a href="../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> flag will fail because
+the DB library will allocate memory from a different heap than
+the application will use to free it. To avoid this problem, the
+DB-&gt;set_realloc function can be used to pass Berkeley DB a reference to the
+application's allocation routine, in which case it will be used to
+allocate the memory returned when the <a href="../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> flag is set.
+<p>The function specified must match the calling conventions of the
+ANSI C X3.159-1989 (ANSI C) library routine of the same name.
+<p>The DB-&gt;set_realloc interface may only be used to configure Berkeley DB before
+the <a href="../api_c/db_open.html">DB-&gt;open</a> interface is called.
+<p>The DB-&gt;set_realloc function returns a non-zero error value on failure and 0 on success.
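+<p>As a sketch of the intended usage (the key below is hypothetical and
+error handling is abbreviated), the application passes the C library's
+realloc to the handle before DB-&gt;open; a DBT with the DB_DBT_REALLOC
+flag set then receives its data in memory the application later frees:
+<p><pre>
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/*
+ * Assumes dbp-&gt;set_realloc(dbp, realloc) was called on this handle
+ * before DB-&gt;open, so the buffer below comes from the C library heap.
+ */
+int
+get_with_realloc(DB *dbp)
+{
+    DBT key, data;
+    int ret;
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = "fruit";
+    key.size = sizeof("fruit");
+    data.flags = DB_DBT_REALLOC;
+
+    /* The library grows data.data as needed, using realloc. */
+    ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0);
+
+    /* The returned memory is the application's to free. */
+    if (data.data != NULL)
+        free(data.data);
+    return (ret);
+}
+</pre>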
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_stat.html b/bdb/docs/api_c/db_stat.html
new file mode 100644
index 00000000000..92412d76d78
--- /dev/null
+++ b/bdb/docs/api_c/db_stat.html
@@ -0,0 +1,195 @@
+<!--$Id: db_stat.so,v 10.37 2000/10/03 21:55:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;stat</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;stat(DB *db,
+ void *sp, void *(*db_malloc)(size_t), u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;stat function creates a statistical structure and
+copies a pointer to it into user-specified memory locations.
+Specifically, if <b>sp</b> is non-NULL, a pointer to the statistics
+for the database is copied into the memory location it references.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_CACHED_COUNTS">DB_CACHED_COUNTS</a><dd>Return a cached count of the keys and records in a database. This flag
+makes it possible for applications to request a possibly approximate key
+and record count without incurring the performance penalty of traversing
+the entire database. The statistics information described for the access
+method <b>XX_nkeys</b> and <b>XX_ndata</b> fields below is filled in,
+but no other information is collected. If the cached information has
+never been set, the fields will be returned set to 0.
+<p><dt><a name="DB_RECORDCOUNT">DB_RECORDCOUNT</a><dd>Return a count of the records in a Btree or Recno Access Method database.
+This flag makes it possible for applications to request a record count
+without incurring the performance penalty of traversing the entire
+database. The statistics information described for the <b>bt_nkeys</b>
+field below is filled in, but no other information is collected.
+<p>This option is only available for Recno databases, or Btree databases
+where the underlying database was created with the <a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+</dl>
+<p>The DB-&gt;stat function may access all of the pages in the database,
+incurring a severe performance penalty as well as possibly flushing the
+underlying buffer pool.
+<p>In the presence of multiple threads or processes accessing an active
+database, the information returned by DB-&gt;stat may be out-of-date.
+<p>If the database was not opened read-only and the DB_CACHED_COUNTS
+flag was not specified, the cached key and record numbers will be updated
+after the statistical information has been gathered.
+<p>The DB-&gt;stat function cannot be transaction protected. For this reason,
+it should be called in a thread of control that has no open cursors or
+active transactions.
+<p>The DB-&gt;stat function returns a non-zero error value on failure and 0 on success.
+<h3>Hash Statistics</h3>
+<p>In the case of a Hash database,
+the statistics are stored in a structure of type DB_HASH_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t hash_magic;<dd>Magic number that identifies the file as a Hash file.
+<dt>u_int32_t hash_version;<dd>The version of the Hash database.
+<dt>u_int32_t hash_nkeys;<dd>The number of unique keys in the database.
+<dt>u_int32_t hash_ndata;<dd>The number of key/data pairs in the database.
+<dt>u_int32_t hash_pagesize;<dd>The underlying Hash database page (and bucket) size.
+<dt>u_int32_t hash_nelem;<dd>The estimated size of the hash table specified at database creation time.
+<dt>u_int32_t hash_ffactor;<dd>The desired fill factor (number of items per bucket) specified at database
+creation time.
+<dt>u_int32_t hash_buckets;<dd>The number of hash buckets.
+<dt>u_int32_t hash_free;<dd>The number of pages on the free list.
+<dt>u_int32_t hash_bfree;<dd>The number of bytes free on bucket pages.
+<dt>u_int32_t hash_bigpages;<dd>The number of big key/data pages.
+<dt>u_int32_t hash_big_bfree;<dd>The number of bytes free on big item pages.
+<dt>u_int32_t hash_overflows;<dd>The number of overflow pages (overflow pages are pages that contain items
+that did not fit in the main bucket page).
+<dt>u_int32_t hash_ovfl_free;<dd>The number of bytes free on overflow pages.
+<dt>u_int32_t hash_dup;<dd>The number of duplicate pages.
+<dt>u_int32_t hash_dup_free;<dd>The number of bytes free on duplicate pages.
+</dl>
+<h3>Btree and Recno Statistics</h3>
+<p>In the case of a Btree or Recno database,
+the statistics are stored in a structure of type DB_BTREE_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t bt_magic;<dd>Magic number that identifies the file as a Btree database.
+<dt>u_int32_t bt_version;<dd>The version of the Btree database.
+<dt>u_int32_t bt_nkeys;<dd>For the Btree Access Method, the number of unique keys in the database.
+<p>For the Recno Access Method, the number of records in the database.
+<dt>u_int32_t bt_ndata;<dd>For the Btree Access Method, the number of key/data pairs in the database.
+<p>For the Recno Access Method, the number of records in the database. If
+the database has been configured to not re-number records during
+deletion, the number of records will only reflect undeleted records.
+<dt>u_int32_t bt_pagesize;<dd>Underlying database page size.
+<dt>u_int32_t bt_minkey;<dd>The minimum keys per page.
+<dt>u_int32_t bt_re_len;<dd>The length of fixed-length records.
+<dt>u_int32_t bt_re_pad;<dd>The padding byte value for fixed-length records.
+<dt>u_int32_t bt_levels;<dd>Number of levels in the database.
+<dt>u_int32_t bt_int_pg;<dd>Number of database internal pages.
+<dt>u_int32_t bt_leaf_pg;<dd>Number of database leaf pages.
+<dt>u_int32_t bt_dup_pg;<dd>Number of database duplicate pages.
+<dt>u_int32_t bt_over_pg;<dd>Number of database overflow pages.
+<dt>u_int32_t bt_free;<dd>Number of pages on the free list.
+<dt>u_int32_t bt_int_pgfree;<dd>Number of bytes free in database internal pages.
+<dt>u_int32_t bt_leaf_pgfree;<dd>Number of bytes free in database leaf pages.
+<dt>u_int32_t bt_dup_pgfree;<dd>Number of bytes free in database duplicate pages.
+<dt>u_int32_t bt_over_pgfree;<dd>Number of bytes free in database overflow pages.
+</dl>
+<h3>Queue Statistics</h3>
+<p>In the case of a Queue database,
+the statistics are stored in a structure of type DB_QUEUE_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t qs_magic;<dd>Magic number that identifies the file as a Queue file.
+<dt>u_int32_t qs_version;<dd>The version of the Queue file type.
+<dt>u_int32_t qs_nkeys;<dd>The number of records in the database.
+<dt>u_int32_t qs_ndata;<dd>The number of records in the database.
+<dt>u_int32_t qs_pagesize;<dd>Underlying database page size.
+<dt>u_int32_t qs_pages;<dd>Number of pages in the database.
+<dt>u_int32_t qs_re_len;<dd>The length of the records.
+<dt>u_int32_t qs_re_pad;<dd>The padding byte value for the records.
+<dt>u_int32_t qs_pgfree;<dd>Number of bytes free in database pages.
+<dt>u_int32_t qs_start;<dd>Start offset.
+<dt>u_int32_t qs_first_recno;<dd>First undeleted record in the database.
+<dt>u_int32_t qs_cur_recno;<dd>Last allocated record number in the database.
+</dl>
+<p>The DB-&gt;stat function returns a non-zero error value on failure and 0 on success.
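+<p>As a brief sketch (assuming an already-open Btree handle; the statistics
+structure is allocated by the library and freed by the caller, as described
+above), the key count might be retrieved as follows:
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+int
+print_key_count(DB *dbp)
+{
+    DB_BTREE_STAT *sp;
+    int ret;
+
+    /* A NULL db_malloc means the library uses malloc(3). */
+    if ((ret = dbp-&gt;stat(dbp, &amp;sp, NULL, 0)) != 0)
+        return (ret);
+    printf("keys: %lu\n", (unsigned long)sp-&gt;bt_nkeys);
+
+    /* A single free releases the entire returned structure. */
+    free(sp);
+    return (0);
+}
+</pre>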
+<h1>Errors</h1>
+<p>The DB-&gt;stat function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;stat function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_sync.html b/bdb/docs/api_c/db_sync.html
new file mode 100644
index 00000000000..6af624a88f3
--- /dev/null
+++ b/bdb/docs/api_c/db_sync.html
@@ -0,0 +1,98 @@
+<!--$Id: db_sync.so,v 10.20 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;sync</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;sync(DB *db, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;sync function flushes any cached information to disk.
+<p>If the database is in memory only, the DB-&gt;sync function has no effect and
+will always succeed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>See <a href="../api_c/db_close.html">DB-&gt;close</a> for a discussion of Berkeley DB and cached data.
+<p>The DB-&gt;sync function returns a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if the underlying database still has
+dirty pages in the cache. (The only reason to return
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> is if another thread of control was writing pages
+in the underlying database file at the same time as the
+DB-&gt;sync function was being called. For this reason, a return of
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> can normally be ignored, or, in cases where it is
+a possible return value, there may be no reason to call
+DB-&gt;sync.)
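+<p>For example, a minimal sketch of flushing a handle, treating
+DB_INCOMPLETE as described above, might look like this:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+flush_db(DB *dbp)
+{
+    int ret;
+
+    ret = dbp-&gt;sync(dbp, 0);
+
+    /* Another thread was writing pages; not treated as an error here. */
+    if (ret == DB_INCOMPLETE)
+        ret = 0;
+    return (ret);
+}
+</pre>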
+<h1>Errors</h1>
+<p>The DB-&gt;sync function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;sync function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;sync function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_upgrade.html b/bdb/docs/api_c/db_upgrade.html
new file mode 100644
index 00000000000..e31b4d447af
--- /dev/null
+++ b/bdb/docs/api_c/db_upgrade.html
@@ -0,0 +1,135 @@
+<!--$Id: db_upgrade.so,v 10.18 2000/05/01 15:58:04 krinsky Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;upgrade</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;upgrade(DB *db, const char *file, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;upgrade function upgrades all of the databases included in the
+file <b>file</b>, if necessary. If no upgrade is necessary,
+DB-&gt;upgrade always returns success.
+<p><b>Database upgrades are done in place and are destructive; e.g., if pages
+need to be allocated and no disk space is available, the database may be
+left corrupted. Backups should be made before databases are upgraded.
+See <a href="../ref/am/upgrade.html">Upgrading databases</a> for more
+information.</b>
+<p>Unlike all other database operations, DB-&gt;upgrade may only be done
+on a system with the same byte-order as the database.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd><b>This flag is only meaningful when upgrading databases from
+releases before the Berkeley DB 3.1 release.</b>
+<p>As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release, the
+on-disk format of duplicate data items changed. Correctly upgrading the
+format requires that applications specify whether the duplicate data items
+in the database are sorted. Specifying the DB_DUPSORT flag
+informs DB-&gt;upgrade that the duplicates are sorted; otherwise, they
+are assumed to be unsorted. Incorrectly specifying the value of this flag
+may lead to database corruption.
+<p>Further, because the DB-&gt;upgrade function upgrades a physical file
+(including all of the databases it contains), it is not possible to use
+DB-&gt;upgrade to upgrade files where some of the databases it
+includes have sorted duplicate data items and some of the databases it
+includes have unsorted duplicate data items. If the file does not have
+more than a single database, or the databases do not support duplicate
+data items, or all of the databases that support duplicate data items
+support the same style of duplicates (either sorted or unsorted),
+DB-&gt;upgrade will work correctly as long as the DB_DUPSORT
+flag is correctly specified. Otherwise, the file cannot be upgraded using
+DB-&gt;upgrade, and must be upgraded manually by dumping and
+re-loading the databases.
+</dl>
+<p>The DB-&gt;upgrade function returns a non-zero error value on failure and 0 on success.
+<p>The DB-&gt;upgrade function is the underlying function used by the <a href="../utility/db_upgrade.html">db_upgrade</a> utility.
+See the <a href="../utility/db_upgrade.html">db_upgrade</a> utility source code for an example of using DB-&gt;upgrade
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
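+<p>A minimal sketch (the file name is hypothetical, and a backup of the
+file is assumed to have been made first, as recommended above):
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+upgrade_file(const char *file)
+{
+    DB *dbp;
+    int ret, t_ret;
+
+    if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+    /* Upgrade in place; a no-op if the file is already current. */
+    ret = dbp-&gt;upgrade(dbp, file, 0);
+    if ((t_ret = dbp-&gt;close(dbp, 0)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    return (ret);
+}
+</pre>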
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_c/env_open.html">DBENV-&gt;open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, DB-&gt;upgrade
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;upgrade function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The database is not in the same byte-order as the system.
+</dl>
+<p><dl compact>
+<p><dt><a name="DB_OLD_VERSION">DB_OLD_VERSION</a><dd>The database cannot be upgraded by this version of the Berkeley DB software.
+</dl>
+<p>The DB-&gt;upgrade function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;upgrade function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/db_verify.html b/bdb/docs/api_c/db_verify.html
new file mode 100644
index 00000000000..d011d90ab8d
--- /dev/null
+++ b/bdb/docs/api_c/db_verify.html
@@ -0,0 +1,150 @@
+<!--$Id: db_verify.so,v 10.3 2000/04/11 15:13:51 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;verify</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DB-&gt;verify</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;verify(DB *db, const char *file,
+ const char *database, FILE *outfile, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;verify function verifies the integrity of all databases in the
+file specified by the file argument, and optionally outputs the databases'
+key/data pairs to a file stream.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_SALVAGE">DB_SALVAGE</a><dd>Write the key/data pairs from all databases in the file to the file stream
+named in
+the <b>outfile</b> argument. The output format is the same as that
+specified for the <a href="../utility/db_dump.html">db_dump</a> utility and can be used as input for
+the <a href="../utility/db_load.html">db_load</a> utility.
+<p>Because the key/data pairs are output in page order as opposed to the sort
+order used by <a href="../utility/db_dump.html">db_dump</a>, using DB-&gt;verify to dump key/data
+pairs normally produces less than optimal loads for Btree databases.
+</dl>
+<p>In addition, the following flags may be set by bitwise inclusively <b>OR</b>'ing them into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_AGGRESSIVE">DB_AGGRESSIVE</a><dd>Output <b>all</b> the key/data pairs in the file that can be found.
+By default, DB-&gt;verify does not assume corruption. For example,
+if a key/data pair on a page is marked as deleted, it is not then written
+to the output file. When DB_AGGRESSIVE is specified, corruption
+is assumed, and any key/data pair that can be found is written. In this
+case, key/data pairs that are corrupted or have been deleted may appear
+in the output (even if the file being salvaged is in no way corrupt), and
+the output will almost certainly require editing before being loaded into
+a database.
+<p><dt><a name="DB_NOORDERCHK">DB_NOORDERCHK</a><dd>Skip the database checks for btree and duplicate sort order and for
+hashing.
+<p>The DB-&gt;verify function normally verifies that btree keys and duplicate
+items are correctly sorted and hash keys are correctly hashed. If the
+file being verified contains multiple databases using differing sorting
+or hashing algorithms, some of them must necessarily fail database
+verification as only one sort order or hash function can be specified
+before DB-&gt;verify is called. To verify files with multiple
+databases having differing sorting orders or hashing functions, first
+perform verification of the file as a whole by using the
+DB_NOORDERCHK flag, and then individually verify the sort order
+and hashing function for each database in the file using the
+DB_ORDERCHKONLY flag.
+<p><dt><a name="DB_ORDERCHKONLY">DB_ORDERCHKONLY</a><dd>Perform the database checks for btree and duplicate sort order and for
+hashing, skipped by DB_NOORDERCHK.
+<p>When this flag is specified, a <b>database</b> argument should also be
+specified, indicating the database in the physical file which is to be
+checked. This flag is only safe to use on databases that have already
+successfully been verified using DB-&gt;verify with the
+DB_NOORDERCHK flag set.
+</dl>
+<p>The database argument must be set to NULL except when the
+DB_ORDERCHKONLY flag is set.
+<p>The DB-&gt;verify function returns a non-zero error value on failure, 0 on success, and <a href="../ref/program/errorret.html#DB_VERIFY_BAD">DB_VERIFY_BAD</a> if a database is corrupted. When the
+DB_SALVAGE flag is specified, the <a href="../ref/program/errorret.html#DB_VERIFY_BAD">DB_VERIFY_BAD</a> return
+means that some key/data pairs in the file may not have been successfully
+output.
+<p>The DB-&gt;verify function is the underlying function used by the <a href="../utility/db_verify.html">db_verify</a> utility.
+See the <a href="../utility/db_verify.html">db_verify</a> utility source code for an example of using DB-&gt;verify
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
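+<p>A minimal salvage sketch (the file name is hypothetical, and the DB
+handle is not used again after the call in this sketch):
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+int
+salvage_file(const char *file)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+    /* Dump whatever key/data pairs can be found to stdout. */
+    return (dbp-&gt;verify(dbp, file, NULL, stdout, DB_SALVAGE));
+}
+</pre>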
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_c/env_open.html">DBENV-&gt;open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, DB-&gt;verify
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;verify function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;verify function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;verify function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>,
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_malloc.html">DB-&gt;set_malloc</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_realloc.html">DB-&gt;set_realloc</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/dbc_close.html b/bdb/docs/api_c/dbc_close.html
new file mode 100644
index 00000000000..20eb28d953d
--- /dev/null
+++ b/bdb/docs/api_c/dbc_close.html
@@ -0,0 +1,64 @@
+<!--$Id: dbc_close.so,v 10.20 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_close</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_close(DBC *cursor);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_close function discards the cursor.
+<p>It is possible for the DBcursor-&gt;c_close function to return
+<a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, signaling that any enclosing transaction should
+be aborted. If the application is already intending to abort the
+transaction, this error should be ignored, and the application should
+proceed.
+<p>Once DBcursor-&gt;c_close has been called, regardless of its return, the
+cursor handle may not be used again.
+<p>The DBcursor-&gt;c_close function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_close function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The DBcursor-&gt;c_close function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_close function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a>,
+<a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a>,
+<a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a>,
+<a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a>,
+<a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>
+and
+<a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/dbc_count.html b/bdb/docs/api_c/dbc_count.html
new file mode 100644
index 00000000000..434a0ce8cbb
--- /dev/null
+++ b/bdb/docs/api_c/dbc_count.html
@@ -0,0 +1,55 @@
+<!--$Id: dbc_count.so,v 10.4 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_count</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_count(DBC *cursor, db_recno_t *countp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_count function returns a count of the number of duplicate data
+items for the key referenced by the
+cursor into the memory location referenced by <b>countp</b>.
+If the underlying database does not support duplicate data items the call
+will still succeed and a count of 1 will be returned.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>If the <b>cursor</b> argument is not yet initialized, the DBcursor-&gt;c_count function will return EINVAL.
+<p>Otherwise, the DBcursor-&gt;c_count function returns a non-zero error value on failure and 0 on success.
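+<p>The following fragment is a minimal sketch of counting the duplicate data
+items for the key currently referenced by a cursor; the variable names are
+illustrative only and error handling is abbreviated:
+<p><blockquote><pre>DBC *dbc;
+db_recno_t count;
+int ret;
+
+/* The cursor has been positioned on a key/data pair, e.g., using DB_SET. */
+if ((ret = dbc-&gt;c_count(dbc, &amp;count, 0)) == 0) {
+    /* count now holds the number of duplicates for the current key. */
+}</pre></blockquote>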
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_count function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_count function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a>,
+<a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a>,
+<a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a>,
+<a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a>,
+<a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>
+and
+<a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/dbc_del.html b/bdb/docs/api_c/dbc_del.html
new file mode 100644
index 00000000000..110d97471c8
--- /dev/null
+++ b/bdb/docs/api_c/dbc_del.html
@@ -0,0 +1,68 @@
+<!--$Id: dbc_del.so,v 10.23 2000/05/22 20:51:46 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_del</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_del(DBC *cursor, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_del function deletes the key/data pair currently referenced by
+the cursor.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The cursor position is unchanged after a delete, and subsequent calls to
+cursor functions expecting the cursor to reference an existing key will
+fail.
+<p>If the element has already been deleted, DBcursor-&gt;c_del will return
+<a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If the cursor is not yet initialized, the DBcursor-&gt;c_del function will return EINVAL.
+<p>Otherwise, the DBcursor-&gt;c_del function returns a non-zero error value on failure and 0 on success.
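+<p>The following fragment is a minimal sketch of walking a database with a
+cursor and deleting each key/data pair returned; the variable names are
+illustrative only and error handling is abbreviated:
+<p><blockquote><pre>DBC *dbc;
+DBT key, data;
+int ret;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+while ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_NEXT)) == 0)
+    if ((ret = dbc-&gt;c_del(dbc, 0)) != 0)
+        break;</pre></blockquote>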
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_del function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag was
+specified to <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<p>The DBcursor-&gt;c_del function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_del function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a>,
+<a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a>,
+<a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a>,
+<a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a>,
+<a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>
+and
+<a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/dbc_dup.html b/bdb/docs/api_c/dbc_dup.html
new file mode 100644
index 00000000000..42e3531ca04
--- /dev/null
+++ b/bdb/docs/api_c/dbc_dup.html
@@ -0,0 +1,72 @@
+<!--$Id: dbc_dup.so,v 10.8 2000/03/17 01:53:58 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_dup</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_dup</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_dup(DBC *cursor, DBC **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_dup function creates a new cursor that uses the same transaction
+and locker ID as the original cursor. This is useful when an application
+is using locking and requires two or more cursors in the same thread of
+control.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_POSITION">DB_POSITION</a><dd>The newly created cursor is initialized to reference the same position
+in the database as the original cursor and hold the same locks. If the
+DB_POSITION flag is not specified, then the created cursor is
+uninitialized and will behave like a cursor newly created using
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>.
+</dl>
+<p>When using the Berkeley DB Concurrent Data Store product, there can be only one active write cursor
+at a time. For this reason, attempting to duplicate a cursor for which
+the <a href="../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag was specified during creation will return
+an error.
+<p>If the <b>cursor</b> argument is not yet initialized, the DBcursor-&gt;c_dup function will return EINVAL.
+<p>Otherwise, the DBcursor-&gt;c_dup function returns a non-zero error value on failure and 0 on success.
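+<p>The following fragment is a minimal sketch of duplicating a cursor at its
+current position; the variable names are illustrative only and error
+handling is abbreviated:
+<p><blockquote><pre>DBC *dbc, *dbc_dup;
+int ret;
+
+/* dbc was returned by DB-&gt;cursor and has been positioned in the database. */
+if ((ret = dbc-&gt;c_dup(dbc, &amp;dbc_dup, DB_POSITION)) != 0) {
+    /* handle error */
+}</pre></blockquote>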
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_dup function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>cursor</b> argument was created using the
+<a href="../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag in the Berkeley DB Concurrent Data Store product.
+</dl>
+<p>The DBcursor-&gt;c_dup function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_dup function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a>,
+<a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a>,
+<a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a>,
+<a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a>,
+<a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>
+and
+<a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/dbc_get.html b/bdb/docs/api_c/dbc_get.html
new file mode 100644
index 00000000000..014661f33e7
--- /dev/null
+++ b/bdb/docs/api_c/dbc_get.html
@@ -0,0 +1,167 @@
+<!--$Id: dbc_get.so,v 10.46 2001/01/19 17:29:46 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_get</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_get(DBC *cursor,
+ DBT *key, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_get function retrieves key/data pairs from the database. The
+address and length of the key
+are returned in the object referenced by <b>key</b> (except for the case
+of the DB_SET flag where the <b>key</b> object is unchanged),
+and the address and length of
+the data are returned in the object referenced by <b>data</b>.
+<p>Modifications to the database during a sequential scan will be reflected
+in the scan, i.e. records inserted behind a cursor will not be returned
+while records inserted in front of a cursor will be returned.
+<p>In Queue and Recno databases, missing entries (i.e., entries that were
+never explicitly created or that were created and then deleted), will be
+skipped during a sequential scan.
+<p>If multiple threads or processes insert items into the same database file
+without using locking, the results are undefined.
+For more detail,
+see <a href="../ref/am/stability.html">Cursor stability</a>.
+<p>The <b>flags</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the key/data pair currently referenced by the cursor.
+<p>If the cursor key/data pair was deleted, DBcursor-&gt;c_get will return
+<a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If the cursor is not yet initialized, the DBcursor-&gt;c_get function will return EINVAL.
+<p><dt><a name="DB_FIRST">DB_FIRST</a>, <a name="DB_LAST">DB_LAST</a><dd>The cursor is set to reference the first (last) key/data pair of the
+database, and that pair is returned. In the presence of duplicate key
+values, the first (last) data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, DBcursor-&gt;c_get using the
+DB_FIRST (DB_LAST) flags will ignore any keys that exist
+but were never explicitly created by the application or were created and
+later deleted.
+<p>If the database is empty, DBcursor-&gt;c_get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_GET_BOTH">DB_GET_BOTH</a><dd>The DB_GET_BOTH flag is identical to the DB_SET flag,
+except that both the key and the data arguments must be matched by the
+key and data item in the database.
+<p><dt><a name="DB_GET_RECNO">DB_GET_RECNO</a><dd>Return the record number associated with the cursor. The record number
+will be returned in <b>data</b> as described in <a href="../api_c/dbt.html">DBT</a>. The
+<b>key</b> parameter is ignored.
+<p>For DB_GET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the <a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+<p><dt><a name="DB_JOIN_ITEM">DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup key for
+the primary database, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+<p>For DB_JOIN_ITEM to be specified, the underlying cursor must have
+been returned from the <a href="../api_c/db_join.html">DB-&gt;join</a> function.
+<p><dt><a name="DB_NEXT">DB_NEXT</a>, <a name="DB_PREV">DB_PREV</a><dd>If the cursor is not yet initialized, DB_NEXT (DB_PREV)
+is identical to DB_FIRST (DB_LAST). Otherwise, the cursor
+is moved to the next (previous) key/data pair of the database, and that
+pair is returned. In the presence of duplicate key values, the value of
+the key may not change.
+<p>If the database is a Queue or Recno database, DBcursor-&gt;c_get using the
+DB_NEXT (DB_PREV) flag will skip any keys that exist but
+were never explicitly created by the application or were created and later
+deleted.
+<p>If the cursor is already on the last (first) record in the database,
+DBcursor-&gt;c_get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_NEXT_DUP">DB_NEXT_DUP</a><dd>If the next key/data pair of the database is a duplicate record for the
+current key/data pair, the cursor is moved to the next key/data pair of
+the database, and that pair is returned. Otherwise, DBcursor-&gt;c_get will
+return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>If the cursor is not yet initialized, the DBcursor-&gt;c_get function will return EINVAL.
+<p><dt><a name="DB_NEXT_NODUP">DB_NEXT_NODUP</a>, <a name="DB_PREV_NODUP">DB_PREV_NODUP</a><dd>If the cursor is not yet initialized, DB_NEXT_NODUP
+(DB_PREV_NODUP) is identical to DB_FIRST
+(DB_LAST). Otherwise, the cursor is moved to the next (previous)
+non-duplicate key/data pair of the database, and that pair is returned.
+<p>If the database is a Queue or Recno database, DBcursor-&gt;c_get using the
+DB_NEXT_NODUP (DB_PREV_NODUP) flags will ignore any keys
+that exist but were never explicitly created by the application or were
+created and later deleted.
+<p>If no non-duplicate key/data pairs occur after (before) the cursor
+position in the database, DBcursor-&gt;c_get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Move the cursor to the specified key/data pair of the database, and
+return the datum associated with the given key.
+<p>In the presence of duplicate key values, DBcursor-&gt;c_get will return the
+first data item for the given key.
+<p>If the database is a Queue or Recno database and the requested key exists,
+but was never explicitly created by the application or was later deleted,
+DBcursor-&gt;c_get will return <a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If no matching keys are found, DBcursor-&gt;c_get will return
+<a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_SET_RANGE">DB_SET_RANGE</a><dd>The DB_SET_RANGE flag is identical to the DB_SET flag,
+except that the key is returned as well as the data item, and, in the case
+of the Btree access method, the returned key/data pair is the smallest
+key greater than or equal to the specified key (as determined by the
+comparison function), permitting partial key matches and range
+searches.
+<p><dt><a name="DB_SET_RECNO">DB_SET_RECNO</a><dd>Move the cursor to the specific numbered record of the database, and
+return the associated key/data pair. The <b>data</b> field of the
+specified <b>key</b>
+must be a pointer to a memory location from which a <a href="../api_c/dbt.html#db_recno_t">db_recno_t</a>
+may be read, as described in <a href="../api_c/dbt.html">DBT</a>. This memory location will be
+read to determine the record to be retrieved.
+<p>For DB_SET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the <a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+</dl>
+<p>Otherwise, the DBcursor-&gt;c_get function returns a non-zero error value on failure and 0 on success.
+<p>If DBcursor-&gt;c_get fails for any reason, the state of the cursor will be
+unchanged.
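+<p>The following fragment is a minimal sketch of a sequential scan of a
+database using the DB_NEXT flag; the variable names are illustrative only
+and error handling is abbreviated:
+<p><blockquote><pre>DB *dbp;
+DBC *dbc;
+DBT key, data;
+int ret;
+
+if ((ret = dbp-&gt;cursor(dbp, NULL, &amp;dbc, 0)) != 0)
+    return (ret);
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+while ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_NEXT)) == 0) {
+    /* The key and data fields now reference the returned item. */
+}
+if (ret != DB_NOTFOUND) {
+    /* handle error */
+}
+(void)dbc-&gt;c_close(dbc);</pre></blockquote>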
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_get function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>There was insufficient memory to return the requested item.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cursor was not currently initialized.
+</dl>
+<p>The DBcursor-&gt;c_get function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_get function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a>,
+<a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a>,
+<a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a>,
+<a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a>,
+<a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>
+and
+<a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/dbc_put.html b/bdb/docs/api_c/dbc_put.html
new file mode 100644
index 00000000000..9a8a0e8950a
--- /dev/null
+++ b/bdb/docs/api_c/dbc_put.html
@@ -0,0 +1,154 @@
+<!--$Id: dbc_put.so,v 10.33 2000/12/04 17:02:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_put</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_put(DBC *cursor, DBT *key, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_put function stores key/data pairs into the database.
+<p>The <b>flags</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_AFTER">DB_AFTER</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key referenced by the cursor.
+The new element appears immediately after the current cursor position.
+It is an error to specify DB_AFTER if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+DB_AFTER if the underlying Recno database was not created with
+the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag. If the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag was
+specified, a new key is created, all records after the inserted item
+are automatically renumbered, and the key of the new record is returned
+in the structure referenced by the parameter <b>key</b>. The initial
+value of the <b>key</b> parameter is ignored. See <a href="../api_c/db_open.html">DB-&gt;open</a>
+for more information.
+<p>The DB_AFTER flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, DBcursor-&gt;c_put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or a duplicate sort function has been
+specified, the DBcursor-&gt;c_put function will return EINVAL.
+<p><dt><a name="DB_BEFORE">DB_BEFORE</a><dd>In the case of the Btree and Hash access methods, insert the data element
+as a duplicate element of the key referenced by the cursor. The new
+element appears immediately before the current cursor position. It is
+an error to specify DB_BEFORE if the underlying Btree or Hash
+database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+DB_BEFORE if the underlying Recno database was not created with
+the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag. If the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag was
+specified, a new key is created, the current record and all records
+after it are automatically renumbered, and the key of the new record is
+returned in the structure referenced by the parameter <b>key</b>. The
+initial value of the <b>key</b> parameter is ignored. See
+<a href="../api_c/db_open.html">DB-&gt;open</a> for more information.
+<p>The DB_BEFORE flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, DBcursor-&gt;c_put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or a duplicate sort function has been
+specified, DBcursor-&gt;c_put will return EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Overwrite the data of the key/data pair referenced by the cursor with the
+specified data item. The <b>key</b> parameter is ignored.
+<p>If a duplicate sort function has been specified and the data item of the
+current referenced key/data pair does not compare equally to the <b>data</b>
+parameter, DBcursor-&gt;c_put will return EINVAL.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, DBcursor-&gt;c_put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree, Queue or Recno, the operation
+will succeed.
+<p>If the cursor is not yet initialized, DBcursor-&gt;c_put will return EINVAL.
+<p><dt><a name="DB_KEYFIRST">DB_KEYFIRST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database and no duplicate sort function
+has been specified, the inserted data item is added as the first of the
+data items for that key.
+<p>The DB_KEYFIRST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_KEYLAST">DB_KEYLAST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database, and no duplicate sort
+function has been specified, the inserted data item is added as the last
+of the data items for that key.
+<p>The DB_KEYLAST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_NODUPDATA">DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database unless it already exists in the database.
+If the key/data pair already appears in the database, <a href="../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a>
+is returned. The DB_NODUPDATA flag may only be specified if
+the underlying database has been configured to support sorted duplicate
+data items.
+<p>The DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+</dl>
+<p>Otherwise, the DBcursor-&gt;c_put function returns a non-zero error value on failure and 0 on success.
+<p>If DBcursor-&gt;c_put fails for any reason, the state of the cursor will be
+unchanged. If DBcursor-&gt;c_put succeeds and an item is inserted into the
+database, the cursor is always positioned to reference the newly inserted
+item.
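+<p>The following fragment is a minimal sketch of storing a key/data pair
+through a cursor using the DB_KEYFIRST flag; the variable names and the
+stored strings are illustrative only and error handling is abbreviated:
+<p><blockquote><pre>DBC *dbc;
+DBT key, data;
+int ret;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+data.data = "apple";
+data.size = sizeof("apple");
+if ((ret = dbc-&gt;c_put(dbc, &amp;key, &amp;data, DB_KEYFIRST)) != 0) {
+    /* handle error */
+}</pre></blockquote>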
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_put function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_BEFORE or DB_AFTER flags were specified, and the
+underlying access method is Queue.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag was
+specified to <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<p>The DBcursor-&gt;c_put function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_put function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a>,
+<a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a>,
+<a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a>,
+<a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a>,
+<a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>
+and
+<a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/dbm.html b/bdb/docs/api_c/dbm.html
new file mode 100644
index 00000000000..783d59e6271
--- /dev/null
+++ b/bdb/docs/api_c/dbm.html
@@ -0,0 +1,220 @@
+<!--$Id: dbm.so,v 10.18 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: dbm/ndbm</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>dbm/ndbm</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;
+<p>
+typedef struct {
+ char *dptr;
+ int dsize;
+} datum;
+<hr size=1 noshade>
+<h3>Dbm Functions</h3>
+int
+dbminit(char *file);
+<p>
+int
+dbmclose();
+<p>
+datum
+fetch(datum key);
+<p>
+int
+store(datum key, datum content);
+<p>
+int
+delete(datum key);
+<p>
+datum
+firstkey(void);
+<p>
+datum
+nextkey(datum key);
+<hr size=1 noshade>
+<h3>Ndbm Functions</h3>
+DBM *
+dbm_open(char *file, int flags, int mode);
+<p>
+void
+dbm_close(DBM *db);
+<p>
+datum
+dbm_fetch(DBM *db, datum key);
+<p>
+int
+dbm_store(DBM *db, datum key, datum content, int flags);
+<p>
+int
+dbm_delete(DBM *db, datum key);
+<p>
+datum
+dbm_firstkey(DBM *db);
+<p>
+datum
+dbm_nextkey(DBM *db);
+<p>
+int
+dbm_error(DBM *db);
+<p>
+int
+dbm_clearerr(DBM *db);
+</pre></h3>
+<h1>Description</h1>
+<p>The dbm interfaces to the Berkeley DB library are intended to provide
+high-performance implementations and source code compatibility for
+applications written to historic interfaces. They are not recommended
+for any other purpose. The historic dbm database format
+<b>is not supported</b>, and databases previously built using the real
+dbm libraries cannot be read by the Berkeley DB functions.
+<p>To compile dbm applications, replace the application's
+<b>#include</b> of the dbm or ndbm include file (e.g.,
+<b>#include &lt;dbm.h&gt;</b> or <b>#include &lt;ndbm.h&gt;</b>)
+with the following two lines:
+<p><blockquote><pre>#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;</pre></blockquote>
+<p>and recompile. If the application attempts to load against a dbm library
+(e.g., <b>-ldbm</b>), remove the library from the load line.
+<p><b>Key</b> and <b>content</b> arguments are objects described by the
+<b>datum</b> typedef. A <b>datum</b> specifies a string of
+<b>dsize</b> bytes pointed to by <b>dptr</b>. Arbitrary binary data,
+as well as normal text strings, is allowed.
+<h3>Dbm Functions</h3>
+<p>Before a database can be accessed, it must be opened by dbminit.
+This will open and/or create the database <b>file</b>.db. If created,
+the database file is created read/write by owner only (as described in
+<b>chmod</b>(2)) and modified by the process' umask value at the time
+of creation (see <b>umask</b>(2)). The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>A database may be closed, and any held resources released, by calling
+dbmclose.
+<p>Once open, the data stored under a key is accessed by fetch and
+data is placed under a key by store. A key (and its associated
+contents) is deleted by delete. A linear pass through all keys
+in a database may be made, in an (apparently) random order, by use of
+firstkey and nextkey. The firstkey function will return
+the first key in the database. The nextkey function will return the next
+key in the database.
+<p>The following code will traverse the data base:
+<p><blockquote><pre>for (key = firstkey();
+ key.dptr != NULL; key = nextkey(key)) {
+ ...
+}</pre></blockquote>
+<h3>Ndbm Functions</h3>
+<p>Before a database can be accessed, it must be opened by dbm_open.
+This will open and/or create the database file <b>file.db</b> depending
+on the flags parameter (see <b>open</b>(2)). If created, the database
+file is created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by
+Berkeley DB.
+<p>Once open, the data stored under a key is accessed by dbm_fetch
+and data is placed under a key by dbm_store. The <b>flags</b>
+field can be either <b>DBM_INSERT</b> or <b>DBM_REPLACE</b>.
+<b>DBM_INSERT</b> will only insert new entries into the database and will
+not change an existing entry with the same key. <b>DBM_REPLACE</b> will
+replace an existing entry if it has the same key. A key (and its
+associated contents) is deleted by dbm_delete. A linear pass
+through all keys in a database may be made, in an (apparently) random
+order, by use of dbm_firstkey and dbm_nextkey. The
+dbm_firstkey function will return the first key in the database. The
+dbm_nextkey function will return the next key in the database.
+<p>The following code will traverse the data base:
+<p><blockquote><pre>for (key = dbm_firstkey(db);
+ key.dptr != NULL; key = dbm_nextkey(db)) {
+ ...
+}</pre></blockquote>
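+<p>The following program is a minimal sketch of creating an ndbm-style
+database, then storing and retrieving a single key/data pair; the file
+name and stored strings are illustrative only and error handling is
+abbreviated:
+<p><blockquote><pre>#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;
+#include &lt;fcntl.h&gt;
+
+int
+main()
+{
+    DBM *db;
+    datum key, content, result;
+
+    /* Creates or opens the database file example.db. */
+    if ((db = dbm_open("example", O_CREAT | O_RDWR, 0664)) == NULL)
+        return (1);
+    key.dptr = "fruit";
+    key.dsize = sizeof("fruit");
+    content.dptr = "apple";
+    content.dsize = sizeof("apple");
+    (void)dbm_store(db, key, content, DBM_REPLACE);
+    result = dbm_fetch(db, key);
+    dbm_close(db);
+    return (result.dptr == NULL);
+}</pre></blockquote>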
+<h3>Compatibility Notes</h3>
+<p>The historic dbm library created two underlying database files,
+traditionally named <b>file.dir</b> and <b>file.pag</b>. The Berkeley DB
+library creates a single database file named <b>file.db</b>.
+Applications that are aware of the underlying database file names may
+require additional source code modifications.
+<p>The historic dbminit interface required that the underlying
+<b>.dir</b> and <b>.pag</b> files already exist (empty databases were
+created by first manually creating zero-length <b>.dir</b> and
+<b>.pag</b> files). Applications that expect to create databases using
+this method may require additional source code modifications.
+<p>The historic dbm_dirfno and dbm_pagfno macros are
+supported, but will return identical file descriptors as there is only a
+single underlying file used by the Berkeley DB hashing access method.
+Applications using both file descriptors for locking may require
+additional source code modifications.
+<p>If an application using the dbm interface exits without first
+closing the database, it may lose updates because the Berkeley DB library
+buffers writes to underlying databases. Such applications will require
+additional source code modifications to work correctly with the Berkeley DB
+library.
+<h3>Dbm Diagnostics</h3>
+<p>The dbminit function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<p>The fetch function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The store function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<p>The delete function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<p>The firstkey function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The nextkey function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<h1>Errors</h1>
+<p>The dbminit, fetch, store, delete, firstkey and nextkey functions may fail
+and return a non-zero error for errors specified for other Berkeley DB and C
+library or system functions.
+<h3>Ndbm Diagnostics</h3>
+<p>The dbm_error function returns non-zero when an error has occurred reading or
+writing the database.
+<p>The dbm_clearerr function resets the error condition on the named database.
+<p>The dbm_open function returns NULL on failure, setting <b>errno</b>,
+and a pointer to a DBM structure on success.
+<p>The dbm_fetch function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The dbm_store function returns -1 on failure, setting <b>errno</b>,
+0 on success, and 1 if DBM_INSERT was set and the specified key already
+existed in the database.
+<p>The dbm_delete function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<p>The dbm_firstkey function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The dbm_nextkey function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The dbm_close function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<h1>Errors</h1>
+<p>The dbm_open, dbm_close, dbm_fetch, dbm_store, dbm_delete, dbm_firstkey
+and dbm_nextkey functions may fail and return a non-zero error for errors
+specified for other Berkeley DB and C library or system functions.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/dbt.html b/bdb/docs/api_c/dbt.html
new file mode 100644
index 00000000000..a0c3e76db8d
--- /dev/null
+++ b/bdb/docs/api_c/dbt.html
@@ -0,0 +1,158 @@
+<!--$Id: dbt.so,v 10.37 2000/12/18 21:05:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBT</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<tt>
+ <a name="2"><!--meow--></a>
+<h3>Key/Data Pairs</h3>
+<p>Storage and retrieval for the Berkeley DB access methods are based on key/data
+pairs. Both key and data items are represented by the DBT data structure.
+(The name DBT is a mnemonic for <i>data base thang</i>, and was used
+because no one could think of a reasonable name that wasn't already in
+use somewhere else.) Key and data byte strings may reference strings of
+zero length up to strings of essentially unlimited length. See
+<a href="../ref/program/dbsizes.html">Database limits</a> for more
+information.
+<p><blockquote><pre>typedef struct {
+ void *data;
+ u_int32_t size;
+ u_int32_t ulen;
+ u_int32_t dlen;
+ u_int32_t doff;
+ u_int32_t flags;
+} DBT;</pre></blockquote>
+<p>In order to ensure compatibility with future releases of Berkeley DB, all fields
+of the DBT structure that are not explicitly set should be initialized to
+0 before the first time the structure is used. Do this by declaring the
+structure external or static, or by calling the C library routine
+<b>bzero</b>(3) or <b>memset</b>(3).
+<p>By default, the <b>flags</b> structure element is expected to be 0. In
+this default case, when the application is providing Berkeley DB a key or data
+item to store into the database, Berkeley DB expects the <b>data</b> structure
+element to point to a byte string of <b>size</b> bytes. When returning
+a key/data item to the application, Berkeley DB will store into the <b>data</b>
+structure element a pointer to a byte string of <b>size</b> bytes, and
+the memory referenced by the pointer will be allocated and managed by Berkeley DB.
+<p>The elements of the DBT structure are defined as follows:
+<p><dl compact>
+<p><dt>void *<a name="data">data</a>;<dd>A pointer to a byte string.
+<p><dt>u_int32_t <a name="size">size</a>;<dd>The length of <b>data</b>, in bytes.
+<p><dt>u_int32_t <a name="ulen">ulen</a>;<dd>The size of the user's buffer (referenced by <b>data</b>), in bytes.
+This location is not written by the Berkeley DB functions.
+<p>Note that applications can determine the length of a record by setting
+the <b>ulen</b> field to 0 and checking the return value in the
+<b>size</b> field. See the DB_DBT_USERMEM flag for more information.
+<p><dt>u_int32_t <a name="dlen">dlen</a>;<dd>The length of the partial record being read or written by the application,
+in bytes. See the DB_DBT_PARTIAL flag for more information.
+<p><dt>u_int32_t <a name="doff">doff</a>;<dd>The offset of the partial record being read or written by the application,
+in bytes. See the DB_DBT_PARTIAL flag for more information.
+<p><dt>u_int32_t flags;<dd>
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_DBT_MALLOC">DB_DBT_MALLOC</a><dd>When this flag is set Berkeley DB will allocate memory for the returned key
+or data item (using <b>malloc</b>(3), or the user-specified malloc
+function) and return a pointer to it in the <b>data</b> field of the
+key or data DBT structure. As any allocated memory becomes the
+responsibility of the calling application, the caller must be able to
+determine if memory was allocated.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_REALLOC">DB_DBT_REALLOC</a><dd>When this flag is set Berkeley DB will allocate memory for the returned key
+or data item (using <b>realloc</b>(3), or the user-specified realloc
+function) and return a pointer to it in the <b>data</b> field of the
+key or data DBT structure. As any allocated memory becomes the
+responsibility of the calling application, the caller must be able to
+determine if memory was allocated.
+<p>The difference between DB_DBT_MALLOC and DB_DBT_REALLOC
+is that the latter will call <b>realloc</b>(3) instead of
+<b>malloc</b>(3), so the allocated memory will be grown as necessary
+instead of the application doing repeated free/malloc calls.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_USERMEM">DB_DBT_USERMEM</a><dd>The <b>data</b> field of the key or data structure must reference
+memory that is at least <b>ulen</b> bytes in length. If the length of
+the requested item is less than or equal to that number of bytes, the
+item is copied into the memory referenced by the <b>data</b> field.
+Otherwise, the <b>size</b> field is set to the length needed for the
+requested item, and the error ENOMEM is returned.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_PARTIAL">DB_DBT_PARTIAL</a><dd>Do partial retrieval or storage of an item. If the calling application
+is doing a get, the <b>dlen</b> bytes starting <b>doff</b> bytes from
+the beginning of the retrieved data record are returned as if they
+comprised the entire record. If any or all of the specified bytes do
+not exist in the record, the get is successful and the existing bytes
+or nul bytes are returned.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial retrieval was done using a DBT having a <b>dlen</b>
+field of 20 and a <b>doff</b> field of 85, the get call would succeed,
+the <b>data</b> field would reference the last 15 bytes of the record,
+and the <b>size</b> field would be set to 15.
+<p>If the calling application is doing a put, the <b>dlen</b> bytes
+starting <b>doff</b> bytes from the beginning of the specified key's
+data record are replaced by the data specified by the <b>data</b>
+and <b>size</b> structure elements.
+If <b>dlen</b> is smaller than <b>size</b>, the record will grow,
+and if <b>dlen</b> is larger than <b>size</b>, the record will shrink.
+If the specified bytes do not exist, the record will be extended using
+nul bytes as necessary, and the put call will succeed.
+<p>It is an error to attempt a partial put using the <a href="../api_c/db_put.html">DB-&gt;put</a> function
+in a database that supports duplicate records.
+Partial puts in databases supporting duplicate records must be done
+using a <a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a> function.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+<b>size</b> values in Queue or Recno databases with fixed-length records.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial put was done using a DBT having a <b>dlen</b> field of 20,
+a <b>doff</b> field of 85, and a <b>size</b> field of 30, the resulting
+record would be 115 bytes in length, where the last 30 bytes would be
+those specified by the put call.
+</dl>
+</dl>
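+<p>The following fragment is a minimal sketch of initializing key and data
+DBT structures and retrieving a data item into a caller-supplied buffer
+using the DB_DBT_USERMEM flag; the variable names, the lookup key and the
+buffer size are illustrative only and error handling is abbreviated:
+<p><blockquote><pre>DB *dbp;
+DBT key, data;
+char buf[1024];
+int ret;
+
+/* Zero the structures before their first use. */
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+
+key.data = "fruit";
+key.size = sizeof("fruit");
+
+/* Copy the data item into buf, or fail with ENOMEM if it does not fit. */
+data.data = buf;
+data.ulen = sizeof(buf);
+data.flags = DB_DBT_USERMEM;
+
+ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0);</pre></blockquote>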
+ <a name="3"><!--meow--></a> <a name="4"><!--meow--></a>
+<h3>Retrieved key/data permanence</h3>
+<p>When using the non-cursor Berkeley DB calls to retrieve key/data items (e.g.,
+<a href="../api_c/db_get.html">DB-&gt;get</a>), the memory referenced by the pointer stored into the
+DBT is only valid until the next call to Berkeley DB using the DB
+handle returned by <a href="../api_c/db_open.html">DB-&gt;open</a>. (This includes <b>any</b> use of
+the returned DB handle, including by another thread of control
+within the process. For this reason, when multiple threads are using the
+returned DB handle concurrently, one of the DB_DBT_MALLOC,
+DB_DBT_REALLOC or DB_DBT_USERMEM flags must be specified
+with any non-cursor DBT used for key or data retrieval.)
+<p>When using the cursor Berkeley DB calls to retrieve key/data items (e.g.,
+<a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>), the memory referenced by the pointer into the
+DBT is only valid until the next call to Berkeley DB using the
+DBC handle returned by <a href="../api_c/db_cursor.html">DB-&gt;cursor</a>.
+ <a name="5"><!--meow--></a>
+<h3>Data alignment</h3>
+<p>The Berkeley DB access methods provide no guarantees about key/data byte string
+alignment, and applications are responsible for arranging any necessary
+alignment. The DB_DBT_MALLOC, DB_DBT_REALLOC and
+DB_DBT_USERMEM flags may be used to store returned items in memory
+of arbitrary alignment.
+ <a name="6"><!--meow--></a>
+<h3>Logical Record Numbers</h3>
+<p>In all cases for the Queue and Recno access methods, and when calling the
+<a href="../api_c/db_get.html">DB-&gt;get</a> and <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> functions with the
+<a href="../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a> flag specified, the <b>data</b> field of the key
+must be a pointer to a memory location of type <b>db_recno_t</b>, as
+typedef'd in the #include &lt;db.h&gt; include file. This type is a 32-bit
+unsigned type (which limits the number of logical records in a Queue or
+Recno database, and the maximum logical record which may be directly
+retrieved from a Btree database, to 4,294,967,296). The <b>size</b>
+field of the key should be the size of that type, i.e., in the C
+programming language, <b>sizeof(db_recno_t)</b>.
+<p>Logical record numbers are 1-based, not 0-based, i.e., the first record
+in the database is record number 1.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_close.html b/bdb/docs/api_c/env_close.html
new file mode 100644
index 00000000000..fdb11e2e18c
--- /dev/null
+++ b/bdb/docs/api_c/env_close.html
@@ -0,0 +1,84 @@
+<!--$Id: env_close.so,v 10.21 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;close</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;close(DB_ENV *dbenv, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBENV-&gt;close function closes the Berkeley DB environment, freeing any
+allocated resources and closing any underlying subsystems.
+<p>Calling DBENV-&gt;close does not imply closing any databases that were
+opened in the environment.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Where the environment was initialized with the <a href="../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> flag,
+calling DBENV-&gt;close does not release any locks still held by the
+closing process, providing functionality for long-lived locks.
+Processes that wish to have all their locks
+released can do so by issuing the appropriate <a href="../api_c/lock_vec.html">lock_vec</a> call.
+<p>Where the environment was initialized with the <a href="../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>
+flag, calling DBENV-&gt;close implies calls to <a href="../api_c/memp_fclose.html">memp_fclose</a> for
+any remaining open files in the memory pool that were returned to this
+process by calls to <a href="../api_c/memp_fopen.html">memp_fopen</a>. It does not imply a call to
+<a href="../api_c/memp_fsync.html">memp_fsync</a> for those files.
+<p>Where the environment was initialized with the <a href="../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> flag,
+calling DBENV-&gt;close aborts any uncommitted transactions.
+(Applications should not depend on this behavior. If the process has
+already closed a database handle which is necessary to abort an
+uncommitted transaction, the Berkeley DB environment must then require that
+recovery be run before further operations are done, since once a
+transaction exists that cannot be committed or aborted, no future
+checkpoint can ever succeed.)
+<p>In multi-threaded applications, only a single thread may call
+DBENV-&gt;close.
+<p>Once DBENV-&gt;close has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DBENV-&gt;close function returns a non-zero error value on failure and 0 on success.
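+<p>The following fragment is a minimal sketch of closing an environment
+handle; the variable names are illustrative only and error handling is
+abbreviated:
+<p><blockquote><pre>DB_ENV *dbenv;
+int ret;
+
+/* ... any databases opened in the environment have been closed ... */
+if ((ret = dbenv-&gt;close(dbenv, 0)) != 0) {
+    /* handle error, e.g., by reporting db_strerror(ret) */
+}</pre></blockquote>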
+<h1>Errors</h1>
+<p>The DBENV-&gt;close function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBENV-&gt;close function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_create.html b/bdb/docs/api_c/env_create.html
new file mode 100644
index 00000000000..26ffb204ef2
--- /dev/null
+++ b/bdb/docs/api_c/env_create.html
@@ -0,0 +1,74 @@
+<!--$Id: env_create.so,v 10.12 2000/10/25 18:51:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_create</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_create</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_create(DB_ENV **dbenvp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The db_env_create function creates a DB_ENV structure which is
+the handle for a Berkeley DB environment. A pointer to this structure is
+returned in the memory referenced by <b>dbenvp</b>.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_CLIENT">DB_CLIENT</a><dd>Create a client environment to connect to a server.
+<p>The DB_CLIENT flag indicates to the system that this environment
+is remote on a server. The use of this flag causes the environment
+methods to use functions that call a server instead of local functions.
+Prior to making any environment or database method calls, the application
+must call the <a href="../api_c/env_set_server.html">DBENV-&gt;set_server</a> function to establish the
+connection to the server.
+</dl>
+<p>The DB_ENV handle contains a special field, "app_private", which
+is declared as type "void *". This field is provided for the use of
+the application program. It is initialized to NULL and is not further
+used by Berkeley DB in any way.
+<p>The db_env_create function returns a non-zero error value on failure and 0 on success.
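+<p>The following is a minimal sketch (assuming &lt;stdio.h&gt; has been
+included and that the fragment runs inside a function returning int) of
+creating and checking a handle; the app_private assignment simply
+illustrates the field described above:
+<p><blockquote><pre>
+DB_ENV *dbenv;
+int ret;
+
+/* Create the environment handle; flags must be 0 or DB_CLIENT. */
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0) {
+	fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
+	return (ret);
+}
+
+/* app_private is reserved for the application; Berkeley DB ignores it. */
+dbenv-&gt;app_private = NULL;
+</pre></blockquote>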
+<h1>Errors</h1>
+<p>The db_env_create function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_create function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_open.html b/bdb/docs/api_c/env_open.html
new file mode 100644
index 00000000000..677f40c1591
--- /dev/null
+++ b/bdb/docs/api_c/env_open.html
@@ -0,0 +1,205 @@
+<!--$Id: env_open.so,v 10.61 2000/12/01 15:50:31 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;open</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;open(DB_ENV *, char *db_home, u_int32_t flags, int mode);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBENV-&gt;open function is the interface for opening the Berkeley DB
+environment. It provides a structure for creating a consistent
+environment for processes using one or more of the features of Berkeley DB.
+<p>The <b>db_home</b> argument to DBENV-&gt;open (and file name
+resolution in general) is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>The <b>flags</b> argument specifies the subsystems that are initialized
+and how the application's environment affects Berkeley DB file naming, among
+other things.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p>As there are a large number of flags that can be specified, they have been
+grouped together by functionality. The first group of flags indicates
+which of the Berkeley DB subsystems should be initialized:
+<p><dl compact>
+<p><dt><a name="DB_JOINENV">DB_JOINENV</a><dd>Join an existing environment. This option allows applications to
+join an existing environment without knowing which Berkeley DB subsystems
+the environment supports.
+<p><dt><a name="DB_INIT_CDB">DB_INIT_CDB</a><dd>Initialize locking for the <a href="../ref/cam/intro.html">Berkeley DB Concurrent Data Store</a>
+product. In this mode, Berkeley DB provides multiple reader/single writer
+access. The only other subsystem that should be specified with the
+DB_INIT_CDB flag is DB_INIT_MPOOL.
+<p>Access method calls are largely unchanged when using this flag, although
+any cursors through which update operations (e.g., <a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a>,
+<a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a>) will be made must have the <a href="../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> value
+set in the flags parameter to the cursor call that creates the cursor.
+See <a href="../api_c/db_cursor.html">DB-&gt;cursor</a> for more information.
+<p><dt><a name="DB_INIT_LOCK">DB_INIT_LOCK</a><dd>Initialize the locking subsystem. This subsystem should be used when
+multiple processes or threads are going to be reading and writing a Berkeley DB
+database, so that they do not interfere with each other. If all threads
+are accessing the database(s) read-only, then locking is unnecessary.
+When the DB_INIT_LOCK flag is specified, it is usually necessary to run
+the deadlock detector, as well. See <a href="../utility/db_deadlock.html">db_deadlock</a> and
+<a href="../api_c/lock_detect.html">lock_detect</a> for more information.
+<p><dt><a name="DB_INIT_LOG">DB_INIT_LOG</a><dd>Initialize the logging subsystem. This subsystem is used when recovery
+from application or system failure is necessary.
+<p>The log is stored in one or more files in the environment directory.
+Each file is named using the format <i>log.NNNNNNNNNN</i>, where
+<i>NNNNNNNNNN</i> is the sequence number of the file within the log.
+For further information, see
+<a href="../ref/log/limits.html">Log File Limits</a>.
+<p>If the log region is being created and log files are already present, the
+log files are reviewed and subsequent log writes are appended
+to the end of the log, rather than overwriting current log entries.
+<p><dt><a name="DB_INIT_MPOOL">DB_INIT_MPOOL</a><dd>Initialize the shared memory buffer pool subsystem. This subsystem is
+used whenever the application is using any Berkeley DB access method.
+<p><dt><a name="DB_INIT_TXN">DB_INIT_TXN</a><dd>Initialize the transaction subsystem. This subsystem is used when
+recovery and atomicity of multiple operations and recovery are important.
+The DB_INIT_TXN flag implies the DB_INIT_LOG flag.
+</dl>
+<p>The second group of flags governs what recovery, if any, is performed when
+the environment is initialized:
+<p><dl compact>
+<p><dt><a name="DB_RECOVER">DB_RECOVER</a><dd>Run normal recovery on this environment before opening it for normal use.
+If this flag is set, the DB_CREATE flag must also be set since the regions
+will be removed and recreated.
+<p><dt><a name="DB_RECOVER_FATAL">DB_RECOVER_FATAL</a><dd>Run catastrophic recovery on this environment before opening it for normal
+use. If this flag is set, the DB_CREATE flag must also be set since the
+regions will be removed and recreated.
+</dl>
+<p>A standard part of the recovery process is to remove the existing Berkeley DB
+environment and create a new one in which to perform recovery. If the
+thread of control performing recovery does not specify the correct region
+initialization information (e.g., the correct memory pool cache size),
+the result can be an application running in an environment with incorrect
+cache and other subsystem sizes. For this reason, the thread of control
+performing recovery should either specify correct configuration
+information before calling the DBENV-&gt;open function, or it should remove
+the environment after recovery is completed, leaving creation of the
+correctly sized environment to a subsequent call to DBENV-&gt;open.
+<p>All Berkeley DB recovery processing must be single-threaded, that is, only a
+single thread of control may perform recovery or access a Berkeley DB
+environment while recovery is being performed. As it is not an error to
+specify DB_RECOVER for an environment for which no recovery is
+required, it is reasonable programming practice for the thread of control
+responsible for performing recovery and creating the environment to always
+specify the DB_RECOVER flag during startup.
+<p>The DBENV-&gt;open function returns successfully if DB_RECOVER
+or DB_RECOVER_FATAL is specified and no log files exist, so it is
+important to ensure that all necessary log files are present before running
+recovery. For further information, consult <a href="../utility/db_archive.html">db_archive</a> and
+<a href="../utility/db_recover.html">db_recover</a>.
+<p>The third group of flags governs file naming extensions in the environment:
+<p><dl compact>
+<!--$Id: m4.env_flags,v 10.9 2000/06/29 22:54:10 bostic Exp $-->
+<p><dt><a name="DB_USE_ENVIRON">DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, environment information will be used in file
+naming for all users only if the DB_USE_ENVIRON flag is set.
+<p><dt><a name="DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, if the DB_USE_ENVIRON_ROOT flag is set,
+environment information will be used for file naming only for users with
+appropriate permissions (e.g., on UNIX systems, users with a user-ID of 0).
+</dl>
+<p>Finally, there are a few additional, unrelated flags:
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Cause Berkeley DB subsystems to create any underlying files, as necessary.
+<p><dt><a name="DB_LOCKDOWN">DB_LOCKDOWN</a><dd>Lock shared Berkeley DB environment files and memory mapped databases into memory.
+<p><dt><a name="DB_PRIVATE">DB_PRIVATE</a><dd>Specify that the environment will only be accessed by a single process
+(although that process may be multi-threaded). This flag has two effects
+on the Berkeley DB environment. First, all underlying data structures are
+allocated from per-process memory instead of from shared memory that is
+potentially accessible to more than a single process. Second, mutexes
+are only configured to work between threads.
+<p>This flag should not be specified if more than a single process is
+accessing the environment, as it is likely to cause database corruption
+and unpredictable behavior. For example, if both a server application and
+the Berkeley DB utility <a href="../utility/db_stat.html">db_stat</a> will access the environment, the
+DB_PRIVATE flag should not be specified.
+<p><dt><a name="DB_SYSTEM_MEM">DB_SYSTEM_MEM</a><dd>Allocate memory from system shared memory instead of from memory backed
+by the filesystem. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p><dt><a name="DB_THREAD">DB_THREAD</a><dd>Cause the DB_ENV handle returned by DBENV-&gt;open to be
+<i>free-threaded</i>, that is, useable by multiple threads within a
+single address space.
+</dl>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by Berkeley DB
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p>The DBENV-&gt;open function returns a non-zero error value on failure and 0 on success.
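+<p>For example (a sketch only; "/home/myapp" is a hypothetical home
+directory), a transactional application might open its environment as
+follows, initializing all four subsystems, creating any missing files,
+and running recovery at startup:
+<p><blockquote><pre>
+if ((ret = dbenv-&gt;open(dbenv, "/home/myapp",
+    DB_CREATE | DB_RECOVER | DB_INIT_LOCK | DB_INIT_LOG |
+    DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "DBENV-&gt;open: /home/myapp");
+	(void)dbenv-&gt;close(dbenv, 0);
+	return (ret);
+}
+</pre></blockquote>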
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The environment variable <b>DB_HOME</b> may be used as the path of
+the database home as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+</dl>
+<h1>Errors</h1>
+<p>The DBENV-&gt;open function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The shared memory region was locked and (repeatedly) unavailable.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>
+The DB_THREAD flag was specified and spinlocks are not
+implemented for this architecture.
+<p>The DB_HOME or TMPDIR environment variables were set but empty.
+<p>An incorrectly formatted <b>NAME VALUE</b> entry or line was found.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>HP-UX only: a previously created Berkeley DB environment for this process still
+exists.
+</dl>
+<p>The DBENV-&gt;open function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBENV-&gt;open function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_remove.html b/bdb/docs/api_c/env_remove.html
new file mode 100644
index 00000000000..2d7279d79ab
--- /dev/null
+++ b/bdb/docs/api_c/env_remove.html
@@ -0,0 +1,125 @@
+<!--$Id: env_remove.so,v 10.24 2000/12/06 14:40:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;remove</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;remove(DB_ENV *, char *db_home, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBENV-&gt;remove function destroys a Berkeley DB environment, if it is not
+currently in use. The environment regions, including any backing files,
+are removed. Any log or database files and the environment directory are
+not removed.
+<p>The <b>db_home</b> argument to DBENV-&gt;remove is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>If there are processes that have called <a href="../api_c/env_open.html">DBENV-&gt;open</a> without
+calling <a href="../api_c/env_close.html">DBENV-&gt;close</a> (i.e., there are processes currently using
+the environment), DBENV-&gt;remove will fail without further action,
+unless the <a href="../api_c/env_remove.html#DB_FORCE">DB_FORCE</a> flag is set, in which case
+DBENV-&gt;remove will attempt to remove the environment regardless
+of any processes still using it.
+<p>The result of attempting to forcibly destroy the environment when it is
+in use is unspecified. Processes using an environment often maintain open
+file descriptors for shared regions within it. On UNIX systems, the
+environment removal will usually succeed and processes that have already
+joined the region will continue to run in that region without change;
+however, processes attempting to join the environment will either fail or
+create new regions. On other systems (e.g., Windows/NT), where the
+<b>unlink</b>(2) system call will fail if any process has an open
+file descriptor for the file, the region removal will fail.
+<p>Calling DBENV-&gt;remove should not be necessary for most applications,
+as the Berkeley DB environment is cleaned up as part of normal database recovery
+procedures. However, applications may wish to call DBENV-&gt;remove
+as part of application shutdown to free up system resources.
+Specifically, when the <a href="../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag was specified to
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>, it may be useful to call DBENV-&gt;remove in order
+to release system shared memory segments that have been allocated.
+<p>In the case of catastrophic or system failure, database recovery must be
+performed (see <a href="../utility/db_recover.html">db_recover</a>), or the <a href="../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> and
+<a href="../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a> flags to <a href="../api_c/env_open.html">DBENV-&gt;open</a> must be specified
+when the environment is re-opened. Alternatively, if recovery is not
+required because no database state is maintained across failures, and
+the <a href="../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag was not specified when the environment
+was created, it is possible to clean up an environment by removing all
+of the files in the environment directory that begin with the string
+prefix "__db", as no backing files are created in any other directory.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_FORCE">DB_FORCE</a><dd>If the <a href="../api_c/env_remove.html#DB_FORCE">DB_FORCE</a> flag is set, the environment is removed regardless
+of any processes that may still be using it, and no locks are acquired
+during this process. (Generally, the <a href="../api_c/env_remove.html#DB_FORCE">DB_FORCE</a> flag is only
+specified when applications were unable to shut down cleanly, and there
+is a risk that an application may have died holding a Berkeley DB lock.)
+<!--$Id: m4.env_flags,v 10.9 2000/06/29 22:54:10 bostic Exp $-->
+<p><dt><a name="DB_USE_ENVIRON">DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, environment information will be used in file
+naming for all users only if the DB_USE_ENVIRON flag is set.
+<p><dt><a name="DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, if the DB_USE_ENVIRON_ROOT flag is set,
+environment information will be used for file naming only for users with
+appropriate permissions (e.g., on UNIX systems, users with a user-ID of 0).
+</dl>
+<p>In multi-threaded applications, only a single thread may call
+DBENV-&gt;remove.
+<p>A DB_ENV handle which has already been used to open an
+environment should not be used to call the DBENV-&gt;remove function; a new
+DB_ENV handle should be created for that purpose.
+<p>Once DBENV-&gt;remove has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DBENV-&gt;remove function returns a non-zero error value on failure and 0 on success.
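+<p>The following sketch (again using the hypothetical home directory
+"/home/myapp") removes an environment at application shutdown using a
+handle created solely for that purpose, as described above:
+<p><blockquote><pre>
+DB_ENV *dbenv;
+int ret;
+
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+	return (ret);
+/* The handle may not be accessed again after this call, regardless of
+ * its return. */
+ret = dbenv-&gt;remove(dbenv, "/home/myapp", 0);
+</pre></blockquote>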
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EBUSY<dd>The shared memory region was in use and the force flag was not set.
+</dl>
+<p>The DBENV-&gt;remove function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBENV-&gt;remove function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_cachesize.html b/bdb/docs/api_c/env_set_cachesize.html
new file mode 100644
index 00000000000..ba7980bb77a
--- /dev/null
+++ b/bdb/docs/api_c/env_set_cachesize.html
@@ -0,0 +1,87 @@
+<!--$Id: env_set_cachesize.so,v 10.19 2000/05/20 16:29:11 bostic Exp $-->
+<!--$Id: m4.cachesize,v 10.7 2000/02/11 18:54:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_cachesize</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_cachesize(DB_ENV *dbenv,
+ u_int32_t gbytes, u_int32_t bytes, int ncache);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the database's shared memory buffer pool, i.e., the cache,
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be the
+size of the normal working data set of the application, with some small
+amount of additional memory for unusual situations. (Note, the working
+set is not the same as the number of simultaneously referenced pages, and
+should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures; for example,
+some releases of Solaris limit the amount of memory that may be allocated
+contiguously by a process. If <b>ncache</b> is 0 or 1, the cache will
+be allocated contiguously in memory. If it is greater than 1, the cache
+will be broken up into <b>ncache</b> equally sized separate pieces of
+memory.
+<p>The DBENV-&gt;set_cachesize interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_cachesize function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
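+<p>For example (a sketch; the sizes are purely illustrative and should be
+derived from the application's working set), a 1GB cache split into two
+pieces could be configured before calling <a href="../api_c/env_open.html">DBENV-&gt;open</a>:
+<p><blockquote><pre>
+/* 1 gigabyte plus 0 bytes, in 2 separate pieces of memory. */
+if ((ret = dbenv-&gt;set_cachesize(dbenv, 1, 0, 2)) != 0)
+	dbenv-&gt;err(dbenv, ret, "DBENV-&gt;set_cachesize");
+</pre></blockquote>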
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+<p>The specified cache size was impossibly small.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_data_dir.html b/bdb/docs/api_c/env_set_data_dir.html
new file mode 100644
index 00000000000..68db6dc4725
--- /dev/null
+++ b/bdb/docs/api_c/env_set_data_dir.html
@@ -0,0 +1,77 @@
+<!--$Id: env_set_data_dir.so,v 10.3 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_data_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_data_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_data_dir(DB_ENV *dbenv, const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of the access
+method database files. Paths specified to the <a href="../api_c/db_open.html">DB-&gt;open</a> function
+will be searched relative to this path. Paths set using this interface
+are additive, and specifying more than one will result in each specified
+directory being searched for database files. If any directories are
+specified, created database files will always be created in the first path
+specified.
+<p>If no database directories are specified, database files can only exist
+in the environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DBENV-&gt;set_data_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_data_dir function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's data directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_data_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
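+<p>The calls below are a sketch only; "db1" and "db2" are hypothetical
+directory names. Both directories are searched for existing databases,
+and new databases are created in "db1", the first directory specified:
+<p><blockquote><pre>
+/* Must be called before DBENV-&gt;open. */
+if ((ret = dbenv-&gt;set_data_dir(dbenv, "db1")) != 0 ||
+    (ret = dbenv-&gt;set_data_dir(dbenv, "db2")) != 0)
+	dbenv-&gt;err(dbenv, ret, "DBENV-&gt;set_data_dir");
+</pre></blockquote>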
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_errcall.html b/bdb/docs/api_c/env_set_errcall.html
new file mode 100644
index 00000000000..660943070ed
--- /dev/null
+++ b/bdb/docs/api_c/env_set_errcall.html
@@ -0,0 +1,73 @@
+<!--$Id: env_set_errcall.so,v 10.16 1999/12/20 08:52:29 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_errcall</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DBENV-&gt;set_errcall(DB_ENV *dbenv,
+ void (*db_errcall_fcn)(const char *errpfx, char *msg));
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however,
+the <b>errno</b> value may be insufficient to completely describe
+the cause of the error, especially during initial application debugging.
+<p>The DBENV-&gt;set_errcall function is used to enhance the mechanism for reporting error
+messages to the application. In some cases, when an error occurs, Berkeley DB
+will call <b>db_errcall_fcn</b> with additional error information. The
+function must be declared with two arguments; the first will be the prefix
+string (as previously set by <a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> or
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>), the second will be the error message string.
+It is up to the <b>db_errcall_fcn</b> function to display the error
+message in an appropriate manner.
+<p>Alternatively, you can use the <a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> or
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a> functions to display the additional information
+via a C library FILE *.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DBENV-&gt;set_errcall interface may be used to configure Berkeley DB at any time
+during the life of the application.
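+<p>A hypothetical callback matching the documented signature might simply
+write to the standard error stream (a sketch; assumes &lt;stdio.h&gt;, and
+the registration call belongs in the application's setup code):
+<p><blockquote><pre>
+void
+my_errcall(const char *errpfx, char *msg)
+{
+	fprintf(stderr, "%s: %s\n", errpfx == NULL ? "error" : errpfx, msg);
+}
+
+dbenv-&gt;set_errcall(dbenv, my_errcall);
+</pre></blockquote>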
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_errfile.html b/bdb/docs/api_c/env_set_errfile.html
new file mode 100644
index 00000000000..ba1dd75f2cc
--- /dev/null
+++ b/bdb/docs/api_c/env_set_errfile.html
@@ -0,0 +1,70 @@
+<!--$Id: env_set_errfile.so,v 10.17 1999/12/20 08:52:29 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_errfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_errfile</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DBENV-&gt;set_errfile(DB_ENV *dbenv, FILE *errfile);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however,
+the <b>errno</b> value may be insufficient to completely describe
+the cause of the error, especially during initial application debugging.
+<p>The DBENV-&gt;set_errfile function is used to enhance the mechanism for reporting error
+messages to the application by setting a C library FILE * to be used for
+displaying additional Berkeley DB error messages. In some cases, when an error
+occurs, Berkeley DB will output an additional error message to the specified
+file reference.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> or <a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>), an error string, and
+a trailing &lt;newline&gt; character.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DBENV-&gt;set_errfile interface may be used to configure Berkeley DB at any time
+during the life of the application.
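+<p>For example, additional error messages may be directed to the standard
+error stream, a common choice during debugging (any open FILE * may be
+used instead):
+<p><blockquote><pre>
+/* stderr is declared in &lt;stdio.h&gt;. */
+dbenv-&gt;set_errfile(dbenv, stderr);
+</pre></blockquote>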
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_errpfx.html b/bdb/docs/api_c/env_set_errpfx.html
new file mode 100644
index 00000000000..be803070cce
--- /dev/null
+++ b/bdb/docs/api_c/env_set_errpfx.html
@@ -0,0 +1,59 @@
+<!--$Id: env_set_errpfx.so,v 10.12 1999/12/20 08:52:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_errpfx</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DBENV-&gt;set_errpfx(DB_ENV *dbenv, const char *errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The DBENV-&gt;set_errpfx function does not copy the memory referenced by the
+<b>errpfx</b> argument; rather, it maintains a reference to it. This
+allows applications to modify the error message prefix at any time,
+without repeatedly calling DBENV-&gt;set_errpfx, but means that the
+memory must be maintained until the handle is closed.
+<p>The DBENV-&gt;set_errpfx interface may be used to configure Berkeley DB at any time
+during the life of the application.
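+<p>Because the memory is not copied, the prefix must remain valid until
+the handle is closed; a string literal, which has static storage
+duration, satisfies this (the prefix "myapp" is hypothetical):
+<p><blockquote><pre>
+dbenv-&gt;set_errpfx(dbenv, "myapp");
+</pre></blockquote>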
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_feedback.html b/bdb/docs/api_c/env_set_feedback.html
new file mode 100644
index 00000000000..743f7772ff9
--- /dev/null
+++ b/bdb/docs/api_c/env_set_feedback.html
@@ -0,0 +1,69 @@
+<!--$Id: env_set_feedback.so,v 10.19 2000/07/09 19:12:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_feedback</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_feedback(DB_ENV *,
+ void (*db_feedback_fcn)(DB_ENV *, int opcode, int pct));
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The DBENV-&gt;set_feedback function can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback function. This function must be declared with
+three arguments: the first will be a reference to the enclosing
+environment, the second a flag value, and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback function to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="DB_RECOVER">DB_RECOVER</a><dd>The environment is being recovered.
+</dl>
+<p>The DBENV-&gt;set_feedback interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DBENV-&gt;set_feedback function returns a non-zero error value on failure and 0 on success.
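+<p>A hypothetical callback reporting recovery progress might look like the
+following sketch (assumes &lt;stdio.h&gt;; the registration call belongs in
+the application's setup code):
+<p><blockquote><pre>
+void
+my_feedback(DB_ENV *dbenv, int opcode, int pct)
+{
+	if (opcode == DB_RECOVER)
+		fprintf(stderr, "recovery %d%% complete\n", pct);
+}
+
+dbenv-&gt;set_feedback(dbenv, my_feedback);
+</pre></blockquote>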
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_flags.html b/bdb/docs/api_c/env_set_flags.html
new file mode 100644
index 00000000000..6dfc0950819
--- /dev/null
+++ b/bdb/docs/api_c/env_set_flags.html
@@ -0,0 +1,84 @@
+<!--$Id-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_flags</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_flags(DB_ENV *dbenv, u_int32_t flags, int onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+If <b>onoff</b> is zero, the specified flags are cleared; otherwise, they
+are set.
+<p><dl compact>
+<p><dt><a name="DB_CDB_ALLDB">DB_CDB_ALLDB</a><dd>For Berkeley DB Concurrent Data Store applications, perform locking on an environment-wide basis
+rather than per-database. This flag may only be used to configure Berkeley DB
+before the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Copy read-only database files in this environment into the local cache
+instead of potentially mapping them into process memory (see the
+description of the <a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a> function for further information).
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log on transaction commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency and
+isolation) properties, but not D (durability), i.e., database integrity
+will be maintained but it is possible that some number of the most
+recently committed transactions may be undone during recovery instead of
+being redone.
+<p>The number of transactions that are potentially at risk is governed by
+how often the log is checkpointed (see <a href="../utility/db_checkpoint.html">db_checkpoint</a> for more
+information) and how many log updates can fit on a single log page.
+</dl>
+<p>The DBENV-&gt;set_flags function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's flag values may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_flags", one or more whitespace characters,
+and the interface flag argument as a string, for example, "set_flags
+DB_TXN_NOSYNC". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
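+<p>For example, an application willing to risk losing the most recently
+committed transactions on failure in exchange for higher throughput might
+enable DB_TXN_NOSYNC as follows (a sketch):
+<p><blockquote><pre>
+if ((ret = dbenv-&gt;set_flags(dbenv, DB_TXN_NOSYNC, 1)) != 0)
+	dbenv-&gt;err(dbenv, ret, "DBENV-&gt;set_flags: DB_TXN_NOSYNC");
+</pre></blockquote>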
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lg_bsize.html b/bdb/docs/api_c/env_set_lg_bsize.html
new file mode 100644
index 00000000000..85e6dc12118
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lg_bsize.html
@@ -0,0 +1,68 @@
+<!--$Id: env_set_lg_bsize.so,v 10.10 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lg_bsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lg_bsize</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lg_bsize(DB_ENV *dbenv, u_int32_t lg_bsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the in-memory log buffer, in bytes. By default, or if
+the value is set to 0, a size of 32K is used.
+<p>Log information is stored in-memory until the storage space fills up
+or transaction commit forces the information to be flushed to stable
+storage. In the presence of long-running transactions or transactions
+producing large amounts of data, larger buffer sizes can increase
+throughput.
+<p>The DBENV-&gt;set_lg_bsize interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lg_bsize function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's log buffer size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_bsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
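+<p>For example (a sketch; the size is purely illustrative), an application
+with large transactions might configure a 256KB log buffer before calling
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>:
+<p><blockquote><pre>
+if ((ret = dbenv-&gt;set_lg_bsize(dbenv, 256 * 1024)) != 0)
+	dbenv-&gt;err(dbenv, ret, "DBENV-&gt;set_lg_bsize");
+</pre></blockquote>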
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lg_dir.html b/bdb/docs/api_c/env_set_lg_dir.html
new file mode 100644
index 00000000000..a8d5c861421
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lg_dir.html
@@ -0,0 +1,73 @@
+<!--$Id: env_set_lg_dir.so,v 10.3 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lg_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lg_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lg_dir(DB_ENV *dbenv, const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of logging files.
+Log files created by the Log Manager subsystem will be created in this
+directory.
+<p>If no logging directory is specified, log files are created in the
+environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DBENV-&gt;set_lg_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lg_dir function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's logging directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
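+<p>As a minimal sketch only (the directory and home paths below are hypothetical,
+and the helper function name is an application choice), the logging directory
+might be configured before the environment is opened as follows:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Hypothetical helper: open an environment with a separate log directory. */
+int
+open_env_with_logdir(DB_ENV **dbenvp)
+{
+    DB_ENV *dbenv;
+    int ret;
+
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    /* Must be set before DBENV-&gt;open. */
+    if ((ret = dbenv-&gt;set_lg_dir(dbenv, "/var/dblogs")) != 0 ||
+        (ret = dbenv-&gt;open(dbenv, "/var/dbhome",
+        DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+        DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+        (void)dbenv-&gt;close(dbenv, 0);
+        return (ret);
+    }
+    *dbenvp = dbenv;
+    return (0);
+}</pre></blockquote>
+<p>Ideally, the directory named here would live on a different physical device
+from the database files, for the recoverability reasons noted above.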
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lg_max.html b/bdb/docs/api_c/env_set_lg_max.html
new file mode 100644
index 00000000000..4625db4346b
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lg_max.html
@@ -0,0 +1,68 @@
+<!--$Id: env_set_lg_max.so,v 10.20 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lg_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lg_max</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lg_max(DB_ENV *dbenv, u_int32_t lg_max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum size of a single file in the log, in bytes. Because
+<a href="../api_c/db_lsn.html">DB_LSN</a> file offsets are unsigned 4-byte values, the set value may
+not be larger than the maximum unsigned 4-byte value. By default, or if
+the value is set to 0, a size of 10MB is used.
+<p>See <a href="../ref/log/limits.html">Log File Limits</a>
+for more information.
+<p>The DBENV-&gt;set_lg_max interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lg_max function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's log file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_max", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
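+<p>As an illustrative sketch (the 20MB value is arbitrary and error checking is
+omitted), the following call, made before <a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+would select 20MB log files:
+<p><blockquote><pre>/* dbenv obtained from db_env_create(); 20MB is an arbitrary example value. */
+(void)dbenv-&gt;set_lg_max(dbenv, 20 * 1024 * 1024);</pre></blockquote>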
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+<p>The specified log file size was too large.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lk_conflicts.html b/bdb/docs/api_c/env_set_lk_conflicts.html
new file mode 100644
index 00000000000..689464736ef
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lk_conflicts.html
@@ -0,0 +1,69 @@
+<!--$Id: env_set_lk_conflicts.so,v 10.22 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lk_conflicts</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lk_conflicts</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lk_conflicts(DB_ENV *dbenv,
+ u_int8_t *conflicts, int nmodes);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the locking conflicts matrix.
+The <b>conflicts</b> argument
+is an <b>nmodes</b> by <b>nmodes</b> array.
+A non-zero value for the array element:
+<p><blockquote><pre>conflicts[requested_mode][held_mode]</pre></blockquote>
+<p>indicates that requested_mode and held_mode conflict. The
+<i>not-granted</i> mode must be represented by 0.
+<p>If no <b>conflicts</b> value is specified, the conflicts array
+<b>db_rw_conflicts</b> is used; see <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for a description of that array.
+<p>The DBENV-&gt;set_lk_conflicts interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lk_conflicts function returns a non-zero error value on failure and 0 on success.
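+<p>The following sketch (the mode names and numbering are hypothetical, beyond
+the requirement that the not-granted mode be 0) builds a minimal three-mode
+matrix in which readers do not block readers, but any access conflicts with a
+write:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+#define MY_NOTGRANTED  0  /* The not-granted mode must be 0. */
+#define MY_READ        1
+#define MY_WRITE       2
+#define MY_NMODES      3
+
+/* conflicts[requested_mode][held_mode], rows flattened in order. */
+static u_int8_t my_conflicts[MY_NMODES * MY_NMODES] = {
+    /* held:  N  R  W */
+              0, 0, 0,   /* requested: not-granted */
+              0, 0, 1,   /* requested: read */
+              0, 1, 1    /* requested: write */
+};
+
+/* Hypothetical helper; must be called before DBENV-&gt;open. */
+int
+install_conflicts(DB_ENV *dbenv)
+{
+    return (dbenv-&gt;set_lk_conflicts(dbenv, my_conflicts, MY_NMODES));
+}</pre></blockquote>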
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>No memory was available to copy the conflicts array.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lk_detect.html b/bdb/docs/api_c/env_set_lk_detect.html
new file mode 100644
index 00000000000..460651a0dab
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lk_detect.html
@@ -0,0 +1,72 @@
+<!--$Id: env_set_lk_detect.so,v 10.19 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lk_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lk_detect</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lk_detect(DB_ENV *dbenv, u_int32_t detect);
+</pre></h3>
+<h1>Description</h1>
+<p>Set if the deadlock detector is to be run whenever a lock conflict occurs,
+and specify which transaction should be aborted in the case of a deadlock.
+The specified value must be one of the following:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a><dd>Use the default policy as specified by <a href="../utility/db_deadlock.html">db_deadlock</a>.
+<dt><a name="DB_LOCK_OLDEST">DB_LOCK_OLDEST</a><dd>Abort the oldest transaction.
+<dt><a name="DB_LOCK_RANDOM">DB_LOCK_RANDOM</a><dd>Abort a random transaction involved in the deadlock.
+<dt><a name="DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a><dd>Abort the youngest transaction.
+</dl>
+<p>The DBENV-&gt;set_lk_detect interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lk_detect function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's deadlock detector configuration may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_detect", one or more whitespace characters,
+and the interface <b>detect</b> argument as a string, for example,
+"set_lk_detect DB_LOCK_OLDEST". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
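+<p>As a sketch (error checking omitted), aborting the oldest transaction on
+every lock conflict would be requested with:
+<p><blockquote><pre>/* dbenv obtained from db_env_create(); must precede DBENV-&gt;open. */
+(void)dbenv-&gt;set_lk_detect(dbenv, DB_LOCK_OLDEST);</pre></blockquote>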
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lk_max.html b/bdb/docs/api_c/env_set_lk_max.html
new file mode 100644
index 00000000000..1e9832b59d9
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lk_max.html
@@ -0,0 +1,72 @@
+<!--$Id: env_set_lk_max.so,v 10.21 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lk_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lk_max</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lk_max(DB_ENV *dbenv, u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p><b>The DBENV-&gt;set_lk_max function interface has been deprecated in favor of
+the <a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>, <a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+and <a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a> functions. Please update your applications.</b>
+<p>Set each of the maximum number of locks, lockers and lock objects
+supported by the Berkeley DB lock subsystem to <b>max</b>. This value is
+used by <a href="../api_c/env_open.html">DBENV-&gt;open</a> to estimate how much space to allocate for
+various lock-table data structures. For specific information on
+configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DBENV-&gt;set_lk_max interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lk_max function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
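+<p>As a sketch of the recommended replacement (the values shown are arbitrary
+and error checking is omitted), an application would now size the three limits
+separately with the newer interfaces:
+<p><blockquote><pre>/* dbenv obtained from db_env_create(); arbitrary example values, set before DBENV-&gt;open. */
+(void)dbenv-&gt;set_lk_max_locks(dbenv, 5000);
+(void)dbenv-&gt;set_lk_max_lockers(dbenv, 1000);
+(void)dbenv-&gt;set_lk_max_objects(dbenv, 5000);</pre></blockquote>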
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lk_max_lockers.html b/bdb/docs/api_c/env_set_lk_max_lockers.html
new file mode 100644
index 00000000000..e41a943fd56
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lk_max_lockers.html
@@ -0,0 +1,68 @@
+<!--$Id: env_set_lk_max_lockers.so,v 1.2 2000/12/08 22:03:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lk_max_lockers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lk_max_lockers</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lk_max_lockers(DB_ENV *dbenv, u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneous locking entities supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_c/env_open.html">DBENV-&gt;open</a> to
+estimate how much space to allocate for various lock-table data
+structures. For specific information on configuring the size of the
+lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DBENV-&gt;set_lk_max_lockers interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lk_max_lockers function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of lockers may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_lockers", one or more whitespace characters,
+and the number of lockers. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lk_max_locks.html b/bdb/docs/api_c/env_set_lk_max_locks.html
new file mode 100644
index 00000000000..a908b288f97
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lk_max_locks.html
@@ -0,0 +1,67 @@
+<!--$Id: env_set_lk_max_locks.so,v 10.1 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lk_max_locks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lk_max_locks</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lk_max_locks(DB_ENV *dbenv, u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of locks supported by the Berkeley DB lock subsystem.
+This value is used by <a href="../api_c/env_open.html">DBENV-&gt;open</a> to estimate how much space to
+allocate for various lock-table data structures. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DBENV-&gt;set_lk_max_locks interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lk_max_locks function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_locks", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_lk_max_objects.html b/bdb/docs/api_c/env_set_lk_max_objects.html
new file mode 100644
index 00000000000..8fba15876cf
--- /dev/null
+++ b/bdb/docs/api_c/env_set_lk_max_objects.html
@@ -0,0 +1,68 @@
+<!--$Id: env_set_lk_max_objects.so,v 1.2 2000/12/08 22:03:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_lk_max_objects</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_lk_max_objects</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_lk_max_objects(DB_ENV *dbenv, u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneously locked objects supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_c/env_open.html">DBENV-&gt;open</a> to
+estimate how much space to allocate for various lock-table data
+structures. For specific information on configuring the size of the
+lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DBENV-&gt;set_lk_max_objects interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_lk_max_objects function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of objects may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_objects", one or more whitespace characters,
+and the number of objects. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_mp_mmapsize.html b/bdb/docs/api_c/env_set_mp_mmapsize.html
new file mode 100644
index 00000000000..3f87a140a15
--- /dev/null
+++ b/bdb/docs/api_c/env_set_mp_mmapsize.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_mp_mmapsize.so,v 10.18 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_mp_mmapsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_mp_mmapsize</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_mp_mmapsize(DB_ENV *dbenv, size_t mp_mmapsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Files that are opened read-only in the pool (and that satisfy a few other
+criteria) are, by default, mapped into the process address space instead
+of being copied into the local cache. This can result in better-than-usual
+performance, as available virtual memory is normally much larger than the
+local cache, and page faults are faster than page copying on many systems.
+However, in the presence of limited virtual memory it can cause resource
+starvation, and in the presence of large databases, it can result in immense
+process sizes.
+<p>Set the maximum file size, in bytes, for a file to be mapped into the
+process address space. If no value is specified, it defaults to 10MB.
+<p>The DBENV-&gt;set_mp_mmapsize interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_mp_mmapsize function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum mapped file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_mp_mmapsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
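+<p>As a sketch (the 4MB limit is arbitrary; error checking is omitted),
+read-only files larger than 4MB could be kept out of the process address
+space with:
+<p><blockquote><pre>/* dbenv obtained from db_env_create(); set before DBENV-&gt;open. */
+(void)dbenv-&gt;set_mp_mmapsize(dbenv, (size_t)4 * 1024 * 1024);</pre></blockquote>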
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_mutexlocks.html b/bdb/docs/api_c/env_set_mutexlocks.html
new file mode 100644
index 00000000000..a5fa2aa34c6
--- /dev/null
+++ b/bdb/docs/api_c/env_set_mutexlocks.html
@@ -0,0 +1,59 @@
+<!--$Id: env_set_mutexlocks.so,v 10.9 2000/11/17 19:56:52 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_mutexlocks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_mutexlocks</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_mutexlocks(DB_ENV *dbenv, int do_lock);
+</pre></h3>
+<h1>Description</h1>
+<p>Toggle mutex locks. Setting <b>do_lock</b> to a zero value causes
+Berkeley DB to grant all requested mutual exclusion mutexes without regard
+for their availability.
+<p>This functionality should never be used for any purpose other than
+debugging.
+<p>The DBENV-&gt;set_mutexlocks interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DBENV-&gt;set_mutexlocks function returns a non-zero error value on failure and 0 on success.
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_pageyield.html b/bdb/docs/api_c/env_set_pageyield.html
new file mode 100644
index 00000000000..95372408525
--- /dev/null
+++ b/bdb/docs/api_c/env_set_pageyield.html
@@ -0,0 +1,68 @@
+<!--$Id: env_set_pageyield.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_pageyield</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_pageyield</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_pageyield(int pageyield);
+</pre></h3>
+<h1>Description</h1>
+<p>Yield the processor whenever requesting a page from the cache. Setting
+<b>pageyield</b> to a non-zero value causes Berkeley DB to yield the processor
+any time a thread requests a page from the cache.
+<p>The db_env_set_pageyield interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_pageyield interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>This functionality should never be used for any purpose other than
+stress testing.
+<p>The db_env_set_pageyield function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_paniccall.html b/bdb/docs/api_c/env_set_paniccall.html
new file mode 100644
index 00000000000..a9d58a80b83
--- /dev/null
+++ b/bdb/docs/api_c/env_set_paniccall.html
@@ -0,0 +1,67 @@
+<!--$Id: env_set_paniccall.so,v 10.14 2000/07/09 19:12:56 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_paniccall</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_paniccall(DB_ENV *dbenv,
+ void (*paniccall)(DB_ENV *, int errval));
+</pre></h3>
+<h1>Description</h1>
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery. (For example, if Berkeley DB is unable
+to write log records to disk because there is insufficient disk space.)
+In these cases, the value <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is returned by Berkeley DB.
+<p>When such errors occur, it is often simpler to shut down the application
+rather than attempting to gracefully return up the stack.
+The DBENV-&gt;set_paniccall function is used to specify a function to be called when
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is about to be returned from a Berkeley DB method. When
+called, the <b>dbenv</b> argument will be a reference to the current
+environment, and the <b>errval</b> argument is the error value that would
+have been returned to the calling function.
+<p>The DBENV-&gt;set_paniccall interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DBENV-&gt;set_paniccall function returns a non-zero error value on failure and 0 on success.
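+<p>A minimal sketch of such a callback (the function name and its behavior are
+application choices) might report the error and shut down:
+<p><blockquote><pre>#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+/* Hypothetical callback: report the error, then give up. */
+static void
+my_paniccall(DB_ENV *dbenv, int errval)
+{
+    dbenv-&gt;err(dbenv, errval, "panic -- shutting down");
+    exit(1);
+}
+
+/*
+ * Registration, at any time after db_env_create():
+ *    (void)dbenv-&gt;set_paniccall(dbenv, my_paniccall);
+ */</pre></blockquote>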
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_panicstate.html b/bdb/docs/api_c/env_set_panicstate.html
new file mode 100644
index 00000000000..6168ad9af7e
--- /dev/null
+++ b/bdb/docs/api_c/env_set_panicstate.html
@@ -0,0 +1,64 @@
+<!--$Id: env_set_panicstate.so,v 10.2 2001/01/17 15:32:34 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_panicstate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_panicstate</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_panicstate(int panic);
+</pre></h3>
+<h1>Description</h1>
+<p>Toggle the Berkeley DB panic state. Setting <b>panic</b> to a non-zero value
+causes Berkeley DB to refuse attempts to call Berkeley DB functions, returning the
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> error.
+<p>The db_env_set_panicstate interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_panicstate interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_panicstate function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_rec_init.html b/bdb/docs/api_c/env_set_rec_init.html
new file mode 100644
index 00000000000..056ec9b717c
--- /dev/null
+++ b/bdb/docs/api_c/env_set_rec_init.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_rec_init.so,v 10.9 2000/05/01 21:57:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_recovery_init</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_recovery_init</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_recovery_init(DB_ENV *,
+ int (*db_recovery_init_fcn)(DB_ENV *));
+</pre></h3>
+<h1>Description</h1>
+<p>Applications that install application-specific recovery functions need to
+be called back before Berkeley DB performs recovery, so that they can add their
+recovery functions to Berkeley DB's set.
+<p>The DBENV-&gt;set_recovery_init function supports this functionality. The
+<b>db_recovery_init_fcn</b> function must be declared with one
+argument, a reference to the enclosing Berkeley DB environment. This
+function will be called after the <a href="../api_c/env_open.html">DBENV-&gt;open</a> has been called,
+but before recovery is started.
+<p>If the <b>db_recovery_init_fcn</b> function returns a non-zero value,
+no recovery will be performed and <a href="../api_c/env_open.html">DBENV-&gt;open</a> will return the same
+value to its caller.
+<p>The DBENV-&gt;set_recovery_init interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_recovery_init function returns a non-zero error value on failure and 0 on success.
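+<p>A minimal sketch of such a function (the name is hypothetical and the body
+is reduced to a placeholder) simply returns 0 so that recovery proceeds:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Hypothetical recovery-initialization callback. */
+static int
+my_recovery_init(DB_ENV *dbenv)
+{
+    /*
+     * Add application-specific recovery functions to Berkeley DB's set
+     * here.  Returning non-zero would abort recovery, and that value
+     * would be returned by DBENV-&gt;open to its caller.
+     */
+    return (0);
+}
+
+/*
+ * Registration, before DBENV-&gt;open:
+ *    (void)dbenv-&gt;set_recovery_init(dbenv, my_recovery_init);
+ */</pre></blockquote>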
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_region_init.html b/bdb/docs/api_c/env_set_region_init.html
new file mode 100644
index 00000000000..3c83680ada9
--- /dev/null
+++ b/bdb/docs/api_c/env_set_region_init.html
@@ -0,0 +1,77 @@
+<!--$Id: env_set_region_init.so,v 10.10 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_region_init</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_region_init</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_region_init(int region_init);
+</pre></h3>
+<h1>Description</h1>
+<p>Page-fault shared regions into memory when initially creating or joining
+a Berkeley DB environment. In some applications, the expense of page-faulting
+the shared memory regions can affect performance, e.g., when the
+page-fault occurs while holding a lock, other lock requests can convoy
+and overall throughput may decrease. Setting <b>region_init</b> to a
+non-zero value specifies that shared regions be read or written, as
+appropriate, when the region is joined by the application. This forces
+the underlying virtual memory and file systems to instantiate both the
+necessary memory and the necessary disk space. This can also avoid
+out-of-disk space failures later on.
+<p>The db_env_set_region_init interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_region_init interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_region_init function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's initial behavior with respect to shared memory regions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_region_init", one or more whitespace characters,
+and the string "1". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
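+<p>As a sketch (error checking omitted), the corresponding call is normally made
+before any environment or database handles are created:
+<p><blockquote><pre>/* Force shared regions to be faulted in as they are joined. */
+(void)db_env_set_region_init(1);</pre></blockquote>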
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_server.html b/bdb/docs/api_c/env_set_server.html
new file mode 100644
index 00000000000..586887bfc96
--- /dev/null
+++ b/bdb/docs/api_c/env_set_server.html
@@ -0,0 +1,77 @@
+<!--"@(#)env_set_server.so 10.13 (Sleepycat) 8/25/99"-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_server</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_server(DB_ENV *dbenv, char *host,
+ long cl_timeout, long sv_timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Connect to the DB server on the indicated hostname and set up a channel
+for communication.
+<p>The <b>cl_timeout</b> argument specifies the number of seconds the client
+should wait for results to come back from the server. Once the timeout
+has expired on any communication with the server, DB_NOSERVER will
+be returned. If this value is zero, a default timeout is used.
+<p>The <b>sv_timeout</b> argument specifies the number of seconds the server
+should allow a client connection to remain idle before assuming that
+client is gone. Once that timeout has been reached, the server releases
+all resources associated with that client connection. Subsequent attempts
+by that client to communicate with the server result in
+DB_NOSERVER_ID, indicating that an invalid identifier has been
+given to the server. This value can be considered a hint to the server.
+The server may alter this value based on its own policies or allowed
+values. If this value is zero, a default timeout is used.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>When the DBENV-&gt;set_server function has been called, any subsequent calls
+to Berkeley DB library interfaces may return either <a name="DB_NOSERVER">DB_NOSERVER</a> or
+<a name="DB_NOSERVER_ID">DB_NOSERVER_ID</a>.
+<p>The DBENV-&gt;set_server function returns a non-zero error value on failure and 0 on success.
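+<p>A minimal sketch (the host name is hypothetical, zero timeouts select the
+defaults, and the DB_CLIENT flag to <a href="../api_c/env_create.html">db_env_create</a>
+is assumed to be the way a client environment is requested in this release):
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Hypothetical helper: create a client environment and attach to a server. */
+int
+connect_to_server(DB_ENV **dbenvp)
+{
+    DB_ENV *dbenv;
+    int ret;
+
+    if ((ret = db_env_create(&amp;dbenv, DB_CLIENT)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;set_server(dbenv,
+        "dbserver.example.com", 0, 0, 0)) != 0) {
+        (void)dbenv-&gt;close(dbenv, 0);
+        return (ret);
+    }
+    *dbenvp = dbenv;
+    return (0);
+}</pre></blockquote>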
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_shm_key.html b/bdb/docs/api_c/env_set_shm_key.html
new file mode 100644
index 00000000000..8de32ed842a
--- /dev/null
+++ b/bdb/docs/api_c/env_set_shm_key.html
@@ -0,0 +1,87 @@
+<!--$Id: env_set_shm_key.so,v 10.5 2000/08/09 15:45:52 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_shm_key</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_shm_key</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_shm_key(DB_ENV *dbenv, long shm_key);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify a base segment ID for Berkeley DB environment shared memory regions
+created in system memory on VxWorks or systems supporting X/Open-style
+shared memory interfaces, e.g., UNIX systems supporting
+<b>shmget</b>(2) and related System V IPC interfaces.
+<p>This base segment ID will be used when Berkeley DB shared memory regions are
+first created. It will be incremented by a small integer value each time
+a new shared memory region is created; that is, if the base ID is 35,
+the first shared memory region created will have a segment ID of 35 and
+the next one a segment ID between 36 and 40 or so. A Berkeley DB environment
+always creates a master shared memory region, plus an additional shared
+memory region for each of the subsystems supported by the environment
+(locking, logging, memory pool and transaction), plus an additional
+shared memory region for each additional memory pool cache that is
+supported. Already existing regions with the same segment IDs will be
+removed. See <a href="../ref/env/region.html">Shared Memory Regions</a>
+for more information.
+<p>The intent behind this interface is two-fold: without it, applications
+have no way to ensure that two Berkeley DB applications don't attempt to use
+the same segment IDs when creating different Berkeley DB environments. In
+addition, by using the same segment IDs each time the environment is
+created, previously created segments will be removed, and the set of
+segments on the system will not grow without bound.
+<p>The DBENV-&gt;set_shm_key interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_shm_key function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's base segment ID may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_shm_key", one or more whitespace characters,
+and the ID. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
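+<p>For example, a minimal sketch of configuring a base segment ID before
+opening an environment whose regions live in system memory (the ID, home
+directory and flags shown are illustrative; the
+<a href="../api_c/env_open.html">DBENV-&gt;open</a> DB_SYSTEM_MEM flag is
+assumed to be the mechanism placing the regions in system memory):
+<p><blockquote><pre>
+DB_ENV *dbenv;
+int ret;
+
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+/* Regions will use segment IDs starting at 35. */
+if ((ret = dbenv-&gt;set_shm_key(dbenv, 35)) != 0)
+        return (ret);
+if ((ret = dbenv-&gt;open(dbenv, "/home/db",
+    DB_CREATE | DB_INIT_MPOOL | DB_SYSTEM_MEM, 0)) != 0)
+        return (ret);</pre></blockquote>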
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_tas_spins.html b/bdb/docs/api_c/env_set_tas_spins.html
new file mode 100644
index 00000000000..633dcda077e
--- /dev/null
+++ b/bdb/docs/api_c/env_set_tas_spins.html
@@ -0,0 +1,70 @@
+<!--$Id: env_set_tas_spins.so,v 10.9 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_tas_spins</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_tas_spins</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_tas_spins(u_int32_t tas_spins);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify that test-and-set mutexes should spin <b>tas_spins</b> times
+without blocking. The value defaults to 1 on uniprocessor systems and
+to 50 times the number of processors on multiprocessor systems.
+<p>The db_env_set_tas_spins interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_tas_spins interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_tas_spins function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's test-and-set spin count may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tas_spins", one or more whitespace characters,
+and the number of spins. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
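+<p>For example, a minimal sketch of raising the spin count before creating
+any environment or database handles (the processor count is illustrative):
+<p><blockquote><pre>
+int ret;
+
+/* Assume a 4-processor machine: allow 4 * 50 spins before blocking. */
+if ((ret = db_env_set_tas_spins(4 * 50)) != 0)
+        return (ret);</pre></blockquote>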
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_tmp_dir.html b/bdb/docs/api_c/env_set_tmp_dir.html
new file mode 100644
index 00000000000..05f3bed5da0
--- /dev/null
+++ b/bdb/docs/api_c/env_set_tmp_dir.html
@@ -0,0 +1,89 @@
+<!--$Id: env_set_tmp_dir.so,v 10.3 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_tmp_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_tmp_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_tmp_dir(DB_ENV *dbenv, const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of temporary files.
+The files created to back in-memory access method databases will be
+created relative to this path. These temporary files can be quite large,
+depending on the size of the database.
+<p>If no directories are specified, the following alternatives are checked
+in the specified order. The first existing directory path is used for
+all temporary files.
+<p><ol>
+<p><li>The value of the environment variable <b>TMPDIR</b>.
+<li>The value of the environment variable <b>TEMP</b>.
+<li>The value of the environment variable <b>TMP</b>.
+<li>The value of the environment variable <b>TempFolder</b>.
+<li>The value returned by the GetTempPath interface.
+<li>The directory <b>/var/tmp</b>.
+<li>The directory <b>/usr/tmp</b>.
+<li>The directory <b>/temp</b>.
+<li>The directory <b>/tmp</b>.
+<li>The directory <b>C:/temp</b>.
+<li>The directory <b>C:/tmp</b>.
+</ol>
+<p>Note: environment variables are only checked if one of the
+<a href="../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> or <a href="../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags were
+specified.
+<p>Note: the GetTempPath interface is only checked on Win32 platforms.
+<p>The DBENV-&gt;set_tmp_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_tmp_dir function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's temporary file directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tmp_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
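+<p>For example, a minimal sketch of directing temporary files to a specific
+file system before the environment is opened (the directory and home path
+are illustrative):
+<p><blockquote><pre>
+DB_ENV *dbenv;
+int ret;
+
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+/* Back in-memory databases with files created in /var/tmp. */
+if ((ret = dbenv-&gt;set_tmp_dir(dbenv, "/var/tmp")) != 0)
+        return (ret);
+if ((ret = dbenv-&gt;open(dbenv, "/home/db", DB_CREATE | DB_INIT_MPOOL, 0)) != 0)
+        return (ret);</pre></blockquote>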
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_tx_max.html b/bdb/docs/api_c/env_set_tx_max.html
new file mode 100644
index 00000000000..82328955237
--- /dev/null
+++ b/bdb/docs/api_c/env_set_tx_max.html
@@ -0,0 +1,67 @@
+<!--$Id: env_set_tx_max.so,v 10.21 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_tx_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_tx_max</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_tx_max(DB_ENV *dbenv, u_int32_t tx_max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of active transactions that are supported by the
+environment. This value bounds the size of backing shared memory regions.
+Note that child transactions must be counted as active until their
+ultimate parent commits or aborts.
+<p>When there are more than the specified number of concurrent transactions,
+calls to <a href="../api_c/txn_begin.html">txn_begin</a> will fail (until some active transactions
+complete). If no value is specified, a default value of 20 is used.
+<p>The DBENV-&gt;set_tx_max interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_tx_max function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of active transactions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tx_max", one or more whitespace characters,
+and the number of transactions. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
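+<p>For example, a minimal sketch of raising the limit to 100 active
+transactions before the environment is opened (the value and home path are
+illustrative):
+<p><blockquote><pre>
+DB_ENV *dbenv;
+int ret;
+
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+/* Allow up to 100 concurrent transactions, counting child transactions. */
+if ((ret = dbenv-&gt;set_tx_max(dbenv, 100)) != 0)
+        return (ret);
+if ((ret = dbenv-&gt;open(dbenv, "/home/db", DB_CREATE |
+    DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
+        return (ret);</pre></blockquote>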
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_tx_recover.html b/bdb/docs/api_c/env_set_tx_recover.html
new file mode 100644
index 00000000000..9295537b5f5
--- /dev/null
+++ b/bdb/docs/api_c/env_set_tx_recover.html
@@ -0,0 +1,75 @@
+<!--$Id: env_set_tx_recover.so,v 10.26 2000/07/09 19:13:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_tx_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_tx_recover</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_tx_recover(DB_ENV *dbenv,
+ int (*tx_recover)(DB_ENV *dbenv,
+ DBT *log_rec, DB_LSN *lsn, db_recops op));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the application's function to be called during transaction abort
+and recovery. This function must return 0 on success and either
+<b>errno</b> or a value outside of the Berkeley DB error name space on
+failure. It takes four arguments:
+<p><dl compact>
+<p><dt>dbenv <dd>A Berkeley DB environment.
+<p><dt>log_rec<dd>A log record.
+<p><dt>lsn<dd>A log sequence number.
+<p><dt>op<dd>One of the following values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a><dd>The log is being read backward to determine which transactions have been
+committed and to abort those operations that were not, undo the operation
+described by the log record.
+<p><dt><a name="DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a><dd>The log is being played forward, redo the operation described by the log
+record.
+<p><dt><a name="DB_TXN_ABORT">DB_TXN_ABORT</a><dd>The log is being read backwards during a transaction abort, undo the
+operation described by the log record.
+</dl>
+</dl>
+<p>The DBENV-&gt;set_tx_recover interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_tx_recover function returns a non-zero error value on failure and 0 on success.
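+<p>For example, a minimal sketch of a recovery callback (the function name
+app_recover is hypothetical, and the application-specific redo/undo logic
+is omitted):
+<p><blockquote><pre>
+int
+app_recover(DB_ENV *dbenv, DBT *log_rec, DB_LSN *lsn, db_recops op)
+{
+        /*
+         * Application-specific redo/undo of the operation described by
+         * log_rec would be performed here, normally by dispatching on a
+         * record type stored at the start of log_rec-&gt;data.
+         */
+        switch (op) {
+        case DB_TXN_BACKWARD_ROLL:      /* Undo, during recovery. */
+        case DB_TXN_ABORT:              /* Undo, during abort. */
+        case DB_TXN_FORWARD_ROLL:       /* Redo, during recovery. */
+        default:
+                break;
+        }
+        return (0);
+}</pre></blockquote>
+<p>The callback would then be installed by calling
+DBENV-&gt;set_tx_recover(dbenv, app_recover) before the
+<a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.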
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DBENV-&gt;open</a> was called.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_tx_timestamp.html b/bdb/docs/api_c/env_set_tx_timestamp.html
new file mode 100644
index 00000000000..68cd0d15723
--- /dev/null
+++ b/bdb/docs/api_c/env_set_tx_timestamp.html
@@ -0,0 +1,63 @@
+<!--$Id: env_set_tx_timestamp.so,v 10.6 2000/12/21 18:33:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_tx_timestamp</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_tx_timestamp</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_tx_timestamp(DB_ENV *dbenv, time_t *timestamp);
+</pre></h3>
+<h1>Description</h1>
+<p>Recover to the time specified by <b>timestamp</b> rather than to the most
+current possible date.
+The <b>timestamp</b> argument should be the number of seconds since 0
+hours, 0 minutes, 0 seconds, January 1, 1970, Coordinated Universal Time,
+i.e., the Epoch.
+<p>Once a database environment has been upgraded to a new version of Berkeley DB
+involving a log format change (see <a href="../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>), it is no longer possible to recover
+to a specific time before that upgrade.
+<p>The DBENV-&gt;set_tx_timestamp interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The DBENV-&gt;set_tx_timestamp function returns a non-zero error value on failure and 0 on success.
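+<p>For example, a minimal sketch of recovering to a point roughly one hour
+in the past (it assumes &lt;time.h&gt;, and the home path and open flags are
+illustrative):
+<p><blockquote><pre>
+DB_ENV *dbenv;
+time_t timestamp;
+int ret;
+
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+/* Recover to approximately one hour ago. */
+timestamp = time(NULL) - 3600;
+if ((ret = dbenv-&gt;set_tx_timestamp(dbenv, &amp;timestamp)) != 0)
+        return (ret);
+/* Run recovery as part of opening the environment. */
+if ((ret = dbenv-&gt;open(dbenv, "/home/db", DB_CREATE | DB_RECOVER |
+    DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
+        return (ret);</pre></blockquote>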
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>It is not possible to recover to the specified time using the
+log files currently present in the environment.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_set_verbose.html b/bdb/docs/api_c/env_set_verbose.html
new file mode 100644
index 00000000000..605fd577cca
--- /dev/null
+++ b/bdb/docs/api_c/env_set_verbose.html
@@ -0,0 +1,78 @@
+<!--$Id: env_set_verbose.so,v 10.23 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DBENV-&gt;set_verbose</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DBENV-&gt;set_verbose</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBENV-&gt;set_verbose(DB_ENV *dbenv, u_int32_t which, int onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBENV-&gt;set_verbose function turns additional informational and
+debugging messages in the Berkeley DB message output on and off. If
+<b>onoff</b> is set to
+non-zero,
+the additional messages are output.
+<p>The <b>which</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_VERB_CHKPOINT">DB_VERB_CHKPOINT</a><dd>Display checkpoint location information when searching the log for
+checkpoints.
+<p><dt><a name="DB_VERB_DEADLOCK">DB_VERB_DEADLOCK</a><dd>Display additional information when doing deadlock detection.
+<p><dt><a name="DB_VERB_RECOVERY">DB_VERB_RECOVERY</a><dd>Display additional information when performing recovery.
+<p><dt><a name="DB_VERB_WAITSFOR">DB_VERB_WAITSFOR</a><dd>Display the waits-for table when doing deadlock detection.
+</dl>
+<p>The DBENV-&gt;set_verbose interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DBENV-&gt;set_verbose function returns a non-zero error value on failure and 0 on success.
+<p>The database environment's verbosity may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_verbose", one or more whitespace characters,
+and the interface <b>which</b> argument as a string, for example,
+"set_verbose DB_VERB_CHKPOINT". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
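+<p>For example, a minimal sketch of enabling deadlock detection tracing:
+<p><blockquote><pre>
+DB_ENV *dbenv;
+int ret;
+
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+/* Trace deadlock detection while debugging lock contention. */
+if ((ret = dbenv-&gt;set_verbose(dbenv, DB_VERB_DEADLOCK, 1)) != 0)
+        return (ret);</pre></blockquote>
+<p>Passing 0 as the <b>onoff</b> argument turns the additional messages
+off again.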
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_strerror.html b/bdb/docs/api_c/env_strerror.html
new file mode 100644
index 00000000000..e6bd190a2ea
--- /dev/null
+++ b/bdb/docs/api_c/env_strerror.html
@@ -0,0 +1,60 @@
+<!--$Id: env_strerror.so,v 8.4 2000/07/30 17:59:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_strerror</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_strerror</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+char *
+db_strerror(int error);
+</pre></h3>
+<h1>Description</h1>
+<p>The db_strerror function returns an error message string corresponding
+to the error number <b>error</b>. This interface is a superset of the
+ANSI C X3.159-1989 (ANSI C) <b>strerror</b>(3) interface. If the error number
+<b>error</b> is greater than or equal to 0, then the string returned by
+the system interface <b>strerror</b>(3) is returned. If the error
+number is less than 0, an error string appropriate to the corresponding
+Berkeley DB library error is returned. See
+<a href="../ref/program/errorret.html">Error returns to applications</a>
+for more information.
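+<p>For example, a minimal sketch of reporting a failure from
+<a href="../api_c/env_create.html">db_env_create</a> (it assumes &lt;stdio.h&gt;):
+<p><blockquote><pre>
+DB_ENV *dbenv;
+int ret;
+
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0) {
+        fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
+        return (1);
+}</pre></blockquote>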
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/env_version.html b/bdb/docs/api_c/env_version.html
new file mode 100644
index 00000000000..fa7704aaea2
--- /dev/null
+++ b/bdb/docs/api_c/env_version.html
@@ -0,0 +1,57 @@
+<!--$Id: env_version.so,v 10.13 1999/12/20 08:52:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_version</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_version</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+char *
+db_version(int *major, int *minor, int *patch);
+</pre></h3>
+<h1>Description</h1>
+<p>The db_version function returns a pointer to a string containing Berkeley DB
+version information. If <b>major</b> is non-NULL, the major version
+of the Berkeley DB release is stored in the memory it references. If
+<b>minor</b> is non-NULL, the minor version of the Berkeley DB release is
+stored in the memory it references. If <b>patch</b> is non-NULL, the
+patch version of the Berkeley DB release is stored in the memory it references.
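+<p>For example, a minimal sketch of printing the version string and the
+numeric version (it assumes &lt;stdio.h&gt;):
+<p><blockquote><pre>
+int major, minor, patch;
+
+(void)printf("%s\n", db_version(&amp;major, &amp;minor, &amp;patch));
+(void)printf("using release %d.%d.%d\n", major, minor, patch);</pre></blockquote>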
+<h1>See Also</h1>
+<a href="../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DBENV-&gt;remove</a>,
+<a href="../api_c/db_err.html">DBENV-&gt;err</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>,
+<a href="../api_c/env_version.html">db_version</a>,
+<a href="../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>,
+<a href="../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>,
+and
+<a href="../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/hsearch.html b/bdb/docs/api_c/hsearch.html
new file mode 100644
index 00000000000..0d6d6ce51ed
--- /dev/null
+++ b/bdb/docs/api_c/hsearch.html
@@ -0,0 +1,107 @@
+<!--$Id: hsearch.so,v 10.18 2000/09/21 20:40:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: hsearch</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>hsearch</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;
+<p>
+typedef enum {
+ FIND, ENTER
+} ACTION;
+<p>
+typedef struct entry {
+ char *key;
+ void *data;
+} ENTRY;
+<p>
+ENTRY *
+hsearch(ENTRY item, ACTION action);
+<p>
+int
+hcreate(size_t nelem);
+<p>
+void
+hdestroy(void);
+</pre></h3>
+<h1>Description</h1>
+<p>The hsearch interface to the Berkeley DB library is intended to
+provide a high-performance implementation and source code compatibility
+for applications written to the historic hsearch interface.
+It is not recommended for any other purpose.
+<p>To compile hsearch applications, replace the application's
+<b>#include</b> of the hsearch include
+file (e.g., <b>#include &lt;search.h&gt;</b>)
+with the following two lines:
+<p><blockquote><pre>
+#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;</pre></blockquote>
+<p>and recompile.
+<p>The hcreate function creates an in-memory database. The
+<b>nelem</b> argument is an estimation of the maximum number of key/data
+pairs that will be stored in the database.
+<p>The <b>hdestroy</b> function discards the database.
+<p>Database elements are structures of type <b>ENTRY</b>, which contain at
+least two fields: <b>key</b> and <b>data</b>. The field <b>key</b> is
+declared to be of type <b>char *</b> and is the key used for storage
+and retrieval. The field <b>data</b> is declared to be of type
+<b>void *</b> and is its associated data.
+<p>The hsearch function retrieves key/data pairs from, and stores
+key/data pairs into, the database.
+<p>The <b>action</b> argument must be set to one of two values:
+<p><dl compact>
+<p><dt>ENTER<dd>If the key does not already appear in the database,
+insert the key/data pair into the database.
+If the key already appears in the database,
+return a reference to an <b>ENTRY</b>
+structure referencing the existing key and its associated data element.
+<p><dt>FIND<dd>Retrieve the specified key/data pair from the database.
+</dl>
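+<p>For example, a minimal sketch of storing and then retrieving a single
+key/data pair (it assumes &lt;stdio.h&gt; and the two-line replacement include
+shown above; the key, data and table size are illustrative):
+<p><blockquote><pre>
+ENTRY item, *found;
+
+if (hcreate(128) == 0)          /* Room for roughly 128 pairs. */
+        return (1);
+item.key = "apple";
+item.data = "fruit";
+(void)hsearch(item, ENTER);     /* Store the pair. */
+found = hsearch(item, FIND);    /* Retrieve it again. */
+if (found != NULL)
+        (void)printf("%s: %s\n", found-&gt;key, (char *)found-&gt;data);
+hdestroy();</pre></blockquote>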
+<h3>Compatibility Notes</h3>
+<p>Historically, hsearch required applications to maintain the keys
+and data in the application's memory for as long as the <b>hsearch</b>
+database existed. As Berkeley DB handles key and data management internally,
+there is no requirement that applications maintain local copies of key
+and data items, although the only effect of doing so should be the
+allocation of additional memory.
+<h3>Hsearch Diagnostics</h3>
+<p>The <b>hcreate</b> function returns 0 on failure, setting <b>errno</b>,
+and non-zero on success.
+<p>The <b>hsearch</b> function returns a pointer to an ENTRY structure on
+success, and NULL, setting <b>errno</b>, if the <b>action</b>
+specified was FIND and the item did not appear in the database.
+<h1>Errors</h1>
+<p>The hcreate function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the hcreate function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<p>The hsearch function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the hsearch function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<p>In addition, the <b>hsearch</b> function will fail, setting <b>errno</b>
+to 0, if the <b>action</b> specified was FIND and the item did not appear in
+the database.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/lock_detect.html b/bdb/docs/api_c/lock_detect.html
new file mode 100644
index 00000000000..7b95e98e9d0
--- /dev/null
+++ b/bdb/docs/api_c/lock_detect.html
@@ -0,0 +1,73 @@
+<!--$Id: lock_detect.so,v 10.26 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: lock_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>lock_detect</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+lock_detect(DB_ENV *env,
+ u_int32_t flags, u_int32_t atype, int *aborted);
+</pre></h3>
+<h1>Description</h1>
+<p>The lock_detect function runs one iteration of the deadlock detector.
+The deadlock detector traverses the lock table, and for each deadlock
+it finds, marks one of the participating transactions for abort.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_LOCK_CONFLICT">DB_LOCK_CONFLICT</a><dd>Only run the deadlock detector if a lock conflict has occurred since
+the last time that the deadlock detector was run.
+</dl>
+<p>The <b>atype</b> parameter specifies which transaction to abort in the
+case of deadlock. It must be set to one of the possible arguments listed for
+the <a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a> interface.
+<p>If the <b>aborted</b> parameter is non-NULL, the memory location it
+references will be set to the number of transactions aborted by the
+lock_detect function.
+<p>The lock_detect function is the underlying function used by the <a href="../utility/db_deadlock.html">db_deadlock</a> utility.
+See the <a href="../utility/db_deadlock.html">db_deadlock</a> utility source code for an example of using lock_detect
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
+<p>The lock_detect function returns a non-zero error value on failure and 0 on success.
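+<p>For example, a minimal sketch of running one iteration of the detector
+(the function name run_detector is hypothetical; DB_LOCK_DEFAULT is one of
+the policies listed for <a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+and &lt;stdio.h&gt; is assumed):
+<p><blockquote><pre>
+int
+run_detector(DB_ENV *dbenv)
+{
+        int aborted, ret;
+
+        /* DB_LOCK_DEFAULT applies the policy from DBENV-&gt;set_lk_detect. */
+        if ((ret = lock_detect(dbenv, 0, DB_LOCK_DEFAULT, &amp;aborted)) != 0)
+                return (ret);
+        (void)printf("%d transaction(s) aborted\n", aborted);
+        return (0);
+}</pre></blockquote>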
+<h1>Errors</h1>
+<p>The lock_detect function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the lock_detect function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/lock_get.html b/bdb/docs/api_c/lock_get.html
new file mode 100644
index 00000000000..8d68ba54cb9
--- /dev/null
+++ b/bdb/docs/api_c/lock_get.html
@@ -0,0 +1,91 @@
+<!--$Id: lock_get.so,v 10.28 2000/04/24 16:33:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: lock_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>lock_get</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+lock_get(DB_ENV *env, u_int32_t locker,
+ u_int32_t flags, const DBT *obj,
+ const db_lockmode_t lock_mode, DB_LOCK *lock);
+</pre></h3>
+<h1>Description</h1>
+<p>The lock_get function acquires a lock from the lock table, returning
+information about it in
+the <b>lock</b> argument.
+<p>The <b>locker</b> argument specified to lock_get is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the lock.
+<p>The <b>flags</b> value must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with an
+existing lock, return immediately instead of waiting for the lock to
+become available.
+</dl>
+<p>The <b>obj</b> argument is an untyped byte string that specifies the
+object to be locked or released.
+<p>The <b>lock_mode</b> argument is an index into the environment's lock conflict
+array. See <a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a> and
+<a href="../ref/lock/stdmode.html">Standard Lock Modes</a>
+for a description of that array.
+<p>The lock_get function may
+return
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a><dd>A lock was requested that could not be immediately granted and the
+<b>flags</b> parameter was set to DB_LOCK_NOWAIT.
+</dl>
+<p>Otherwise, the lock_get function returns a non-zero error value on failure and 0 on success.
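+<p>For example, a minimal sketch of acquiring and releasing a read lock on
+an application-defined object (the function name lock_one and the object
+name are hypothetical; &lt;string.h&gt; is assumed, and DB_LOCK_READ is one of
+the <a href="../ref/lock/stdmode.html">Standard Lock Modes</a>):
+<p><blockquote><pre>
+int
+lock_one(DB_ENV *dbenv)
+{
+        DBT obj;
+        DB_LOCK lock;
+        u_int32_t locker;
+        int ret;
+
+        if ((ret = lock_id(dbenv, &amp;locker)) != 0)
+                return (ret);
+
+        /* Lock an application-defined object name for reading. */
+        memset(&amp;obj, 0, sizeof(obj));
+        obj.data = "my-object";
+        obj.size = sizeof("my-object") - 1;
+        if ((ret = lock_get(dbenv,
+            locker, DB_LOCK_NOWAIT, &amp;obj, DB_LOCK_READ, &amp;lock)) != 0)
+                return (ret);
+
+        /* The object may now be read; release the lock when done. */
+        return (lock_put(dbenv, &amp;lock));
+}</pre></blockquote>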
+<h1>Errors</h1>
+<p>The lock_get function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>The lock_get function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the lock_get function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/lock_id.html b/bdb/docs/api_c/lock_id.html
new file mode 100644
index 00000000000..bd720cdb00e
--- /dev/null
+++ b/bdb/docs/api_c/lock_id.html
@@ -0,0 +1,57 @@
+<!--$Id: lock_id.so,v 10.19 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: lock_id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>lock_id</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+lock_id(DB_ENV *env, u_int32_t *idp);
+</pre></h3>
+<h1>Description</h1>
+<p>The lock_id function
+copies a locker ID, which is guaranteed to be unique in the specified lock
+table, into the memory location referenced by <b>idp</b>.
+<p>The lock_id function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The lock_id function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the lock_id function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/lock_put.html b/bdb/docs/api_c/lock_put.html
new file mode 100644
index 00000000000..777f4bdd09b
--- /dev/null
+++ b/bdb/docs/api_c/lock_put.html
@@ -0,0 +1,59 @@
+<!--$Id: lock_put.so,v 10.21 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: lock_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>lock_put</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+lock_put(DB_ENV *env, DB_LOCK *lock);
+</pre></h3>
+<h1>Description</h1>
+<p>The lock_put function releases <b>lock</b> from the lock table.
+<p>The lock_put function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The lock_put function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The lock_put function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the lock_put function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/lock_stat.html b/bdb/docs/api_c/lock_stat.html
new file mode 100644
index 00000000000..c86024de7f9
--- /dev/null
+++ b/bdb/docs/api_c/lock_stat.html
@@ -0,0 +1,92 @@
+<!--$Id: lock_stat.so,v 10.30 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: lock_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>lock_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+lock_stat(DB_ENV *env,
+ DB_LOCK_STAT **statp, void *(*db_malloc)(size_t));
+</pre></h3>
+<h1>Description</h1>
+<p>The lock_stat function
+creates a statistical structure and copies a pointer to it into a
+user-specified memory location.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory, otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The lock region statistics are stored in a structure of type
+DB_LOCK_STAT. The following DB_LOCK_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_lastid;<dd>The last allocated lock ID.
+<dt>u_int32_t st_nmodes;<dd>The number of lock modes.
+<dt>u_int32_t st_maxlocks;<dd>The maximum number of locks possible.
+<dt>u_int32_t st_maxlockers;<dd>The maximum number of lockers possible.
+<dt>u_int32_t st_maxobjects;<dd>The maximum number of objects possible.
+<dt>u_int32_t st_nlocks;<dd>The number of current locks.
+<dt>u_int32_t st_maxnlocks;<dd>The maximum number of locks at any one time.
+<dt>u_int32_t st_nlockers;<dd>The number of current lockers.
+<dt>u_int32_t st_maxnlockers;<dd>The maximum number of lockers at any one time.
+<dt>u_int32_t st_nobjects;<dd>The number of current objects.
+<dt>u_int32_t st_maxnobjects;<dd>The maximum number of objects at any one time.
+<dt>u_int32_t st_nrequests;<dd>The total number of locks requested.
+<dt>u_int32_t st_nreleases;<dd>The total number of locks released.
+<dt>u_int32_t st_nnowaits;<dd>The total number of lock requests that failed because
+<a href="../api_c/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a> was set.
+<dt>u_int32_t st_nconflicts;<dd>The total number of locks not immediately available due to conflicts.
+<dt>u_int32_t st_ndeadlocks;<dd>The number of deadlocks detected.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The lock_stat function returns a non-zero error value on failure and 0 on success.
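+<p>For example, a minimal sketch of printing two of the statistics and
+releasing the returned memory (the function name print_lock_stats is
+hypothetical; &lt;stdio.h&gt; and &lt;stdlib.h&gt; are assumed):
+<p><blockquote><pre>
+int
+print_lock_stats(DB_ENV *dbenv)
+{
+        DB_LOCK_STAT *sp;
+        int ret;
+
+        if ((ret = lock_stat(dbenv, &amp;sp, NULL)) != 0)
+                return (ret);
+        (void)printf("%lu current locks, %lu deadlocks detected\n",
+            (unsigned long)sp-&gt;st_nlocks, (unsigned long)sp-&gt;st_ndeadlocks);
+        free(sp);               /* The library allocated sp with malloc(3). */
+        return (0);
+}</pre></blockquote>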
+<h1>Errors</h1>
+<p>The lock_stat function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the lock_stat function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/lock_vec.html b/bdb/docs/api_c/lock_vec.html
new file mode 100644
index 00000000000..56d3fa96a98
--- /dev/null
+++ b/bdb/docs/api_c/lock_vec.html
@@ -0,0 +1,123 @@
+<!--$Id: lock_vec.so,v 10.31 2000/12/04 18:05:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: lock_vec</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>lock_vec</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+lock_vec(DB_ENV *env, u_int32_t locker, u_int32_t flags,
+ DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elistp);
+</pre></h3>
+<h1>Description</h1>
+<p>The lock_vec function atomically obtains and releases one or more locks
+from the lock table. The lock_vec function is intended to support
+acquisition or trading of multiple locks under one lock table semaphore,
+as is needed for lock coupling or in multigranularity locking for lock
+escalation.
+<p>The <b>locker</b> argument specified to lock_vec is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the lock.
+<p>The <b>flags</b> value must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a><dd>If a lock cannot be immediately granted because the requested lock
+conflicts with an existing lock, return instead of waiting for the lock
+to become available.
+</dl>
+<p>The <b>list</b> array provided to lock_vec is an array of
+DB_LOCKREQ structures. A DB_LOCKREQ structure has at least the following fields,
+which must be initialized before calling lock_vec:
+<p><dl compact>
+<p><dt>lockop_t <a name="op">op</a>;<dd>The operation to be performed, which must be set to one of the
+following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_GET">DB_LOCK_GET</a><dd>Get a lock, as defined by the values of <b>locker</b>, <b>obj</b>,
+and <b>mode</b>. Upon return from lock_vec, if the
+<b>lock</b> field is non-NULL, a reference to the acquired lock is
+stored there. (This reference is invalidated by any call to
+lock_vec or <a href="../api_c/lock_put.html">lock_put</a> that releases the lock.)
+<p><dt><a name="DB_LOCK_PUT">DB_LOCK_PUT</a><dd>The lock referenced by the contents of the <b>lock</b> field is released.
+<p><dt><a name="DB_LOCK_PUT_ALL">DB_LOCK_PUT_ALL</a><dd>All locks held by the <b>locker</b> are released. (Any locks acquired
+as a part of the current call to lock_vec that appear after the
+DB_LOCK_PUT_ALL entry are not considered for this
+operation).
+<p><dt><a name="DB_LOCK_PUT_OBJ">DB_LOCK_PUT_OBJ</a><dd>All locks held on the object <b>obj</b> are released. The <b>mode</b>
+and <b>locker</b> parameters are ignored. Note that any locks acquired
+as a part of the current call to lock_vec that occur before the
+DB_LOCK_PUT_OBJ will also be released; those acquired afterwards
+will not be released.
+</dl>
+<p><dt>const DBT <a name="obj">obj</a>;<dd>An untyped byte string that specifies the object to be locked or released.
+<p><dt>const lockmode_t <a name="mode">mode</a>;<dd>The lock mode, used as an index into the environment's lock conflict array.
+See <a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a> and <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for a description of that array.
+<p><dt>DB_LOCK <a name="lock">lock</a>;<dd>A lock reference.
+</dl>
+<p>The <b>nlist</b> argument specifies the number of elements in the
+<b>list</b> array.
+<p>If any of the requested locks cannot be acquired, or any of the locks to
+be released cannot be released, the operations before the failing
+operation are guaranteed to have completed successfully, and
+lock_vec returns a non-zero value. In addition, if <b>elistp</b>
+is not NULL, it is set to point to the DB_LOCKREQ entry that was being
+processed when the error occurred.
+<p>The lock_vec function may
+return
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a><dd>A lock was requested that could not be immediately granted and the
+<b>flags</b> parameter was set to DB_LOCK_NOWAIT. In this case, if
+non-NULL, <b>elistp</b> identifies the request that was not granted.
+</dl>
+<p>Otherwise, the lock_vec function returns a non-zero error value on failure and 0 on success.
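+<p>As an illustrative sketch, the following fragment releases every lock
+held by a locker ID (assumed to have been obtained earlier from
+<a href="../api_c/lock_id.html">lock_id</a>) using a single
+DB_LOCK_PUT_ALL request; the function and argument names are hypothetical:
+<p><pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+release_all_locks(DB_ENV *env, u_int32_t locker)
+{
+    DB_LOCKREQ req;
+
+    /* For DB_LOCK_PUT_ALL the obj and mode fields are ignored. */
+    memset(&amp;req, 0, sizeof(req));
+    req.op = DB_LOCK_PUT_ALL;
+    return (lock_vec(env, locker, 0, &amp;req, 1, NULL));
+}
+</pre>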
+<h1>Errors</h1>
+<p>The lock_vec function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>The lock_vec function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the lock_vec function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a>,
+<a href="../api_c/lock_detect.html">lock_detect</a>,
+<a href="../api_c/lock_get.html">lock_get</a>,
+<a href="../api_c/lock_id.html">lock_id</a>,
+<a href="../api_c/lock_put.html">lock_put</a>,
+<a href="../api_c/lock_stat.html">lock_stat</a>
+and
+<a href="../api_c/lock_vec.html">lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_archive.html b/bdb/docs/api_c/log_archive.html
new file mode 100644
index 00000000000..6c9aea26b8d
--- /dev/null
+++ b/bdb/docs/api_c/log_archive.html
@@ -0,0 +1,102 @@
+<!--$Id: log_archive.so,v 10.26 2000/05/25 13:47:07 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_archive</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_archive</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_archive(DB_ENV *env, char *(*listp)[],
+ u_int32_t flags, void *(*db_malloc)(size_t));
+</pre></h3>
+<h1>Description</h1>
+<p>The log_archive function
+creates a NULL-terminated array of log or database file names and copies
+a pointer to them into the user-specified memory location <b>listp</b>.
+<p>By default, log_archive returns the names of all of the log files
+that are no longer in use (e.g., no longer involved in active transactions),
+and that may safely be archived for catastrophic recovery and then removed
+from the system. If there were no file names to return, the memory location
+referenced by <b>listp</b> will be set to NULL.
+<p>Arrays of log file names are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_ARCH_ABS">DB_ARCH_ABS</a><dd>All pathnames are returned as absolute pathnames,
+instead of relative to the database home directory.
+<p><dt><a name="DB_ARCH_DATA">DB_ARCH_DATA</a><dd>Return the database files that need to be archived in order to recover
+the database from catastrophic failure. If any of the database files
+have not been accessed during the lifetime of the current log files,
+log_archive will not include them in this list. It is also
+possible that some of the files referenced in the log have since been
+deleted from the system.
+<p><dt><a name="DB_ARCH_LOG">DB_ARCH_LOG</a><dd>Return all the log file names regardless of whether or not they are in
+use.
+</dl>
+<p>The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually
+exclusive.
+<p>See the <a href="../utility/db_archive.html">db_archive</a> manual page for more information on database
+archival procedures.
+<p>The log_archive function is the underlying function used by the <a href="../utility/db_archive.html">db_archive</a> utility.
+See the <a href="../utility/db_archive.html">db_archive</a> utility source code for an example of using log_archive
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
+<p>The log_archive function returns a non-zero error value on failure and 0 on success.
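+<p>As an illustrative sketch, the following fragment prints the absolute
+names of the log files that are no longer in use; the <b>env</b> handle
+and the surrounding function are hypothetical:
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+int
+print_unused_logs(DB_ENV *env)
+{
+    char **list, **p;
+    int ret;
+
+    if ((ret = log_archive(env, &amp;list, DB_ARCH_ABS, NULL)) != 0)
+        return (ret);
+    if (list != NULL) {                 /* NULL means no files to report. */
+        for (p = list; *p != NULL; ++p)
+            printf("%s\n", *p);
+        free(list);                     /* Free only the returned array. */
+    }
+    return (0);
+}
+</pre>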
+<h1>Bugs</h1>
+<p>In a threaded application (i.e., one where the environment was created
+with the DB_THREAD flag specified), calling log_archive with the
+DB_ARCH_DATA flag will fail, returning EINVAL. To work around this
+problem, re-open the log explicitly without specifying DB_THREAD. This
+restriction is expected to be removed in a future version of Berkeley DB.
+<h1>Errors</h1>
+<p>The log_archive function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The log was corrupted.
+</dl>
+<p>The log_archive function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the log_archive function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_compare.html b/bdb/docs/api_c/log_compare.html
new file mode 100644
index 00000000000..c6e7743beb1
--- /dev/null
+++ b/bdb/docs/api_c/log_compare.html
@@ -0,0 +1,51 @@
+<!--$Id: log_compare.so,v 10.12 1999/12/20 08:52:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_compare(const DB_LSN *lsn0, const DB_LSN *lsn1);
+</pre></h3>
+<h1>Description</h1>
+<p>The log_compare function allows the caller to compare two
+DB_LSN structures,
+returning 0 if they are equal, 1 if <b>lsn0</b> is greater than
+<b>lsn1</b>, and -1 if <b>lsn0</b> is less than <b>lsn1</b>.
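+<p>As an illustrative sketch, a small helper might use log_compare to
+choose the later of two log sequence numbers; the helper name is
+hypothetical, and the LSNs are assumed to have been returned by earlier
+calls to <a href="../api_c/log_put.html">log_put</a>:
+<p><pre>
+#include &lt;db.h&gt;
+
+/* Return the later of two log sequence numbers. */
+DB_LSN
+later_lsn(DB_LSN a, DB_LSN b)
+{
+    return (log_compare(&amp;a, &amp;b) &gt;= 0 ? a : b);
+}
+</pre>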
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_file.html b/bdb/docs/api_c/log_file.html
new file mode 100644
index 00000000000..434380cccb0
--- /dev/null
+++ b/bdb/docs/api_c/log_file.html
@@ -0,0 +1,76 @@
+<!--$Id: log_file.so,v 10.18 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_file</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_file(DB_ENV *env,
+ const DB_LSN *lsn, char *namep, size_t len);
+</pre></h3>
+<h1>Description</h1>
+<p>The log_file function maps
+DB_LSN structures
+to file names,
+copying the name of the file containing the record named by <b>lsn</b>
+into the memory location referenced by <b>namep</b>.
+<p>The <b>len</b> argument is the length of the <b>namep</b> buffer in bytes.
+If <b>namep</b> is too short to hold the file name, log_file will
+return ENOMEM.
+(Log file names are normally quite short, on the order of 10 characters.)
+<p>This mapping of
+DB_LSN structures
+to files is needed for database administration. For example, a
+transaction manager typically records the earliest
+DB_LSN
+needed for restart, and the database administrator may want to archive
+log files to tape when they contain only
+DB_LSN
+entries before the earliest one needed for restart.
+<p>The log_file function returns a non-zero error value on failure and 0 on success.
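+<p>As an illustrative sketch, the following fragment maps an LSN (assumed
+to have been saved from an earlier <a href="../api_c/log_put.html">log_put</a>
+call) to the name of the log file containing it; the function and buffer
+size are hypothetical:
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+int
+print_log_file(DB_ENV *env, const DB_LSN *lsn)
+{
+    char name[1024];                    /* Log file names are short. */
+    int ret;
+
+    if ((ret = log_file(env, lsn, name, sizeof(name))) != 0)
+        return (ret);                   /* ENOMEM if the buffer is too small. */
+    printf("record is in %s\n", name);
+    return (0);
+}
+</pre>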
+<h1>Errors</h1>
+<p>The log_file function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The supplied buffer was too small to hold the log file name.
+</dl>
+<p>The log_file function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the log_file function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_flush.html b/bdb/docs/api_c/log_flush.html
new file mode 100644
index 00000000000..1315fc10670
--- /dev/null
+++ b/bdb/docs/api_c/log_flush.html
@@ -0,0 +1,62 @@
+<!--$Id: log_flush.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_flush</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_flush</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_flush(DB_ENV *env, const DB_LSN *lsn);
+</pre></h3>
+<h1>Description</h1>
+<p>The log_flush function guarantees that all log records whose
+DB_LSN values
+are less than or equal to the <b>lsn</b> argument have been
+written to disk. If <b>lsn</b> is NULL, all records in the
+log are flushed.
+<p>The log_flush function returns a non-zero error value on failure and 0 on success.
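+<p>As an illustrative sketch, a caller can force the entire log to disk by
+passing a NULL LSN; the surrounding function is hypothetical:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+flush_whole_log(DB_ENV *env)
+{
+    /* A NULL lsn flushes all records currently in the log. */
+    return (log_flush(env, NULL));
+}
+</pre>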
+<h1>Errors</h1>
+<p>The log_flush function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The log_flush function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the log_flush function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_get.html b/bdb/docs/api_c/log_get.html
new file mode 100644
index 00000000000..05761e1ea30
--- /dev/null
+++ b/bdb/docs/api_c/log_get.html
@@ -0,0 +1,114 @@
+<!--$Id: log_get.so,v 10.22 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_get</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_get(DB_ENV *env, DB_LSN *lsn, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The log_get function implements a cursor within the log,
+retrieving records from the log according to the <b>lsn</b> and
+<b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved and the size field indicates the number of bytes in the record.
+See <a href="../api_c/dbt.html">DBT</a> for a description of other fields in the <b>data</b>
+structure. When multiple threads are using the returned log handle
+concurrently, one of the <a href="../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or
+<a href="../api_c/dbt.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags must be specified for any <a href="../api_c/dbt.html">DBT</a> used
+for data retrieval.
+<p>The <b>flags</b> argument must be set to exactly one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CHECKPOINT">DB_CHECKPOINT</a><dd>The last record written with the DB_CHECKPOINT flag specified to the
+<a href="../api_c/log_put.html">log_put</a> function is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the record
+returned. If no record has been previously written with the DB_CHECKPOINT
+flag specified, the first record in the log is returned.
+<p>If the log is empty, the log_get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_FIRST">DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the
+record returned.
+<p>If the log is empty, the log_get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_LAST">DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the
+record returned.
+<p>If the log is empty, the log_get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_NEXT">DB_NEXT</a><dd>The current log position is advanced to the next record in the log and that
+record is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the
+record returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, log_get will return the first record in the log.
+If the last log record has already been returned or the log is empty, the
+log_get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+log_get with the DB_NEXT flag set will return EINVAL.
+<p><dt><a name="DB_PREV">DB_PREV</a><dd>The current log position is moved to the previous record in the log and that
+record is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the
+record returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV,
+log_get will return the last record in the log.
+If the first log record has already been returned or the log is empty, the
+log_get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+log_get with the DB_PREV flag set will return EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the log record currently referenced by the log.
+<p>If the log pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD flag set,
+log_get will return EINVAL.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument. If the
+specified <a href="../api_c/db_lsn.html">DB_LSN</a> is invalid (e.g., does not appear in the log)
+log_get will return EINVAL.
+</dl>
+<p>Otherwise, the log_get function returns a non-zero error value on failure and 0 on success.
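+<p>As an illustrative sketch, the following fragment walks the log from
+the first record to the last in a single thread of control, using
+<a href="../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> so that each
+record is returned in allocated memory; the <b>env</b> handle and the
+surrounding function are hypothetical:
+<p><pre>
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+walk_log(DB_ENV *env)
+{
+    DB_LSN lsn;
+    DBT data;
+    int ret;
+
+    memset(&amp;data, 0, sizeof(data));
+    data.flags = DB_DBT_MALLOC;
+    for (ret = log_get(env, &amp;lsn, &amp;data, DB_FIRST);
+        ret == 0; ret = log_get(env, &amp;lsn, &amp;data, DB_NEXT)) {
+        /* ... examine data.data and data.size here ... */
+        free(data.data);                /* DB_DBT_MALLOC: caller frees. */
+    }
+    return (ret == DB_NOTFOUND ? 0 : ret);
+}
+</pre>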
+<h1>Errors</h1>
+<p>The log_get function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_FIRST flag was specified and no log files were found.
+</dl>
+<p>The log_get function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the log_get function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_put.html b/bdb/docs/api_c/log_put.html
new file mode 100644
index 00000000000..9455296986e
--- /dev/null
+++ b/bdb/docs/api_c/log_put.html
@@ -0,0 +1,81 @@
+<!--$Id: log_put.so,v 10.21 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_put</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_put(DB_ENV *env,
+ DB_LSN *lsn, const DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The log_put function appends records to the log. The <a href="../api_c/db_lsn.html">DB_LSN</a> of
+the put record is returned in the <b>lsn</b> argument. The <b>flags</b>
+argument may be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CHECKPOINT">DB_CHECKPOINT</a><dd>The log should write a checkpoint record, recording any information
+necessary to make the log structures recoverable after a crash.
+<p><dt><a name="DB_CURLSN">DB_CURLSN</a><dd>The <a href="../api_c/db_lsn.html">DB_LSN</a> of the next record to be put is returned in the
+<b>lsn</b> argument.
+<p><dt><a name="DB_FLUSH">DB_FLUSH</a><dd>The log is forced to disk after this record is written, guaranteeing
+that all records with <a href="../api_c/db_lsn.html">DB_LSN</a> values less than or equal to the
+one being put are on disk before this function returns (this function
+is most often used for a transaction commit, see <a href="../api_c/txn_commit.html">txn_commit</a> for
+more information).
+<p>The caller is responsible for providing any necessary structure to
+<b>data</b>. (For example, in a write-ahead logging protocol, the
+application must understand what part of <b>data</b> is an operation
+code, what part is redo information, and what part is undo information.
+In addition, most transaction managers will store in <b>data</b> the
+<a href="../api_c/db_lsn.html">DB_LSN</a> of the previous log record for the same transaction, to
+support chaining back through the transaction's log records during
+undo.)
+</dl>
+<p>The log_put function returns a non-zero error value on failure and 0 on success.
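+<p>As an illustrative sketch, the following fragment appends an
+application-defined record and forces it to disk with DB_FLUSH; the
+function and argument names are hypothetical:
+<p><pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+append_record(DB_ENV *env, void *buf, u_int32_t len)
+{
+    DB_LSN lsn;
+    DBT rec;
+
+    memset(&amp;rec, 0, sizeof(rec));
+    rec.data = buf;                     /* Application-defined record layout. */
+    rec.size = len;
+    return (log_put(env, &amp;lsn, &amp;rec, DB_FLUSH));
+}
+</pre>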
+<h1>Errors</h1>
+<p>The <a href="../api_c/log_flush.html">log_flush</a> function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The record to be logged is larger than the maximum log record.
+</dl>
+<p>The log_put function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the log_put function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_register.html b/bdb/docs/api_c/log_register.html
new file mode 100644
index 00000000000..e993feabed2
--- /dev/null
+++ b/bdb/docs/api_c/log_register.html
@@ -0,0 +1,64 @@
+<!--$Id: log_register.so,v 10.27 2000/05/09 14:46:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_register</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_register(DB_ENV *env, DB *dbp, const char *name);
+</pre></h3>
+<h1>Description</h1>
+<p>The log_register function registers a file name with the specified Berkeley DB
+environment's log manager. The log manager records all file name mappings
+at each checkpoint so that a recovery process can identify the file to
+which a record in the log refers.
+<p>The <b>dbp</b> argument should be a reference to the DB structure being
+registered. The <b>name</b> argument should be a file name appropriate
+for opening the file in the environment during recovery.
+<p>The log_register function returns a non-zero error value on failure and 0 on success.
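+<p>As an illustrative sketch, the following fragment registers an open
+database file with the log manager; the <b>dbp</b> handle and the
+"inventory.db" name are hypothetical:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+register_db_file(DB_ENV *env, DB *dbp)
+{
+    /* "inventory.db" is a name usable for re-opening the file at recovery. */
+    return (log_register(env, dbp, "inventory.db"));
+}
+</pre>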
+<h1>Errors</h1>
+<p>The log_register function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The log_register function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the log_register function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_stat.html b/bdb/docs/api_c/log_stat.html
new file mode 100644
index 00000000000..819c603d318
--- /dev/null
+++ b/bdb/docs/api_c/log_stat.html
@@ -0,0 +1,90 @@
+<!--$Id: log_stat.so,v 10.23 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_stat(DB_ENV *env,
+ DB_LOG_STAT **spp, void *(*db_malloc)(size_t));
+</pre></h3>
+<h1>Description</h1>
+<p>The log_stat function
+creates a statistical structure and copies a pointer to it into a
+user-specified memory location.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The log region statistics are stored in a structure of type DB_LOG_STAT.
+The following DB_LOG_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_magic;<dd>The magic number that identifies a file as a log file.
+<dt>u_int32_t st_version;<dd>The version of the log file type.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>int st_mode;<dd>The mode of any created log files.
+<dt>u_int32_t st_lg_bsize;<dd>The in-memory log record cache size.
+<dt>u_int32_t st_lg_max;<dd>The maximum size of any individual file comprising the log.
+<dt>u_int32_t st_w_mbytes;<dd>The number of megabytes written to this log.
+<dt>u_int32_t st_w_bytes;<dd>The number of bytes over and above <b>st_w_mbytes</b> written to this log.
+<dt>u_int32_t st_wc_mbytes;<dd>The number of megabytes written to this log since the last checkpoint.
+<dt>u_int32_t st_wc_bytes;<dd>The number of bytes over and above <b>st_wc_mbytes</b> written to this log
+since the last checkpoint.
+<dt>u_int32_t st_wcount;<dd>The number of times the log has been written to disk.
+<dt>u_int32_t st_wcount_fill;<dd>The number of times the log has been written to disk because the
+in-memory log record cache filled up.
+<dt>u_int32_t st_scount;<dd>The number of times the log has been flushed to disk.
+<dt>u_int32_t st_cur_file;<dd>The current log file number.
+<dt>u_int32_t st_cur_offset;<dd>The byte offset in the current log file.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The log_stat function returns a non-zero error value on failure and 0 on success.
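+<p>As an illustrative sketch, the following fragment prints the current
+log file number and offset; the <b>env</b> handle and the surrounding
+function are hypothetical:
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+int
+print_log_position(DB_ENV *env)
+{
+    DB_LOG_STAT *sp;
+    int ret;
+
+    if ((ret = log_stat(env, &amp;sp, NULL)) != 0)
+        return (ret);
+    printf("current file/offset: %lu/%lu\n",
+        (unsigned long)sp-&gt;st_cur_file, (unsigned long)sp-&gt;st_cur_offset);
+    free(sp);                           /* The caller frees the structure. */
+    return (0);
+}
+</pre>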
+<h1>Errors</h1>
+<p>The log_stat function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the log_stat function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/log_unregister.html b/bdb/docs/api_c/log_unregister.html
new file mode 100644
index 00000000000..cfc1e6f2e5d
--- /dev/null
+++ b/bdb/docs/api_c/log_unregister.html
@@ -0,0 +1,59 @@
+<!--$Id: log_unregister.so,v 10.21 2000/05/03 22:39:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: log_unregister</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>log_unregister</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_unregister(DB_ENV *env, DB *dbp);
+</pre></h3>
+<h1>Description</h1>
+<p>The log_unregister function unregisters the file represented by
+the <b>dbp</b> parameter from the Berkeley DB environment's log manager.
+<p>The log_unregister function returns a non-zero error value on failure and 0 on success.
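+<p>As an illustrative sketch, the following fragment undoes a previous
+<a href="../api_c/log_register.html">log_register</a> call; the <b>dbp</b>
+handle and the surrounding function are hypothetical:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+unregister_db_file(DB_ENV *env, DB *dbp)
+{
+    return (log_unregister(env, dbp));
+}
+</pre>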
+<h1>Errors</h1>
+<p>The log_unregister function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The log_unregister function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the log_unregister function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a>,
+<a href="../api_c/log_archive.html">log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_file.html">log_file</a>,
+<a href="../api_c/log_flush.html">log_flush</a>,
+<a href="../api_c/log_get.html">log_get</a>,
+<a href="../api_c/log_put.html">log_put</a>,
+<a href="../api_c/log_register.html">log_register</a>,
+<a href="../api_c/log_stat.html">log_stat</a>
+and
+<a href="../api_c/log_unregister.html">log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_fclose.html b/bdb/docs/api_c/memp_fclose.html
new file mode 100644
index 00000000000..ae8ce3c5647
--- /dev/null
+++ b/bdb/docs/api_c/memp_fclose.html
@@ -0,0 +1,61 @@
+<!--$Id: memp_fclose.so,v 10.20 2000/06/13 13:55:49 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_fclose</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_fclose</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_fclose(DB_MPOOLFILE *mpf);
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_fclose function closes the source file indicated by the
+DB_MPOOLFILE structure. Calling memp_fclose does not imply
+a call to <a href="../api_c/memp_fsync.html">memp_fsync</a>; i.e., no pages are written to the source
+file as a result of calling memp_fclose.
+<p>In addition, if the <b>file</b> argument to <a href="../api_c/memp_fopen.html">memp_fopen</a> was NULL,
+any underlying files created for this DB_MPOOLFILE will be removed.
+<p>Once memp_fclose has been called, regardless of its return, the
+DB_MPOOLFILE handle may not be accessed again.
+<p>The memp_fclose function returns a non-zero error value on failure and 0 on success.
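+<p>As an illustrative sketch, the following fragment flushes and then
+closes a pool file; the <b>mpf</b> handle is assumed to have been returned
+by an earlier <a href="../api_c/memp_fopen.html">memp_fopen</a> call, and
+<a href="../api_c/memp_fsync.html">memp_fsync</a> is used as documented on
+its own page:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+close_pool_file(DB_MPOOLFILE *mpf)
+{
+    int ret;
+
+    /* Flush dirty pages explicitly; memp_fclose alone will not. */
+    if ((ret = memp_fsync(mpf)) != 0)
+        return (ret);
+    return (memp_fclose(mpf));
+}
+</pre>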
+<h1>Errors</h1>
+<p>The memp_fclose function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_fclose function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_fget.html b/bdb/docs/api_c/memp_fget.html
new file mode 100644
index 00000000000..84b39e53ee1
--- /dev/null
+++ b/bdb/docs/api_c/memp_fget.html
@@ -0,0 +1,98 @@
+<!--$Id: memp_fget.so,v 10.23 2000/12/04 18:05:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_fget</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_fget</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_fget(DB_MPOOLFILE *mpf,
+ db_pgno_t *pgnoaddr, u_int32_t flags, void **pagep);
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_fget function copies a pointer to the page with the page
+number specified by <b>pgnoaddr</b>, from the source file in the
+DB_MPOOLFILE, into the memory location referenced by <b>pagep</b>.
+If the page does not exist or cannot be retrieved, memp_fget will
+fail.
+<p><b>Page numbers begin at 0, i.e., the first page in the file is page number
+0, not page number 1.</b>
+<p>The returned page is <b>size_t</b> type aligned.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CREATE">DB_MPOOL_CREATE</a><dd>If the specified page does not exist, create it. In this case, the
+<a href="memp_register.html#pgin">pgin</a> function, if specified, is
+called.
+<p><dt><a name="DB_MPOOL_LAST">DB_MPOOL_LAST</a><dd>Return the last page of the source file and copy its page number
+to the location referenced by <b>pgnoaddr</b>.
+<p><dt><a name="DB_MPOOL_NEW">DB_MPOOL_NEW</a><dd>Create a new page in the file and copy its page number to the location
+referenced by <b>pgnoaddr</b>. In this case, the
+<a href="memp_register.html#pgin">pgin</a> function, if specified, is
+<b>not</b> called.
+</dl>
+<p>The DB_MPOOL_CREATE, DB_MPOOL_LAST and
+DB_MPOOL_NEW flags are mutually exclusive.
+<p>Created pages have all their bytes set to 0, unless otherwise specified
+when the file was opened.
+<p>All pages returned by memp_fget will be retained (i.e.,
+<i>pinned</i>) in the pool until a subsequent call to
+<a href="../api_c/memp_fput.html">memp_fput</a>.
+<p>The memp_fget function returns a non-zero error value on failure and 0 on success.
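+<p>As an illustrative sketch, the following fragment pins the first page
+of a file (page number 0) and then releases it; the <b>mpf</b> handle and
+the surrounding function are hypothetical, and
+<a href="../api_c/memp_fput.html">memp_fput</a> is used as documented on
+its own page:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+touch_first_page(DB_MPOOLFILE *mpf)
+{
+    db_pgno_t pgno;
+    void *page;
+    int ret;
+
+    pgno = 0;                           /* Page numbers begin at 0. */
+    if ((ret = memp_fget(mpf, &amp;pgno, 0, &amp;page)) != 0)
+        return (ret);
+    /* ... read the pinned page ... */
+    return (memp_fput(mpf, page, 0));   /* Unpin the page. */
+}
+</pre>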
+<h1>Errors</h1>
+<p>The memp_fget function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The page reference count has overflowed. (This should never happen unless
+there's a bug in the application.)
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_MPOOL_NEW flag was set and the source file was not opened for writing.
+<p>More than one of DB_MPOOL_CREATE, DB_MPOOL_LAST and DB_MPOOL_NEW was set.
+</dl>
+<p><dl compact>
+<p><dt>EIO<dd>The requested page does not exist and DB_MPOOL_CREATE was not set.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The cache is full and no more pages will fit in the pool.
+</dl>
+<p>The memp_fget function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_fget function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_fopen.html b/bdb/docs/api_c/memp_fopen.html
new file mode 100644
index 00000000000..ea0250246cb
--- /dev/null
+++ b/bdb/docs/api_c/memp_fopen.html
@@ -0,0 +1,157 @@
+<!--$Id: memp_fopen.so,v 10.28 2000/12/18 21:05:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_fopen</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_fopen</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_fopen(DB_ENV *env, char *file, u_int32_t flags,
+ int mode, size_t pagesize, DB_MPOOL_FINFO *finfop,
+ DB_MPOOLFILE **mpf);
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_fopen function opens a file in the pool specified by the
+DB_ENV <b>env</b>, copying the DB_MPOOLFILE pointer
+representing it into the memory location referenced by <b>mpf</b>.
+<p>The <b>file</b> argument is the name of the file to be opened.
+If <b>file</b> is NULL, a private file is created that cannot be
+shared with any other process (although it may be shared with
+other threads).
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Create any underlying files, as necessary. If the files do not already
+exist and the DB_CREATE flag is not specified, the call will fail.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Always copy this file into the local cache instead of potentially mapping
+it into process memory (see the description of the
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a> function for further information).
+<p><dt><a name="DB_RDONLY">DB_RDONLY</a><dd>Open any underlying files for reading only. Any attempt to write the file
+using the pool functions will fail, regardless of the actual permissions
+of the file.
+</dl>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the memp_fopen function
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p>The <b>pagesize</b> argument is the size, in bytes, of the unit of transfer
+between the application and the pool, although it is not necessarily the
+unit of transfer between the pool and the source file.
+<p>Files opened in the pool may be further configured based on the
+<b>finfop</b> argument to memp_fopen (which is a pointer to a
+structure of type DB_MPOOL_FINFO). No references to the <b>finfop</b>
+structure are maintained by Berkeley DB, so it may be discarded when the
+memp_fopen function returns. In order to ensure compatibility
+with future releases of Berkeley DB, all fields of the DB_MPOOL_FINFO structure
+that are not explicitly set should be initialized to 0 before the first
+time the structure is used. Do this by declaring the structure external
+or static, or by calling the C library routine <b>bzero</b>(3) or
+<b>memset</b>(3).
+<p>The fields of the DB_MPOOL_FINFO structure used by memp_fopen are
+described below. If <b>finfop</b> is NULL or any of its fields are
+set to their default value, defaults appropriate for the system are used.
+<p><dl compact>
+<p><dt>int <a name="ftype">ftype</a>;<dd>The <b>ftype</b> field should be the same as the <b>ftype</b> argument
+previously specified to the <a href="../api_c/memp_register.html">memp_register</a> function, unless no
+input or output processing of the file's pages is necessary, in which
+case it should be 0. (See the description of the <a href="../api_c/memp_register.html">memp_register</a>
+function for more information.)
+<p><dt>DBT *<a name="pgcookie">pgcookie</a>;<dd>The <b>pgcookie</b> field contains the byte string that is passed to the
+<b>pgin</b> and <b>pgout</b> functions for this file, if any. If no
+<b>pgin</b> or <b>pgout</b> functions are specified, the
+<b>pgcookie</b> field should be NULL. (See the description of the
+<a href="../api_c/memp_register.html">memp_register</a> function for more information.)
+<p><dt>u_int8_t *<a name="fileid">fileid</a>;<dd>The <b>fileid</b> field is a unique identifier for the file. If the
+<b>fileid</b> field is non-NULL, it must reference a DB_FILE_ID_LEN
+length array of bytes that will be used to uniquely identify the file.
+<p>The mpool functions must be able to uniquely identify files so that
+multiple processes wanting to share a file will correctly identify it in
+the pool.
+<p>On most UNIX/POSIX systems, the <b>fileid</b> field will not need to be
+set and the mpool functions will simply use the file's device and inode
+numbers for this purpose. On Windows systems, the mpool functions use
+the values returned by GetFileInformationByHandle() by default -- these
+values are known to be constant between processes and over reboot in the
+case of NTFS (where they are the NTFS MFT indexes).
+<p>On other filesystems (e.g., FAT or NFS), these default values are not
+necessarily unique between processes or across system reboots.
+<b>Applications wanting to maintain a shared memory buffer pool
+between processes or across system reboots, where the pool contains pages
+from files stored on such filesystems, must specify a unique file
+identifier to the memp_fopen call and each process opening or
+registering the file must provide the same unique identifier.</b>
+<p>This should not be necessary for most applications. Specifically, it is
+not necessary if the memory pool is not shared between processes and is
+re-instantiated after each system reboot, if the application is using the
+Berkeley DB access methods instead of calling the pool functions explicitly, or
+if the files in the memory pool are stored on filesystems where the default
+values described above are invariant between processes and across system
+reboots.
+<p><dt>int32_t <a name="lsn_offset">lsn_offset</a>;<dd>The <b>lsn_offset</b> field is the zero-based byte offset in the page of
+the page's log sequence number (LSN), or -1 if no LSN offset is specified.
+(See the description of the <a href="../api_c/memp_sync.html">memp_sync</a> function for more
+information.)
+<p><dt>u_int32_t <a name="clear_len">clear_len</a>;<dd>The <b>clear_len</b> field is the number of initial bytes in a page
+that should be set to zero when the page is created as a result of the
+DB_MPOOL_CREATE or DB_MPOOL_NEW flags being specified to <a href="../api_c/memp_fget.html">memp_fget</a>.
+If <b>finfop</b> is NULL or <b>clear_len</b> is 0, the entire page is
+cleared.
+</dl>
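+<p>As an illustration, the following sketch (the helper and file names and the
+page size are arbitrary choices, and error handling is omitted) opens a file
+in the pool using an explicitly zeroed DB_MPOOL_FINFO structure, as
+recommended above:
+<p><pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+open_in_pool(DB_ENV *env, DB_MPOOLFILE **mpfp)
+{
+    DB_MPOOL_FINFO finfo;
+
+    /* Zero all fields so unset fields take their default values. */
+    memset(&amp;finfo, 0, sizeof(finfo));
+
+    /* Create the file if necessary, using 8KB pages and mode 0. */
+    return (memp_fopen(env, "myfile.db",
+        DB_CREATE, 0, 8 * 1024, &amp;finfo, mpfp));
+}
+</pre>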
+<p>The memp_fopen function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The memp_fopen function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The file has already been entered into the pool, and the <b>pagesize</b>
+value is not the same as when the file was entered into the pool, or the
+length of the file is neither zero nor a multiple of the <b>pagesize</b>.
+<p>The DB_RDONLY flag was specified for an in-memory pool.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of open files has been reached.
+</dl>
+<p>The memp_fopen function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_fopen function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_fput.html b/bdb/docs/api_c/memp_fput.html
new file mode 100644
index 00000000000..ce382b4d034
--- /dev/null
+++ b/bdb/docs/api_c/memp_fput.html
@@ -0,0 +1,79 @@
+<!--$Id: memp_fput.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_fput</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_fput</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_fput(DB_MPOOLFILE *mpf, void *pgaddr, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_fput function indicates that the page referenced by
+<b>pgaddr</b> can be evicted from the pool. The <b>pgaddr</b>
+argument must be an address previously returned by <a href="../api_c/memp_fget.html">memp_fget</a>.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a><dd>Clear any previously set modification information (i.e., don't bother
+writing the page back to the source file).
+<p><dt><a name="DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a><dd>The page has been modified and must be written to the source file
+before being evicted from the pool.
+<p><dt><a name="DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a><dd>The page is unlikely to be useful in the near future,
+and should be discarded before other pages in the pool.
+</dl>
+<p>The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are
+mutually exclusive.
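+<p>For example, the following sketch (which assumes the <a href="../api_c/memp_fget.html">memp_fget</a>
+interface described on its own page, and omits error recovery) pins a page,
+modifies it, and returns it to the pool marked dirty:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+touch_page(DB_MPOOLFILE *mpf, db_pgno_t pgno)
+{
+    void *pgaddr;
+    int ret;
+
+    /* Pin the page in the pool. */
+    if ((ret = memp_fget(mpf, &amp;pgno, 0, &amp;pgaddr)) != 0)
+        return (ret);
+
+    /* ... modify the page through pgaddr ... */
+
+    /* Unpin the page; it will be written back before eviction. */
+    return (memp_fput(mpf, pgaddr, DB_MPOOL_DIRTY));
+}
+</pre>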
+<p>The memp_fput function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The memp_fput function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>The DB_MPOOL_DIRTY flag was set and the source file was not opened for
+writing.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>pgaddr</b> parameter does not reference a page returned by
+<a href="../api_c/memp_fget.html">memp_fget</a>.
+<p>More than one of the DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags was set.
+</dl>
+<p>The memp_fput function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_fput function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_fset.html b/bdb/docs/api_c/memp_fset.html
new file mode 100644
index 00000000000..73acd322c4e
--- /dev/null
+++ b/bdb/docs/api_c/memp_fset.html
@@ -0,0 +1,72 @@
+<!--$Id: memp_fset.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_fset</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_fset</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_fset(DB_MPOOLFILE *mpf, void *pgaddr, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_fset function sets the flags associated with the page referenced
+by <b>pgaddr</b> without unpinning it from the pool. The <b>pgaddr</b>
+argument must be an address previously returned by <a href="../api_c/memp_fget.html">memp_fget</a>.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a><dd>Clear any previously set modification information (i.e., don't bother
+writing the page back to the source file).
+<p><dt><a name="DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a><dd>The page has been modified and must be written to the source file
+before being evicted from the pool.
+<p><dt><a name="DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a><dd>The page is unlikely to be useful in the near future,
+and should be discarded before other pages in the pool.
+</dl>
+<p>The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are
+mutually exclusive.
+<p>The memp_fset function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The memp_fset function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The memp_fset function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_fset function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_fsync.html b/bdb/docs/api_c/memp_fsync.html
new file mode 100644
index 00000000000..ad429ccf390
--- /dev/null
+++ b/bdb/docs/api_c/memp_fsync.html
@@ -0,0 +1,59 @@
+<!--$Id: memp_fsync.so,v 10.22 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_fsync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_fsync</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_fsync(DB_MPOOLFILE *mpf);
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_fsync function writes back to the source file all pages
+associated with the DB_MPOOLFILE that were marked as modified using
+<a href="../api_c/memp_fput.html">memp_fput</a> or <a href="../api_c/memp_fset.html">memp_fset</a>. If any of the modified
+pages are also <i>pinned</i> (i.e., currently referenced by this or
+another process), memp_fsync will ignore them.
+<p>The memp_fsync function returns a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if there were pages which were
+modified but which memp_fsync was unable to write immediately.
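+<p>Applications that must have all modified pages on disk typically retry
+while memp_fsync reports <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a>; a minimal sketch (the
+back-off interval is an arbitrary choice):
+<p><pre>
+#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+
+int
+flush_file(DB_MPOOLFILE *mpf)
+{
+    int ret;
+
+    /* Retry while modified pages remain pinned in the pool. */
+    while ((ret = memp_fsync(mpf)) == DB_INCOMPLETE)
+        (void)sleep(1);    /* Arbitrary back-off interval. */
+    return (ret);
+}
+</pre>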
+<h1>Errors</h1>
+<p>The memp_fsync function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_fsync function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_register.html b/bdb/docs/api_c/memp_register.html
new file mode 100644
index 00000000000..7c50a89ed2b
--- /dev/null
+++ b/bdb/docs/api_c/memp_register.html
@@ -0,0 +1,93 @@
+<!--$Id: memp_register.so,v 10.23 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_register</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_register(DB_ENV *env, int ftype,
+ int (*pgin_fcn)(DB_ENV *, db_pgno_t pgno, void *pgaddr, DBT *pgcookie),
+ int (*pgout_fcn)(DB_ENV *, db_pgno_t pgno, void *pgaddr, DBT *pgcookie));
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_register function registers page-in and page-out
+functions for files of type <b>ftype</b> in the specified pool.
+<p>If the <b>pgin_fcn</b> function is non-NULL, it is called each time
+a page is read into the memory pool from a file of type <b>ftype</b>, or
+a page is created for a file of type <b>ftype</b> (see the
+DB_MPOOL_CREATE flag for the <a href="../api_c/memp_fget.html">memp_fget</a> function).
+<p>If the <b>pgout_fcn</b> function is non-NULL, it is called each time
+a page is written to a file of type <b>ftype</b>.
+<p>Both the <b>pgin_fcn</b> and <b>pgout_fcn</b> functions are called with
+a reference to the current environment, the page number, a pointer to the
+page being read or written, and any argument <b>pgcookie</b> that was
+specified to the <a href="../api_c/memp_fopen.html">memp_fopen</a> function when the file was opened.
+The <b>pgin_fcn</b> and <b>pgout_fcn</b> functions should return 0 on
+success, and an applicable non-zero <b>errno</b> value on failure, in
+which case the shared memory pool interface routine (and, by extension,
+any Berkeley DB library function) calling it will also fail, returning that
+<b>errno</b> value.
+<p>The purpose of the memp_register function is to support processing
+when pages are entered into, or flushed from, the pool. A file type must
+be specified to make it possible for unrelated threads or processes
+sharing a pool to evict each other's pages from the pool.
+Applications should call memp_register, during initialization,
+for each type of file requiring input or output processing that will be
+sharing the underlying pool. (No registration is necessary for the standard
+Berkeley DB access method types, as <a href="../api_c/db_open.html">DB-&gt;open</a> registers them
+separately.)
+<p>If a thread or process does not call memp_register for a file
+type, it cannot evict from the pool any pages of files requiring input
+or output processing. For this reason,
+memp_register should always be called by each application sharing
+a pool for each type of file included in the pool, regardless of whether
+or not the application itself uses files of that type.
+<p>There are no standard values for <b>ftype</b>, <b>pgin_fcn</b>,
+<b>pgout_fcn</b> and <b>pgcookie</b>, except that the <b>ftype</b>
+value for a file must be a non-zero positive number, as negative numbers
+are reserved for internal use by the Berkeley DB library. For this reason,
+applications sharing a pool must coordinate their values amongst
+themselves.
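+<p>For example, the following sketch (the file type value, helper names and
+the conversions performed are application-specific and purely illustrative)
+registers page-in and page-out functions for one class of files:
+<p><pre>
+#include &lt;db.h&gt;
+
+#define APP_FTYPE_INDEX 1    /* Application-chosen positive value. */
+
+static int
+app_pgin(DB_ENV *env, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
+{
+    /* Convert the page at pgaddr from on-disk to in-memory form. */
+    return (0);
+}
+
+static int
+app_pgout(DB_ENV *env, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
+{
+    /* Convert the page at pgaddr from in-memory to on-disk form. */
+    return (0);
+}
+
+int
+register_app_ftype(DB_ENV *env)
+{
+    /* Called once during application initialization. */
+    return (memp_register(env, APP_FTYPE_INDEX, app_pgin, app_pgout));
+}
+</pre>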
+<p>The memp_register function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The memp_register function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_register function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_stat.html b/bdb/docs/api_c/memp_stat.html
new file mode 100644
index 00000000000..8e9d136a90b
--- /dev/null
+++ b/bdb/docs/api_c/memp_stat.html
@@ -0,0 +1,118 @@
+<!--$Id: memp_stat.so,v 10.28 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_stat(DB_ENV *env, DB_MPOOL_STAT **gsp,
+ DB_MPOOL_FSTAT *(*fsp)[], void *(*db_malloc)(size_t));
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_stat function creates statistical structures and copies
+pointers to them into user-specified memory locations. The statistics
+include the number of files participating in the pool, the active pages
+in the pool, and information as to how effective the cache has been.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>If <b>gsp</b> is non-NULL, the global statistics for the memory pool
+specified by <b>env</b> are copied into the memory location it references.
+The global statistics are stored in a structure of type DB_MPOOL_STAT.
+<p>The following DB_MPOOL_STAT fields will be filled in:
+<p><dl compact>
+<dt>size_t st_gbytes;<dd>Gigabytes of cache (total cache size is st_gbytes + st_bytes)
+<dt>size_t st_bytes;<dd>Bytes of cache (total cache size is st_gbytes + st_bytes)
+<dt>u_int32_t st_ncache;<dd>Number of caches.
+<dt>u_int32_t st_regsize;<dd>Individual cache size.
+<dt>u_int32_t st_cache_hit;<dd>Requested pages found in the cache.
+<dt>u_int32_t st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>u_int32_t st_map;<dd>Requested pages mapped into the process' address space (there is no
+available information as to whether or not this request caused disk I/O,
+although examining the application page fault rate may be helpful).
+<dt>u_int32_t st_page_create;<dd>Pages created in the cache.
+<dt>u_int32_t st_page_in;<dd>Pages read into the cache.
+<dt>u_int32_t st_page_out;<dd>Pages written from the cache to the backing file.
+<dt>u_int32_t st_ro_evict;<dd>Clean pages forced from the cache.
+<dt>u_int32_t st_rw_evict;<dd>Dirty pages forced from the cache.
+<dt>u_int32_t st_hash_buckets;<dd>Number of hash buckets in buffer hash table.
+<dt>u_int32_t st_hash_searches;<dd>Total number of buffer hash table lookups.
+<dt>u_int32_t st_hash_longest;<dd>The longest chain ever encountered in buffer hash table lookups.
+<dt>u_int32_t st_hash_examined;<dd>Total number of hash elements traversed during hash table lookups.
+<dt>u_int32_t st_page_clean;<dd>Clean pages currently in the cache.
+<dt>u_int32_t st_page_dirty;<dd>Dirty pages currently in the cache.
+<dt>u_int32_t st_page_trickle;<dd>Dirty pages written using the <a href="../api_c/memp_trickle.html">memp_trickle</a> interface.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>If <b>fsp</b> is non-NULL, a pointer to a NULL-terminated, variable-length
+array of statistics for the individual files in the memory pool
+is copied into the memory location it references. If no individual files
+currently exist in the memory pool, <b>fsp</b> will be set to NULL.
+<p>The per-file statistics are stored in structures of type DB_MPOOL_FSTAT.
+The following DB_MPOOL_FSTAT fields will be filled in for each file in
+the pool, i.e., each element of the array:
+<p><dl compact>
+<dt>char *file_name;<dd>The name of the file.
+<dt>size_t st_pagesize;<dd>Page size in bytes.
+<dt>u_int32_t st_cache_hit;<dd>Requested pages found in the cache.
+<dt>u_int32_t st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>u_int32_t st_map;<dd>Requested pages mapped into the process' address space.
+<dt>u_int32_t st_page_create;<dd>Pages created in the cache.
+<dt>u_int32_t st_page_in;<dd>Pages read into the cache.
+<dt>u_int32_t st_page_out;<dd>Pages written from the cache to the backing file.
+</dl>
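+<p>For example, the following sketch (error handling abbreviated, helper name
+illustrative) retrieves the global statistics, prints the cache hit rate, and
+frees the returned memory with a single call:
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+int
+print_hit_rate(DB_ENV *env)
+{
+    DB_MPOOL_STAT *gsp;
+    u_int32_t total;
+    int ret;
+
+    if ((ret = memp_stat(env, &amp;gsp, NULL, NULL)) != 0)
+        return (ret);
+
+    total = gsp-&gt;st_cache_hit + gsp-&gt;st_cache_miss;
+    if (total != 0)
+        printf("cache hit rate: %.2f%%\n",
+            100.0 * gsp-&gt;st_cache_hit / total);
+
+    /* A single free releases the returned structure. */
+    free(gsp);
+    return (0);
+}
+</pre>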
+<p>The memp_stat function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The memp_stat function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The memp_stat function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_stat function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_sync.html b/bdb/docs/api_c/memp_sync.html
new file mode 100644
index 00000000000..fc693d47eff
--- /dev/null
+++ b/bdb/docs/api_c/memp_sync.html
@@ -0,0 +1,83 @@
+<!--$Id: memp_sync.so,v 10.25 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_sync</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_sync(DB_ENV *env, DB_LSN *lsn);
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_sync function ensures that any modified pages in the pool with
+log sequence numbers less than the <b>lsn</b> argument are written to
+disk. If <b>lsn</b> is NULL, all modified pages in the pool are
+flushed.
+<p>The primary purpose of the memp_sync function is to enable a
+transaction manager to ensure, as part of a checkpoint, that all pages
+modified by a certain time have been written to disk. Pages in the pool
+that cannot be written back to disk immediately (e.g., that are currently
+pinned) are written to disk as soon as it is possible to do so. The
+expected behavior of the Berkeley DB or other transaction subsystem is to call
+the memp_sync function and then, if the return indicates that some
+pages could not be written immediately, to wait briefly and retry
+with the same log sequence number until the memp_sync function
+returns that all pages have been written.
+<p>To support the memp_sync functionality, it is necessary that the
+pool functions know the location of the log sequence number on the page
+for each file type. This location should be specified when the file is
+opened using the <a href="../api_c/memp_fopen.html">memp_fopen</a> function. It is not required that
+the log sequence number be aligned on the page in any way.
+<p>The memp_sync function returns a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if there were pages which need to be
+written but which memp_sync was unable to write immediately.
+In addition, if memp_sync returns success, the value of
+<b>lsn</b> will be overwritten with the largest log sequence number
+from any page which was written by memp_sync to satisfy this
+request.
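+<p>A transaction manager's checkpoint code might therefore wrap memp_sync
+in a retry loop such as the following sketch (the sleep interval is an
+arbitrary choice):
+<p><pre>
+#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+
+int
+flush_to_lsn(DB_ENV *env, DB_LSN *lsn)
+{
+    int ret;
+
+    /* Retry with the same LSN until all qualifying pages are written. */
+    while ((ret = memp_sync(env, lsn)) == DB_INCOMPLETE)
+        (void)sleep(1);    /* Arbitrary back-off interval. */
+    return (ret);
+}
+</pre>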
+<h1>Errors</h1>
+<p>The memp_sync function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The memp_sync function was called without logging having been
+initialized in the environment.
+</dl>
+<p>The memp_sync function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_sync function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/memp_trickle.html b/bdb/docs/api_c/memp_trickle.html
new file mode 100644
index 00000000000..d7cfd723020
--- /dev/null
+++ b/bdb/docs/api_c/memp_trickle.html
@@ -0,0 +1,66 @@
+<!--$Id: memp_trickle.so,v 10.21 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: memp_trickle</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>memp_trickle</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+memp_trickle(DB_ENV *env, int pct, int *nwrotep);
+</pre></h3>
+<h1>Description</h1>
+<p>The memp_trickle function ensures that at least <b>pct</b> percent of
+the pages in the shared memory pool are clean by writing dirty pages to
+their backing files.
+If the <b>nwrotep</b> argument is non-NULL, the number of pages that
+were written to reach the correct percentage is returned in the memory
+location it references.
+<p>The purpose of the memp_trickle function is to enable a memory
+pool manager to ensure that a page is always available for reading in new
+information without having to wait for a write.
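+<p>For example, a background thread of control might periodically run the
+following sketch to keep at least 20 percent of the pool clean (the
+percentage is an arbitrary choice):
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+trickle_pool(DB_ENV *env)
+{
+    int nwrote, ret;
+
+    ret = memp_trickle(env, 20, &amp;nwrote);
+    /* On success, nwrote holds the number of pages written. */
+    return (ret);
+}
+</pre>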
+<p>The memp_trickle function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The memp_trickle function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The memp_trickle function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the memp_trickle function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fclose.html">memp_fclose</a>,
+<a href="../api_c/memp_fget.html">memp_fget</a>,
+<a href="../api_c/memp_fopen.html">memp_fopen</a>,
+<a href="../api_c/memp_fput.html">memp_fput</a>,
+<a href="../api_c/memp_fset.html">memp_fset</a>,
+<a href="../api_c/memp_fsync.html">memp_fsync</a>,
+<a href="../api_c/memp_register.html">memp_register</a>,
+<a href="../api_c/memp_stat.html">memp_stat</a>,
+<a href="../api_c/memp_sync.html">memp_sync</a>
+and
+<a href="../api_c/memp_trickle.html">memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/pindex.src b/bdb/docs/api_c/pindex.src
new file mode 100644
index 00000000000..1818c50a6d1
--- /dev/null
+++ b/bdb/docs/api_c/pindex.src
@@ -0,0 +1,301 @@
+__APIREL__/api_c/db_create.html#2 @db_create
+__APIREL__/api_c/db_create.html#DB_XA_CREATE db_create@DB_XA_CREATE
+__APIREL__/api_c/db_err.html#2 @DBENV-__GT__err
+__APIREL__/api_c/db_lsn.html#2 @DB_LSN
+__APIREL__/api_c/db_set_errfile.html#2 @DB-__GT__set_errfile
+__APIREL__/api_c/db_set_malloc.html#2 @DB-__GT__set_malloc
+__APIREL__/api_c/db_set_paniccall.html#2 @DB-__GT__set_paniccall
+__APIREL__/api_c/db_set_realloc.html#2 @DB-__GT__set_realloc
+__APIREL__/api_c/dbm.html#2 @dbm/ndbm
+__APIREL__/api_c/dbt.html#2 @key/data pairs
+__APIREL__/api_c/dbt.html#data DBT@data
+__APIREL__/api_c/dbt.html#size DBT@size
+__APIREL__/api_c/dbt.html#ulen DBT@ulen
+__APIREL__/api_c/dbt.html#dlen DBT@dlen
+__APIREL__/api_c/dbt.html#doff DBT@doff
+__APIREL__/api_c/dbt.html#DB_DBT_MALLOC DBT@DB_DBT_MALLOC
+__APIREL__/api_c/dbt.html#DB_DBT_REALLOC DBT@DB_DBT_REALLOC
+__APIREL__/api_c/dbt.html#DB_DBT_USERMEM DBT@DB_DBT_USERMEM
+__APIREL__/api_c/dbt.html#DB_DBT_PARTIAL DBT@DB_DBT_PARTIAL
+__APIREL__/api_c/dbt.html#3 retrieved key/data @permanence
+__APIREL__/api_c/dbt.html#4 retrieved @key/data permanence
+__APIREL__/api_c/dbt.html#5 data @alignment
+__APIREL__/api_c/dbt.html#6 logical @record number format
+__APIREL__/api_c/env_create.html#2 @db_env_create
+__APIREL__/api_c/env_create.html#DB_CLIENT db_env_create@DB_CLIENT
+__APIREL__/api_c/env_set_errfile.html#2 @DBENV-__GT__set_errfile
+__APIREL__/api_c/env_set_paniccall.html#2 @DBENV-__GT__set_paniccall
+__APIREL__/api_c/hsearch.html#2 @hsearch
+__APIREL__/api_c/set_func_close.html#2 @db_env_set_func_close
+__APIREL__/api_c/set_func_dirfree.html#2 @db_env_set_func_dirfree
+__APIREL__/api_c/set_func_dirlist.html#2 @db_env_set_func_dirlist
+__APIREL__/api_c/set_func_exists.html#2 @db_env_set_func_exists
+__APIREL__/api_c/set_func_free.html#2 @db_env_set_func_free
+__APIREL__/api_c/set_func_fsync.html#2 @db_env_set_func_fsync
+__APIREL__/api_c/set_func_ioinfo.html#2 @db_env_set_func_ioinfo
+__APIREL__/api_c/set_func_malloc.html#2 @db_env_set_func_malloc
+__APIREL__/api_c/set_func_map.html#2 @db_env_set_func_map
+__APIREL__/api_c/set_func_open.html#2 @db_env_set_func_open
+__APIREL__/api_c/set_func_read.html#2 @db_env_set_func_read
+__APIREL__/api_c/set_func_realloc.html#2 @db_env_set_func_realloc
+__APIREL__/api_c/set_func_rename.html#2 @db_env_set_func_rename
+__APIREL__/api_c/set_func_seek.html#2 @db_env_set_func_seek
+__APIREL__/api_c/set_func_sleep.html#2 @db_env_set_func_sleep
+__APIREL__/api_c/set_func_unlink.html#2 @db_env_set_func_unlink
+__APIREL__/api_c/set_func_unmap.html#2 @db_env_set_func_unmap
+__APIREL__/api_c/set_func_write.html#2 @db_env_set_func_write
+__APIREL__/api_c/set_func_yield.html#2 @db_env_set_func_yield
+__APIREL__/api_c/db_close.html#2 @DB-__GT__close
+__APIREL__/api_c/db_close.html#DB_NOSYNC DB-__GT__close@DB_NOSYNC
+__APIREL__/api_c/db_close.html#3 DB-__GT__close @DB_INCOMPLETE
+__APIREL__/api_c/db_cursor.html#2 @DB-__GT__cursor
+__APIREL__/api_c/db_cursor.html#DB_WRITECURSOR DB-__GT__cursor@DB_WRITECURSOR
+__APIREL__/api_c/db_del.html#2 @DB-__GT__del
+__APIREL__/api_c/db_fd.html#2 @DB-__GT__fd
+__APIREL__/api_c/db_get.html#2 @DB-__GT__get
+__APIREL__/api_c/db_get.html#DB_CONSUME DB-__GT__get@DB_CONSUME
+__APIREL__/api_c/db_get.html#DB_CONSUME_WAIT DB-__GT__get@DB_CONSUME_WAIT
+__APIREL__/api_c/db_get.html#DB_GET_BOTH DB-__GT__get@DB_GET_BOTH
+__APIREL__/api_c/db_get.html#DB_SET_RECNO DB-__GT__get@DB_SET_RECNO
+__APIREL__/api_c/db_get.html#DB_RMW DB-__GT__get@DB_RMW
+__APIREL__/api_c/db_get_byteswapped.html#2 @DB-__GT__get_byteswapped
+__APIREL__/api_c/db_get_type.html#2 @DB-__GT__get_type
+__APIREL__/api_c/db_join.html#2 @DB-__GT__join
+__APIREL__/api_c/db_join.html#DB_JOIN_NOSORT DB-__GT__join@DB_JOIN_NOSORT
+__APIREL__/api_c/db_join.html#DB_JOIN_ITEM DB-__GT__join@DB_JOIN_ITEM
+__APIREL__/api_c/db_join.html#DB_RMW DB-__GT__join@DB_RMW
+__APIREL__/api_c/db_key_range.html#2 @DB-__GT__key_range
+__APIREL__/api_c/db_open.html#2 @DB-__GT__open
+__APIREL__/api_c/db_open.html#DB_CREATE DB-__GT__open@DB_CREATE
+__APIREL__/api_c/db_open.html#DB_EXCL DB-__GT__open@DB_EXCL
+__APIREL__/api_c/db_open.html#DB_NOMMAP DB-__GT__open@DB_NOMMAP
+__APIREL__/api_c/db_open.html#DB_RDONLY DB-__GT__open@DB_RDONLY
+__APIREL__/api_c/db_open.html#DB_THREAD DB-__GT__open@DB_THREAD
+__APIREL__/api_c/db_open.html#DB_TRUNCATE DB-__GT__open@DB_TRUNCATE
+__APIREL__/api_c/db_open.html#DB_OLD_VERSION DB-__GT__open@DB_OLD_VERSION
+__APIREL__/api_c/db_put.html#2 @DB-__GT__put
+__APIREL__/api_c/db_put.html#DB_APPEND DB-__GT__put@DB_APPEND
+__APIREL__/api_c/db_put.html#DB_NODUPDATA DB-__GT__put@DB_NODUPDATA
+__APIREL__/api_c/db_put.html#DB_NOOVERWRITE DB-__GT__put@DB_NOOVERWRITE
+__APIREL__/api_c/db_remove.html#2 @DB-__GT__remove
+__APIREL__/api_c/db_rename.html#2 @DB-__GT__rename
+__APIREL__/api_c/db_set_append_recno.html#2 @DB-__GT__set_append_recno
+__APIREL__/api_c/db_set_bt_compare.html#2 @DB-__GT__set_bt_compare
+__APIREL__/api_c/db_set_bt_minkey.html#2 @DB-__GT__set_bt_minkey
+__APIREL__/api_c/db_set_bt_prefix.html#2 @DB-__GT__set_bt_prefix
+__APIREL__/api_c/db_set_cachesize.html#2 @DB-__GT__set_cachesize
+__APIREL__/api_c/db_set_dup_compare.html#2 @DB-__GT__set_dup_compare
+__APIREL__/api_c/db_set_errcall.html#2 @DB-__GT__set_errcall
+__APIREL__/api_c/db_set_errpfx.html#2 @DB-__GT__set_errpfx
+__APIREL__/api_c/db_set_feedback.html#2 @DB-__GT__set_feedback
+__APIREL__/api_c/db_set_feedback.html#DB_UPGRADE DB-__GT__set_feedback@DB_UPGRADE
+__APIREL__/api_c/db_set_feedback.html#DB_VERIFY DB-__GT__set_feedback@DB_VERIFY
+__APIREL__/api_c/db_set_flags.html#2 @DB-__GT__set_flags
+__APIREL__/api_c/db_set_flags.html#DB_DUP DB-__GT__set_flags@DB_DUP
+__APIREL__/api_c/db_set_flags.html#DB_DUPSORT DB-__GT__set_flags@DB_DUPSORT
+__APIREL__/api_c/db_set_flags.html#DB_RECNUM DB-__GT__set_flags@DB_RECNUM
+__APIREL__/api_c/db_set_flags.html#DB_REVSPLITOFF DB-__GT__set_flags@DB_REVSPLITOFF
+__APIREL__/api_c/db_set_flags.html#DB_DUP DB-__GT__set_flags@DB_DUP
+__APIREL__/api_c/db_set_flags.html#DB_DUPSORT DB-__GT__set_flags@DB_DUPSORT
+__APIREL__/api_c/db_set_flags.html#DB_RENUMBER DB-__GT__set_flags@DB_RENUMBER
+__APIREL__/api_c/db_set_flags.html#DB_SNAPSHOT DB-__GT__set_flags@DB_SNAPSHOT
+__APIREL__/api_c/db_set_h_ffactor.html#2 @DB-__GT__set_h_ffactor
+__APIREL__/api_c/db_set_h_hash.html#2 @DB-__GT__set_h_hash
+__APIREL__/api_c/db_set_h_nelem.html#2 @DB-__GT__set_h_nelem
+__APIREL__/api_c/db_set_lorder.html#2 @DB-__GT__set_lorder
+__APIREL__/api_c/db_set_pagesize.html#2 @DB-__GT__set_pagesize
+__APIREL__/api_c/db_set_q_extentsize.html#2 @DB-__GT__set_q_extentsize
+__APIREL__/api_c/db_set_re_delim.html#2 @DB-__GT__set_re_delim
+__APIREL__/api_c/db_set_re_len.html#2 @DB-__GT__set_re_len
+__APIREL__/api_c/db_set_re_pad.html#2 @DB-__GT__set_re_pad
+__APIREL__/api_c/db_set_re_source.html#2 @DB-__GT__set_re_source
+__APIREL__/api_c/db_stat.html#2 @DB-__GT__stat
+__APIREL__/api_c/db_stat.html#DB_CACHED_COUNTS DB-__GT__stat@DB_CACHED_COUNTS
+__APIREL__/api_c/db_stat.html#DB_RECORDCOUNT DB-__GT__stat@DB_RECORDCOUNT
+__APIREL__/api_c/db_sync.html#2 @DB-__GT__sync
+__APIREL__/api_c/db_upgrade.html#2 @DB-__GT__upgrade
+__APIREL__/api_c/db_upgrade.html#DB_DUPSORT DB-__GT__upgrade@DB_DUPSORT
+__APIREL__/api_c/db_upgrade.html#DB_OLD_VERSION DB-__GT__upgrade@DB_OLD_VERSION
+__APIREL__/api_c/db_verify.html#2 @DB-__GT__verify
+__APIREL__/api_c/db_verify.html#DB_SALVAGE DB-__GT__verify@DB_SALVAGE
+__APIREL__/api_c/db_verify.html#DB_AGGRESSIVE DB-__GT__verify@DB_AGGRESSIVE
+__APIREL__/api_c/db_verify.html#DB_NOORDERCHK DB-__GT__verify@DB_NOORDERCHK
+__APIREL__/api_c/db_verify.html#DB_ORDERCHKONLY DB-__GT__verify@DB_ORDERCHKONLY
+__APIREL__/api_c/dbc_close.html#2 @DBcursor-__GT__c_close
+__APIREL__/api_c/dbc_count.html#2 @DBcursor-__GT__c_count
+__APIREL__/api_c/dbc_del.html#2 @DBcursor-__GT__c_del
+__APIREL__/api_c/dbc_dup.html#2 @DBcursor-__GT__c_dup
+__APIREL__/api_c/dbc_dup.html#DB_POSITION DBcursor-__GT__c_dup@DB_POSITION
+__APIREL__/api_c/dbc_get.html#2 @DBcursor-__GT__c_get
+__APIREL__/api_c/dbc_get.html#DB_CURRENT DBcursor-__GT__c_get@DB_CURRENT
+__APIREL__/api_c/dbc_get.html#DB_FIRST DBcursor-__GT__c_get@DB_FIRST
+__APIREL__/api_c/dbc_get.html#DB_LAST DBcursor-__GT__c_get@DB_LAST
+__APIREL__/api_c/dbc_get.html#DB_GET_BOTH DBcursor-__GT__c_get@DB_GET_BOTH
+__APIREL__/api_c/dbc_get.html#DB_GET_RECNO DBcursor-__GT__c_get@DB_GET_RECNO
+__APIREL__/api_c/dbc_get.html#DB_JOIN_ITEM DBcursor-__GT__c_get@DB_JOIN_ITEM
+__APIREL__/api_c/dbc_get.html#DB_NEXT DBcursor-__GT__c_get@DB_NEXT
+__APIREL__/api_c/dbc_get.html#DB_PREV DBcursor-__GT__c_get@DB_PREV
+__APIREL__/api_c/dbc_get.html#DB_NEXT_DUP DBcursor-__GT__c_get@DB_NEXT_DUP
+__APIREL__/api_c/dbc_get.html#DB_NEXT_NODUP DBcursor-__GT__c_get@DB_NEXT_NODUP
+__APIREL__/api_c/dbc_get.html#DB_PREV_NODUP DBcursor-__GT__c_get@DB_PREV_NODUP
+__APIREL__/api_c/dbc_get.html#DB_SET DBcursor-__GT__c_get@DB_SET
+__APIREL__/api_c/dbc_get.html#DB_SET_RANGE DBcursor-__GT__c_get@DB_SET_RANGE
+__APIREL__/api_c/dbc_get.html#DB_SET_RECNO DBcursor-__GT__c_get@DB_SET_RECNO
+__APIREL__/api_c/dbc_get.html#DB_RMW DBcursor-__GT__c_get@DB_RMW
+__APIREL__/api_c/dbc_put.html#2 @DBcursor-__GT__c_put
+__APIREL__/api_c/dbc_put.html#DB_AFTER DBcursor-__GT__c_put@DB_AFTER
+__APIREL__/api_c/dbc_put.html#DB_BEFORE DBcursor-__GT__c_put@DB_BEFORE
+__APIREL__/api_c/dbc_put.html#DB_CURRENT DBcursor-__GT__c_put@DB_CURRENT
+__APIREL__/api_c/dbc_put.html#DB_KEYFIRST DBcursor-__GT__c_put@DB_KEYFIRST
+__APIREL__/api_c/dbc_put.html#DB_KEYLAST DBcursor-__GT__c_put@DB_KEYLAST
+__APIREL__/api_c/dbc_put.html#DB_NODUPDATA DBcursor-__GT__c_put@DB_NODUPDATA
+__APIREL__/api_c/env_close.html#2 @DBENV-__GT__close
+__APIREL__/api_c/env_open.html#2 @DBENV-__GT__open
+__APIREL__/api_c/env_open.html#DB_JOINENV DBENV-__GT__open@DB_JOINENV
+__APIREL__/api_c/env_open.html#DB_INIT_CDB DBENV-__GT__open@DB_INIT_CDB
+__APIREL__/api_c/env_open.html#DB_INIT_LOCK DBENV-__GT__open@DB_INIT_LOCK
+__APIREL__/api_c/env_open.html#DB_INIT_LOG DBENV-__GT__open@DB_INIT_LOG
+__APIREL__/api_c/env_open.html#DB_INIT_MPOOL DBENV-__GT__open@DB_INIT_MPOOL
+__APIREL__/api_c/env_open.html#DB_INIT_TXN DBENV-__GT__open@DB_INIT_TXN
+__APIREL__/api_c/env_open.html#DB_RECOVER DBENV-__GT__open@DB_RECOVER
+__APIREL__/api_c/env_open.html#DB_RECOVER_FATAL DBENV-__GT__open@DB_RECOVER_FATAL
+__APIREL__/api_c/env_open.html#DB_USE_ENVIRON DBENV-__GT__open@DB_USE_ENVIRON
+__APIREL__/api_c/env_open.html#DB_USE_ENVIRON_ROOT DBENV-__GT__open@DB_USE_ENVIRON_ROOT
+__APIREL__/api_c/env_open.html#DB_CREATE DBENV-__GT__open@DB_CREATE
+__APIREL__/api_c/env_open.html#DB_LOCKDOWN DBENV-__GT__open@DB_LOCKDOWN
+__APIREL__/api_c/env_open.html#DB_PRIVATE DBENV-__GT__open@DB_PRIVATE
+__APIREL__/api_c/env_open.html#DB_SYSTEM_MEM DBENV-__GT__open@DB_SYSTEM_MEM
+__APIREL__/api_c/env_open.html#DB_THREAD DBENV-__GT__open@DB_THREAD
+__APIREL__/api_c/env_remove.html#2 @DBENV-__GT__remove
+__APIREL__/api_c/env_remove.html#DB_FORCE DBENV-__GT__remove@DB_FORCE
+__APIREL__/api_c/env_remove.html#DB_USE_ENVIRON DBENV-__GT__remove@DB_USE_ENVIRON
+__APIREL__/api_c/env_remove.html#DB_USE_ENVIRON_ROOT DBENV-__GT__remove@DB_USE_ENVIRON_ROOT
+__APIREL__/api_c/env_set_cachesize.html#2 @DBENV-__GT__set_cachesize
+__APIREL__/api_c/env_set_data_dir.html#2 @DBENV-__GT__set_data_dir
+__APIREL__/api_c/env_set_errcall.html#2 @DBENV-__GT__set_errcall
+__APIREL__/api_c/env_set_errpfx.html#2 @DBENV-__GT__set_errpfx
+__APIREL__/api_c/env_set_feedback.html#2 @DBENV-__GT__set_feedback
+__APIREL__/api_c/env_set_feedback.html#DB_RECOVER DBENV-__GT__set_feedback@DB_RECOVER
+__APIREL__/api_c/env_set_flags.html#2 @DBENV-__GT__set_flags
+__APIREL__/api_c/env_set_flags.html#DB_CDB_ALLDB DBENV-__GT__set_flags@DB_CDB_ALLDB
+__APIREL__/api_c/env_set_flags.html#DB_NOMMAP DBENV-__GT__set_flags@DB_NOMMAP
+__APIREL__/api_c/env_set_flags.html#DB_TXN_NOSYNC DBENV-__GT__set_flags@DB_TXN_NOSYNC
+__APIREL__/api_c/env_set_lg_bsize.html#2 @DBENV-__GT__set_lg_bsize
+__APIREL__/api_c/env_set_lg_dir.html#2 @DBENV-__GT__set_lg_dir
+__APIREL__/api_c/env_set_lg_max.html#2 @DBENV-__GT__set_lg_max
+__APIREL__/api_c/env_set_lk_conflicts.html#2 @DBENV-__GT__set_lk_conflicts
+__APIREL__/api_c/env_set_lk_detect.html#2 @DBENV-__GT__set_lk_detect
+__APIREL__/api_c/env_set_lk_detect.html#DB_LOCK_DEFAULT DBENV-__GT__set_lk_detect@DB_LOCK_DEFAULT
+__APIREL__/api_c/env_set_lk_detect.html#DB_LOCK_OLDEST DBENV-__GT__set_lk_detect@DB_LOCK_OLDEST
+__APIREL__/api_c/env_set_lk_detect.html#DB_LOCK_RANDOM DBENV-__GT__set_lk_detect@DB_LOCK_RANDOM
+__APIREL__/api_c/env_set_lk_detect.html#DB_LOCK_YOUNGEST DBENV-__GT__set_lk_detect@DB_LOCK_YOUNGEST
+__APIREL__/api_c/env_set_lk_max.html#2 @DBENV-__GT__set_lk_max
+__APIREL__/api_c/env_set_lk_max_locks.html#2 @DBENV-__GT__set_lk_max_locks
+__APIREL__/api_c/env_set_lk_max_lockers.html#2 @DBENV-__GT__set_lk_max_lockers
+__APIREL__/api_c/env_set_lk_max_objects.html#2 @DBENV-__GT__set_lk_max_objects
+__APIREL__/api_c/env_set_mp_mmapsize.html#2 @DBENV-__GT__set_mp_mmapsize
+__APIREL__/api_c/env_set_mutexlocks.html#2 @DBENV-__GT__set_mutexlocks
+__APIREL__/api_c/env_set_pageyield.html#2 @db_env_set_pageyield
+__APIREL__/api_c/env_set_panicstate.html#2 @db_env_set_panicstate
+__APIREL__/api_c/env_set_rec_init.html#2 @DBENV-__GT__set_recovery_init
+__APIREL__/api_c/env_set_region_init.html#2 @db_env_set_region_init
+__APIREL__/api_c/env_set_server.html#2 @DBENV-__GT__set_server
+__APIREL__/api_c/env_set_server.html#DB_NOSERVER DBENV-__GT__set_server@DB_NOSERVER
+__APIREL__/api_c/env_set_server.html#DB_NOSERVER_ID DBENV-__GT__set_server@DB_NOSERVER_ID
+__APIREL__/api_c/env_set_shm_key.html#2 @DBENV-__GT__set_shm_key
+__APIREL__/api_c/env_set_tas_spins.html#2 @db_env_set_tas_spins
+__APIREL__/api_c/env_set_tmp_dir.html#2 @DBENV-__GT__set_tmp_dir
+__APIREL__/api_c/env_set_tx_max.html#2 @DBENV-__GT__set_tx_max
+__APIREL__/api_c/env_set_tx_recover.html#2 @DBENV-__GT__set_tx_recover
+__APIREL__/api_c/env_set_tx_recover.html#DB_TXN_BACKWARD_ROLL DBENV-__GT__set_tx_recover@DB_TXN_BACKWARD_ROLL
+__APIREL__/api_c/env_set_tx_recover.html#DB_TXN_FORWARD_ROLL DBENV-__GT__set_tx_recover@DB_TXN_FORWARD_ROLL
+__APIREL__/api_c/env_set_tx_recover.html#DB_TXN_ABORT DBENV-__GT__set_tx_recover@DB_TXN_ABORT
+__APIREL__/api_c/env_set_tx_timestamp.html#2 @DBENV-__GT__set_tx_timestamp
+__APIREL__/api_c/env_set_verbose.html#2 @DBENV-__GT__set_verbose
+__APIREL__/api_c/env_set_verbose.html#DB_VERB_CHKPOINT DBENV-__GT__set_verbose@DB_VERB_CHKPOINT
+__APIREL__/api_c/env_set_verbose.html#DB_VERB_DEADLOCK DBENV-__GT__set_verbose@DB_VERB_DEADLOCK
+__APIREL__/api_c/env_set_verbose.html#DB_VERB_RECOVERY DBENV-__GT__set_verbose@DB_VERB_RECOVERY
+__APIREL__/api_c/env_set_verbose.html#DB_VERB_WAITSFOR DBENV-__GT__set_verbose@DB_VERB_WAITSFOR
+__APIREL__/api_c/env_strerror.html#2 @db_strerror
+__APIREL__/api_c/env_version.html#2 @db_version
+__APIREL__/api_c/lock_detect.html#2 @lock_detect
+__APIREL__/api_c/lock_detect.html#DB_LOCK_CONFLICT lock_detect@DB_LOCK_CONFLICT
+__APIREL__/api_c/lock_get.html#2 @lock_get
+__APIREL__/api_c/lock_get.html#DB_LOCK_NOWAIT lock_get@DB_LOCK_NOWAIT
+__APIREL__/api_c/lock_get.html#DB_LOCK_NOTGRANTED lock_get@DB_LOCK_NOTGRANTED
+__APIREL__/api_c/lock_id.html#2 @lock_id
+__APIREL__/api_c/lock_put.html#2 @lock_put
+__APIREL__/api_c/lock_stat.html#2 @lock_stat
+__APIREL__/api_c/lock_vec.html#2 @lock_vec
+__APIREL__/api_c/lock_vec.html#DB_LOCK_NOWAIT lock_vec@DB_LOCK_NOWAIT
+__APIREL__/api_c/lock_vec.html#op lock_vec@op
+__APIREL__/api_c/lock_vec.html#DB_LOCK_GET lock_vec@DB_LOCK_GET
+__APIREL__/api_c/lock_vec.html#DB_LOCK_PUT lock_vec@DB_LOCK_PUT
+__APIREL__/api_c/lock_vec.html#DB_LOCK_PUT_ALL lock_vec@DB_LOCK_PUT_ALL
+__APIREL__/api_c/lock_vec.html#DB_LOCK_PUT_OBJ lock_vec@DB_LOCK_PUT_OBJ
+__APIREL__/api_c/lock_vec.html#obj lock_vec@obj
+__APIREL__/api_c/lock_vec.html#mode lock_vec@mode
+__APIREL__/api_c/lock_vec.html#lock lock_vec@lock
+__APIREL__/api_c/lock_vec.html#DB_LOCK_NOTGRANTED lock_vec@DB_LOCK_NOTGRANTED
+__APIREL__/api_c/log_archive.html#2 @log_archive
+__APIREL__/api_c/log_archive.html#DB_ARCH_ABS log_archive@DB_ARCH_ABS
+__APIREL__/api_c/log_archive.html#DB_ARCH_DATA log_archive@DB_ARCH_DATA
+__APIREL__/api_c/log_archive.html#DB_ARCH_LOG log_archive@DB_ARCH_LOG
+__APIREL__/api_c/log_compare.html#2 @log_compare
+__APIREL__/api_c/log_file.html#2 @log_file
+__APIREL__/api_c/log_flush.html#2 @log_flush
+__APIREL__/api_c/log_get.html#2 @log_get
+__APIREL__/api_c/log_get.html#DB_CHECKPOINT log_get@DB_CHECKPOINT
+__APIREL__/api_c/log_get.html#DB_FIRST log_get@DB_FIRST
+__APIREL__/api_c/log_get.html#DB_LAST log_get@DB_LAST
+__APIREL__/api_c/log_get.html#DB_NEXT log_get@DB_NEXT
+__APIREL__/api_c/log_get.html#DB_PREV log_get@DB_PREV
+__APIREL__/api_c/log_get.html#DB_CURRENT log_get@DB_CURRENT
+__APIREL__/api_c/log_get.html#DB_SET log_get@DB_SET
+__APIREL__/api_c/log_put.html#2 @log_put
+__APIREL__/api_c/log_put.html#DB_CHECKPOINT log_put@DB_CHECKPOINT
+__APIREL__/api_c/log_put.html#DB_CURLSN log_put@DB_CURLSN
+__APIREL__/api_c/log_put.html#DB_FLUSH log_put@DB_FLUSH
+__APIREL__/api_c/log_register.html#2 @log_register
+__APIREL__/api_c/log_stat.html#2 @log_stat
+__APIREL__/api_c/log_unregister.html#2 @log_unregister
+__APIREL__/api_c/memp_fclose.html#2 @memp_fclose
+__APIREL__/api_c/memp_fget.html#2 @memp_fget
+__APIREL__/api_c/memp_fget.html#DB_MPOOL_CREATE memp_fget@DB_MPOOL_CREATE
+__APIREL__/api_c/memp_fget.html#DB_MPOOL_LAST memp_fget@DB_MPOOL_LAST
+__APIREL__/api_c/memp_fget.html#DB_MPOOL_NEW memp_fget@DB_MPOOL_NEW
+__APIREL__/api_c/memp_fopen.html#2 @memp_fopen
+__APIREL__/api_c/memp_fopen.html#DB_CREATE memp_fopen@DB_CREATE
+__APIREL__/api_c/memp_fopen.html#DB_NOMMAP memp_fopen@DB_NOMMAP
+__APIREL__/api_c/memp_fopen.html#DB_RDONLY memp_fopen@DB_RDONLY
+__APIREL__/api_c/memp_fopen.html#ftype memp_fopen@ftype
+__APIREL__/api_c/memp_fopen.html#pgcookie memp_fopen@pgcookie
+__APIREL__/api_c/memp_fopen.html#fileid memp_fopen@fileid
+__APIREL__/api_c/memp_fopen.html#lsn_offset memp_fopen@lsn_offset
+__APIREL__/api_c/memp_fopen.html#clear_len memp_fopen@clear_len
+__APIREL__/api_c/memp_fput.html#2 @memp_fput
+__APIREL__/api_c/memp_fput.html#DB_MPOOL_CLEAN memp_fput@DB_MPOOL_CLEAN
+__APIREL__/api_c/memp_fput.html#DB_MPOOL_DIRTY memp_fput@DB_MPOOL_DIRTY
+__APIREL__/api_c/memp_fput.html#DB_MPOOL_DISCARD memp_fput@DB_MPOOL_DISCARD
+__APIREL__/api_c/memp_fset.html#2 @memp_fset
+__APIREL__/api_c/memp_fset.html#DB_MPOOL_CLEAN memp_fset@DB_MPOOL_CLEAN
+__APIREL__/api_c/memp_fset.html#DB_MPOOL_DIRTY memp_fset@DB_MPOOL_DIRTY
+__APIREL__/api_c/memp_fset.html#DB_MPOOL_DISCARD memp_fset@DB_MPOOL_DISCARD
+__APIREL__/api_c/memp_fsync.html#2 @memp_fsync
+__APIREL__/api_c/memp_register.html#2 @memp_register
+__APIREL__/api_c/memp_stat.html#2 @memp_stat
+__APIREL__/api_c/memp_sync.html#2 @memp_sync
+__APIREL__/api_c/memp_trickle.html#2 @memp_trickle
+__APIREL__/api_c/txn_abort.html#2 @txn_abort
+__APIREL__/api_c/txn_begin.html#2 @txn_begin
+__APIREL__/api_c/txn_begin.html#DB_TXN_NOSYNC txn_begin@DB_TXN_NOSYNC
+__APIREL__/api_c/txn_begin.html#DB_TXN_NOWAIT txn_begin@DB_TXN_NOWAIT
+__APIREL__/api_c/txn_begin.html#DB_TXN_SYNC txn_begin@DB_TXN_SYNC
+__APIREL__/api_c/txn_checkpoint.html#2 @txn_checkpoint
+__APIREL__/api_c/txn_checkpoint.html#DB_FORCE txn_checkpoint@DB_FORCE
+__APIREL__/api_c/txn_commit.html#2 @txn_commit
+__APIREL__/api_c/txn_commit.html#DB_TXN_NOSYNC txn_commit@DB_TXN_NOSYNC
+__APIREL__/api_c/txn_commit.html#DB_TXN_SYNC txn_commit@DB_TXN_SYNC
+__APIREL__/api_c/txn_id.html#2 @txn_id
+__APIREL__/api_c/txn_prepare.html#2 @txn_prepare
+__APIREL__/api_c/txn_stat.html#2 @txn_stat
diff --git a/bdb/docs/api_c/set_func_close.html b/bdb/docs/api_c/set_func_close.html
new file mode 100644
index 00000000000..0d4af0aef66
--- /dev/null
+++ b/bdb/docs/api_c/set_func_close.html
@@ -0,0 +1,66 @@
+<!--$Id: set_func_close.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_close</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_close(int (*func_close)(int fd));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>close</b> function
+with <b>func_close</b>, which must conform to the standard interface.
+<p>The db_env_set_func_close interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_close interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_close function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
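A minimal sketch of the registration pattern the page above describes, assuming a POSIX C environment; my_close is a hypothetical wrapper, not part of Berkeley DB. The same pattern applies to the other db_env_set_func_* interfaces that replace standard functions (open, read, write, fsync, unlink).

    #include <db.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical close wrapper: log the descriptor, then behave exactly
     * like the POSIX close function it replaces. */
    static int
    my_close(int fd)
    {
        fprintf(stderr, "bdb closing fd %d\n", fd);
        return (close(fd));
    }

    int
    main()
    {
        DB_ENV *dbenv;
        int ret;

        /* Register the replacement before db_env_create or db_create. */
        if ((ret = db_env_set_func_close(my_close)) != 0)
            return (1);
        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (1);
        /* ... normal environment configuration and DBENV->open follow ... */
        return (dbenv->close(dbenv, 0) == 0 ? 0 : 1);
    }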
diff --git a/bdb/docs/api_c/set_func_dirfree.html b/bdb/docs/api_c/set_func_dirfree.html
new file mode 100644
index 00000000000..249f69cc676
--- /dev/null
+++ b/bdb/docs/api_c/set_func_dirfree.html
@@ -0,0 +1,75 @@
+<!--$Id: set_func_dirfree.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_dirfree</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_dirfree</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_dirfree(void (*func_dirfree)(char **namesp, int cnt));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to return any memory allocated as part
+of the routine which reads through a directory and creates a list of files
+that the directory contains (see <a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>).
+The <b>func_dirfree</b> argument must conform to the following interface:
+<p><blockquote><pre>void dirfree(char **namesp, int cnt);</pre></blockquote>
+<p>The <b>namesp</b> and <b>cnt</b> arguments are the same values as were
+returned by the <a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a> function.
+<p>The <b>func_dirfree</b> function returns no value.
+<p>The db_env_set_func_dirfree interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_dirfree interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_dirfree interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The db_env_set_func_dirfree function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
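A minimal sketch of a conforming dirfree callback, assuming the matching dirlist callback allocated each name and the array with malloc/strdup (see the dirlist sketch further below); app_dirfree is a hypothetical name, not part of Berkeley DB.

    #include <db.h>
    #include <stdlib.h>

    /* Hypothetical callback: release each name returned by the matching
     * dirlist callback, then release the array itself. */
    static void
    app_dirfree(char **namesp, int cnt)
    {
        int i;

        if (namesp == NULL)
            return;
        for (i = 0; i < cnt; ++i)
            free(namesp[i]);
        free(namesp);
    }

    /* Registered before environment creation:
     *     (void)db_env_set_func_dirfree(app_dirfree);
     */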
diff --git a/bdb/docs/api_c/set_func_dirlist.html b/bdb/docs/api_c/set_func_dirlist.html
new file mode 100644
index 00000000000..5025912e5d9
--- /dev/null
+++ b/bdb/docs/api_c/set_func_dirlist.html
@@ -0,0 +1,78 @@
+<!--$Id: set_func_dirlist.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_dirlist</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_dirlist</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_dirlist(
+ int (*func_dirlist)(const char *dir, char ***namesp, int *cntp));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to read through a directory and
+create a list of files that the directory contains. The
+<b>func_dirlist</b> argument must conform to the following interface:
+<p><blockquote><pre>int dirlist(const char *dir, char ***namesp, int *cntp);</pre></blockquote>
+<p>The <b>dir</b> argument is the name of the directory to be searched.
+The function must return a pointer to an array of nul-terminated file
+names in the memory location referenced by the argument <b>namesp</b>,
+and a count of the number of elements in the array in the memory location
+referenced by <b>cntp</b>.
+<p>The <b>func_dirlist</b> function must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_dirlist interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_dirlist interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_dirlist interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The db_env_set_func_dirlist function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
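A minimal sketch of a conforming dirlist callback built on the POSIX opendir/readdir interfaces; app_dirlist is a hypothetical name, and the memory it allocates is the memory released by the app_dirfree sketch above.

    #include <db.h>
    #include <dirent.h>
    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical callback: build a malloc'd array of strdup'd file names
     * and return the array and its element count to Berkeley DB. */
    static int
    app_dirlist(const char *dir, char ***namesp, int *cntp)
    {
        DIR *dirp;
        struct dirent *dp;
        char **names, **tmp;
        int cnt;

        if ((dirp = opendir(dir)) == NULL)
            return (errno);
        names = NULL;
        cnt = 0;
        while ((dp = readdir(dirp)) != NULL) {
            if ((tmp = realloc(names, (cnt + 1) * sizeof(char *))) == NULL)
                goto err;
            names = tmp;
            if ((names[cnt] = strdup(dp->d_name)) == NULL)
                goto err;
            ++cnt;
        }
        (void)closedir(dirp);
        *namesp = names;
        *cntp = cnt;
        return (0);

    err:
        (void)closedir(dirp);
        while (cnt > 0)
            free(names[--cnt]);
        free(names);
        return (ENOMEM);
    }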
diff --git a/bdb/docs/api_c/set_func_exists.html b/bdb/docs/api_c/set_func_exists.html
new file mode 100644
index 00000000000..0b38b1e2203
--- /dev/null
+++ b/bdb/docs/api_c/set_func_exists.html
@@ -0,0 +1,75 @@
+<!--$Id: set_func_exists.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_exists</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_exists</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_exists(int (*func_exists)(const char *path, int *isdirp));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to determine if a file exists, and
+optionally, if it is a file of type directory. The <b>func_exists</b> argument
+must conform to the following interface:
+<p><blockquote><pre>int exists(const char *path, int *isdirp);</pre></blockquote>
+<p>The <b>path</b> argument is the pathname of the file to be checked.
+<p>If the <b>isdirp</b> argument is non-NULL, it must be set to non-0 if
+<b>path</b> is a directory, and 0 if <b>path</b> is not a directory.
+<p>The <b>func_exists</b> function must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_exists interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_exists interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_exists interface may only be used to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DBENV-&gt;open</a> interface is called.
+<p>The db_env_set_func_exists function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
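A minimal sketch of a conforming exists callback using the POSIX stat function; app_exists is a hypothetical name, not part of Berkeley DB.

    #include <db.h>
    #include <errno.h>
    #include <sys/stat.h>

    /* Hypothetical callback: report whether path exists and, if requested,
     * whether it is a directory. */
    static int
    app_exists(const char *path, int *isdirp)
    {
        struct stat sb;

        if (stat(path, &sb) != 0)
            return (errno);
        if (isdirp != NULL)
            *isdirp = S_ISDIR(sb.st_mode) ? 1 : 0;
        return (0);
    }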
diff --git a/bdb/docs/api_c/set_func_free.html b/bdb/docs/api_c/set_func_free.html
new file mode 100644
index 00000000000..8b7b1afa60c
--- /dev/null
+++ b/bdb/docs/api_c/set_func_free.html
@@ -0,0 +1,67 @@
+<!--$Id: set_func_free.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_free</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_free(void (*func_free)(void *ptr));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the ANSI C X3.159-1989 (ANSI C) standard
+<b>free</b> function with <b>func_free</b>, which must conform to
+the standard interface.
+<p>The db_env_set_func_free interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_free interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_free function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_fsync.html b/bdb/docs/api_c/set_func_fsync.html
new file mode 100644
index 00000000000..f73956108b2
--- /dev/null
+++ b/bdb/docs/api_c/set_func_fsync.html
@@ -0,0 +1,66 @@
+<!--$Id: set_func_fsync.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_fsync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_fsync</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_fsync(int (*func_fsync)(int fd));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>fsync</b> function
+with <b>func_fsync</b>, which must conform to the standard interface.
+<p>The db_env_set_func_fsync interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_fsync interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_fsync function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_ioinfo.html b/bdb/docs/api_c/set_func_ioinfo.html
new file mode 100644
index 00000000000..3a0143e57ea
--- /dev/null
+++ b/bdb/docs/api_c/set_func_ioinfo.html
@@ -0,0 +1,83 @@
+<!--$Id: set_func_ioinfo.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_ioinfo</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_ioinfo</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_ioinfo(int (*func_ioinfo)(const char *path,
+ int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to determine the size and I/O
+characteristics of a file. The <b>func_ioinfo</b> argument must conform
+to the following interface:
+<p><blockquote><pre>int ioinfo(const char *path, int fd,
+u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep);</pre></blockquote>
+<p>The <b>path</b> argument is the pathname of the file to be checked, and the
+<b>fd</b> argument is an open file descriptor on the file.
+<p>If the <b>mbytesp</b> and <b>bytesp</b> arguments are non-NULL, the
+<b>ioinfo</b> function must return in them the size of the file: the
+number of megabytes in the file into the memory location referenced by
+the <b>mbytesp</b> argument, and the number of bytes over and above that
+number of megabytes into the memory location referenced by the
+<b>bytesp</b> argument.
+<p>In addition, if the <b>iosizep</b> argument is non-NULL, the <b>ioinfo</b>
+function must return the optimum granularity for I/O operations to the file
+in the memory location referenced by it.
+<p>The <b>func_ioinfo</b> function must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_ioinfo interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_ioinfo interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_ioinfo function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
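A minimal sketch of a conforming ioinfo callback using the POSIX fstat function; app_ioinfo is a hypothetical name, and st_blksize is assumed to be an acceptable I/O granularity on the target system.

    #include <db.h>
    #include <errno.h>
    #include <sys/stat.h>

    #define APP_MEGABYTE (1024 * 1024)

    /* Hypothetical callback: return the file size split into megabytes and
     * remaining bytes, plus the preferred I/O block size. */
    static int
    app_ioinfo(const char *path, int fd,
        u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep)
    {
        struct stat sb;

        (void)path;            /* fstat on the open descriptor is sufficient. */
        if (fstat(fd, &sb) != 0)
            return (errno);
        if (mbytesp != NULL)
            *mbytesp = (u_int32_t)(sb.st_size / APP_MEGABYTE);
        if (bytesp != NULL)
            *bytesp = (u_int32_t)(sb.st_size % APP_MEGABYTE);
        if (iosizep != NULL)
            *iosizep = (u_int32_t)sb.st_blksize;
        return (0);
    }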
diff --git a/bdb/docs/api_c/set_func_malloc.html b/bdb/docs/api_c/set_func_malloc.html
new file mode 100644
index 00000000000..a4be5bfb04e
--- /dev/null
+++ b/bdb/docs/api_c/set_func_malloc.html
@@ -0,0 +1,67 @@
+<!--$Id: set_func_malloc.so,v 10.6 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_malloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_malloc</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_malloc(void *(*func_malloc)(size_t size));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the ANSI C X3.159-1989 (ANSI C) standard
+<b>malloc</b> function with <b>func_malloc</b>, which must conform to
+the standard interface.
+<p>The db_env_set_func_malloc interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_malloc interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_malloc function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
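A minimal sketch showing how the three allocator hooks are normally replaced together, so that memory allocated by one routine can be resized and released by the others; app_malloc is a hypothetical counting wrapper, not part of Berkeley DB.

    #include <db.h>
    #include <stdlib.h>

    /* Hypothetical wrapper: count allocations, then defer to the
     * standard allocator. */
    static unsigned long app_allocations;

    static void *
    app_malloc(size_t size)
    {
        ++app_allocations;
        return (malloc(size));
    }

    /* Registered as a set, before environment creation:
     *     (void)db_env_set_func_malloc(app_malloc);
     *     (void)db_env_set_func_realloc(realloc);
     *     (void)db_env_set_func_free(free);
     */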
diff --git a/bdb/docs/api_c/set_func_map.html b/bdb/docs/api_c/set_func_map.html
new file mode 100644
index 00000000000..e14e9c4aad7
--- /dev/null
+++ b/bdb/docs/api_c/set_func_map.html
@@ -0,0 +1,86 @@
+<!--$Id: set_func_map.so,v 10.8 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_map</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_map</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_map(int (*func_map)(char *path,
+ size_t len, int is_region, int is_rdonly, void **addr));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to map a file into memory and to
+create shared memory regions (which may or may not be backed by files).
+The <b>func_map</b> argument must conform to the following interface:
+<p><blockquote><pre>int map(char *path, size_t len,
+int is_region, int is_rdonly, void **addr);</pre></blockquote>
+<p>The <b>path</b> argument is the name of a file.
+<p>The <b>is_region</b> argument will be zero if the intention is to map a
+file into shared memory. In this case, the <b>map</b> function must map
+the first <b>len</b> bytes of the file into memory and return a pointer
+to the mapped location in the memory location referenced by the argument
+<b>addr</b>. The <b>is_rdonly</b> argument will be non-zero if the file
+is considered read-only by the caller.
+<p>The <b>is_region</b> argument will be non-zero if the memory is intended
+to be used as a shared memory region for synchronization between Berkeley DB
+threads/processes. In this case, the returned memory may be of any kind
+(e.g., anonymous), but must be able to support semaphores. In this case,
+the <b>path</b> argument may be ignored (although future <b>map</b>
+calls using the same <b>path</b> must return the same memory), and the
+<b>is_rdonly</b> argument will always be zero.
+<p>The <b>func_map</b> function must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_map interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_map interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_map function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
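A minimal sketch of the file-backed half of a conforming map callback, using the POSIX open and mmap functions; app_map is a hypothetical name. Shared-region support (memory able to back semaphores, where repeated calls with the same path must return the same memory) is platform specific and omitted from this sketch.

    #include <db.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical callback: map the first len bytes of path into memory. */
    static int
    app_map(char *path, size_t len, int is_region, int is_rdonly, void **addr)
    {
        void *p;
        int fd;

        if (is_region)
            /* Region support is platform specific; not handled here. */
            return (EINVAL);

        if ((fd = open(path, is_rdonly ? O_RDONLY : O_RDWR)) == -1)
            return (errno);
        p = mmap(NULL, len, PROT_READ | (is_rdonly ? 0 : PROT_WRITE),
            MAP_SHARED, fd, 0);
        (void)close(fd);
        if (p == MAP_FAILED)
            return (errno);
        *addr = p;
        return (0);
    }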
diff --git a/bdb/docs/api_c/set_func_open.html b/bdb/docs/api_c/set_func_open.html
new file mode 100644
index 00000000000..ff72d9882ed
--- /dev/null
+++ b/bdb/docs/api_c/set_func_open.html
@@ -0,0 +1,66 @@
+<!--$Id: set_func_open.so,v 10.6 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_open</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_open(int (*func_open)(const char *path, int flags, int mode));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>open</b> function
+with <b>func_open</b>, which must conform to the standard interface.
+<p>The db_env_set_func_open interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_open interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_open function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_read.html b/bdb/docs/api_c/set_func_read.html
new file mode 100644
index 00000000000..b3ee9308118
--- /dev/null
+++ b/bdb/docs/api_c/set_func_read.html
@@ -0,0 +1,66 @@
+<!--$Id: set_func_read.so,v 10.6 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_read</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_read</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_read(ssize_t (*func_read)(int fd, void *buf, size_t nbytes));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>read</b> function
+with <b>func_read</b>, which must conform to the standard interface.
+<p>The db_env_set_func_read interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_read interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_read function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_realloc.html b/bdb/docs/api_c/set_func_realloc.html
new file mode 100644
index 00000000000..91e5835bcca
--- /dev/null
+++ b/bdb/docs/api_c/set_func_realloc.html
@@ -0,0 +1,67 @@
+<!--$Id: set_func_realloc.so,v 10.6 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_realloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_realloc</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_realloc(void *(*func_realloc)(void *ptr, size_t size));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the ANSI C X3.159-1989 (ANSI C) standard
+<b>realloc</b> function with <b>func_realloc</b>, which must conform to
+the standard interface.
+<p>The db_env_set_func_realloc interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_realloc interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_realloc function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_rename.html b/bdb/docs/api_c/set_func_rename.html
new file mode 100644
index 00000000000..bb588672359
--- /dev/null
+++ b/bdb/docs/api_c/set_func_rename.html
@@ -0,0 +1,66 @@
+<!--$Id: set_func_rename.so,v 10.6 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_rename</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_rename(int (*func_rename)(const char *oldpath, const char *newpath));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>rename</b> function
+with <b>func_rename</b>, which must conform to the standard interface.
+<p>The db_env_set_func_rename interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_rename interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_rename function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
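A minimal sketch of a conforming rename replacement, assuming the two-pathname interface of the POSIX rename function described above; app_rename is a hypothetical wrapper, not part of Berkeley DB.

    #include <db.h>
    #include <stdio.h>

    /* Hypothetical wrapper: log the operation, then behave exactly like
     * the standard rename function it replaces. */
    static int
    app_rename(const char *oldpath, const char *newpath)
    {
        fprintf(stderr, "bdb rename %s -> %s\n", oldpath, newpath);
        return (rename(oldpath, newpath));
    }

    /* Registered before environment creation:
     *     (void)db_env_set_func_rename(app_rename);
     */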
diff --git a/bdb/docs/api_c/set_func_seek.html b/bdb/docs/api_c/set_func_seek.html
new file mode 100644
index 00000000000..dd27384f672
--- /dev/null
+++ b/bdb/docs/api_c/set_func_seek.html
@@ -0,0 +1,81 @@
+<!--$Id: set_func_seek.so,v 10.7 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_seek</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_seek</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_seek(int (*func_seek)(int fd, size_t pgsize,
+ db_pgno_t pageno, u_int32_t relative, int rewind, int whence));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to specify that a subsequent read
+from or write to a file will occur at a specific location in that file.
+The <b>func_seek</b> argument must conform to the following interface:
+<p><blockquote><pre>int seek(int fd, size_t pgsize, db_pgno_t pageno,
+u_int32_t relative, int rewind, int whence);</pre></blockquote>
+<p>The <b>fd</b> argument is an open file descriptor on the file.
+<p>The <b>seek</b> function must cause a subsequent read from or write to
+the file to occur at a byte offset specified by the calculation:
+<p><blockquote><pre>(pgsize * pageno) + relative</pre></blockquote>
+<p>If <b>rewind</b> is non-zero, the byte offset is treated as a backwards
+seek, not a forwards one.
+<p>The <b>whence</b> argument specifies where in the file the byte offset
+is relative to, as described by the IEEE/ANSI Std 1003.1 (POSIX) <b>lseek</b> system
+call.
+<p>The <b>func_seek</b> function must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_seek interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_seek interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_seek function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
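A minimal sketch of a conforming seek callback built on the POSIX lseek function; app_seek is a hypothetical name. It computes the byte offset exactly as the formula above describes and negates it when a backwards seek is requested.

    #include <db.h>
    #include <errno.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Hypothetical callback: position fd at (pgsize * pageno) + relative,
     * interpreted according to whence, for the next read or write. */
    static int
    app_seek(int fd, size_t pgsize, db_pgno_t pageno,
        u_int32_t relative, int rewind, int whence)
    {
        off_t offset;

        offset = (off_t)pgsize * pageno + relative;
        if (rewind)
            offset = -offset;
        return (lseek(fd, offset, whence) == (off_t)-1 ? errno : 0);
    }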
diff --git a/bdb/docs/api_c/set_func_sleep.html b/bdb/docs/api_c/set_func_sleep.html
new file mode 100644
index 00000000000..dd454b1a725
--- /dev/null
+++ b/bdb/docs/api_c/set_func_sleep.html
@@ -0,0 +1,76 @@
+<!--$Id: set_func_sleep.so,v 10.7 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_sleep</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_sleep</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_sleep(int (*func_sleep)(u_long seconds, u_long microseconds));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to cause a process to suspend itself
+for a period of time, relinquishing control of the processor to any other
+waiting thread of control. The <b>func_sleep</b> argument must conform
+to the following interface:
+<p><blockquote><pre>int sleep(u_long seconds, u_long microseconds);</pre></blockquote>
+<p>The <b>seconds</b> and <b>microseconds</b> arguments specify the amount
+of time to wait until the suspending thread of control should run again.
+<p>The <b>seconds</b> and <b>microseconds</b> arguments may not be
+normalized when the <b>sleep</b> function is called, i.e., the
+<b>microseconds</b> argument may be greater than 1000000.
+<p>The <b>func_sleep</b> function must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_sleep interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_sleep interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_sleep function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_unlink.html b/bdb/docs/api_c/set_func_unlink.html
new file mode 100644
index 00000000000..1e5e8e3cba9
--- /dev/null
+++ b/bdb/docs/api_c/set_func_unlink.html
@@ -0,0 +1,66 @@
+<!--$Id: set_func_unlink.so,v 10.6 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_unlink</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_unlink</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_unlink(int (*func_unlink)(const char *path));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>unlink</b> function
+with <b>func_unlink</b>, which must conform to the standard interface.
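+<p>For example, a minimal sketch of a replacement (the name <b>my_unlink</b>
+is hypothetical) might simply forward to the system call, perhaps after
+adding tracing or remapping the path:
+<p><blockquote><pre>#include &lt;unistd.h&gt;
+
+int
+my_unlink(const char *path)
+{
+    /* Forward to the system unlink; the return value and errno
+     * behavior therefore match the standard interface. */
+    return (unlink(path));
+}</pre></blockquote>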
+<p>The db_env_set_func_unlink interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_unlink interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_unlink function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_unmap.html b/bdb/docs/api_c/set_func_unmap.html
new file mode 100644
index 00000000000..07635b48a80
--- /dev/null
+++ b/bdb/docs/api_c/set_func_unmap.html
@@ -0,0 +1,75 @@
+<!--$Id: set_func_unmap.so,v 10.8 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_unmap</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_unmap</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_unmap(int (*func_unmap)(void *addr, size_t len));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to unmap a file or shared memory
+region from memory. The <b>func_unmap</b> argument must conform to the
+following interface:
+<p><blockquote><pre>int unmap(void *addr, size_t len);</pre></blockquote>
+<p>The <b>addr</b> argument is the argument returned by the
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a> function when the file or region was mapped
+into memory, and the <b>len</b> argument is the same as the <b>len</b>
+argument specified to the <a href="../api_c/set_func_map.html">db_env_set_func_map</a> function when the
+file or region was mapped into memory.
+<p>The <b>func_unmap</b> function must return the value of <b>errno</b> on
+failure and 0 on success.
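+<p>For example, a minimal sketch of a conforming function (the name
+<b>my_unmap</b> is hypothetical) might be built on the POSIX <b>munmap</b>
+call:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;sys/mman.h&gt;
+#include &lt;errno.h&gt;
+
+int
+my_unmap(void *addr, size_t len)
+{
+    /* Return errno on failure and 0 on success, as required. */
+    return (munmap(addr, len) == -1 ? errno : 0);
+}</pre></blockquote>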
+<p>The db_env_set_func_unmap interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_unmap interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_unmap function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_write.html b/bdb/docs/api_c/set_func_write.html
new file mode 100644
index 00000000000..7b52b5078ba
--- /dev/null
+++ b/bdb/docs/api_c/set_func_write.html
@@ -0,0 +1,67 @@
+<!--$Id: set_func_write.so,v 10.6 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_write</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_write</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_write(
+ ssize_t (*func_write)(int fd, const void *buffer, size_t nbytes));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>write</b> function
+with <b>func_write</b>, which must conform to the standard interface.
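+<p>For example, a minimal sketch of a replacement (the name <b>my_write</b>
+is hypothetical) might simply forward to the system call, perhaps adding
+instrumentation or retry logic:
+<p><blockquote><pre>#include &lt;unistd.h&gt;
+
+ssize_t
+my_write(int fd, const void *buffer, size_t nbytes)
+{
+    /* Forward to the system write; the return value and errno
+     * behavior therefore match the standard interface. */
+    return (write(fd, buffer, nbytes));
+}</pre></blockquote>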
+<p>The db_env_set_func_write interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_write interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_write function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/set_func_yield.html b/bdb/docs/api_c/set_func_yield.html
new file mode 100644
index 00000000000..23915aea68b
--- /dev/null
+++ b/bdb/docs/api_c/set_func_yield.html
@@ -0,0 +1,84 @@
+<!--$Id: set_func_yield.so,v 10.8 2000/05/31 15:10:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_yield</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_env_set_func_yield</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_yield(int (*func_yield)(void));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to yield the processor from the current
+thread of control to any other waiting threads of control.
+The <b>func_yield</b> argument must conform to the following interface:
+<p><blockquote><pre>int yield(void);</pre></blockquote>
+<p>The <b>func_yield</b> function must be able to cause the rescheduling of
+all participants in the current Berkeley DB environment, whether threaded or
+not. It may be incorrect to supply a thread <b>yield</b> function if
+more than a single process is operating in the Berkeley DB environment. This
+is because many thread-yield functions will not allow other processes to
+run, and the contested lock may be held by another process, not by another
+thread.
+<p>If no <b>func_yield</b> function is specified, or if the <b>yield</b>
+function returns an error, the function specified by the
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a> entry will be used in its place or as a
+fallback. That is, if no <b>yield</b> function is specified, or if it is
+possible for the <b>yield</b> function to fail, the <b>sleep</b> function
+<b>must</b> cause the processor to reschedule any waiting threads of
+control for execution.
+<p>The <b>func_yield</b> function must return the value of <b>errno</b> on
+failure and 0 on success.
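+<p>For example, a minimal sketch of a conforming function (the name
+<b>my_yield</b> is hypothetical) might be built on the POSIX
+<b>sched_yield</b> call, which reschedules at the process level and so
+remains usable when multiple processes share the environment:
+<p><blockquote><pre>#include &lt;sched.h&gt;
+#include &lt;errno.h&gt;
+
+int
+my_yield(void)
+{
+    /* Return errno on failure and 0 on success, as required. */
+    return (sched_yield() == -1 ? errno : 0);
+}</pre></blockquote>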
+<p>The db_env_set_func_yield interface affects the entire application, not a single
+database or database environment.
+<p>While the db_env_set_func_yield interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> functions.
+<p>The db_env_set_func_yield function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h1>See Also</h1>
+<a href="../api_c/set_func_close.html">db_env_set_func_close</a>,
+<a href="../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>,
+<a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>,
+<a href="../api_c/set_func_exists.html">db_env_set_func_exists</a>,
+<a href="../api_c/set_func_free.html">db_env_set_func_free</a>,
+<a href="../api_c/set_func_fsync.html">db_env_set_func_fsync</a>,
+<a href="../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>,
+<a href="../api_c/set_func_malloc.html">db_env_set_func_malloc</a>,
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a>,
+<a href="../api_c/set_func_open.html">db_env_set_func_open</a>,
+<a href="../api_c/set_func_read.html">db_env_set_func_read</a>,
+<a href="../api_c/set_func_realloc.html">db_env_set_func_realloc</a>,
+<a href="../api_c/set_func_seek.html">db_env_set_func_seek</a>,
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a>,
+<a href="../api_c/set_func_unlink.html">db_env_set_func_unlink</a>,
+<a href="../api_c/set_func_unmap.html">db_env_set_func_unmap</a>,
+<a href="../api_c/set_func_write.html">db_env_set_func_write</a>
+and
+<a href="../api_c/set_func_yield.html">db_env_set_func_yield</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/txn_abort.html b/bdb/docs/api_c/txn_abort.html
new file mode 100644
index 00000000000..00056023aba
--- /dev/null
+++ b/bdb/docs/api_c/txn_abort.html
@@ -0,0 +1,63 @@
+<!--$Id: txn_abort.so,v 10.25 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn_abort</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>txn_abort</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+txn_abort(DB_TXN *tid);
+</pre></h3>
+<h1>Description</h1>
+<p>The txn_abort function causes an abnormal termination of the
+transaction. The log is played backwards and any necessary recovery
+operations are initiated through the <b>recover</b> function specified
+to <a href="../api_c/env_open.html">DBENV-&gt;open</a>. After the log processing is completed, all locks
+held by the transaction are released. As is the case for
+<a href="../api_c/txn_commit.html">txn_commit</a>, applications that require strict two-phase locking
+should not explicitly release any locks.
+<p>In the case of nested transactions, aborting a parent transaction causes
+all children (unresolved or not) of the parent transaction to be aborted.
+<p>Once the txn_abort function returns, the DB_TXN handle may not
+be accessed again.
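+<p>For example, the following sketch (assuming an open environment
+<b>dbenv</b>, an open database handle <b>dbp</b> and initialized <b>key</b>
+and <b>data</b> DBTs, all hypothetical names) wraps a single update in a
+transaction and aborts it if the update fails:
+<p><blockquote><pre>DB_TXN *tid;
+int ret;
+
+if ((ret = txn_begin(dbenv, NULL, &amp;tid, 0)) != 0)
+    return (ret);
+if ((ret = dbp-&gt;put(dbp, tid, &amp;key, &amp;data, 0)) != 0) {
+    (void)txn_abort(tid);    /* Undo the update; tid is now invalid. */
+    return (ret);
+}
+return (txn_commit(tid, 0));</pre></blockquote>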
+<p>The txn_abort function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The txn_abort function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the txn_abort function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/txn_begin.html b/bdb/docs/api_c/txn_begin.html
new file mode 100644
index 00000000000..0241d5c1fdb
--- /dev/null
+++ b/bdb/docs/api_c/txn_begin.html
@@ -0,0 +1,93 @@
+<!--$Id: txn_begin.so,v 10.37 2001/01/11 17:47:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn_begin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>txn_begin</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+txn_begin(DB_ENV *env,
+ DB_TXN *parent, DB_TXN **tid, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The txn_begin method creates a new transaction in the environment
+and copies a pointer to a DB_TXN that uniquely identifies it into
+the memory referenced by <b>tid</b>.
+<p>If the <b>parent</b> argument is non-NULL, the new transaction will
+be a nested transaction, with the transaction indicated by
+<b>parent</b> as its parent. Transactions may be
+nested to any level.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log when this transaction commits or
+prepares. This means the transaction will exhibit the ACI (atomicity,
+consistency and isolation) properties, but not D (durability), i.e.,
+database integrity will be maintained but it is possible that this
+transaction may be undone during recovery instead of being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of the
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a> interface.
+<p><dt><a name="DB_TXN_NOWAIT">DB_TXN_NOWAIT</a><dd>If a lock is unavailable for any Berkeley DB operation performed in the context
+of this transaction, return immediately instead of blocking on the lock.
+The error return in this case will be <a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a>.
+<p><dt><a name="DB_TXN_SYNC">DB_TXN_SYNC</a><dd>Synchronously flush the log when this transaction commits or prepares.
+This means the transaction will exhibit all of the ACID (atomicity,
+consistency, isolation and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>
+interface.
+</dl>
+<p><b>Note: A transaction may not span threads,
+i.e., each transaction must begin and end in the same thread, and each
+transaction may only be used by a single thread.</b>
+<p><b>Note: cursors may not span transactions, i.e., each cursor must be opened
+and closed within a single transaction.</b>
+<p><b>Note: a parent transaction may not issue any Berkeley DB operations, except for
+txn_begin, <a href="../api_c/txn_abort.html">txn_abort</a> and <a href="../api_c/txn_commit.html">txn_commit</a>, while it has
+active child transactions (child transactions that have not yet been
+committed or aborted).</b>
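+<p>For example, the following sketch (assuming an open environment
+<b>dbenv</b>) creates a child transaction under a parent and resolves
+both, observing the restriction that the parent is used only for
+txn_begin, <a href="../api_c/txn_abort.html">txn_abort</a> and <a href="../api_c/txn_commit.html">txn_commit</a> while the child is active:
+<p><blockquote><pre>DB_TXN *parent, *child;
+int ret;
+
+if ((ret = txn_begin(dbenv, NULL, &amp;parent, 0)) != 0)
+    return (ret);
+if ((ret = txn_begin(dbenv, parent, &amp;child, 0)) != 0) {
+    (void)txn_abort(parent);
+    return (ret);
+}
+/* ... perform database operations in the child transaction ... */
+if ((ret = txn_commit(child, 0)) != 0) {
+    (void)txn_abort(parent);
+    return (ret);
+}
+return (txn_commit(parent, 0));</pre></blockquote>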
+<p>The txn_begin function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The txn_begin function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of concurrent transactions has been reached.
+</dl>
+<p>The txn_begin function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the txn_begin function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/txn_checkpoint.html b/bdb/docs/api_c/txn_checkpoint.html
new file mode 100644
index 00000000000..140edee57d7
--- /dev/null
+++ b/bdb/docs/api_c/txn_checkpoint.html
@@ -0,0 +1,75 @@
+<!--$Id: txn_checkpoint.so,v 10.25 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>txn_checkpoint</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+txn_checkpoint(const DB_ENV *env,
+ u_int32_t kbyte, u_int32_t min, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The txn_checkpoint function flushes the underlying memory pool,
+writes a checkpoint record to the log and then flushes the log.
+<p>If either <b>kbyte</b> or <b>min</b> is non-zero, the checkpoint is only
+done if there has been activity since the last checkpoint and either
+more than <b>min</b> minutes have passed since the last checkpoint,
+or more than <b>kbyte</b> kilobytes of log data have been written since
+the last checkpoint.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_FORCE">DB_FORCE</a><dd>Force a checkpoint record even if there has been no activity since the
+last checkpoint.
+</dl>
+<p>The txn_checkpoint function returns a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if there were pages that needed to be
+written to complete the checkpoint but that <a href="../api_c/memp_sync.html">memp_sync</a> was unable
+to write immediately.
+<p>The txn_checkpoint function is the underlying function used by the <a href="../utility/db_checkpoint.html">db_checkpoint</a> utility.
+See the <a href="../utility/db_checkpoint.html">db_checkpoint</a> utility source code for an example of using txn_checkpoint
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
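+<p>For example, the following sketch (assuming an open environment
+<b>dbenv</b> and the usual system include files) checkpoints when there has
+been activity and more than 10 minutes have passed or more than 64 kilobytes
+of log data have been written since the last checkpoint, retrying while
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> is returned:
+<p><blockquote><pre>int ret;
+
+while ((ret = txn_checkpoint(dbenv, 64, 10, 0)) == DB_INCOMPLETE)
+    sleep(1);    /* Let the buffer pool flush catch up, then retry. */
+if (ret != 0)
+    dbenv-&gt;err(dbenv, ret, "txn_checkpoint");</pre></blockquote>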
+<h1>Errors</h1>
+<p>The txn_checkpoint function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The txn_checkpoint function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the txn_checkpoint function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/txn_commit.html b/bdb/docs/api_c/txn_commit.html
new file mode 100644
index 00000000000..7fca3d08d7b
--- /dev/null
+++ b/bdb/docs/api_c/txn_commit.html
@@ -0,0 +1,83 @@
+<!--$Id: txn_commit.so,v 10.27 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn_commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>txn_commit</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+txn_commit(DB_TXN *tid, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The txn_commit function ends the transaction. In the case of nested
+transactions, if the transaction is a parent transaction, committing
+the parent transaction causes all unresolved children of the parent to
+be committed.
+<p>In the case of nested transactions, if the transaction is a child
+transaction, its locks are not released, but are acquired by its parent.
+While the commit of the child transaction will succeed, the actual
+resolution of the child transaction is postponed until the parent
+transaction is committed or aborted, i.e., if its parent transaction
+commits, it will be committed, and if its parent transaction aborts, it
+will be aborted.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log. This means the transaction will
+exhibit the ACI (atomicity, consistency and isolation) properties, but
+not D (durability), i.e., database integrity will be maintained but it is
+possible that this transaction may be undone during recovery instead of
+being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of the
+<a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a> interface.
+<p><dt><a name="DB_TXN_SYNC">DB_TXN_SYNC</a><dd>Synchronously flush the log. This means the transaction will exhibit
+all of the ACID (atomicity, consistency, isolation and durability)
+properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>
+or <a href="../api_c/txn_begin.html">txn_begin</a> interfaces.
+</dl>
+<p>Once the txn_commit function returns, the DB_TXN handle may not
+be accessed again. If txn_commit encounters an error, the
+transaction and all child transactions of the transaction are aborted.
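+<p>For example, a transaction whose durability may be traded for
+throughput might be committed without synchronously flushing the log
+(a sketch, assuming <b>tid</b> was returned by <a href="../api_c/txn_begin.html">txn_begin</a>):
+<p><blockquote><pre>int ret;
+
+if ((ret = txn_commit(tid, DB_TXN_NOSYNC)) != 0)
+    return (ret);    /* The tid handle may no longer be accessed. */</pre></blockquote>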
+<p>The txn_commit function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The txn_commit function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the txn_commit function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/txn_id.html b/bdb/docs/api_c/txn_id.html
new file mode 100644
index 00000000000..bcda4bcdfff
--- /dev/null
+++ b/bdb/docs/api_c/txn_id.html
@@ -0,0 +1,50 @@
+<!--$Id: txn_id.so,v 10.12 1999/12/20 08:52:32 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn_id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>txn_id</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+u_int32_t
+txn_id(DB_TXN *tid);
+</pre></h3>
+<h1>Description</h1>
+<p>The txn_id function returns the unique transaction id associated with the
+specified transaction. Locking calls made on behalf of this transaction
+should use the value returned from txn_id as the locker parameter
+to the <a href="../api_c/lock_get.html">lock_get</a> or <a href="../api_c/lock_vec.html">lock_vec</a> calls.
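+<p>For example, the following sketch (assuming an open environment
+<b>dbenv</b>, an active transaction <b>tid</b> and an initialized DBT
+<b>obj</b> naming the object to be locked) acquires a write lock on behalf
+of the transaction:
+<p><blockquote><pre>DB_LOCK lock;
+int ret;
+
+ret = lock_get(dbenv, txn_id(tid), 0, &amp;obj, DB_LOCK_WRITE, &amp;lock);</pre></blockquote>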
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/txn_prepare.html b/bdb/docs/api_c/txn_prepare.html
new file mode 100644
index 00000000000..549a6f074a0
--- /dev/null
+++ b/bdb/docs/api_c/txn_prepare.html
@@ -0,0 +1,63 @@
+<!--$Id: txn_prepare.so,v 10.17 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn_prepare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>txn_prepare</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+txn_prepare(DB_TXN *tid);
+</pre></h3>
+<h1>Description</h1>
+<p>The txn_prepare function initiates the first phase of a two-phase commit.
+<p>In a distributed transaction environment, Berkeley DB can be used as a local
+transaction manager. In this case, the distributed transaction manager
+must send <i>prepare</i> messages to each local manager. The local
+manager must then issue a txn_prepare and await its successful
+return before responding to the distributed transaction manager. Only
+after the distributed transaction manager receives successful responses
+from all of its <i>prepare</i> messages should it issue any
+<i>commit</i> messages.
+<p>In the case of nested transactions, preparing a parent transaction
+causes all unresolved children of the parent transaction to be prepared.
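+<p>For example, the local side of a two-phase commit might be sketched as
+follows (the flag <b>coordinator_says_commit</b> is a hypothetical stand-in
+for whatever decision message the distributed transaction manager delivers):
+<p><blockquote><pre>/* Phase 1: on receipt of the coordinator's prepare message. */
+if ((ret = txn_prepare(tid)) != 0)
+    return (ret);    /* Report failure to the coordinator. */
+
+/* ... acknowledge the prepare and await the coordinator's decision ... */
+
+/* Phase 2: resolve the transaction as the coordinator directs. */
+ret = coordinator_says_commit ? txn_commit(tid, 0) : txn_abort(tid);</pre></blockquote>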
+<p>The txn_prepare function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The txn_prepare function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the txn_prepare function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_c/txn_stat.html b/bdb/docs/api_c/txn_stat.html
new file mode 100644
index 00000000000..769283f93c2
--- /dev/null
+++ b/bdb/docs/api_c/txn_stat.html
@@ -0,0 +1,94 @@
+<!--$Id: txn_stat.so,v 10.27 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>txn_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+txn_stat(DB_ENV *env,
+ DB_TXN_STAT **statp, void *(*db_malloc)(size_t));
+</pre></h3>
+<h1>Description</h1>
+<p>The txn_stat function
+creates a statistical structure and copies a pointer to it into a
+user-specified memory location.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The transaction region statistics are stored in a structure of type
+DB_TXN_STAT. The following DB_TXN_STAT fields will be filled in:
+<p><dl compact>
+<dt><a href="../api_c/db_lsn.html">DB_LSN</a> st_last_ckp;<dd>The LSN of the last checkpoint.
+<dt><a href="../api_c/db_lsn.html">DB_LSN</a> st_pending_ckp;<dd>The LSN of any checkpoint that is currently in progress. If
+<b>st_pending_ckp</b> is the same as <b>st_last_ckp</b> there
+is no checkpoint in progress.
+<dt>time_t st_time_ckp;<dd>The time the last completed checkpoint finished (as the number of seconds
+since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) <b>time</b> interface).
+<dt>u_int32_t st_last_txnid;<dd>The last transaction ID allocated.
+<dt>u_int32_t st_maxtxns;<dd>The maximum number of active transactions possible.
+<dt>u_int32_t st_nactive;<dd>The number of transactions that are currently active.
+<dt>u_int32_t st_maxnactive;<dd>The maximum number of active transactions at any one time.
+<dt>u_int32_t st_nbegins;<dd>The number of transactions that have begun.
+<dt>u_int32_t st_naborts;<dd>The number of transactions that have aborted.
+<dt>u_int32_t st_ncommits;<dd>The number of transactions that have committed.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+<dt>DB_TXN_ACTIVE * st_txnarray;<dd>A pointer to an array of <b>st_nactive</b> DB_TXN_ACTIVE structures,
+describing the currently active transactions. The following fields of
+the DB_TXN_ACTIVE structure will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t txnid;<dd>The transaction ID as returned by <a href="../api_c/txn_begin.html">txn_begin</a>.
+<dt>u_int32_t parentid;<dd>The transaction ID of the parent transaction (or 0, if no parent).
+<dt><a href="../api_c/db_lsn.html">DB_LSN</a> lsn;<dd>The log sequence number of the transaction-begin record.
+</dl>
+</dl>
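+<p>For example, the following sketch (assuming an open environment
+<b>dbenv</b> and the usual system include files) prints the number of
+active transactions and releases the returned memory:
+<p><blockquote><pre>DB_TXN_STAT *statp;
+int ret;
+
+if ((ret = txn_stat(dbenv, &amp;statp, NULL)) != 0)
+    return (ret);
+printf("%lu active transactions (%lu at peak)\n",
+    (u_long)statp-&gt;st_nactive, (u_long)statp-&gt;st_maxnactive);
+
+/* The structure was allocated with malloc(3); freeing the single
+ * returned reference releases it. */
+free(statp);</pre></blockquote>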
+<p>The txn_stat function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The txn_stat function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the txn_stat function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DBENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">txn_abort</a>,
+<a href="../api_c/txn_begin.html">txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">txn_commit</a>,
+<a href="../api_c/txn_id.html">txn_id</a>,
+<a href="../api_c/txn_prepare.html">txn_prepare</a>
+and
+<a href="../api_c/txn_stat.html">txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/cxx_index.html b/bdb/docs/api_cxx/cxx_index.html
new file mode 100644
index 00000000000..1ba43a0f227
--- /dev/null
+++ b/bdb/docs/api_cxx/cxx_index.html
@@ -0,0 +1,148 @@
+<!--$Id: cxx_index.so,v 10.65 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: C++ Interface by Class</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>C++ Interface by Class</h1>
+<p><table border=1 align=center>
+<tr><th>Class</th><th>Method</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/dbenv_class.html">DbEnv</a></td><td><br></td><td>Berkeley DB Environment Class</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_close.html">DbEnv::close</a></td><td>Close an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_err.html">DbEnv::err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_err.html">DbEnv::errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_open.html">DbEnv::open</a></td><td>Open an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_remove.html">DbEnv::remove</a></td><td>Remove an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a></td><td>Set error message FILE *</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_feedback.html">DbEnv::set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_dir.html">DbEnv::set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a></td><td>Set maximum number of locks (<b>Deprecated</b>)</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a></td><td>Turn off mutual exclusion locking</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_pageyield.html">DbEnv::set_pageyield</a></td><td>Yield the processor on each page access</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_panicstate.html">DbEnv::set_panicstate</a></td><td>Reset panic state</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_rec_init.html">DbEnv::set_recovery_init</a></td><td>Set recovery initialization callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_region_init.html">DbEnv::set_region_init</a></td><td>Fault in shared regions on initial access</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_server.html">DbEnv::set_server</a></td><td>Establish server connection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_shm_key.html">DbEnv::set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tas_spins.html">DbEnv::set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tmp_dir.html">DbEnv::set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a></td><td>Set transaction abort recover function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_strerror.html">DbEnv::strerror</a></td><td>Error strings</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_get.html">DbEnv::lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_id.html">DbEnv::lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_archive.html">DbEnv::log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_compare.html">DbEnv::log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_file.html">DbEnv::log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_flush.html">DbEnv::log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_get.html">DbEnv::log_get</a></td><td>Get a log record</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_put.html">DbEnv::log_put</a></td><td>Write a log record</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_register.html">DbEnv::log_register</a></td><td>Register a file name with the log manager</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_stat.html">DbEnv::log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a></td><td>Unregister a file name with the log manager</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_register.html">DbEnv::memp_register</a></td><td>Register input/output functions for a file in a buffer pool.</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a></td><td>Return buffer pool statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a></td><td>Flush pages from a buffer pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a></td><td>Trickle flush pages from a buffer pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_version.html">DbEnv::version</a></td><td>Return version information</td></tr>
+<tr><td><a href="../api_cxx/db_class.html">Db</a></td><td><br></td><td>Berkeley DB Access Method Class</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_close.html">Db::close</a></td><td>Close a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_cursor.html">Db::cursor</a></td><td>Open a cursor into a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_del.html">Db::del</a></td><td>Delete items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_err.html">Db::err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_err.html">Db::errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_fd.html">Db::fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get.html">Db::get</a></td><td>Get items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get_type.html">Db::get_type</a></td><td>Return the database type</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_join.html">Db::join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_key_range.html">Db::key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_open.html">Db::open</a></td><td>Open a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_put.html">Db::put</a></td><td>Store items into a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_remove.html">Db::remove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_rename.html">Db::rename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_append_recno.html">Db::set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a></td><td>Set error message FILE *</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_feedback.html">Db::set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_flags.html">Db::set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a></td><td>Set a local space allocation function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a></td><td>Set a local space allocation function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_stat.html">Db::stat</a></td><td>Return database statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_sync.html">Db::sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_upgrade.html">Db::upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_verify.html">Db::verify</a></td><td>Verify/upgrade a database</td></tr>
+<tr><td><a href="../api_cxx/dbc_class.html">Dbc</a></td><td><br></td><td>Berkeley DB Cursor Class</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_close.html">Dbc::close</a></td><td>Close a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_count.html">Dbc::count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_del.html">Dbc::del</a></td><td>Delete by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_dup.html">Dbc::dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_get.html">Dbc::get</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_put.html">Dbc::put</a></td><td>Store by cursor</td></tr>
+<tr><td><a href="../api_cxx/dbt_class.html">Dbt</a></td><td><br></td><td>Key/Data Encoding Class</td></tr>
+<tr><td><a href="../api_cxx/lock_class.html">DbLock</a></td><td><br></td><td>Lock Class</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_put.html">DbLock::put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_cxx/lsn_class.html">DbLsn</a></td><td><br></td><td>Log Sequence Number Class</td></tr>
+<tr><td><a href="../api_cxx/mempfile_class.html">DbMpoolFile</a></td><td><br></td><td>Memory Pool File Class</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a></td><td>Close a file in a buffer pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a></td><td>Get page from a file in a buffer pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a></td><td>Open a file in a buffer pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a></td><td>Return a page to a buffer pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a></td><td>Modify meta information for buffer pool page</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a></td><td>Flush pages from a file in a buffer pool</td></tr>
+<tr><td><a href="../api_cxx/txn_class.html">DbTxn</a></td><td><br></td><td>Transaction Class</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_abort.html">DbTxn::abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_commit.html">DbTxn::commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_id.html">DbTxn::id</a></td><td>Return a transaction ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><a href="../api_cxx/except_class.html">DbException</a></td><td><br></td><td>Exception Class for Berkeley DB Activity</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/get_errno.html">DbException::get_errno</a></td><td>Get the error value</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/what.html">DbException::what</a></td><td>Get the error string</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
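The table above indexes the C++ classes and methods (DbEnv, Db, Dbt, Dbc, DbTxn, DbException) that the pages under api_cxx/ document individually. Purely as orientation, here is a minimal sketch of how a few of those calls fit together under the DB 3.x-era C++ interface these docs describe; the file name example.db, the key/data strings, and the build note are illustrative assumptions, not taken from the docs themselves.

#include <db_cxx.h>
#include <iostream>

int
main()
{
	try {
		/* Db handle without a DbEnv; second argument is flags. */
		Db db(0, 0);
		/* DB 3.x Db::open: (file, database, type, flags, mode). */
		db.open("example.db", 0, DB_BTREE, DB_CREATE, 0664);

		/* Sizes include the terminating NUL so strings read back cleanly. */
		Dbt key((void *)"fruit", 6);
		Dbt data((void *)"apple", 6);
		db.put(0, &key, &data, 0);			/* Db::put */

		Dbt result;
		if (db.get(0, &key, &result, 0) == 0)		/* Db::get */
			std::cout << (char *)result.get_data() << std::endl;

		Dbc *dbc;
		db.cursor(0, &dbc, 0);				/* Db::cursor */
		while (dbc->get(&key, &result, DB_NEXT) == 0)	/* Dbc::get */
			std::cout << (char *)key.get_data() << " -> "
			    << (char *)result.get_data() << std::endl;
		dbc->close();					/* Dbc::close */

		db.close(0);					/* Db::close */
	} catch (DbException &e) {
		/* DbException::what returns the error string. */
		std::cerr << e.what() << std::endl;
		return (1);
	}
	return (0);
}

Built against the library added in this tree it would typically link with something like -ldb_cxx, but treat the exact flags as an assumption: they depend on how the build_unix/build_win32 projects above are configured.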
diff --git a/bdb/docs/api_cxx/cxx_pindex.html b/bdb/docs/api_cxx/cxx_pindex.html
new file mode 100644
index 00000000000..d460fcf65c4
--- /dev/null
+++ b/bdb/docs/api_cxx/cxx_pindex.html
@@ -0,0 +1,516 @@
+<html>
+<head>
+<title>Berkeley DB: C++ Interface Index</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>C++ Interface Index</h1>
+<center>
+<table cellspacing=0 cellpadding=0>
+<tr><td align=right> configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#5">1.85</a> API compatibility</td></tr>
+<tr><td align=right> building a utility to dump Berkeley DB </td><td><a href="../ref/build_unix/conf.html#7">1.85</a> databases</td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.2.0/intro.html#2">2.0</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.0/intro.html#2">3.0</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.1/intro.html#2">3.1</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.2/intro.html#2">3.2</a></td></tr>
+<tr><td align=right> selecting an </td><td><a href="../ref/am_conf/select.html#2">access</a> method</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/intro.html#2">access</a> methods</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/aix.html#2">AIX</a></td></tr>
+<tr><td align=right> data </td><td><a href="../api_cxx/dbt_class.html#6">alignment</a></td></tr>
+<tr><td align=right> programmatic </td><td><a href="../ref/arch/apis.html#2">APIs</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_archive.html#3">archive</a> log files</td></tr>
+<tr><td align=right> </td><td><a href="../utility/berkeley_db_svc.html#2">berkeley_db_svc</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/intro.html#2">building</a> for UNIX</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/notes.html#2">building</a> for UNIX FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/intro.html#2">building</a> for VxWorks</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/faq.html#2">building</a> for VxWorks FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/intro.html#2">building</a> for Win32</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/faq.html#2">building</a> for Windows FAQ</td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/byteorder.html#2">byte</a> order</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/byteorder.html#2">byte</a> ordering</td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#6">C++</a> API</td></tr>
+<tr><td align=right> flushing the database </td><td><a href="../ref/am/sync.html#2">cache</a></td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/cachesize.html#2">cache</a> size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/archival.html#3">catastrophic</a> recovery</td></tr>
+<tr><td align=right>Patches, Updates and </td><td><a href="http://www.sleepycat.com/update/index.html">Change</a> logs</td></tr>
+<tr><td align=right> utility to take </td><td><a href="../utility/db_checkpoint.html#3">checkpoints</a></td></tr>
+<tr><td align=right>DbMpoolFile::open</td><td><a href="../api_cxx/memp_fopen.html#clear_len">clear_len</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curclose.html#2">closing</a> a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/close.html#2">closing</a> a database</td></tr>
+<tr><td align=right> specifying a Btree </td><td><a href="../ref/am_conf/bt_compare.html#2">comparison</a> function</td></tr>
+<tr><td align=right> changing </td><td><a href="../ref/build_unix/flags.html#2">compile</a> or load options</td></tr>
+<tr><td align=right> </td><td><a href="../ref/cam/intro.html#2">Concurrent</a> Data Store</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/conf.html#2">configuring</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right> recovering </td><td><a href="../ref/am/verify.html#4">corrupted</a> databases</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/count.html#2">counting</a> data items for a key</td></tr>
+<tr><td align=right> closing a </td><td><a href="../ref/am/curclose.html#3">cursor</a></td></tr>
+<tr><td align=right> deleting records with a </td><td><a href="../ref/am/curdel.html#3">cursor</a></td></tr>
+<tr><td align=right> duplicating a </td><td><a href="../ref/am/curdup.html#3">cursor</a></td></tr>
+<tr><td align=right> retrieving records with a </td><td><a href="../ref/am/curget.html#3">cursor</a></td></tr>
+<tr><td align=right> storing records with a </td><td><a href="../ref/am/curput.html#3">cursor</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/stability.html#2">cursor</a> stability</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/cursor.html#2">cursors</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_cxx/dbt_class.html#data">data</a></td></tr>
+<tr><td align=right> utility to upgrade </td><td><a href="../utility/db_upgrade.html#3">database</a> files</td></tr>
+<tr><td align=right> utility to verify </td><td><a href="../utility/db_verify.html#3">database</a> files</td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_class.html#2">Db</a></td></tr>
+<tr><td align=right>Dbc::put</td><td><a href="../api_cxx/dbc_put.html#DB_AFTER">DB_AFTER</a></td></tr>
+<tr><td align=right>Db::verify</td><td><a href="../api_cxx/db_verify.html#DB_AGGRESSIVE">DB_AGGRESSIVE</a></td></tr>
+<tr><td align=right>Db::put</td><td><a href="../api_cxx/db_put.html#DB_APPEND">DB_APPEND</a></td></tr>
+<tr><td align=right>DbEnv::log_archive</td><td><a href="../api_cxx/log_archive.html#DB_ARCH_ABS">DB_ARCH_ABS</a></td></tr>
+<tr><td align=right>DbEnv::log_archive</td><td><a href="../api_cxx/log_archive.html#DB_ARCH_DATA">DB_ARCH_DATA</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_archive.html#2">db_archive</a></td></tr>
+<tr><td align=right>DbEnv::log_archive</td><td><a href="../api_cxx/log_archive.html#DB_ARCH_LOG">DB_ARCH_LOG</a></td></tr>
+<tr><td align=right>Dbc::put</td><td><a href="../api_cxx/dbc_put.html#DB_BEFORE">DB_BEFORE</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbc_class.html#2">Dbc</a></td></tr>
+<tr><td align=right>Db::stat</td><td><a href="../api_cxx/db_stat.html#DB_CACHED_COUNTS">DB_CACHED_COUNTS</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbc_close.html#2">Dbc::close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbc_count.html#2">Dbc::count</a></td></tr>
+<tr><td align=right>DbEnv::set_flags</td><td><a href="../api_cxx/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbc_del.html#2">Dbc::del</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbc_dup.html#2">Dbc::dup</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbc_get.html#2">Dbc::get</a></td></tr>
+<tr><td align=right>DbEnv::log_get</td><td><a href="../api_cxx/log_get.html#DB_CHECKPOINT">DB_CHECKPOINT</a></td></tr>
+<tr><td align=right>DbEnv::log_put</td><td><a href="../api_cxx/log_put.html#DB_CHECKPOINT">DB_CHECKPOINT</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_checkpoint.html#2">db_checkpoint</a></td></tr>
+<tr><td align=right>DbEnv</td><td><a href="../api_cxx/dbenv_class.html#DB_CLIENT">DB_CLIENT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_close.html#2">Db::close</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#DB_CONFIG">DB_CONFIG</a></td></tr>
+<tr><td align=right>Db::get</td><td><a href="../api_cxx/db_get.html#DB_CONSUME">DB_CONSUME</a></td></tr>
+<tr><td align=right>Db::get</td><td><a href="../api_cxx/db_get.html#DB_CONSUME_WAIT">DB_CONSUME_WAIT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbc_put.html#2">Dbc::put</a></td></tr>
+<tr><td align=right>Db::open</td><td><a href="../api_cxx/db_open.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DbMpoolFile::open</td><td><a href="../api_cxx/memp_fopen.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DbEnv::log_put</td><td><a href="../api_cxx/log_put.html#DB_CURLSN">DB_CURLSN</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right>Dbc::put</td><td><a href="../api_cxx/dbc_put.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right>DbEnv::log_get</td><td><a href="../api_cxx/log_get.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_cursor.html#2">Db::cursor</a></td></tr>
+<tr><td align=right>Db</td><td><a href="../api_cxx/db_class.html#DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a></td></tr>
+<tr><td align=right>DbEnv</td><td><a href="../api_cxx/dbenv_class.html#DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_cxx/dbt_class.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_deadlock.html#2">db_deadlock</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_del.html#2">Db::del</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_dump.html#2">db_dump</a></td></tr>
+<tr><td align=right>Db::set_flags</td><td><a href="../api_cxx/db_set_flags.html#DB_DUP">DB_DUP</a></td></tr>
+<tr><td align=right>Db::set_flags</td><td><a href="../api_cxx/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a></td></tr>
+<tr><td align=right>Db::upgrade</td><td><a href="../api_cxx/db_upgrade.html#DB_DUPSORT">DB_DUPSORT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbenv_class.html#2">DbEnv</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_close.html#2">DbEnv::close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_err.html#2">DbEnv::err</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/lock_detect.html#2">DbEnv::lock_detect</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/lock_get.html#2">DbEnv::lock_get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/lock_id.html#2">DbEnv::lock_id</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/lock_stat.html#2">DbEnv::lock_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/lock_vec.html#2">DbEnv::lock_vec</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_archive.html#2">DbEnv::log_archive</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_compare.html#2">DbEnv::log_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_file.html#2">DbEnv::log_file</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_flush.html#2">DbEnv::log_flush</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_get.html#2">DbEnv::log_get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_put.html#2">DbEnv::log_put</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_register.html#2">DbEnv::log_register</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_stat.html#2">DbEnv::log_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/log_unregister.html#2">DbEnv::log_unregister</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_register.html#2">DbEnv::memp_register</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_stat.html#2">DbEnv::memp_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_sync.html#2">DbEnv::memp_sync</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_trickle.html#2">DbEnv::memp_trickle</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_open.html#2">DbEnv::open</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_remove.html#2">DbEnv::remove</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_cachesize.html#2">DbEnv::set_cachesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_data_dir.html#2">DbEnv::set_data_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_errcall.html#2">DbEnv::set_errcall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_errfile.html#2">DbEnv::set_errfile</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_error_stream.html#2">DbEnv::set_error_stream</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_errpfx.html#2">DbEnv::set_errpfx</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_feedback.html#2">DbEnv::set_feedback</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_flags.html#2">DbEnv::set_flags</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lg_bsize.html#2">DbEnv::set_lg_bsize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lg_dir.html#2">DbEnv::set_lg_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lg_max.html#2">DbEnv::set_lg_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lk_conflicts.html#2">DbEnv::set_lk_conflicts</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lk_detect.html#2">DbEnv::set_lk_detect</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lk_max.html#2">DbEnv::set_lk_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lk_max_lockers.html#2">DbEnv::set_lk_max_lockers</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lk_max_locks.html#2">DbEnv::set_lk_max_locks</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_lk_max_objects.html#2">DbEnv::set_lk_max_objects</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_mp_mmapsize.html#2">DbEnv::set_mp_mmapsize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_mutexlocks.html#2">DbEnv::set_mutexlocks</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_pageyield.html#2">DbEnv::set_pageyield</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_paniccall.html#2">DbEnv::set_paniccall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_panicstate.html#2">DbEnv::set_panicstate</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_rec_init.html#2">DbEnv::set_recovery_init</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_region_init.html#2">DbEnv::set_region_init</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_server.html#2">DbEnv::set_server</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_shm_key.html#2">DbEnv::set_shm_key</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_tas_spins.html#2">DbEnv::set_tas_spins</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_tmp_dir.html#2">DbEnv::set_tmp_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_tx_max.html#2">DbEnv::set_tx_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_tx_recover.html#2">DbEnv::set_tx_recover</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_tx_timestamp.html#2">DbEnv::set_tx_timestamp</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_set_verbose.html#2">DbEnv::set_verbose</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_strerror.html#2">DbEnv::strerror</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/txn_begin.html#2">DbEnv::txn_begin</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/txn_checkpoint.html#2">DbEnv::txn_checkpoint</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/txn_stat.html#2">DbEnv::txn_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/env_version.html#2">DbEnv::version</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/except_class.html#2">DbException</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/get_errno.html#2">DbException::get_errno</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/what.html#2">DbException::what</a></td></tr>
+<tr><td align=right>Db::open</td><td><a href="../api_cxx/db_open.html#DB_EXCL">DB_EXCL</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_fd.html#2">Db::fd</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>DbEnv::log_get</td><td><a href="../api_cxx/log_get.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>DbEnv::log_put</td><td><a href="../api_cxx/log_put.html#DB_FLUSH">DB_FLUSH</a></td></tr>
+<tr><td align=right>DbEnv::remove</td><td><a href="../api_cxx/env_remove.html#DB_FORCE">DB_FORCE</a></td></tr>
+<tr><td align=right>DbEnv::txn_checkpoint</td><td><a href="../api_cxx/txn_checkpoint.html#DB_FORCE">DB_FORCE</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_get.html#2">Db::get</a></td></tr>
+<tr><td align=right>Db::get</td><td><a href="../api_cxx/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_GET_BOTH">DB_GET_BOTH</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_get_byteswapped.html#2">Db::get_byteswapped</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_GET_RECNO">DB_GET_RECNO</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_get_type.html#2">Db::get_type</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#DB_HOME">DB_HOME</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#db_home">db_home</a></td></tr>
+<tr><td align=right> Db::close </td><td><a href="../api_cxx/db_close.html#3">DB_INCOMPLETE</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_join.html#2">Db::join</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_JOINENV">DB_JOINENV</a></td></tr>
+<tr><td align=right>Db::join</td><td><a href="../api_cxx/db_join.html#DB_JOIN_ITEM">DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_JOIN_ITEM">DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>Db::join</td><td><a href="../api_cxx/db_join.html#DB_JOIN_NOSORT">DB_JOIN_NOSORT</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a></td></tr>
+<tr><td align=right>Dbc::put</td><td><a href="../api_cxx/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a></td></tr>
+<tr><td align=right>Dbc::put</td><td><a href="../api_cxx/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_key_range.html#2">Db::key_range</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_LAST">DB_LAST</a></td></tr>
+<tr><td align=right>DbEnv::log_get</td><td><a href="../api_cxx/log_get.html#DB_LAST">DB_LAST</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_load.html#2">db_load</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/lock_class.html#2">DbLock</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect</td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_CONFLICT">DB_LOCK_CONFLICT</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect</td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_LOCKDOWN">DB_LOCKDOWN</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_GET">DB_LOCK_GET</a></td></tr>
+<tr><td align=right>DbEnv::lock_get</td><td><a href="../api_cxx/lock_get.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>DbEnv::lock_get</td><td><a href="../api_cxx/lock_get.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect</td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_OLDEST">DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/lock_put.html#2">DbLock::put</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_PUT">DB_LOCK_PUT</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_PUT_ALL">DB_LOCK_PUT_ALL</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_PUT_OBJ">DB_LOCK_PUT_OBJ</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect</td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_RANDOM">DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect</td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/lsn_class.html#2">DbLsn</a></td></tr>
+<tr><td align=right>DbMpoolFile::put</td><td><a href="../api_cxx/memp_fput.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a></td></tr>
+<tr><td align=right>DbMpoolFile::set</td><td><a href="../api_cxx/memp_fset.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a></td></tr>
+<tr><td align=right>DbMpoolFile::get</td><td><a href="../api_cxx/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a></td></tr>
+<tr><td align=right>DbMpoolFile::put</td><td><a href="../api_cxx/memp_fput.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a></td></tr>
+<tr><td align=right>DbMpoolFile::set</td><td><a href="../api_cxx/memp_fset.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a></td></tr>
+<tr><td align=right>DbMpoolFile::put</td><td><a href="../api_cxx/memp_fput.html#DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a></td></tr>
+<tr><td align=right>DbMpoolFile::set</td><td><a href="../api_cxx/memp_fset.html#DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/mempfile_class.html#2">DbMpoolFile</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_fclose.html#2">DbMpoolFile::close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_fget.html#2">DbMpoolFile::get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_fopen.html#2">DbMpoolFile::open</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_fput.html#2">DbMpoolFile::put</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_fset.html#2">DbMpoolFile::set</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/memp_fsync.html#2">DbMpoolFile::sync</a></td></tr>
+<tr><td align=right>DbMpoolFile::get</td><td><a href="../api_cxx/memp_fget.html#DB_MPOOL_LAST">DB_MPOOL_LAST</a></td></tr>
+<tr><td align=right>DbMpoolFile::get</td><td><a href="../api_cxx/memp_fget.html#DB_MPOOL_NEW">DB_MPOOL_NEW</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>DbEnv::log_get</td><td><a href="../api_cxx/log_get.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_NEXT_DUP">DB_NEXT_DUP</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_NEXT_NODUP">DB_NEXT_NODUP</a></td></tr>
+<tr><td align=right>Db::put</td><td><a href="../api_cxx/db_put.html#DB_NODUPDATA">DB_NODUPDATA</a></td></tr>
+<tr><td align=right>Dbc::put</td><td><a href="../api_cxx/dbc_put.html#DB_NODUPDATA">DB_NODUPDATA</a></td></tr>
+<tr><td align=right>Db::open</td><td><a href="../api_cxx/db_open.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DbEnv::set_flags</td><td><a href="../api_cxx/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DbMpoolFile::open</td><td><a href="../api_cxx/memp_fopen.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>Db::verify</td><td><a href="../api_cxx/db_verify.html#DB_NOORDERCHK">DB_NOORDERCHK</a></td></tr>
+<tr><td align=right>Db::put</td><td><a href="../api_cxx/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a></td></tr>
+<tr><td align=right>DbEnv::set_server</td><td><a href="../api_cxx/env_set_server.html#DB_NOSERVER">DB_NOSERVER</a></td></tr>
+<tr><td align=right>DbEnv::set_server</td><td><a href="../api_cxx/env_set_server.html#DB_NOSERVER_ID">DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>Db::close</td><td><a href="../api_cxx/db_close.html#DB_NOSYNC">DB_NOSYNC</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a></td></tr>
+<tr><td align=right>Db::open</td><td><a href="../api_cxx/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>Db::upgrade</td><td><a href="../api_cxx/db_upgrade.html#DB_OLD_VERSION">DB_OLD_VERSION</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_open.html#2">Db::open</a></td></tr>
+<tr><td align=right>Db::verify</td><td><a href="../api_cxx/db_verify.html#DB_ORDERCHKONLY">DB_ORDERCHKONLY</a></td></tr>
+<tr><td align=right>Dbc::dup</td><td><a href="../api_cxx/dbc_dup.html#DB_POSITION">DB_POSITION</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_PREV">DB_PREV</a></td></tr>
+<tr><td align=right>DbEnv::log_get</td><td><a href="../api_cxx/log_get.html#DB_PREV">DB_PREV</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_PREV_NODUP">DB_PREV_NODUP</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_printlog.html#2">db_printlog</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_PRIVATE">DB_PRIVATE</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_put.html#2">Db::put</a></td></tr>
+<tr><td align=right>Db::open</td><td><a href="../api_cxx/db_open.html#DB_RDONLY">DB_RDONLY</a></td></tr>
+<tr><td align=right>DbMpoolFile::open</td><td><a href="../api_cxx/memp_fopen.html#DB_RDONLY">DB_RDONLY</a></td></tr>
+<tr><td align=right>Db::set_flags</td><td><a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a></td></tr>
+<tr><td align=right>Db::stat</td><td><a href="../api_cxx/db_stat.html#DB_RECORDCOUNT">DB_RECORDCOUNT</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_RECOVER">DB_RECOVER</a></td></tr>
+<tr><td align=right>DbEnv::set_feedback</td><td><a href="../api_cxx/env_set_feedback.html#DB_RECOVER">DB_RECOVER</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_recover.html#2">db_recover</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_remove.html#2">Db::remove</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_rename.html#2">Db::rename</a></td></tr>
+<tr><td align=right>Db::set_flags</td><td><a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a></td></tr>
+<tr><td align=right>Db::set_flags</td><td><a href="../api_cxx/db_set_flags.html#DB_REVSPLITOFF">DB_REVSPLITOFF</a></td></tr>
+<tr><td align=right>Db::get</td><td><a href="../api_cxx/db_get.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>Db::join</td><td><a href="../api_cxx/db_join.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a></td></tr>
+<tr><td align=right>Db::verify</td><td><a href="../api_cxx/db_verify.html#DB_SALVAGE">DB_SALVAGE</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_SET">DB_SET</a></td></tr>
+<tr><td align=right>DbEnv::log_get</td><td><a href="../api_cxx/log_get.html#DB_SET">DB_SET</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_append_recno.html#2">Db::set_append_recno</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_bt_compare.html#2">Db::set_bt_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_bt_minkey.html#2">Db::set_bt_minkey</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_bt_prefix.html#2">Db::set_bt_prefix</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_cachesize.html#2">Db::set_cachesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_dup_compare.html#2">Db::set_dup_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_errcall.html#2">Db::set_errcall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_errfile.html#2">Db::set_errfile</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_errpfx.html#2">Db::set_errpfx</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_feedback.html#2">Db::set_feedback</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_flags.html#2">Db::set_flags</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_h_ffactor.html#2">Db::set_h_ffactor</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_h_hash.html#2">Db::set_h_hash</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_h_nelem.html#2">Db::set_h_nelem</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_lorder.html#2">Db::set_lorder</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_malloc.html#2">Db::set_malloc</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_pagesize.html#2">Db::set_pagesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_paniccall.html#2">Db::set_paniccall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_q_extentsize.html#2">Db::set_q_extentsize</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_realloc.html#2">Db::set_realloc</a></td></tr>
+<tr><td align=right>Db::get</td><td><a href="../api_cxx/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a></td></tr>
+<tr><td align=right>Dbc::get</td><td><a href="../api_cxx/dbc_get.html#DB_SET_RECNO">DB_SET_RECNO</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_re_delim.html#2">Db::set_re_delim</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_re_len.html#2">Db::set_re_len</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_re_pad.html#2">Db::set_re_pad</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_set_re_source.html#2">Db::set_re_source</a></td></tr>
+<tr><td align=right>Db::set_flags</td><td><a href="../api_cxx/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_stat.html#2">Db::stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_stat.html#2">db_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_sync.html#2">Db::sync</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbt_class.html#2">Dbt</a></td></tr>
+<tr><td align=right>Db::open</td><td><a href="../api_cxx/db_open.html#DB_THREAD">DB_THREAD</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a></td></tr>
+<tr><td align=right>Db::open</td><td><a href="../api_cxx/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/txn_class.html#2">DbTxn</a></td></tr>
+<tr><td align=right>DbEnv::set_tx_recover</td><td><a href="../api_cxx/env_set_tx_recover.html#DB_TXN_ABORT">DB_TXN_ABORT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/txn_abort.html#2">DbTxn::abort</a></td></tr>
+<tr><td align=right>DbEnv::set_tx_recover</td><td><a href="../api_cxx/env_set_tx_recover.html#DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/txn_commit.html#2">DbTxn::commit</a></td></tr>
+<tr><td align=right>DbEnv::set_tx_recover</td><td><a href="../api_cxx/env_set_tx_recover.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/txn_id.html#2">DbTxn::id</a></td></tr>
+<tr><td align=right>DbEnv::set_flags</td><td><a href="../api_cxx/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbEnv::txn_begin</td><td><a href="../api_cxx/txn_begin.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbTxn::commit</td><td><a href="../api_cxx/txn_commit.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbEnv::txn_begin</td><td><a href="../api_cxx/txn_begin.html#DB_TXN_NOWAIT">DB_TXN_NOWAIT</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/txn_prepare.html#2">DbTxn::prepare</a></td></tr>
+<tr><td align=right>DbEnv::txn_begin</td><td><a href="../api_cxx/txn_begin.html#DB_TXN_SYNC">DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DbTxn::commit</td><td><a href="../api_cxx/txn_commit.html#DB_TXN_SYNC">DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>Db::set_feedback</td><td><a href="../api_cxx/db_set_feedback.html#DB_UPGRADE">DB_UPGRADE</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_upgrade.html#2">Db::upgrade</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_upgrade.html#2">db_upgrade</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DbEnv::remove</td><td><a href="../api_cxx/env_remove.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DbEnv::open</td><td><a href="../api_cxx/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DbEnv::remove</td><td><a href="../api_cxx/env_remove.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose</td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_CHKPOINT">DB_VERB_CHKPOINT</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose</td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_DEADLOCK">DB_VERB_DEADLOCK</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose</td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_RECOVERY">DB_VERB_RECOVERY</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose</td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_WAITSFOR">DB_VERB_WAITSFOR</a></td></tr>
+<tr><td align=right>Db::set_feedback</td><td><a href="../api_cxx/db_set_feedback.html#DB_VERIFY">DB_VERIFY</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/db_verify.html#2">Db::verify</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_verify.html#2">db_verify</a></td></tr>
+<tr><td align=right>Db::cursor</td><td><a href="../api_cxx/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a></td></tr>
+<tr><td align=right>Db</td><td><a href="../api_cxx/db_class.html#DB_XA_CREATE">DB_XA_CREATE</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/dead.html#2">deadlocks</a></td></tr>
+<tr><td align=right> utility to detect </td><td><a href="../utility/db_deadlock.html#3">deadlocks</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/debug/common.html#2">debugging</a> applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/delete.html#2">deleting</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curdel.html#2">deleting</a> records with a cursor</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--disable-bigfile">--disable-bigfile</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/diskspace.html#2">disk</a> space requirements</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_dump.html#3">dump</a> databases as text files</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/dup.html#2">duplicate</a> data items</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curdup.html#2">duplicating</a> a cursor</td></tr>
+<tr><td align=right> configuring </td><td><a href="../ref/build_unix/conf.html#9">dynamic</a> shared libraries</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-compat185">--enable-compat185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-cxx">--enable-cxx</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug">--enable-debug</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug_rop">--enable-debug_rop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug_wop">--enable-debug_wop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-dump185">--enable-dump185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-dynamic">--enable-dynamic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-java">--enable-java</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-rpc">--enable-rpc</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-shared">--enable-shared</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-tcl">--enable-tcl</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-test">--enable-test</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a></td></tr>
+<tr><td align=right> byte </td><td><a href="../ref/program/byteorder.html#3">endian</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/env/create.html#2">environment</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/environ.html#2">environment</a> variables</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/error.html#2">error</a> handling</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/errorret.html#3">error</a> name space</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/errorret.html#2">error</a> returns</td></tr>
+<tr><td align=right> </td><td><a href="../ref/install/file.html#2">/etc/magic</a></td></tr>
+<tr><td align=right> selecting a Queue </td><td><a href="../ref/am_conf/extentsize.html#2">extent</a> size</td></tr>
+<tr><td align=right> Java </td><td><a href="../ref/java/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right> Tcl </td><td><a href="../ref/tcl/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right> configuring without large </td><td><a href="../ref/build_unix/conf.html#4">file</a> support</td></tr>
+<tr><td align=right> </td><td><a href="../ref/install/file.html#3">file</a> utility</td></tr>
+<tr><td align=right>DbMpoolFile::open</td><td><a href="../api_cxx/memp_fopen.html#fileid">fileid</a></td></tr>
+<tr><td align=right> recovery and </td><td><a href="../ref/transapp/filesys.html#2">filesystem</a> operations</td></tr>
+<tr><td align=right> remote </td><td><a href="../ref/env/remote.html#2">filesystems</a></td></tr>
+<tr><td align=right> page </td><td><a href="../ref/am_conf/h_ffactor.html#2">fill</a> factor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/freebsd.html#2">FreeBSD</a></td></tr>
+<tr><td align=right> Berkeley DB </td><td><a href="../ref/program/scope.html#3">free-threaded</a> handles</td></tr>
+<tr><td align=right>DbMpoolFile::open</td><td><a href="../api_cxx/memp_fopen.html#ftype">ftype</a></td></tr>
+<tr><td align=right> specifying a database </td><td><a href="../ref/am_conf/h_hash.html#2">hash</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/h_nelem.html#2">hash</a> table size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/hpux.html#2">HP-UX</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/install.html#2">installing</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/compatible.html#2">interface</a> compatibility</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/irix.html#2">IRIX</a></td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#10">Java</a> API</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/compat.html#2">Java</a> compatibility</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/conf.html#2">Java</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/faq.html#3">Java</a> FAQ</td></tr>
+<tr><td align=right> logical </td><td><a href="../ref/am/join.html#2">join</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_cxx/dbt_class.html#3">key/data</a> pairs</td></tr>
+<tr><td align=right> retrieved </td><td><a href="../api_cxx/dbt_class.html#5">key/data</a> permanence</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/program/dbsizes.html#2">limits</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/linux.html#2">Linux</a></td></tr>
+<tr><td align=right> changing compile or </td><td><a href="../ref/build_unix/flags.html#3">load</a> options</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_load.html#3">load</a> text files into databases</td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#lock">lock</a></td></tr>
+<tr><td align=right> standard </td><td><a href="../ref/lock/stdmode.html#2">lock</a> modes</td></tr>
+<tr><td align=right> page-level </td><td><a href="../ref/lock/page.html#2">locking</a></td></tr>
+<tr><td align=right> two-phase </td><td><a href="../ref/lock/twopl.html#2">locking</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/nondb.html#2">locking</a> and non-Berkeley DB applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/config.html#2">locking</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/am_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right> Berkeley DB Concurrent Data Store </td><td><a href="../ref/lock/cam_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/intro.html#2">locking</a> introduction</td></tr>
+<tr><td align=right> sizing the </td><td><a href="../ref/lock/max.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/notxn.html#2">locking</a> without transactions</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/limits.html#2">log</a> file limits</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/logfile.html#2">log</a> file removal</td></tr>
+<tr><td align=right> utility to display </td><td><a href="../utility/db_printlog.html#3">log</a> files as text</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/config.html#2">logging</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/intro.html#2">logging</a> introduction</td></tr>
+<tr><td align=right>DbMpoolFile::open</td><td><a href="../api_cxx/memp_fopen.html#lsn_offset">lsn_offset</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/mp/config.html#2">memory</a> pool configuration</td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#mode">mode</a></td></tr>
+<tr><td align=right> Berkeley DB library </td><td><a href="../ref/program/namespace.html#2">name</a> spaces</td></tr>
+<tr><td align=right> file </td><td><a href="../ref/env/naming.html#2">naming</a></td></tr>
+<tr><td align=right> retrieving Btree records by </td><td><a href="../ref/am_conf/bt_recnum.html#2">number</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#obj">obj</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec</td><td><a href="../api_cxx/lock_vec.html#op">op</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/open.html#2">opening</a> a database</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/osf1.html#2">OSF/1</a></td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/pagesize.html#2">page</a> size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/partial.html#2">partial</a> record storage and retrieval</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/update/index.html">Patches,</a> Updates and Change logs</td></tr>
+<tr><td align=right> </td><td><a href="../ref/perl/intro.html#2">Perl</a></td></tr>
+<tr><td align=right> retrieved key/data </td><td><a href="../api_cxx/dbt_class.html#4">permanence</a></td></tr>
+<tr><td align=right>DbMpoolFile::open</td><td><a href="../api_cxx/memp_fopen.html#pgcookie">pgcookie</a></td></tr>
+<tr><td align=right> Sleepycat Software's Berkeley DB </td><td><a href="../ref/intro/products.html#2">products</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/qnx.html#2">QNX</a></td></tr>
+<tr><td align=right> logical </td><td><a href="../api_cxx/dbt_class.html#7">record</a> number format</td></tr>
+<tr><td align=right> logical </td><td><a href="../ref/am_conf/logrec.html#2">record</a> numbers</td></tr>
+<tr><td align=right> managing </td><td><a href="../ref/am_conf/recno.html#2">record-based</a> databases</td></tr>
+<tr><td align=right> logically renumbering </td><td><a href="../ref/am_conf/renumber.html#2">records</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_recover.html#3">recover</a> database environments</td></tr>
+<tr><td align=right> Berkeley DB </td><td><a href="../ref/transapp/reclimit.html#2">recoverability</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/get.html#2">retrieving</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curget.html#2">retrieving</a> records with a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/rpc/client.html#2">RPC</a> client</td></tr>
+<tr><td align=right> configuring a </td><td><a href="../ref/build_unix/conf.html#11">RPC</a> client/server</td></tr>
+<tr><td align=right> utility to support </td><td><a href="../utility/berkeley_db_svc.html#3">RPC</a> client/server</td></tr>
+<tr><td align=right> </td><td><a href="../ref/rpc/server.html#2">RPC</a> server</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/verify.html#3">salvage</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/sco.html#2">SCO</a></td></tr>
+<tr><td align=right> Berkeley DB handle </td><td><a href="../ref/program/scope.html#2">scope</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/env/security.html#2">security</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/sendmail/intro.html#2">Sendmail</a></td></tr>
+<tr><td align=right> configuring </td><td><a href="../ref/build_unix/conf.html#8">shared</a> libraries</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/shlib.html#2">shared</a> libraries</td></tr>
+<tr><td align=right> application </td><td><a href="../ref/program/appsignals.html#2">signal</a> handling</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/">Sleepycat</a> Software</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/solaris.html#2">Solaris</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/distrib/layout.html#2">source</a> code layout</td></tr>
+<tr><td align=right> cursor </td><td><a href="../ref/am/stability.html#3">stability</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/stat.html#2">statistics</a></td></tr>
+<tr><td align=right> utility to display database and environment </td><td><a href="../utility/db_stat.html#3">statistics</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/put.html#2">storing</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curput.html#2">storing</a> records with a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/sunos.html#2">SunOS</a></td></tr>
+<tr><td align=right> loading Berkeley DB with </td><td><a href="../ref/tcl/intro.html#2">Tcl</a></td></tr>
+<tr><td align=right> using Berkeley DB with </td><td><a href="../ref/tcl/using.html#2">Tcl</a></td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#12">Tcl</a> API</td></tr>
+<tr><td align=right> </td><td><a href="../ref/tcl/program.html#2">Tcl</a> API programming notes</td></tr>
+<tr><td align=right> </td><td><a href="../ref/tcl/faq.html#3">Tcl</a> FAQ</td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#13">test</a> suite</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/test/run.html#2">test</a> suite</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/build_unix/test.html#2">test</a> suite under UNIX</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/build_win/test.html#2">test</a> suite under Windows</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/re_source.html#2">text</a> backing files</td></tr>
+<tr><td align=right> loading </td><td><a href="../ref/dumpload/text.html#2">text</a> into databases</td></tr>
+<tr><td align=right> dumping/loading </td><td><a href="../ref/dumpload/utility.html#2">text</a> to/from databases</td></tr>
+<tr><td align=right> building </td><td><a href="../ref/program/mt.html#2">threaded</a> applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/txn/config.html#2">transaction</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/txn/limits.html#2">transaction</a> limits</td></tr>
+<tr><td align=right> administering </td><td><a href="../ref/transapp/admin.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> archival in </td><td><a href="../ref/transapp/archival.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> checkpoints in </td><td><a href="../ref/transapp/checkpoint.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> deadlock detection in </td><td><a href="../ref/transapp/deadlock.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> recovery in </td><td><a href="../ref/transapp/recovery.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/throughput.html#2">transaction</a> throughput</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/intro.html#2">Transactional</a> Data Store</td></tr>
+<tr><td align=right> Berkeley DB and </td><td><a href="../ref/txn/intro.html#2">transactions</a></td></tr>
+<tr><td align=right> nested </td><td><a href="../ref/txn/nested.html#2">transactions</a></td></tr>
+<tr><td align=right> configuring Berkeley DB with the </td><td><a href="../ref/xa/config.html#2">Tuxedo</a> System</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/ultrix.html#2">Ultrix</a></td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_unix/notes.html#3">UNIX</a> FAQ</td></tr>
+<tr><td align=right> configuring Berkeley DB for </td><td><a href="../ref/build_unix/conf.html#3">UNIX</a> systems</td></tr>
+<tr><td align=right>Patches, </td><td><a href="http://www.sleepycat.com/update/index.html">Updates</a> and Change logs</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_upgrade.html#4">upgrade</a> database files</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/upgrade.html#2">upgrading</a> databases</td></tr>
+<tr><td align=right> </td><td><a href="../ref/arch/utilities.html#2">utilities</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/verify.html#2">verification</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_verify.html#4">verify</a> database files</td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_vxworks/faq.html#3">VxWorks</a> FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/notes.html#2">VxWorks</a> notes</td></tr>
+<tr><td align=right> running the test suite under </td><td><a href="../ref/build_win/test.html#3">Windows</a></td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_win/faq.html#3">Windows</a> FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/notes.html#2">Windows</a> notes</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--with-tcl=DIR">--with-tcl=DIR</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/xa/intro.html#2">XA</a> Resource Manager</td></tr>
+</table>
+</center>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_class.html b/bdb/docs/api_cxx/db_class.html
new file mode 100644
index 00000000000..75296aeee61
--- /dev/null
+++ b/bdb/docs/api_cxx/db_class.html
@@ -0,0 +1,109 @@
+<!--$Id: db_class.so,v 10.23 2000/03/17 01:54:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class Db {
+public:
+ Db(DbEnv *dbenv, u_int32_t flags);
+ ~Db();
+ ...
+};
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the Db class,
+which is the center of access method activity.
+<p>If no <b>dbenv</b> value is specified, the database is standalone, i.e.,
+it is not part of any Berkeley DB environment.
+<p>If a <b>dbenv</b> value is specified, the database is created within the
+specified Berkeley DB environment. The database access methods automatically
+make calls to the other subsystems in Berkeley DB based on the enclosing
+environment. For example, if the environment has been configured to use
+locking, then the access methods will automatically acquire the correct
+locks when reading and writing pages of the database.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a><dd>The Berkeley DB C++ API supports two different error behaviors. By default,
+whenever an error occurs an exception is thrown that encapsulates the
+error information. This generally allows for cleaner logic for
+transaction processing, as a try block can surround a single transaction.
+However, if DB_CXX_NO_EXCEPTIONS is specified, exceptions are not
+thrown; instead, each individual method returns an error code.
+<p>If <b>dbenv</b> is not null, this flag is ignored and the error behavior
+of the specified environment is used instead.
+<p><dt><a name="DB_XA_CREATE">DB_XA_CREATE</a><dd>Instead of creating a standalone database, create a database intended to
+be accessed via applications running under an X/Open conformant Transaction
+Manager. The database will be opened in the environment specified by the
+OPENINFO parameter of the GROUPS section of the ubbconfig file. See the
+<a href="../ref/xa/intro.html">XA Resource Manager</a> chapter in the
+Reference Guide for more information.
+</dl>
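+<p>As an informal illustration only (not part of the interface description
+above), the following sketch creates and opens one standalone Db handle and
+one handle that is part of a Berkeley DB environment, using the
+<a href="../api_cxx/env_open.html">DbEnv::open</a> and
+<a href="../api_cxx/db_open.html">Db::open</a> methods documented elsewhere
+in this manual.  The environment home directory and database file names are
+hypothetical, and error handling is reduced to a single try block.
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+create_handles()
+{
+    try {
+        /* Standalone database: not part of any environment. */
+        Db dbstand(NULL, 0);
+        dbstand.open("standalone.db", NULL, DB_BTREE, DB_CREATE, 0664);
+        dbstand.close(0);
+
+        /* Database handle created within an enclosing environment. */
+        DbEnv dbenv(0);
+        dbenv.open("/var/dbhome", DB_CREATE | DB_INIT_MPOOL, 0);
+        Db dbenvdb(&amp;dbenv, 0);
+        dbenvdb.open("inside_env.db", NULL, DB_BTREE, DB_CREATE, 0664);
+        dbenvdb.close(0);
+        dbenv.close(0);
+    } catch (DbException &amp;e) {
+        return (e.get_errno());
+    }
+    return (0);
+}
+</pre></blockquote>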
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_close.html b/bdb/docs/api_cxx/db_close.html
new file mode 100644
index 00000000000..fdde15bdb67
--- /dev/null
+++ b/bdb/docs/api_cxx/db_close.html
@@ -0,0 +1,123 @@
+<!--$Id: db_close.so,v 10.27 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::close</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::close(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::close method flushes any cached database information to disk,
+closes any open cursors, frees any allocated resources, and closes any
+underlying files. Since key/data pairs are cached in memory, failing to
+sync the file with the Db::close or <a href="../api_cxx/db_sync.html">Db::sync</a> method may result
+in inconsistent or lost information.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_NOSYNC">DB_NOSYNC</a><dd>Do not flush cached information to disk.
+<p>The <a href="../api_cxx/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag is a dangerous option. It should only be set
+if the application is doing logging (with transactions) so that the
+database is recoverable after a system or application crash, or if the
+database is always generated from scratch after any system or application
+crash.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b>
+While unlikely, it is possible for database corruption to happen if a
+system or application crash occurs while writing data to the database.
+To ensure that database corruption never occurs, applications must do one
+of the following: use transactions and logging with automatic recovery;
+use logging and application-specific recovery; or edit a copy of the
+database and, once all applications using the database have successfully
+called Db::close, atomically replace the original database with the
+updated copy.
+</dl>
+<p>When multiple threads are using the Berkeley DB handle concurrently, only a single
+thread may call the Db::close method.
+<p>Once Db::close has been called, regardless of its return, the
+<a href="../api_cxx/db_class.html">Db</a> handle may not be accessed again.
+ <a name="3"><!--meow--></a>
+<p>The Db::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if the underlying database still has
+dirty pages in the cache. (The only reason to return
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> is if another thread of control was writing pages
+in the underlying database file at the same time as the
+Db::close method was called. For this reason, a return of
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> can normally be ignored, or, in cases where it is
+a possible return value, the <a href="../api_cxx/db_close.html#DB_NOSYNC">DB_NOSYNC</a> option should probably
+have been specified.)
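+<p>As an informal sketch only, and assuming <b>db</b> is a pointer to an
+open Db object, an application might close the handle as follows, allowing
+for either an error return or a thrown DbException, depending on which
+error model is in effect:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+close_database(Db *db)
+{
+    int ret;
+
+    try {
+        /*
+         * Flush any cached pages and close the handle; see the
+         * discussion of DB_NOSYNC and DB_INCOMPLETE above for the
+         * possible non-zero returns.
+         */
+        ret = db-&gt;close(0);
+    } catch (DbException &amp;e) {
+        ret = e.get_errno();
+    }
+    return (ret);
+}
+</pre></blockquote>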
+<h1>Errors</h1>
+<p>The Db::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::close method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_cursor.html b/bdb/docs/api_cxx/db_cursor.html
new file mode 100644
index 00000000000..b6954b9f329
--- /dev/null
+++ b/bdb/docs/api_cxx/db_cursor.html
@@ -0,0 +1,105 @@
+<!--$Id: db_cursor.so,v 10.25 2000/07/11 19:11:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::cursor</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::cursor method
+creates a cursor and copies a pointer to it into the memory referenced
+by <b>cursorp</b>.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>, otherwise, NULL.
+<p>If transaction protection is enabled, cursors must be opened and closed
+within the context of a transaction, and the <b>txnid</b> parameter
+specifies the transaction context in which the cursor may be used.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_WRITECURSOR">DB_WRITECURSOR</a><dd>Specify that the cursor will be used to update the database. This
+flag should <b>only</b> be set when the <a href="../api_cxx/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag
+was specified to <a href="../api_cxx/env_open.html">DbEnv::open</a>.
+</dl>
+<p>The Db::cursor method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
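+<p>The following fragment is an illustrative sketch only: it opens a cursor
+on an already-open Db handle <b>db</b>, walks the database with
+<a href="../api_cxx/dbc_get.html">Dbc::get</a> and the DB_NEXT flag, and
+closes the cursor before returning.  No transaction is used, so the
+<b>txnid</b> argument is NULL; errors are shown in the return-code style,
+and under the default error model a failure would instead throw a
+DbException.
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+walk_database(Db *db)
+{
+    Dbc *dbc;
+    Dbt key, data;
+    int ret;
+
+    /* No transaction: the txnid argument is NULL. */
+    if ((ret = db-&gt;cursor(NULL, &amp;dbc, 0)) != 0)
+        return (ret);
+
+    /* Step through every key/data pair in the database. */
+    while ((ret = dbc-&gt;get(&amp;key, &amp;data, DB_NEXT)) == 0) {
+        /* key.get_data() and data.get_data() describe the pair. */
+    }
+
+    /* DB_NOTFOUND simply marks the end of the database. */
+    (void)dbc-&gt;close();
+    return (ret == DB_NOTFOUND ? 0 : ret);
+}
+</pre></blockquote>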
+<h1>Errors</h1>
+<p>The Db::cursor method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db::cursor method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::cursor method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_del.html b/bdb/docs/api_cxx/db_del.html
new file mode 100644
index 00000000000..ec30c6ad01c
--- /dev/null
+++ b/bdb/docs/api_cxx/db_del.html
@@ -0,0 +1,104 @@
+<!--$Id: db_del.so,v 10.23 2000/09/05 19:35:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::del</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::del(DbTxn *txnid, Dbt *key, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::del method removes key/data pairs from the database. The
+key/data pair associated with the specified <b>key</b> is discarded from
+the database. In the presence of duplicate key values, all records
+associated with the designated key will be discarded.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>, otherwise, NULL.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The Db::del method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, 0 on success, and <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a> if the specified <b>key</b> did not exist in
+the file.
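+<p>As a brief, informal sketch (assuming <b>db</b> is an open,
+non-transactional Db handle and that keys were stored as nul-terminated
+strings), all records under a single key can be removed as follows; a
+DB_NOTFOUND return means no such key was present, and under the default
+error model other failures would throw a DbException:
+<p><blockquote><pre>
+#include &lt;string.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+delete_key(Db *db, const char *keystr)
+{
+    /* The key Dbt points at the caller's nul-terminated string. */
+    Dbt key((void *)keystr, (u_int32_t)strlen(keystr) + 1);
+
+    /* No transaction and no flags. */
+    return (db-&gt;del(NULL, &amp;key, 0));
+}
+</pre></blockquote>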
+<h1>Errors</h1>
+<p>The Db::del method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db::del method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::del method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_err.html b/bdb/docs/api_cxx/db_err.html
new file mode 100644
index 00000000000..fa0bccc3926
--- /dev/null
+++ b/bdb/docs/api_cxx/db_err.html
@@ -0,0 +1,94 @@
+<!--$Id: db_err.so,v 10.11 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::err</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::err</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+DbEnv::err(int error, const char *fmt, ...);
+<p>
+DbEnv::errx(const char *fmt, ...);
+<p>
+Db::err(int error, const char *fmt, ...);
+<p>
+Db::errx(const char *fmt, ...);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::err, DbEnv::errx, Db::err and
+Db::errx methods provide error messaging functionality for
+applications written using the Berkeley DB library.
+<p>The DbEnv::err method constructs an error message consisting of the
+following elements:
+<p><blockquote><p><dl compact>
+<p><dt>An optional prefix string<dd>If no error callback method has been set using the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method, any prefix string specified using the
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a> method, followed by two separating characters: a colon
+and a &lt;space&gt; character.
+<p><dt>An optional printf-style message<dd>The supplied message <b>fmt</b>, if non-NULL, with subsequent arguments
+converted for output as specified by the ANSI C X3.159-1989 (ANSI C)
+printf function.
+<p><dt>A separator<dd>Two separating characters: a colon and a &lt;space&gt; character.
+<p><dt>A standard error string<dd>The standard system or Berkeley DB library error string associated with the
+<b>error</b> value, as returned by the <a href="../api_cxx/env_strerror.html">DbEnv::strerror</a> method.
+</dl>
+</blockquote>
+<p>This constructed error message is then handled as follows:
+<p><blockquote>
+<p>If an error callback method has been set (see <a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>
+and <a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>), that method is called with two
+arguments: any prefix string specified (see <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> and
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), and the error message.
+<p>If a C library FILE * has been set (see <a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a> and
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>), the error message is written to that output
+stream.
+<p>If a C++ ostream has been set
+(see <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>), the error message is written to that
+stream.
+<p>If none of these output options has been configured, the error message
+is written to stderr, the standard error output stream.</blockquote>
+<p>The DbEnv::errx and Db::errx methods perform identically to the
+DbEnv::err and Db::err methods except that they do not append
+the final separator characters and standard error string to the error
+message.
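+<p>An informal usage sketch follows; the error prefix and file name are
+hypothetical.  After a failed operation that returned <b>ret</b>, an
+application might report the error through the environment's configured
+error channels:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+void
+report_open_failure(DbEnv *dbenv, int ret, const char *file)
+{
+    /*
+     * With the error prefix set to "myapp" by DbEnv::set_errpfx,
+     * this produces a message of the form:
+     *     myapp: open: mydb.db: No such file or directory
+     */
+    dbenv-&gt;err(ret, "open: %s", file);
+}
+</pre></blockquote>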
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_fd.html b/bdb/docs/api_cxx/db_fd.html
new file mode 100644
index 00000000000..1cb98fb6bc7
--- /dev/null
+++ b/bdb/docs/api_cxx/db_fd.html
@@ -0,0 +1,95 @@
+<!--$Id: db_fd.so,v 10.21 2000/03/01 21:41:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::fd</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::fd</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::fd(int *fdp);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::fd method
+copies a file descriptor representative of the underlying database into
+the memory referenced by <b>fdp</b>. A file descriptor referencing the
+same file will be returned to all processes that call <a href="../api_cxx/db_open.html">Db::open</a> with
+the same <b>file</b> argument. This file descriptor may be safely used
+as an argument to the <b>fcntl</b>(2) and <b>flock</b>(2) locking
+functions. The file descriptor is not necessarily associated with any of
+the underlying files actually used by the access method.
+<p>The Db::fd method only supports a coarse-grained form of locking.
+Applications should use the lock manager where possible.
+<p>The Db::fd method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
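+<p>The following is an informal sketch only, for systems providing the
+<b>flock</b>(2) interface: it retrieves the descriptor from an open Db
+handle and uses it to take an exclusive lock on the underlying file.  As
+noted above, this is a coarse-grained form of locking, and the Berkeley DB
+lock manager should be preferred where possible.
+<p><blockquote><pre>
+#include &lt;sys/file.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+lock_database_file(Db *db)
+{
+    int fd, ret;
+
+    if ((ret = db-&gt;fd(&amp;fd)) != 0)
+        return (ret);
+
+    /* Block until an exclusive lock on the file is available. */
+    if (flock(fd, LOCK_EX) != 0)
+        return (errno);
+    return (0);
+}
+</pre></blockquote>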
+<h1>Errors</h1>
+<p>The Db::fd method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::fd method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_get.html b/bdb/docs/api_cxx/db_get.html
new file mode 100644
index 00000000000..0cee5526b50
--- /dev/null
+++ b/bdb/docs/api_cxx/db_get.html
@@ -0,0 +1,158 @@
+<!--$Id: db_get.so,v 10.31 2000/11/28 20:12:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::get</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::get method retrieves key/data pairs from the database. The
+address
+and length of the data associated with the specified <b>key</b> are
+returned in the structure referenced by <b>data</b>.
+<p>In the presence of duplicate key values, Db::get will return the
+first data item for the designated key. Duplicates are sorted by insert
+order except where this order has been overridden by cursor operations.
+<b>Retrieval of duplicates requires the use of cursor operations.</b>
+See <a href="../api_cxx/dbc_get.html">Dbc::get</a> for details.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>, otherwise, NULL.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_CONSUME">DB_CONSUME</a><dd>Return the record number and data from the available record closest to
+the head of the queue and delete the record. The cursor will be
+positioned on the deleted record. The record number will be returned
+in <b>key</b> as described in <a href="../api_cxx/dbt_class.html">Dbt</a>. The data will be returned
+in the <b>data</b> parameter. A record is available if it is not
+deleted and is not currently locked. The underlying database must be
+of type Queue for DB_CONSUME to be specified.
+<p><dt><a name="DB_CONSUME_WAIT">DB_CONSUME_WAIT</a><dd>The DB_CONSUME_WAIT flag is the same as the DB_CONSUME
+flag except that if the Queue database is empty, the thread of control
+will wait until there is data in the queue before returning. The
+underlying database must be of type Queue for DB_CONSUME_WAIT
+to be specified.
+<p><dt><a name="DB_GET_BOTH">DB_GET_BOTH</a><dd>Retrieve the key/data pair only if both the key and data match the
+arguments.
+<p><dt><a name="DB_SET_RECNO">DB_SET_RECNO</a><dd>Retrieve the specified numbered key/data pair from a database.
+Upon return, both the <b>key</b> and <b>data</b> items will have been
+filled in, not just the data item as is done for all other uses of the
+Db::get method.
+<p>The <b>data</b> field of the specified <b>key</b>
+must be a pointer to a logical record number (i.e., a <b>db_recno_t</b>).
+This record number determines the record to be retrieved.
+<p>For DB_SET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the DB_RECNUM flag.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+<p>As the Db::get interface will not hold locks across
+Berkeley DB interface calls in non-transactional environments, the
+<a href="../api_cxx/dbc_get.html#DB_RMW">DB_RMW</a> flag to the Db::get call is only meaningful in
+the presence of transactions.
+</dl>
+<p>If the database is a Queue or Recno database and the requested key exists,
+but was never explicitly created by the application or was later deleted,
+the Db::get method returns <a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>Otherwise, if the requested key is not in the database, the
+Db::get function returns <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>Otherwise, the Db::get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
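+<p>The following fragment is a simple, informal sketch of the most common
+case: a single retrieval by key from an open, non-transactional Db handle
+<b>db</b>, with no special flags, assuming keys were stored as
+nul-terminated strings.  Because none of the DB_DBT_MALLOC,
+DB_DBT_REALLOC or DB_DBT_USERMEM flags is set, the returned data memory is
+managed by Berkeley DB, as described in the
+<a href="../api_cxx/dbt_class.html">Dbt</a> manual page.
+<p><blockquote><pre>
+#include &lt;string.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+fetch_record(Db *db, const char *keystr)
+{
+    Dbt key((void *)keystr, (u_int32_t)strlen(keystr) + 1);
+    Dbt data;
+    int ret;
+
+    switch (ret = db-&gt;get(NULL, &amp;key, &amp;data, 0)) {
+    case 0:
+        /* data.get_data() and data.get_size() describe the record. */
+        return (0);
+    case DB_NOTFOUND:
+        /* No record is stored under this key. */
+        return (ret);
+    default:
+        /* All other errors; may instead throw a DbException. */
+        return (ret);
+    }
+}
+</pre></blockquote>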
+<h1>Errors</h1>
+<p>The Db::get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>There was insufficient memory to return the requested item.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>The <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to the
+<a href="../api_cxx/db_open.html">Db::open</a> method and none of the <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>,
+<a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or <a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags were set in the
+<a href="../api_cxx/dbt_class.html">Dbt</a>.
+</dl>
+<p>The Db::get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::get method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_get_byteswapped.html b/bdb/docs/api_cxx/db_get_byteswapped.html
new file mode 100644
index 00000000000..5c661fa5776
--- /dev/null
+++ b/bdb/docs/api_cxx/db_get_byteswapped.html
@@ -0,0 +1,85 @@
+<!--$Id: db_get_byteswapped.so,v 10.7 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::get_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::get_byteswapped</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::get_byteswapped(void) const;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::get_byteswapped method returns
+0
+if the underlying database files were created on an architecture
+of the same byte order as the current one, and
+1
+if they were not (i.e., big-endian on a little-endian machine or
+vice-versa). This value may be used to determine whether application
+data needs to be adjusted for this architecture.
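+<p>An informal sketch, assuming <b>db</b> is an open Db handle whose
+application data contains fixed-size integers stored in the creating
+machine's native byte order:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+needs_swapping(Db *db)
+{
+    /*
+     * A non-zero return means the database was created on a machine
+     * of the opposite byte order, so such integer fields should be
+     * byte-swapped by the application after retrieval.
+     */
+    return (db-&gt;get_byteswapped());
+}
+</pre></blockquote>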
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_get_type.html b/bdb/docs/api_cxx/db_get_type.html
new file mode 100644
index 00000000000..755032390fc
--- /dev/null
+++ b/bdb/docs/api_cxx/db_get_type.html
@@ -0,0 +1,82 @@
+<!--$Id: db_get_type.so,v 10.10 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::get_type</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+DBTYPE
+Db::get_type(void) const;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::get_type method returns the type of the underlying access method
+(and file format). It returns one of DB_BTREE,
+DB_HASH or DB_RECNO. This value may be used to
+determine the type of the database after a return from <a href="../api_cxx/db_open.html">Db::open</a>
+with the <b>type</b> argument set to DB_UNKNOWN.
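+<p>An informal sketch, assuming <b>db</b> was opened with the <b>type</b>
+argument set to DB_UNKNOWN and the application now needs to branch on the
+access method actually found in the file:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+const char *
+access_method_name(Db *db)
+{
+    switch (db-&gt;get_type()) {
+    case DB_BTREE:
+        return ("Btree");
+    case DB_HASH:
+        return ("Hash");
+    case DB_RECNO:
+        return ("Recno");
+    default:
+        return ("unknown");
+    }
+}
+</pre></blockquote>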
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_join.html b/bdb/docs/api_cxx/db_join.html
new file mode 100644
index 00000000000..6767aaf769e
--- /dev/null
+++ b/bdb/docs/api_cxx/db_join.html
@@ -0,0 +1,153 @@
+<!--$Id: db_join.so,v 10.30 2000/12/20 15:34:50 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::join</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::join method creates a specialized cursor for use in performing
+joins on secondary indexes. For information on how to organize your data
+to use this functionality, see <a href="../ref/am/join.html">Logical
+join</a>.
+<p>The <a href="../api_cxx/db_class.html">Db</a> handle on which Db::join is called is the handle of the
+primary database, which is keyed by the data values found in entries in
+the <b>curslist</b>.
+<p>The <b>curslist</b> argument contains a NULL-terminated array of cursors.
+Each cursor must have been initialized to reference the key on which the
+underlying database should be joined. Typically, this initialization is done
+by a <a href="../api_cxx/dbc_get.html">Dbc::get</a> call with the <a href="../api_cxx/dbc_get.html#DB_SET">DB_SET</a> flag specified. Once the
+cursors have been passed as part of a <b>curslist</b>, they should not
+be accessed or modified until the newly created join cursor has been closed,
+or else inconsistent results may be returned.
+<p>Joined values are retrieved by doing a sequential iteration over the first
+cursor in the <b>curslist</b> argument, and a nested iteration over each
+secondary cursor in the order they are specified in the <b>curslist</b>
+argument. This requires database traversals to search for the current
+datum in all the cursors after the first. For this reason, the best join
+performance normally results from sorting the cursors from the one that
+references the least number of data items to the one that references the
+most. By default, Db::join does this sort on behalf of its caller.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_JOIN_NOSORT">DB_JOIN_NOSORT</a><dd>Do not sort the cursors based on the number of data items they reference.
+If the data are structured such that cursors with many data items also
+share many common elements, higher performance will result from listing
+those cursors before cursors with fewer data items, that is, a sort order
+other than the default. The DB_JOIN_NOSORT flag permits
+applications to perform join optimization prior to calling Db::join.
+</dl>
+<p>A newly created cursor is returned in the memory location referenced by
+<b>dbcp</b> and has the standard cursor functions:
+<p><dl compact>
+<p><dt><a href="../api_cxx/dbc_get.html">Dbc::get</a><dd>Iterates over the values associated with the keys to which each item in
+<b>curslist</b> has been initialized. Any data value which appears in
+all items specified by the <b>curslist</b> argument is then used as a
+key into the <b>primary</b>, and the key/data pair found in the
+<b>primary</b> is returned.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_JOIN_ITEM">DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup
+key for the <b>primary</b>, but simply return it in the key parameter
+instead. The data parameter is left unchanged.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+</dl>
+<p><dt><a href="../api_cxx/dbc_put.html">Dbc::put</a><dd>Returns EINVAL.
+<p><dt><a href="../api_cxx/dbc_del.html">Dbc::del</a><dd>Returns EINVAL.
+<p><dt><a href="../api_cxx/dbc_close.html">Dbc::close</a><dd>Close the returned cursor and release all resources. (Closing the cursors
+in <b>curslist</b> is the responsibility of the caller.)
+</dl>
+<p>For the returned join cursor to be used in a transaction protected manner,
+the cursors listed in <b>curslist</b> must have been created within the
+context of the same transaction.
+<p>The Db::join method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
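+<p>For example, the following sketch (the database handles, key values,
+and the assumption that the secondary databases index the primary by job
+title and city are all illustrative; the default C++ error model, under
+which DB_NOTFOUND is returned rather than thrown, is assumed) positions
+one cursor in each secondary database and iterates over the join:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+/* Join two secondary indexes (by job and by city) against the primary. */
+void
+join_example(Db *personnel_db, Db *job_db, Db *city_db)
+{
+    Dbc *curs[3], *join_curs;
+    Dbt job((void *)"engineer", 8), city((void *)"paris", 5);
+    Dbt key, data;
+
+    /* Position one cursor per secondary index on the desired key. */
+    job_db->cursor(NULL, &curs[0], 0);
+    curs[0]->get(&job, &data, DB_SET);
+    city_db->cursor(NULL, &curs[1], 0);
+    curs[1]->get(&city, &data, DB_SET);
+    curs[2] = NULL;             /* NULL-terminate the cursor list. */
+
+    /* Create the join cursor on the primary database's handle. */
+    personnel_db->join(curs, &join_curs, 0);
+
+    /*
+     * Each Dbc::get returns a primary key/data pair whose key appears as
+     * a data value under both secondary cursors; the loop ends when
+     * Dbc::get returns DB_NOTFOUND.
+     */
+    while (join_curs->get(&key, &data, 0) == 0) {
+        /* ... process key and data ... */
+    }
+
+    /* Closing the curslist cursors remains the caller's responsibility. */
+    join_curs->close();
+    curs[0]->close();
+    curs[1]->close();
+}
+</pre>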
+<h1>Errors</h1>
+<p>The Db::join method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <a href="../api_cxx/dbc_put.html">Dbc::put</a> or <a href="../api_cxx/dbc_del.html">Dbc::del</a> functions were called.
+</dl>
+<p>The Db::join method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::join method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_key_range.html b/bdb/docs/api_cxx/db_key_range.html
new file mode 100644
index 00000000000..980dc119ae6
--- /dev/null
+++ b/bdb/docs/api_cxx/db_key_range.html
@@ -0,0 +1,109 @@
+<!--$Id: db_key_range.so,v 10.5 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::key_range</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::key_range</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::key_range(DbTxn *txnid,
+ Dbt *key, DB_KEY_RANGE *key_range, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::key_range method returns an estimate of the proportion of keys
+that are less than, equal to and greater than the specified key. The
+underlying database must be of type Btree.
+<p>The information is returned in the <b>key_range</b> argument, which
+contains three elements of type double, <b>less</b>, <b>equal</b> and
+<b>greater</b>. Values are in the range of 0 to 1; for example, if the field
+<b>less</b> is 0.05, 5% of the keys in the database
+are less than the key argument. The value for <b>equal</b> will be zero
+if there is no matching key and non-zero otherwise.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>, otherwise, NULL.
+The Db::key_range method does not retain the locks it acquires for the
+life of the transaction, so estimates may not be repeatable.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The Db::key_range method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
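+<p>For example, the following sketch (the handle, key and Dbt accessors
+are illustrative, and the default C++ error model is assumed) estimates
+where a key falls within the key space of an open Btree database:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+
+/* Print the estimated position of keystr within an open Btree database. */
+void
+where_is(Db *db, const char *keystr)
+{
+    Dbt key((void *)keystr, (u_int32_t)strlen(keystr));
+    DB_KEY_RANGE range;
+
+    db->key_range(NULL, &key, &range, 0);
+
+    /* The three estimates sum (approximately) to 1. */
+    printf("less %.2f, equal %.2f, greater %.2f\n",
+        range.less, range.equal, range.greater);
+}
+</pre>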
+<h1>Errors</h1>
+<p>The Db::key_range method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The underlying database was not of type Btree.
+</dl>
+<p>The Db::key_range method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::key_range method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_open.html b/bdb/docs/api_cxx/db_open.html
new file mode 100644
index 00000000000..4c8cb75f452
--- /dev/null
+++ b/bdb/docs/api_cxx/db_open.html
@@ -0,0 +1,185 @@
+<!--$Id: db_open.so,v 10.61 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::open</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::open(const char *file,
+ const char *database, DBTYPE type, u_int32_t flags, int mode);
+</pre></h3>
+<h1>Description</h1>
+<p>The currently supported Berkeley DB file formats (or <i>access methods</i>)
+are Btree, Hash, Queue and Recno. The Btree format is a representation
+of a sorted, balanced tree structure. The Hash format is an extensible,
+dynamic hashing scheme. The Queue format supports fast access to
+fixed-length records accessed sequentially or by logical record number.
+The Recno format supports fixed- or variable-length records, accessed
+sequentially or by logical record number, and optionally retrieved from
+a flat text file.
+<p>Storage and retrieval for the Berkeley DB access methods are based on key/data
+pairs, see <a href="../api_cxx/dbt_class.html">Dbt</a> for more information.
+<p>The Db::open interface opens the database represented by the
+<b>file</b> and <b>database</b> arguments for both reading and writing.
+The <b>file</b> argument is used as the name of a physical file on disk
+that will be used to back the database. The <b>database</b> argument is
+optional and allows applications to have multiple logical databases in a
+single physical file. While no <b>database</b> argument needs to be
+specified, it is an error to attempt to open a second database in a
+<b>file</b> that was not initially created using a <b>database</b> name.
+In-memory databases never intended to be preserved on disk may
+be created by setting both the <b>file</b> and <b>database</b> arguments
+to NULL. Note that in-memory databases can only ever be shared by
+sharing the single database handle that created them, in circumstances
+where doing so is safe.
+<p>The <b>type</b> argument is of type DBTYPE
+and must be set to one of DB_BTREE, DB_HASH,
+DB_QUEUE, DB_RECNO or DB_UNKNOWN, except
+that databases of type DB_QUEUE are restricted to one per
+<b>file</b>. If <b>type</b> is DB_UNKNOWN, the database must
+already exist and Db::open will automatically determine its type.
+The <a href="../api_cxx/db_get_type.html">Db::get_type</a> method may be used to determine the underlying type of
+databases opened using DB_UNKNOWN.
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Create any underlying files, as necessary. If the files do not already
+exist and the DB_CREATE flag is not specified, the call will fail.
+<p><dt><a name="DB_EXCL">DB_EXCL</a><dd>Return an error if the file already exists. Underlying filesystem
+primitives are used to implement this flag. For this reason it is only
+applicable to the physical file and cannot be used to test if a database
+in a file already exists.
+<p>The DB_EXCL flag is only meaningful when specified with the
+DB_CREATE flag.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Do not map this database into process memory (see the description of the
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a> method for further information).
+<p><dt><a name="DB_RDONLY">DB_RDONLY</a><dd>Open the database for reading only. Any attempt to modify items in the
+database will fail regardless of the actual permissions of any underlying
+files.
+<p><dt><a name="DB_THREAD">DB_THREAD</a><dd>Cause the <a href="../api_cxx/db_class.html">Db</a> handle returned by Db::open to be
+<i>free-threaded</i>, that is, useable by multiple threads within a
+single address space.
+<p><dt><a name="DB_TRUNCATE">DB_TRUNCATE</a><dd>Physically truncate the underlying file, discarding all previous databases
+it might have held. Underlying filesystem primitives are used to
+implement this flag. For this reason it is only applicable to the
+physical file and cannot be used to discard databases within a file.
+<p>The DB_TRUNCATE flag cannot be transaction protected, and it is
+an error to specify it in a transaction protected environment.
+</dl>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the access methods
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p>Calling Db::open is a reasonably expensive operation, and
+maintaining a set of open databases will normally be preferable to
+repeatedly opening and closing the database for each new query.
+<p>The Db::open method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
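+<p>For example, the following sketch (the file name and mode are
+illustrative, and the default C++ error model is assumed) creates and
+opens a Btree database:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    /* Create a handle and open a Btree database, creating it if needed. */
+    Db db(NULL, 0);
+    db.set_pagesize(8 * 1024);      /* Optional tuning, before Db::open. */
+    db.open("access.db", NULL, DB_BTREE, DB_CREATE, 0664);
+
+    /* ... use the database ... */
+
+    db.close(0);
+    return (0);
+}
+</pre>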
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_cxx/env_open.html">DbEnv::open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db::open
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<p><dl compact>
+<p><dt>TMPDIR<dd>If the <b>file</b> and <b>dbenv</b> arguments to Db::open are
+NULL, the environment variable <b>TMPDIR</b> may be used as a
+directory in which to create a temporary backing file.
+</dl>
+<h1>Errors</h1>
+<p>The Db::open method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt><a name="DB_OLD_VERSION">DB_OLD_VERSION</a><dd>The database cannot be opened without being first upgraded.
+<p><dt>EEXIST<dd>DB_CREATE and DB_EXCL were specified and the file exists.
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified (e.g., unknown database
+type, page size, hash function, pad byte, byte order) or a flag value
+or parameter that is incompatible with the specified database.
+<p>
+The <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified and spinlocks are not
+implemented for this architecture.
+<p>The <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to Db::open, but was not
+specified to the <a href="../api_cxx/env_open.html">DbEnv::open</a> call for the environment in which the
+<a href="../api_cxx/db_class.html">Db</a> handle was created.
+<p>A <b>re_source</b> file was specified with either the <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a>
+flag or a database environment that supports transaction
+processing.
+<p><dt>ENOENT<dd>A non-existent <b>re_source</b> file was specified.
+</dl>
+<p>The Db::open method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::open method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_put.html b/bdb/docs/api_cxx/db_put.html
new file mode 100644
index 00000000000..5e2d1b8c4c9
--- /dev/null
+++ b/bdb/docs/api_cxx/db_put.html
@@ -0,0 +1,138 @@
+<!--$Id: db_put.so,v 10.34 2000/09/16 22:27:56 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::put</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::put(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::put method stores key/data pairs in the database. The default
+behavior of the Db::put function is to enter the new key/data
+pair, replacing any previously existing key if duplicates are disallowed,
+or adding a duplicate data item if duplicates are allowed. If the database
+supports duplicates, the Db::put method adds the new data value at the
+end of the duplicate set. If the database supports sorted duplicates,
+the new data value is inserted at the correct sorted location.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>, otherwise, NULL.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_APPEND">DB_APPEND</a><dd>Append the key/data pair to the end of the database. For the
+DB_APPEND flag to be specified, the underlying database must be
+a Queue or Recno database. The record number allocated to the record is
+returned in the specified <b>key</b>.
+<p>There is a minor behavioral difference between the Recno and Queue access
+methods for the DB_APPEND flag. If a transaction enclosing a
+Db::put operation with the DB_APPEND flag aborts, the
+record number may be decremented (and later re-allocated by a subsequent
+DB_APPEND operation) by the Recno access method, but will not be
+decremented or re-allocated by the Queue access method.
+<p><dt><a name="DB_NODUPDATA">DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, enter the new key/data
+pair only if it does not already appear in the database. If the
+key/data pair already appears in the database, <a href="../api_cxx/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> is
+returned. The DB_NODUPDATA flag may only be specified if the
+underlying database has been configured to support sorted duplicates.
+<p>The DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_NOOVERWRITE">DB_NOOVERWRITE</a><dd>Enter the new key/data pair only if the key does not already appear in
+the database. If the key already appears in the database,
+<a href="../api_cxx/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> is returned. Even if the database allows duplicates,
+a call to Db::put with the DB_NOOVERWRITE flag set will
+fail if the key already exists in the database.
+</dl>
+<p>Otherwise, the Db::put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
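+<p>For example, the following sketch (the handle and strings are
+illustrative) stores a key/data pair without overwriting an existing key;
+under the default C++ error model, DB_KEYEXIST is returned rather
+than thrown:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;string.h&gt;
+
+/* Store a key/data pair, refusing to overwrite an existing key. */
+int
+store_once(Db *db, const char *k, const char *v)
+{
+    Dbt key((void *)k, (u_int32_t)strlen(k) + 1);
+    Dbt data((void *)v, (u_int32_t)strlen(v) + 1);
+    int ret;
+
+    ret = db->put(NULL, &key, &data, DB_NOOVERWRITE);
+    if (ret == DB_KEYEXIST) {
+        /* The key was already present; nothing was stored. */
+    }
+    return (ret);
+}
+</pre>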
+<h1>Errors</h1>
+<p>The Db::put method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to do a partial put.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>A btree exceeded the maximum btree depth (255).
+</dl>
+<p>The Db::put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::put method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_remove.html b/bdb/docs/api_cxx/db_remove.html
new file mode 100644
index 00000000000..56cc5a23439
--- /dev/null
+++ b/bdb/docs/api_cxx/db_remove.html
@@ -0,0 +1,110 @@
+<!--$Id: db_remove.so,v 10.20 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::remove</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::remove(const char *file, const char *database, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::remove interface removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the physical file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>If a physical file is being removed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+Db::remove method is called. Otherwise, no reference count of database
+use is maintained by Berkeley DB. Applications should not remove databases that
+are currently in use. In particular, some architectures do not permit
+the removal of files with open handles. On these architectures, attempts
+to remove databases that are currently in use will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Once Db::remove has been called, regardless of its return, the
+<a href="../api_cxx/db_class.html">Db</a> handle may not be accessed again.
+<p>The Db::remove method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
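+<p>For example, the following sketch (the file name is illustrative)
+removes a physical database file; as noted above, the handle may not be
+used again once Db::remove has been called:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    /* A fresh handle is used only for the remove call. */
+    Db db(NULL, 0);
+    db.remove("access.db", NULL, 0);
+    return (0);
+}
+</pre>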
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_cxx/env_open.html">DbEnv::open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db::remove
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The Db::remove method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>A database in the file is currently open.
+</dl>
+<p>The Db::remove method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::remove method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_rename.html b/bdb/docs/api_cxx/db_rename.html
new file mode 100644
index 00000000000..03f2063d41a
--- /dev/null
+++ b/bdb/docs/api_cxx/db_rename.html
@@ -0,0 +1,112 @@
+<!--$Id: db_rename.so,v 10.7 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::rename</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::rename(const char *file,
+ const char *database, const char *newname, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::rename interface renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the physical file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>If a physical file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+Db::rename method is called. Otherwise, no reference count of database
+use is maintained by Berkeley DB. Applications should not rename databases that
+are currently in use. In particular, some architectures do not permit
+renaming files with open handles. On these architectures, attempts to
+rename databases that are currently in use will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Once Db::rename has been called, regardless of its return, the
+<a href="../api_cxx/db_class.html">Db</a> handle may not be accessed again.
+<p>The Db::rename method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_cxx/env_open.html">DbEnv::open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db::rename
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The Db::rename method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>A database in the file is currently open.
+</dl>
+<p>The Db::rename method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::rename method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_append_recno.html b/bdb/docs/api_cxx/db_set_append_recno.html
new file mode 100644
index 00000000000..296b4748407
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_append_recno.html
@@ -0,0 +1,69 @@
+<!--$Id: db_set_append_recno.so,v 1.3 2000/07/18 16:19:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_append_recno</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_append_recno</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_append_recno(
+ int (*db_append_recno_fcn)(DB *dbp, Dbt *data, db_recno_t recno));
+</pre></h3>
+<h1>Description</h1>
+<p>When using the <a href="../api_cxx/db_put.html#DB_APPEND">DB_APPEND</a> option of the <a href="../api_cxx/db_put.html">Db::put</a> method,
+it may be useful to modify the stored data based on the generated key.
+If a callback method is specified using the
+Db::set_append_recno method, it will be called after the record number
+has been selected but before the data has been stored.
+The callback function must return 0 on success and <b>errno</b> or
+a value outside of the Berkeley DB error name space on failure.
+<p>The called function must take three arguments: a reference to the
+enclosing database handle, the data <a href="../api_cxx/dbt_class.html">Dbt</a> to be stored and the
+selected record number. The called function may then modify the data
+<a href="../api_cxx/dbt_class.html">Dbt</a>.
+<p>The Db::set_append_recno interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_append_recno method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
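+<p>For example, the following sketch (the callback name and data layout
+are illustrative, and the Dbt get_data and get_size accessors are assumed)
+copies the allocated record number into the first bytes of each appended
+data item; the callback would be passed to Db::set_append_recno before
+Db::open is called:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;string.h&gt;
+
+/* Copy the allocated record number into the start of the data item. */
+extern "C" int
+embed_recno(DB *dbp, Dbt *data, db_recno_t recno)
+{
+    if (data->get_size() &lt; sizeof(db_recno_t))
+        return (EINVAL);
+    memcpy(data->get_data(), &recno, sizeof(db_recno_t));
+    return (0);
+}
+</pre>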
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_bt_compare.html b/bdb/docs/api_cxx/db_set_bt_compare.html
new file mode 100644
index 00000000000..5ca95d40110
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_bt_compare.html
@@ -0,0 +1,109 @@
+<!--$Id: db_set_bt_compare.so,v 10.24 2000/10/26 15:20:40 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_bt_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_bt_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef int (*bt_compare_fcn_type)(DB *, const DBT *, const DBT *);
+};
+int
+Db::set_bt_compare(bt_compare_fcn_type bt_compare_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree key comparison function. The comparison function is
+called when it is necessary to compare a key specified by the
+application with a key currently stored in the tree. The first argument
+to the comparison function is the <a href="../api_cxx/dbt_class.html">Dbt</a> representing the
+application supplied key, the second is the current tree's key.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first key argument is considered to be
+respectively less than, equal to, or greater than the second key
+argument. In addition, the comparison function must cause the keys in
+the database to be <i>well-ordered</i>. The comparison function
+must correctly handle any key values used by the application (possibly
+including zero-length keys). In addition, when Btree key prefix
+comparison is being performed (see <a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a> for more
+information), the comparison routine may be passed a prefix of any
+database key. The <b>data</b> and <b>size</b> fields of the
+<a href="../api_cxx/dbt_class.html">Dbt</a> are the only fields that may be used for the purposes of
+this comparison.
+<p>If no comparison function is specified, the keys are compared lexically,
+with shorter keys collating before longer keys. The same comparison
+method must be used each time a particular Btree is opened.
+<p>The Db::set_bt_compare interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_bt_compare method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
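+<p>For example, the following sketch (the function name is illustrative)
+compares keys as native byte-order u_int32_t values instead of lexically;
+it would be passed to Db::set_bt_compare before Db::open is called:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;string.h&gt;
+
+/* Compare keys as native byte-order u_int32_t values, not lexically. */
+extern "C" int
+compare_u32(DB *dbp, const DBT *a, const DBT *b)
+{
+    u_int32_t ai, bi;
+
+    /* memcpy avoids making alignment assumptions about the key memory. */
+    memcpy(&ai, a->data, sizeof(u_int32_t));
+    memcpy(&bi, b->data, sizeof(u_int32_t));
+    if (ai == bi)
+        return (0);
+    return (ai > bi ? 1 : -1);
+}
+</pre>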
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_bt_minkey.html b/bdb/docs/api_cxx/db_set_bt_minkey.html
new file mode 100644
index 00000000000..c0c5aced12b
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_bt_minkey.html
@@ -0,0 +1,94 @@
+<!--$Id: db_set_bt_minkey.so,v 10.14 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_bt_minkey</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_bt_minkey</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_bt_minkey(u_int32_t bt_minkey);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the minimum number of keys that will be stored on any single
+Btree page.
+<p>This value is used to determine which keys will be stored on overflow
+pages, i.e. if a key or data item is larger than the underlying database
+page size divided by the <b>bt_minkey</b> value, it will be stored on
+overflow pages instead of within the page itself. The <b>bt_minkey</b>
+value specified must be at least 2; if <b>bt_minkey</b> is not explicitly
+set, a value of 2 is used.
+<p>The Db::set_bt_minkey interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_bt_minkey method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
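+<p>As an illustration, the following sketch (hypothetical values, not part
+of the Berkeley DB distribution) pairs an 8KB page size with a
+<b>bt_minkey</b> value of 4, so key or data items larger than roughly
+8192 / 4 = 2048 bytes are moved to overflow pages:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    Db db(NULL, 0);
+
+    // Both calls must precede Db::open; failures are reported by a
+    // thrown exception or a non-zero return, as described above.
+    db.set_pagesize(8192);
+    db.set_bt_minkey(4);
+
+    // ... open the database and use it here ...
+    return (0);
+}
+</pre></blockquote>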
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_bt_prefix.html b/bdb/docs/api_cxx/db_set_bt_prefix.html
new file mode 100644
index 00000000000..ecf9495459a
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_bt_prefix.html
@@ -0,0 +1,110 @@
+<!--$Id: db_set_bt_prefix.so,v 10.25 2000/09/08 21:35:26 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_bt_prefix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_bt_prefix</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef size_t (*bt_prefix_fcn_type)(DB *, const DBT *, const DBT *);
+};
+int
+Db::set_bt_prefix(bt_prefix_fcn_type bt_prefix_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree prefix function. The prefix function must return the
+number of bytes of the second key argument that would be required by
+the Btree key comparison function to determine the second key argument's
+ordering relationship with respect to the first key argument. If the
+two keys are equal, the key length should be returned. The prefix
+function must correctly handle any key values used by the application
+(possibly including zero-length keys). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_cxx/dbt_class.html">Dbt</a> are the only fields that may be
+used for the purposes of this determination.
+<p>The prefix function is used to determine the amount by which keys stored
+on the Btree internal pages can be safely truncated without losing their
+uniqueness. See the <a href="../ref/am_conf/bt_prefix.html">Btree
+prefix comparison</a> section of the Reference Guide for more details about
+how this works. The usefulness of this is data-dependent, but for some
+data sets it can produce significantly reduced tree sizes and search times.
+<p>If no prefix function or key comparison function is specified by the
+application, a default lexical comparison function is used as the prefix
+function. If no prefix function is specified and a key comparison
+function is specified, no prefix function is used. It is an error to
+specify a prefix function without also specifying a key comparison
+function.
+<p>The Db::set_bt_prefix interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_bt_prefix method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
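+<p>As an illustration, the following sketch (a hypothetical function, not
+Berkeley DB's built-in default) is a byte-wise prefix function; it assumes
+a matching lexical key comparison function is also specified with
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>, as required above:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+extern "C" size_t
+sample_prefix(DB *dbp, const DBT *a, const DBT *b)
+{
+    size_t cnt, len;
+    const u_int8_t *p1, *p2;
+
+    // Scan the shared bytes; return how many bytes of the second key
+    // are needed to distinguish it from the first key.
+    cnt = 1;
+    len = a-&gt;size &gt; b-&gt;size ? b-&gt;size : a-&gt;size;
+    for (p1 = (const u_int8_t *)a-&gt;data,
+        p2 = (const u_int8_t *)b-&gt;data; len-- &gt; 0; ++p1, ++p2, ++cnt)
+        if (*p1 != *p2)
+            return (cnt);
+
+    // One key is a prefix of (or equal to) the other.  With lexical
+    // ordering the first key is the shorter one, so one byte past the
+    // shared prefix is enough; equal keys return the key length.
+    return (a-&gt;size &lt; b-&gt;size ? a-&gt;size + 1 : a-&gt;size);
+}
+
+// ... before Db::open (hypothetical comparison function name):
+// db.set_bt_compare(lexical_compare);
+// db.set_bt_prefix(sample_prefix);
+</pre></blockquote>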
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_cachesize.html b/bdb/docs/api_cxx/db_set_cachesize.html
new file mode 100644
index 00000000000..cc8e020cc30
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_cachesize.html
@@ -0,0 +1,108 @@
+<!--$Id: db_set_cachesize.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--$Id: m4.cachesize,v 10.7 2000/02/11 18:54:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_cachesize</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the database's shared memory buffer pool, i.e., the cache,
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be the
+size of the normal working data set of the application, with some small
+amount of additional memory for unusual situations. (Note, the working
+set is not the same as the number of simultaneously referenced pages, and
+should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures, e.g., some
+releases of Solaris limit the amount of memory that may be allocated
+contiguously by a process. If <b>ncache</b> is 0 or 1, the cache will
+be allocated contiguously in memory. If it is greater than 1, the cache
+will be broken up into <b>ncache</b> equally sized separate pieces of
+memory.
+<p>As databases opened within Berkeley DB environments use the cache specified to
+the environment, it is an error to attempt to set a cache in a database
+created within an environment.
+<p>The Db::set_cachesize interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_cachesize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
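+<p>For example, the following sketch (hypothetical sizes) requests a 64MB
+cache allocated as a single contiguous piece of memory; because the size
+is under 500MB, the pool actually allocated will be roughly 80MB:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    Db db(NULL, 0);
+
+    // 0 gigabytes plus 64MB, in one piece (ncache == 1).  The call is
+    // only legal for a database that is not part of an environment,
+    // and must precede Db::open.
+    db.set_cachesize(0, 64 * 1024 * 1024, 1);
+
+    // ... open the database and use it here ...
+    return (0);
+}
+</pre></blockquote>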
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_dup_compare.html b/bdb/docs/api_cxx/db_set_dup_compare.html
new file mode 100644
index 00000000000..7cd09ec0cb1
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_dup_compare.html
@@ -0,0 +1,106 @@
+<!--$Id: db_set_dup_compare.so,v 10.21 2000/10/26 15:20:40 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_dup_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_dup_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef int (*dup_compare_fcn_type)(DB *, const DBT *, const DBT *);
+};
+int
+Db::set_dup_compare(dup_compare_fcn_type dup_compare_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the duplicate data item comparison function. The comparison function
+is called when it is necessary to compare a data item specified by the
+application with a data item currently stored in the tree. The first
+argument to the comparison function is the <a href="../api_cxx/dbt_class.html">Dbt</a> representing the
+application's data item, the second is the current tree's data item.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first data item argument is considered
+to be respectively less than, equal to, or greater than the second data
+item argument. In addition, the comparison function must cause the data
+items in the set to be <i>well-ordered</i>. The comparison function
+must correctly handle any data item values used by the application
+(possibly including zero-length data items). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_cxx/dbt_class.html">Dbt</a> are the only fields that may be
+used for the purposes of this comparison.
+<p>If no comparison function is specified, the data items are compared
+lexically, with shorter data items collating before longer data items.
+The same duplicate data item comparison method must be used each time
+a particular Btree is opened.
+<p>The Db::set_dup_compare interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_dup_compare method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
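+<p>As an illustration, the following sketch (a hypothetical comparison
+function, assuming every duplicate data item is a <b>u_int32_t</b> in
+native byte order) sorts duplicates numerically rather than lexically;
+the DB_DUPSORT flag must also be set (see <a href="../api_cxx/db_set_flags.html">Db::set_flags</a>):
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db_cxx.h&gt;
+
+extern "C" int
+sample_dup_compare(DB *dbp, const DBT *a, const DBT *b)
+{
+    u_int32_t ai, bi;
+
+    // Only the data and size fields may be examined; here each data
+    // item is assumed to be exactly sizeof(u_int32_t) bytes.
+    memcpy(&amp;ai, a-&gt;data, sizeof(u_int32_t));
+    memcpy(&amp;bi, b-&gt;data, sizeof(u_int32_t));
+    return (ai &lt; bi ? -1 : (ai &gt; bi ? 1 : 0));
+}
+
+// ... before Db::open:
+// db.set_flags(DB_DUPSORT);
+// db.set_dup_compare(sample_dup_compare);
+</pre></blockquote>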
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_errcall.html b/bdb/docs/api_cxx/db_set_errcall.html
new file mode 100644
index 00000000000..6cc5310eb50
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_errcall.html
@@ -0,0 +1,79 @@
+<!--$Id: db_set_errcall.so,v 10.7 1999/12/20 08:52:28 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_errcall</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void Db::set_errcall(
+ void (*db_errcall_fcn)(const char *errpfx, char *msg));
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown or an
+error return value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The Db::set_errcall method is used to enhance the mechanism for reporting error
+messages to the application. In some cases, when an error occurs, Berkeley DB
+will call <b>db_errcall_fcn</b> with additional error information. The
+function must be defined with two arguments; the first will be the prefix
+string (as previously set by <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> or
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), the second will be the error message string.
+It is up to the <b>db_errcall_fcn</b> function to display the error
+message in an appropriate manner.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> method to display
+the additional information via an output stream, or the <a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>
+or <a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> methods to display the additional information via a C
+library FILE *. You should not mix these approaches.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_errcall method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method.
+<p>The Db::set_errcall interface may be used to configure Berkeley DB at any time
+during the life of the application.
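+<p>As an illustration, the following sketch (hypothetical names) routes
+additional Berkeley DB error messages through an application-supplied callback:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+my_errcall(const char *errpfx, char *msg)
+{
+    // errpfx is whatever was set with Db::set_errpfx, or NULL.
+    fprintf(stderr, "%s: %s\n", errpfx == NULL ? "db" : errpfx, msg);
+}
+
+int
+main()
+{
+    Db db(NULL, 0);
+
+    db.set_errpfx("myapp");
+    db.set_errcall(my_errcall);
+    // ... subsequent Berkeley DB error messages for this handle are
+    // delivered to my_errcall ...
+    return (0);
+}
+</pre></blockquote>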
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_errfile.html b/bdb/docs/api_cxx/db_set_errfile.html
new file mode 100644
index 00000000000..50c6bebd1d8
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_errfile.html
@@ -0,0 +1,80 @@
+<!--$Id: db_set_errfile.so,v 10.7 1999/12/20 08:52:28 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_errfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_errfile</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void Db::set_errfile(FILE *errfile);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown or an
+error return value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The Db::set_errfile method is used to enhance the mechanism for reporting error
+messages to the application by setting a C library FILE * to be used for
+displaying additional Berkeley DB error messages. In some cases, when an error
+occurs, Berkeley DB will output an additional error message to the specified
+file reference.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> method to display
+the additional information via an output stream, or the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method to capture the additional error information in
+a way that does not use either output streams or C library FILE *'s. You
+should not mix these approaches.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> or <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), an error string, and
+a trailing &lt;newline&gt; character.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_errfile method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> method.
+<p>The Db::set_errfile interface may be used to configure Berkeley DB at any time
+during the life of the application.
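+<p>For example, the following sketch (a hypothetical log file name) sends
+additional Berkeley DB error messages to a file, prefixed with an application name:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    Db db(NULL, 0);
+    FILE *errfp = fopen("myapp.err", "w");
+
+    if (errfp != NULL) {
+        db.set_errpfx("myapp");
+        db.set_errfile(errfp);
+    }
+    // ... messages appear in the file as the prefix, a colon, the
+    // error string and a trailing newline; the file should remain
+    // open for the life of the handle ...
+    return (0);
+}
+</pre></blockquote>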
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_errpfx.html b/bdb/docs/api_cxx/db_set_errpfx.html
new file mode 100644
index 00000000000..0baa3ba674c
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_errpfx.html
@@ -0,0 +1,63 @@
+<!--$Id: db_set_errpfx.so,v 10.6 1999/12/20 08:52:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_errpfx</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void Db::set_errpfx(const char *errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The Db::set_errpfx method does not copy the memory referenced by the
+<b>errpfx</b> argument, rather, it maintains a reference to it. This
+allows applications to modify the error message prefix at any time,
+without repeatedly calling Db::set_errpfx, but means that the
+memory must be maintained until the handle is closed.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_errpfx method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a> method.
+<p>The Db::set_errpfx interface may be used to configure Berkeley DB at any time
+during the life of the application.
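+<p>Because the prefix string is not copied, it must remain valid until the
+handle is closed; a string literal or other static storage is the simplest
+choice, as in the following sketch (hypothetical name):
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+// Static storage: the string stays valid until the handle is closed.
+static const char *progname = "myapp";
+
+int
+main()
+{
+    Db db(NULL, 0);
+
+    db.set_errpfx(progname);
+    // ... open and use the database ...
+    return (0);
+}
+</pre></blockquote>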
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_feedback.html b/bdb/docs/api_cxx/db_set_feedback.html
new file mode 100644
index 00000000000..97a5a85b717
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_feedback.html
@@ -0,0 +1,97 @@
+<!--$Id: db_set_feedback.so,v 10.16 2000/07/09 19:11:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_feedback</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_feedback(
+ void (*db_feedback_fcn)(DB *dbp, int opcode, int pct));
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The Db::set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback function. The function must be declared with
+three arguments: the first will be a reference to the enclosing database
+handle, the second a flag value, and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback function to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="DB_UPGRADE">DB_UPGRADE</a><dd>The underlying database is being upgraded.
+<p><dt><a name="DB_VERIFY">DB_VERIFY</a><dd>The underlying database is being verified.
+</dl>
+<p>The Db::set_feedback interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The Db::set_feedback method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
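+<p>As an illustration, the following sketch (hypothetical names) reports
+upgrade and verification progress on the standard error output:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+show_progress(DB *dbp, int opcode, int pct)
+{
+    const char *op =
+        opcode == DB_UPGRADE ? "upgrade" :
+        opcode == DB_VERIFY ? "verify" : "operation";
+    fprintf(stderr, "%s: %d%% complete\n", op, pct);
+}
+
+int
+main()
+{
+    Db db(NULL, 0);
+
+    db.set_feedback(show_progress);
+    // ... a later Db::upgrade or Db::verify call on this handle will
+    // invoke show_progress as it runs ...
+    return (0);
+}
+</pre></blockquote>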
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_flags.html b/bdb/docs/api_cxx/db_set_flags.html
new file mode 100644
index 00000000000..059810357ec
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_flags.html
@@ -0,0 +1,183 @@
+<!--$Id: db_set_flags.so,v 10.26 2000/03/17 01:53:58 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_flags</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_flags(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Calling Db::set_flags is additive; there is no way to clear flags.
+<p>The <b>flags</b> value must be set to 0 or to the result of bitwise inclusively <b>OR</b>'ing
+together one or more of the following values.
+<h3>Btree</h3>
+<p>The following flags may be specified for the Btree access method:
+<p><dl compact>
+<p><dt><a name="DB_DUP">DB_DUP</a><dd>Permit duplicate data items in the tree, i.e. insertion when the key of
+the key/data pair being inserted already exists in the tree will be
+successful. The ordering of duplicates in the tree is determined by the
+order of insertion, unless the ordering is otherwise specified by use of
+a cursor operation. It is an error to specify both DB_DUP and
+DB_RECNUM.
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd>Permit duplicate data items in the tree, i.e. insertion when the key of
+the key/data pair being inserted already exists in the tree will be
+successful. The ordering of duplicates in the tree is determined by the
+duplicate comparison function.
+If the application does not specify a comparison function using the
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a> method, a default, lexical comparison will be
+used.
+It is an error to specify both DB_DUPSORT and DB_RECNUM.
+<p><dt><a name="DB_RECNUM">DB_RECNUM</a><dd>Support retrieval from the Btree using record numbers. For more
+information, see the DB_GET_RECNO flag to the <a href="../api_cxx/db_get.html">Db::get</a> and
+<a href="../api_cxx/dbc_get.html">Dbc::get</a> methods.
+<p>Logical record numbers in Btree databases are mutable in the face of
+record insertion or deletion. See the DB_RENUMBER flag in the Recno
+access method information for further discussion.
+<p>Maintaining record counts within a Btree introduces a serious point of
+contention, namely the page locations where the record counts are stored.
+In addition, the entire tree must be locked during both insertions and
+deletions, effectively single-threading the tree for those operations.
+Specifying DB_RECNUM can result in serious performance degradation for
+some applications and data sets.
+<p>It is an error to specify both DB_DUP and DB_RECNUM.
+<p><dt><a name="DB_REVSPLITOFF">DB_REVSPLITOFF</a><dd>Turn off reverse splitting in the Btree. As pages are emptied in a
+database, the Berkeley DB Btree implementation attempts to coalesce empty pages
+into higher-level pages in order to keep the tree as small as possible
+and minimize tree search time. This can hurt performance in applications
+with cyclical data demands, that is, applications where the database grows
+and shrinks repeatedly. For example, because Berkeley DB does page-level
+locking, the maximum level of concurrency in a database of 2 pages is far
+smaller than that in a database of 100 pages, and so a database that has
+shrunk to a minimal size can cause severe deadlocking when a new cycle of
+data insertion begins.
+</dl>
+<h3>Hash</h3>
+<p>The following flags may be specified for the Hash access method:
+<p><dl compact>
+<p><dt><a name="DB_DUP">DB_DUP</a><dd>Permit duplicate data items in the tree, i.e. insertion when the key of
+the key/data pair being inserted already exists in the tree will be
+successful. The ordering of duplicates in the tree is determined by the
+order of insertion, unless the ordering is otherwise specified by use of
+a cursor operation. It is an error to specify both DB_DUP and
+DB_RECNUM.
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd>Permit duplicate data items in the tree, i.e. insertion when the key of
+the key/data pair being inserted already exists in the tree will be
+successful. The ordering of duplicates in the tree is determined by the
+duplicate comparison function.
+If the application does not specify a comparison function using the
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a> method, a default, lexical comparison will be
+used.
+It is an error to specify both DB_DUPSORT and DB_RECNUM.
+</dl>
+<h3>Queue</h3>
+<p>There are no additional flags that may be specified for the Queue access
+method.
+<h3>Recno</h3>
+<p>The following flags may be specified for the Recno access method:
+<p><dl compact>
+<p><dt><a name="DB_RENUMBER">DB_RENUMBER</a><dd>Specifying the DB_RENUMBER flag causes the logical record numbers to be
+mutable, and change as records are added to and deleted from the database.
+For example, the deletion of record number 4 causes records numbered 5
+and greater to be renumbered downward by 1. If a cursor was positioned
+to record number 4 before the deletion, it will reference the new record
+number 4, if any such record exists, after the deletion. If a cursor was
+positioned after record number 4 before the deletion, it will be shifted
+downward 1 logical record, continuing to reference the same record as it
+did before.
+<p>Using the <a href="../api_cxx/db_put.html">Db::put</a> or <a href="../api_cxx/dbc_put.html">Dbc::put</a> interfaces to create new
+records will cause the creation of multiple records if the record number
+is more than one greater than the largest record currently in the
+database. For example, creating record 28, when record 25 was previously
+the last record in the database, will create records 26 and 27 as well as
+28. Attempts to retrieve records that were created in this manner will
+result in an error return of <a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If a created record is not at the end of the database, all records
+following the new record will be automatically renumbered upward by 1.
+For example, the creation of a new record numbered 8 causes records
+numbered 8 and greater to be renumbered upward by 1. If a cursor was
+positioned to record number 8 or greater before the insertion, it will be
+shifted upward 1 logical record, continuing to reference the same record
+as it did before.
+<p>For these reasons, concurrent access to a Recno database with the
+DB_RENUMBER flag specified may be largely meaningless, although
+it is supported.
+<p><dt><a name="DB_SNAPSHOT">DB_SNAPSHOT</a><dd>This flag specifies that any specified <b>re_source</b> file be read in
+its entirety when <a href="../api_cxx/db_open.html">Db::open</a> is called. If this flag is not
+specified, the <b>re_source</b> file may be read lazily.
+</dl>
+<p>The Db::set_flags interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_flags method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
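+<p>For example, the following sketch (a hypothetical Btree configuration)
+enables sorted duplicates and turns off reverse splitting before the
+database is opened:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    Db db(NULL, 0);
+
+    // DB_DUPSORT uses the default lexical duplicate comparison unless
+    // Db::set_dup_compare is also called.  Remember that DB_DUPSORT
+    // may not be combined with DB_RECNUM.
+    db.set_flags(DB_DUPSORT | DB_REVSPLITOFF);
+
+    // ... call Db::open with DB_BTREE and use the database ...
+    return (0);
+}
+</pre></blockquote>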
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_h_ffactor.html b/bdb/docs/api_cxx/db_set_h_ffactor.html
new file mode 100644
index 00000000000..fa7d4209e8e
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_h_ffactor.html
@@ -0,0 +1,95 @@
+<!--$Id: db_set_h_ffactor.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_h_ffactor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_h_ffactor</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_h_ffactor(u_int32_t h_ffactor);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the desired density within the hash table.
+<p>The density is an approximation of the number of keys allowed to
+accumulate in any one bucket, determining when the hash table grows or
+shrinks. If you know the average sizes of the keys and data in your
+dataset, setting the fill factor can enhance performance. A reasonable
+rule for computing the fill factor is to set it to:
+<p><blockquote><pre>(pagesize - 32) / (average_key_size + average_data_size + 8)</pre></blockquote>
+<p>If no value is specified, the fill factor will be selected dynamically as
+pages are filled.
+<p>The Db::set_h_ffactor interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_h_ffactor method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
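+<p>For example, under the hypothetical assumption of 4KB pages, 24-byte
+keys and 72-byte data items, the rule above gives
+(4096 - 32) / (24 + 72 + 8), or approximately 39:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    Db db(NULL, 0);
+
+    // Both calls must precede Db::open.
+    db.set_pagesize(4096);
+    db.set_h_ffactor(39);
+
+    // ... call Db::open with DB_HASH and use the database ...
+    return (0);
+}
+</pre></blockquote>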
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_h_hash.html b/bdb/docs/api_cxx/db_set_h_hash.html
new file mode 100644
index 00000000000..71808b081c8
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_h_hash.html
@@ -0,0 +1,102 @@
+<!--$Id: db_set_h_hash.so,v 10.18 2000/07/04 18:28:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_h_hash</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_h_hash</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef u_int32_t (*h_hash_fcn_type)
+ (DB *, const void *bytes, u_int32_t length);
+};
+int
+Db::set_h_hash(h_hash_fcn_type h_hash_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set a user-defined hash method; if no hash method is specified, a default
+hash method is used. Since no hash method performs equally well on all
+possible data, the user may find that the built-in hash method performs
+poorly with a particular data set. User-specified hash functions must
+take a pointer to a byte string and a length as arguments and return a
+value of type
+<b>u_int32_t</b>.
+The hash function must handle any key values used by the application
+(possibly including zero-length keys).
+<p>If a hash method is specified, <a href="../api_cxx/db_open.html">Db::open</a> will attempt to determine
+if the hash method specified is the same as the one with which the database
+was created, and will fail if it detects that it is not.
+<p>The Db::set_h_hash interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_h_hash method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
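+<p>As an illustrative sketch, the following configures a hash method before
+calling <a href="../api_cxx/db_open.html">Db::open</a>. The hash body, the
+database name <b>hashed.db</b> and the open flags are invented for the
+example, and error handling is shown in the return-value style (by default
+these methods throw <a href="../api_cxx/except_class.html">DbException</a>
+instead):
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+/* Example hash: a simple multiplicative hash over the key bytes. */
+extern "C" u_int32_t
+example_hash(DB *, const void *bytes, u_int32_t length)
+{
+    const unsigned char *p = (const unsigned char *)bytes;
+    u_int32_t h;
+
+    for (h = 0; length != 0; --length)
+        h = h * 31 + *p++;
+    return (h);
+}
+
+int
+build_hashed_db()
+{
+    Db db(NULL, 0);
+    int ret;
+
+    db.set_h_hash(example_hash);    /* Must precede Db::open. */
+    if ((ret = db.open("hashed.db", NULL, DB_HASH, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>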
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_h_nelem.html b/bdb/docs/api_cxx/db_set_h_nelem.html
new file mode 100644
index 00000000000..55698d45737
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_h_nelem.html
@@ -0,0 +1,90 @@
+<!--$Id: db_set_h_nelem.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_h_nelem</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_h_nelem</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_h_nelem(u_int32_t h_nelem);
+</pre></h3>
+<h1>Description</h1>
+<p>Set an estimate of the final size of the hash table.
+<p>If not set or set too low, hash tables will still expand gracefully
+as keys are entered, although a slight performance degradation may be
+noticed.
+<p>The Db::set_h_nelem interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_h_nelem method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
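+<p>A minimal sketch of supplying the element-count estimate before
+<a href="../api_cxx/db_open.html">Db::open</a>; the figure of one million
+keys and the database name are invented for the example:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+build_sized_hash_db()
+{
+    Db db(NULL, 0);
+    int ret;
+
+    /* Sizing hint only: roughly one million keys expected. */
+    db.set_h_nelem(1000000);
+    if ((ret = db.open("sized.db", NULL, DB_HASH, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>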
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_lorder.html b/bdb/docs/api_cxx/db_set_lorder.html
new file mode 100644
index 00000000000..f25779a252a
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_lorder.html
@@ -0,0 +1,96 @@
+<!--$Id: db_set_lorder.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_lorder</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_lorder</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_lorder(int lorder);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the byte order for integers in the stored database metadata. The
+number should represent the order as an integer; for example, big-endian
+order is the number 4,321, and little-endian order is the number 1,234.
+If <b>lorder</b> is not explicitly set, the host order of the machine
+where the Berkeley DB library was compiled is used.
+<p>The value of <b>lorder</b> is ignored except when databases are being
+created. If a database already exists, the byte order it uses is
+determined when the database is opened.
+<p><b>The access methods provide no guarantees about the byte ordering of the
+application data stored in the database, and applications are responsible
+for maintaining any necessary ordering.</b>
+<p>The Db::set_lorder interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_lorder method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
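+<p>For illustration, the following creates a database whose metadata is
+stored in big-endian order regardless of the host byte order; the database
+name is invented for the example:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+build_big_endian_db()
+{
+    Db db(NULL, 0);
+    int ret;
+
+    /* 4,321 selects big-endian metadata; 1,234 would select little-endian. */
+    db.set_lorder(4321);
+    if ((ret = db.open("portable.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>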
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_malloc.html b/bdb/docs/api_cxx/db_set_malloc.html
new file mode 100644
index 00000000000..e38092fcbeb
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_malloc.html
@@ -0,0 +1,103 @@
+<!--$Id: db_set_malloc.so,v 10.18 2000/05/25 13:47:07 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_malloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_malloc</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+};
+int
+Db::set_malloc(db_malloc_fcn_type db_malloc);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the allocation function used by the <a href="../api_cxx/db_class.html">Db</a> methods to allocate
+memory in which to return key/data items to the application.
+<p>The <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag, when specified in the <a href="../api_cxx/dbt_class.html">Dbt</a> object,
+will cause the <a href="../api_cxx/db_class.html">Db</a> methods to allocate and re-allocate memory which
+then becomes the responsibility of the calling application. See <a href="../api_cxx/dbt_class.html">Dbt</a>
+for more information.
+<p>On systems where there may be multiple library versions of malloc (notably
+Windows NT), specifying the <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag will fail because
+the <a href="../api_cxx/db_class.html">Db</a> library will allocate memory from a different heap than
+the application will use to free it. To avoid this problem, the
+Db::set_malloc method can be used to pass Berkeley DB a reference to the
+application's allocation routine, in which case it will be used to
+allocate the memory returned when the <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag is set.
+<p>The method specified must match the calling conventions of the
+ANSI C X3.159-1989 (ANSI C) library routine of the same name.
+<p>The Db::set_malloc interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_malloc method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
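+<p>An illustrative sketch of pairing Db::set_malloc with the
+<a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag;
+the database name and key are invented for the example:
+<p><pre>
+#include &lt;stdlib.h&gt;
+#include &lt;db_cxx.h&gt;
+
+/* The application's allocator, matching the ANSI C malloc conventions. */
+extern "C" void *
+app_malloc(size_t size)
+{
+    return (malloc(size));
+}
+
+int
+get_with_app_malloc()
+{
+    Db db(NULL, 0);
+    Dbt key((void *)"fruit", 5), data;
+    int ret;
+
+    db.set_malloc(app_malloc);    /* Must precede Db::open. */
+    if ((ret = db.open("example.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+        return (ret);
+
+    data.set_flags(DB_DBT_MALLOC);
+    if ((ret = db.get(NULL, &amp;key, &amp;data, 0)) == 0)
+        free(data.get_data());    /* Memory came from app_malloc. */
+    (void)db.close(0);
+    return (ret);
+}
+</pre>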
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_pagesize.html b/bdb/docs/api_cxx/db_set_pagesize.html
new file mode 100644
index 00000000000..114f0578aa7
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_pagesize.html
@@ -0,0 +1,92 @@
+<!--$Id: db_set_pagesize.so,v 10.16 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_pagesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_pagesize</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_pagesize(u_int32_t pagesize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the pages used to hold items in the database, in bytes.
+The minimum page size is 512 bytes and the maximum page size is 64K bytes.
+If the page size is not explicitly set, one is selected based on the
+underlying filesystem I/O block size. The automatically selected size
+has a lower limit of 512 bytes and an upper limit of 16K bytes.
+<p>For information on tuning the Berkeley DB page size, see
+<a href="../ref/am_conf/pagesize.html">Selecting a page size</a>.
+<p>The Db::set_pagesize interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_pagesize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
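+<p>A minimal sketch of selecting an explicit 8KB page size before
+<a href="../api_cxx/db_open.html">Db::open</a>; the size and database name
+are arbitrary example choices:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+build_8k_page_db()
+{
+    Db db(NULL, 0);
+    int ret;
+
+    /* Explicit 8KB pages, within the documented 512-byte to 64KB range. */
+    db.set_pagesize(8 * 1024);
+    if ((ret = db.open("large_items.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>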
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_paniccall.html b/bdb/docs/api_cxx/db_set_paniccall.html
new file mode 100644
index 00000000000..7cd08de4a53
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_paniccall.html
@@ -0,0 +1,76 @@
+<!--$Id: db_set_paniccall.so,v 10.11 2000/07/09 19:12:16 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_paniccall</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_paniccall(
+ void (*db_paniccall_fcn)(DbEnv *dbenv, int errval));
+</pre></h3>
+<h1>Description</h1>
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery. (For example, if Berkeley DB is unable
+to write log records to disk because there is insufficient disk space.)
+In these cases, when the C++ error model has been configured so that the
+individual Berkeley DB methods return error codes (see <a href="../api_cxx/except_class.html">DbException</a> for
+more information), the value <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is returned by Berkeley DB
+methods.
+<p>When such an error occurs, it is often simpler to shut down the application
+rather than attempting to gracefully return up the stack.
+The Db::set_paniccall method is used to specify a method to be called when
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is about to be returned from a Berkeley DB method. When
+called, the <b>dbenv</b> argument will be a reference to the current
+environment, and the <b>errval</b> argument is the error value that would
+have been returned to the calling method.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_paniccall method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a> method.
+<p>The Db::set_paniccall interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The Db::set_paniccall method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
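+<p>For illustration, a panic callback that reports the error value and shuts
+the application down, as described above; the message text, exit status and
+database name are example choices:
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db_cxx.h&gt;
+
+/* Invoked when DB_RUNRECOVERY is about to be returned. */
+void
+app_panic(DbEnv *, int errval)
+{
+    fprintf(stderr, "db panic: error %d\n", errval);
+    exit(1);    /* Shut down; recovery is required before continuing. */
+}
+
+int
+open_with_panic_handler()
+{
+    Db db(NULL, 0);
+    int ret;
+
+    db.set_paniccall(app_panic);    /* May be configured at any time. */
+    if ((ret = db.open("critical.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>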
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_q_extentsize.html b/bdb/docs/api_cxx/db_set_q_extentsize.html
new file mode 100644
index 00000000000..d9c702196b7
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_q_extentsize.html
@@ -0,0 +1,92 @@
+<!--$Id: db_set_q_extentsize.so,v 1.3 2000/11/21 19:25:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_q_extentsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_q_extentsize</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_q_extentsize(u_int32_t extentsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the extents used to hold pages in a Queue database,
+specified as a number of pages. Each extent is created as a separate
+physical file. If no extent size is set, the default behavior is to
+create only a single underlying database file.
+<p>For information on tuning the extent size, see
+<a href="../ref/am_conf/extentsize.html">Selecting a extent size</a>.
+<p>The Db::set_q_extentsize interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_q_extentsize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
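+<p>An illustrative sketch of configuring a Queue database with 1,000-page
+extents; the extent size, record length and file name are invented for the
+example:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+build_extent_queue()
+{
+    Db db(NULL, 0);
+    int ret;
+
+    db.set_q_extentsize(1000);    /* 1,000 pages per extent file. */
+    db.set_re_len(64);            /* Queue records are fixed length. */
+    if ((ret = db.open("work.queue", NULL, DB_QUEUE, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>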
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_re_delim.html b/bdb/docs/api_cxx/db_set_re_delim.html
new file mode 100644
index 00000000000..c88d6e89e06
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_re_delim.html
@@ -0,0 +1,92 @@
+<!--$Id: db_set_re_delim.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_re_delim</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_re_delim</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_re_delim(int re_delim);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the delimiting byte used to mark the end of a record in the backing
+source file for the Recno access method.
+<p>This byte is used for variable-length records if the <b>re_source</b>
+file is specified. If the <b>re_source</b> file is specified and no
+delimiting byte has been specified, &lt;newline&gt; characters (i.e.,
+ASCII 0x0a) are interpreted as end-of-record markers.
+<p>The Db::set_re_delim interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_re_delim method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
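+<p>A minimal sketch of reading a backing file whose records are separated by
+tab characters rather than newlines; the file names are invented for the
+example:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+load_tab_delimited_records()
+{
+    Db db(NULL, 0);
+    static char source[] = "records.txt";
+    int ret;
+
+    db.set_re_delim('\t');        /* Records end at a tab, not a newline. */
+    db.set_re_source(source);
+    if ((ret = db.open("records.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>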
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_re_len.html b/bdb/docs/api_cxx/db_set_re_len.html
new file mode 100644
index 00000000000..7432ced166d
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_re_len.html
@@ -0,0 +1,96 @@
+<!--$Id: db_set_re_len.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_re_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_re_len</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_re_len(u_int32_t re_len);
+</pre></h3>
+<h1>Description</h1>
+<p>For the Queue access method, specify that the records are of length
+<b>re_len</b>.
+<p>For the Recno access method, specify that the records are fixed-length,
+not byte delimited, and are of length <b>re_len</b>.
+<p>Any records added to the database that are less than <b>re_len</b> bytes
+long are automatically padded (see <a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a> for more
+information).
+<p>Any attempt to insert records into the database that are greater than
+<b>re_len</b> bytes long will cause the call to fail immediately and
+return an error.
+<p>The Db::set_re_len interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_re_len method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
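+<p>For illustration, a Recno database configured for fixed-length 80-byte
+records; the length and database name are example choices:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+build_fixed_length_recno()
+{
+    Db db(NULL, 0);
+    int ret;
+
+    /* Every record is exactly 80 bytes; shorter records are padded,
+     * and attempts to store longer records fail. */
+    db.set_re_len(80);
+    if ((ret = db.open("fixed.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>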
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_re_pad.html b/bdb/docs/api_cxx/db_set_re_pad.html
new file mode 100644
index 00000000000..5b9453d0db2
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_re_pad.html
@@ -0,0 +1,90 @@
+<!--$Id: db_set_re_pad.so,v 10.16 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_re_pad</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_re_pad</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_re_pad(int re_pad);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the padding character for short, fixed-length records for the Queue
+and Recno access methods.
+<p>If no pad character is specified, &lt;space&gt; characters (i.e.,
+ASCII 0x20) are used for padding.
+<p>The Db::set_re_pad interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_re_pad method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
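+<p>An illustrative sketch of padding short fixed-length records with nul
+bytes instead of the default space character; the record length and file
+name are invented for the example:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+build_nul_padded_queue()
+{
+    Db db(NULL, 0);
+    int ret;
+
+    db.set_re_len(32);    /* Fixed 32-byte records. */
+    db.set_re_pad(0);     /* Pad short records with nul bytes, not spaces. */
+    if ((ret = db.open("padded.db", NULL, DB_QUEUE, DB_CREATE, 0664)) != 0)
+        return (ret);
+    return (db.close(0));
+}
+</pre>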
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_re_source.html b/bdb/docs/api_cxx/db_set_re_source.html
new file mode 100644
index 00000000000..ea51dde6202
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_re_source.html
@@ -0,0 +1,132 @@
+<!--$Id: db_set_re_source.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_re_source</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_re_source</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_re_source(char *re_source);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the underlying source file for the Recno access method. The purpose
+of the <b>re_source</b> value is to provide fast access and modification
+to databases that are normally stored as flat text files.
+<p>If the <b>re_source</b> field is set, it specifies an underlying flat
+text database file that is read to initialize a transient record number
+index. In the case of variable length records, the records are separated
+as specified by <a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>. For example, standard UNIX
+byte stream files can be interpreted as a sequence of variable length
+records separated by &lt;newline&gt; characters.
+<p>In addition, when cached data would normally be written back to the
+underlying database file (e.g., the <a href="../api_cxx/db_close.html">Db::close</a> or <a href="../api_cxx/db_sync.html">Db::sync</a>
+methods are called), the in-memory copy of the database will be written
+back to the <b>re_source</b> file.
+<p>By default, the backing source file is read lazily, i.e., records are not
+read from the file until they are requested by the application.
+<b>If multiple processes (not threads) are accessing a Recno database
+concurrently and either inserting or deleting records, the backing source
+file must be read in its entirety before more than a single process
+accesses the database, and only that process should specify the backing
+source file as part of the <a href="../api_cxx/db_open.html">Db::open</a> call. See the <a href="../api_cxx/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a>
+flag for more information.</b>
+<p><b>Reading and writing the backing source file specified by <b>re_source</b>
+cannot be transactionally protected because it involves filesystem
+operations that are not part of the Db transaction methodology.</b>
+For this reason, if a temporary database is used to hold the records,
+i.e., a NULL was specified as the <b>file</b> argument to <a href="../api_cxx/db_open.html">Db::open</a>,
+it is possible to lose the contents of the <b>re_source</b> file, e.g.,
+if the system crashes at the right instant.
+If a file is used to hold the database, i.e., a file name was specified
+as the <b>file</b> argument to <a href="../api_cxx/db_open.html">Db::open</a>, normal database
+recovery on that file can be used to prevent information loss,
+although it is still possible that the contents of <b>re_source</b>
+will be lost if the system crashes.
+<p>The <b>re_source</b> file must already exist (but may be zero-length) when
+<a href="../api_cxx/db_open.html">Db::open</a> is called.
+<p>It is not an error to specify a read-only <b>re_source</b> file when
+creating a database, nor is it an error to modify the resulting database.
+However, any attempt to write the changes to the backing source file using
+either the <a href="../api_cxx/db_sync.html">Db::sync</a> or <a href="../api_cxx/db_close.html">Db::close</a> methods will fail, of course.
+Specify the <a href="../api_cxx/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag to the <a href="../api_cxx/db_close.html">Db::close</a> method to stop it
+from attempting to write the changes to the backing file; instead, they
+will be silently discarded.
+<p>For all of the above reasons, the <b>re_source</b> field is generally
+used to specify databases that are read-only for <a href="../api_cxx/db_class.html">Db</a> applications,
+and that are either generated on the fly by software tools, or modified
+using a different mechanism, e.g., a text editor.
+<p>The Db::set_re_source interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_re_source method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
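+<p>For illustration, a Recno database backed by a flat text file; the file
+names, record number and data are invented for the example, and the backing
+file is assumed to already exist as described above:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+update_text_backed_db()
+{
+    Db db(NULL, 0);
+    static char source[] = "fruit.txt";    /* Newline-delimited text file. */
+    Dbt data((void *)"kiwi", 4);
+    db_recno_t recno = 1;
+    Dbt key(&amp;recno, sizeof(recno));
+    int ret;
+
+    db.set_re_source(source);    /* Must precede Db::open. */
+    if ((ret = db.open("fruit.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0)
+        return (ret);
+    if ((ret = db.put(NULL, &amp;key, &amp;data, 0)) != 0)
+        return (ret);
+    return (db.close(0));    /* Writes record 1 back to fruit.txt. */
+}
+</pre>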
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_set_realloc.html b/bdb/docs/api_cxx/db_set_realloc.html
new file mode 100644
index 00000000000..e163a889c33
--- /dev/null
+++ b/bdb/docs/api_cxx/db_set_realloc.html
@@ -0,0 +1,103 @@
+<!--$Id: db_set_realloc.so,v 10.8 2000/05/25 13:47:07 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_realloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::set_realloc</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_realloc_fcn_type)(void *, size_t);
+};
+int
+Db::set_realloc(db_realloc_fcn_type db_realloc_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the realloc function used by the <a href="../api_cxx/db_class.html">Db</a> methods to allocate memory
+in which to return key/data items to the application.
+<p>The <a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> flag, when specified in the <a href="../api_cxx/dbt_class.html">Dbt</a> object,
+will cause the <a href="../api_cxx/db_class.html">Db</a> methods to allocate and re-allocate memory which
+then becomes the responsibility of the calling application. See <a href="../api_cxx/dbt_class.html">Dbt</a>
+for more information.
+<p>On systems where there may be multiple library versions of realloc (notably
+Windows NT), specifying the <a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> flag will fail because
+the <a href="../api_cxx/db_class.html">Db</a> library will allocate memory from a different heap than
+the application will use to free it. To avoid this problem, the
+Db::set_realloc method can be used to pass Berkeley DB a reference to the
+application's allocation routine, in which case it will be used to
+allocate the memory returned when the <a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> flag is set.
+<p>The method specified must match the calling conventions of the
+ANSI C X3.159-1989 (ANSI C) library routine of the same name.
+<p>The Db::set_realloc interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/db_open.html">Db::open</a> interface is called.
+<p>The Db::set_realloc method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
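+<p>For example, an application might pass its own allocation routine as
+follows. This is a sketch only: the function and handle names are
+illustrative, and the Db handle is assumed not to have been opened yet.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;stdlib.h&gt;
+
+extern "C" void *
+app_realloc(void *ptr, size_t len)      /* matches db_realloc_fcn_type */
+{
+        return (realloc(ptr, len));     /* the application's own heap */
+}
+
+void
+configure_allocator(Db *dbp)            /* dbp: an unopened Db handle */
+{
+        /* Must be done before Db::open is called. */
+        (void)dbp->set_realloc(app_realloc);
+}
+</pre>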
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_stat.html b/bdb/docs/api_cxx/db_stat.html
new file mode 100644
index 00000000000..4245fd91704
--- /dev/null
+++ b/bdb/docs/api_cxx/db_stat.html
@@ -0,0 +1,201 @@
+<!--$Id: db_stat.so,v 10.37 2000/10/03 21:55:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::stat</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+};
+int
+Db::stat(void *sp, db_malloc_fcn_type db_malloc, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::stat method creates a statistical structure and
+copies a pointer to it into user-specified memory locations.
+Specifically, if <b>sp</b> is non-NULL, a pointer to the statistics
+for the database is copied into the memory location it references.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_CACHED_COUNTS">DB_CACHED_COUNTS</a><dd>Return a cached count of the keys and records in a database. This flag
+makes it possible for applications to request a possibly approximate key
+and record count without incurring the performance penalty of traversing
+the entire database. The statistics information described for the access
+method <b>XX_nkeys</b> and <b>XX_ndata</b> fields below is filled in,
+but no other information is collected. If the cached information has
+never been set, the fields will be returned set to 0.
+<p><dt><a name="DB_RECORDCOUNT">DB_RECORDCOUNT</a><dd>Return a count of the records in a Btree or Recno Access Method database.
+This flag makes it possible for applications to request a record count
+without incurring the performance penalty of traversing the entire
+database. The statistics information described for the <b>bt_nkeys</b>
+field below is filled in, but no other information is collected.
+<p>This option is only available for Recno databases, or Btree databases
+where the underlying database was created with the <a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+</dl>
+<p>The Db::stat method may access all of the pages in the database,
+incurring a severe performance penalty as well as possibly flushing the
+underlying buffer pool.
+<p>In the presence of multiple threads or processes accessing an active
+database, the information returned by Db::stat may be out-of-date.
+<p>If the database was not opened readonly and the DB_CACHED_COUNTS
+flag was not specified, the cached key and record numbers will be updated
+after the statistical information has been gathered.
+<p>The Db::stat method cannot be transaction protected. For this reason,
+it should be called in a thread of control that has no open cursors or
+active transactions.
+<p>The Db::stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
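+<p>As a brief illustration, an application might retrieve and release the
+statistics for a Btree database as follows. The helper name is an
+assumption of this sketch, and the DB_BTREE_STAT fields used are
+described below.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+
+void
+print_counts(Db *dbp)                   /* dbp: an open Btree database */
+{
+        DB_BTREE_STAT *sp;
+
+        /* A NULL db_malloc means malloc(3) is used, so free(3) releases it. */
+        if (dbp->stat(&amp;sp, NULL, 0) == 0) {
+                printf("keys: %lu, data items: %lu\n",
+                    (unsigned long)sp->bt_nkeys, (unsigned long)sp->bt_ndata);
+                free(sp);
+        }
+}
+</pre>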
+<h3>Hash Statistics</h3>
+<p>In the case of a Hash database,
+the statistics are stored in a structure of type DB_HASH_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t hash_magic;<dd>Magic number that identifies the file as a Hash file.
+<dt>u_int32_t hash_version;<dd>The version of the Hash database.
+<dt>u_int32_t hash_nkeys;<dd>The number of unique keys in the database.
+<dt>u_int32_t hash_ndata;<dd>The number of key/data pairs in the database.
+<dt>u_int32_t hash_pagesize;<dd>The underlying Hash database page (and bucket) size.
+<dt>u_int32_t hash_nelem;<dd>The estimated size of the hash table specified at database creation time.
+<dt>u_int32_t hash_ffactor;<dd>The desired fill factor (number of items per bucket) specified at database
+creation time.
+<dt>u_int32_t hash_buckets;<dd>The number of hash buckets.
+<dt>u_int32_t hash_free;<dd>The number of pages on the free list.
+<dt>u_int32_t hash_bfree;<dd>The number of bytes free on bucket pages.
+<dt>u_int32_t hash_bigpages;<dd>The number of big key/data pages.
+<dt>u_int32_t hash_big_bfree;<dd>The number of bytes free on big item pages.
+<dt>u_int32_t hash_overflows;<dd>The number of overflow pages (overflow pages are pages that contain items
+that did not fit in the main bucket page).
+<dt>u_int32_t hash_ovfl_free;<dd>The number of bytes free on overflow pages.
+<dt>u_int32_t hash_dup;<dd>The number of duplicate pages.
+<dt>u_int32_t hash_dup_free;<dd>The number of bytes free on duplicate pages.
+</dl>
+<h3>Btree and Recno Statistics</h3>
+<p>In the case of a Btree or Recno database,
+the statistics are stored in a structure of type DB_BTREE_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t bt_magic;<dd>Magic number that identifies the file as a Btree database.
+<dt>u_int32_t bt_version;<dd>The version of the Btree database.
+<dt>u_int32_t bt_nkeys;<dd>For the Btree Access Method, the number of unique keys in the database.
+<p>For the Recno Access Method, the number of records in the database.
+<dt>u_int32_t bt_ndata;<dd>For the Btree Access Method, the number of key/data pairs in the database.
+<p>For the Recno Access Method, the number of records in the database. If
+the database has been configured to not re-number records during
+deletion, the number of records will only reflect undeleted records.
+<dt>u_int32_t bt_pagesize;<dd>Underlying database page size.
+<dt>u_int32_t bt_minkey;<dd>The minimum keys per page.
+<dt>u_int32_t bt_re_len;<dd>The length of fixed-length records.
+<dt>u_int32_t bt_re_pad;<dd>The padding byte value for fixed-length records.
+<dt>u_int32_t bt_levels;<dd>Number of levels in the database.
+<dt>u_int32_t bt_int_pg;<dd>Number of database internal pages.
+<dt>u_int32_t bt_leaf_pg;<dd>Number of database leaf pages.
+<dt>u_int32_t bt_dup_pg;<dd>Number of database duplicate pages.
+<dt>u_int32_t bt_over_pg;<dd>Number of database overflow pages.
+<dt>u_int32_t bt_free;<dd>Number of pages on the free list.
+<dt>u_int32_t bt_int_pgfree;<dd>Number of bytes free in database internal pages.
+<dt>u_int32_t bt_leaf_pgfree;<dd>Number of bytes free in database leaf pages.
+<dt>u_int32_t bt_dup_pgfree;<dd>Number of bytes free in database duplicate pages.
+<dt>u_int32_t bt_over_pgfree;<dd>Number of bytes free in database overflow pages.
+</dl>
+<h3>Queue Statistics</h3>
+<p>In the case of a Queue database,
+the statistics are stored in a structure of type DB_QUEUE_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t qs_magic;<dd>Magic number that identifies the file as a Queue file.
+<dt>u_int32_t qs_version;<dd>The version of the Queue file type.
+<dt>u_int32_t qs_nkeys;<dd>The number of records in the database.
+<dt>u_int32_t qs_ndata;<dd>The number of records in the database.
+<dt>u_int32_t qs_pagesize;<dd>Underlying database page size.
+<dt>u_int32_t qs_pages;<dd>Number of pages in the database.
+<dt>u_int32_t qs_re_len;<dd>The length of the records.
+<dt>u_int32_t qs_re_pad;<dd>The padding byte value for the records.
+<dt>u_int32_t qs_pgfree;<dd>Number of bytes free in database pages.
+<dt>u_int32_t qs_start;<dd>Start offset.
+<dt>u_int32_t qs_first_recno;<dd>First undeleted record in the database.
+<dt>u_int32_t qs_cur_recno;<dd>Last allocated record number in the database.
+</dl>
+<p>The Db::stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The Db::stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::stat method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_sync.html b/bdb/docs/api_cxx/db_sync.html
new file mode 100644
index 00000000000..170d99127f5
--- /dev/null
+++ b/bdb/docs/api_cxx/db_sync.html
@@ -0,0 +1,101 @@
+<!--$Id: db_sync.so,v 10.20 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::sync</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::sync(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::sync method flushes any cached information to disk.
+<p>If the database is in memory only, the Db::sync method has no effect and
+will always succeed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>See <a href="../api_cxx/db_close.html">Db::close</a> for a discussion of Berkeley DB and cached data.
+<p>The Db::sync method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if the underlying database still has
+dirty pages in the cache. (The only reason to return
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> is if another thread of control was writing pages
+in the underlying database file at the same time as the
+Db::sync method was being called. For this reason, a return of
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> can normally be ignored, or, in cases where it is
+a possible return value, there may be no reason to call
+Db::sync.)
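+<p>A minimal sketch, assuming an already-open database handle and treating
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> as a
+success, follows; the helper name is illustrative only.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+flush_db(Db *dbp)                       /* dbp: an open database handle */
+{
+        /* The flags argument must be 0. */
+        int ret = dbp->sync(0);
+        if (ret == DB_INCOMPLETE)
+                ret = 0;        /* normally safe to ignore; see above */
+        return (ret);
+}
+</pre>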
+<h1>Errors</h1>
+<p>The Db::sync method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db::sync method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::sync method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_upgrade.html b/bdb/docs/api_cxx/db_upgrade.html
new file mode 100644
index 00000000000..8cbc3561fe3
--- /dev/null
+++ b/bdb/docs/api_cxx/db_upgrade.html
@@ -0,0 +1,135 @@
+<!--$Id: db_upgrade.so,v 10.18 2000/05/01 15:58:04 krinsky Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::upgrade</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::upgrade(const char *file, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::upgrade method upgrades all of the databases included in the
+file <b>file</b>, if necessary. If no upgrade is necessary,
+Db::upgrade always returns success.
+<p><b>Database upgrades are done in place and are destructive; for example, if pages
+need to be allocated and no disk space is available, the database may be
+left corrupted. Backups should be made before databases are upgraded.
+See <a href="../ref/am/upgrade.html">Upgrading databases</a> for more
+information.</b>
+<p>Unlike all other database operations, Db::upgrade may only be done
+on a system with the same byte-order as the database.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd><b>This flag is only meaningful when upgrading databases from
+releases before the Berkeley DB 3.1 release.</b>
+<p>As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release, the
+on-disk format of duplicate data items changed. To correctly upgrade the
+format, applications must specify whether duplicate data items in the
+database are sorted. Specifying the DB_DUPSORT flag
+informs Db::upgrade that the duplicates are sorted; otherwise, they
+are assumed to be unsorted. Incorrectly specifying the value of this flag
+may lead to database corruption.
+<p>Further, because the Db::upgrade method upgrades a physical file
+(including all of the databases it contains), it is not possible to use
+Db::upgrade to upgrade files where some of the databases it
+includes have sorted duplicate data items and some of the databases it
+includes have unsorted duplicate data items. If the file does not have
+more than a single database, or the databases do not support duplicate
+data items, or all of the databases that support duplicate data items
+support the same style of duplicates (either sorted or unsorted),
+Db::upgrade will work correctly as long as the DB_DUPSORT
+flag is correctly specified. Otherwise, the file cannot be upgraded using
+Db::upgrade, and must be upgraded manually by dumping and
+re-loading the databases.
+</dl>
+<p>The Db::upgrade method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
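+<p>A minimal sketch follows; the helper name is illustrative, the Db handle
+is assumed not to be open, and the caller is assumed to know whether the
+databases in the file use sorted duplicates.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+upgrade_file(const char *path, int sorted_dups)
+{
+        Db db(NULL, 0);                 /* standalone, unopened handle */
+
+        /* DB_DUPSORT matters only for pre-3.1 files with sorted duplicates. */
+        return (db.upgrade(path, sorted_dups ? DB_DUPSORT : 0));
+}
+</pre>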
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_cxx/env_open.html">DbEnv::open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db::upgrade
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The Db::upgrade method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The database is not in the same byte-order as the system.
+</dl>
+<p><dl compact>
+<p><dt><a name="DB_OLD_VERSION">DB_OLD_VERSION</a><dd>The database cannot be upgraded by this version of the Berkeley DB software.
+</dl>
+<p>The Db::upgrade method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::upgrade method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/db_verify.html b/bdb/docs/api_cxx/db_verify.html
new file mode 100644
index 00000000000..7e742af4c50
--- /dev/null
+++ b/bdb/docs/api_cxx/db_verify.html
@@ -0,0 +1,150 @@
+<!--$Id: db_verify.so,v 10.3 2000/04/11 15:13:51 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db::verify</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db::verify</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::verify(const char *file,
+ const char *database, ostream *outfile, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::verify method verifies the integrity of all databases in the
+file specified by the file argument, and optionally outputs the databases'
+key/data pairs to a file stream.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_SALVAGE">DB_SALVAGE</a><dd>Write the key/data pairs from all databases in the file to the file stream
+named in
+the <b>outfile</b> argument. The output format is the same as that
+specified for the <a href="../utility/db_dump.html">db_dump</a> utility and can be used as input for
+the <a href="../utility/db_load.html">db_load</a> utility.
+<p>Because the key/data pairs are output in page order as opposed to the sort
+order used by <a href="../utility/db_dump.html">db_dump</a>, using Db::verify to dump key/data
+pairs normally produces less than optimal loads for Btree databases.
+</dl>
+<p>In addition, the following flags may be set by bitwise inclusively <b>OR</b>'ing them into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_AGGRESSIVE">DB_AGGRESSIVE</a><dd>Output <b>all</b> the key/data pairs in the file that can be found.
+By default, Db::verify does not assume corruption. For example,
+if a key/data pair on a page is marked as deleted, it is not then written
+to the output file. When DB_AGGRESSIVE is specified, corruption
+is assumed, and any key/data pair that can be found is written. In this
+case, key/data pairs that are corrupted or have been deleted may appear
+in the output (even if the file being salvaged is in no way corrupt), and
+the output will almost certainly require editing before being loaded into
+a database.
+<p><dt><a name="DB_NOORDERCHK">DB_NOORDERCHK</a><dd>Skip the database checks for btree and duplicate sort order and for
+hashing.
+<p>The Db::verify method normally verifies that btree keys and duplicate
+items are correctly sorted and hash keys are correctly hashed. If the
+file being verified contains multiple databases using differing sorting
+or hashing algorithms, some of them must necessarily fail database
+verification as only one sort order or hash function can be specified
+before Db::verify is called. To verify files with multiple
+databases having differing sorting orders or hashing functions, first
+perform verification of the file as a whole by using the
+DB_NOORDERCHK flag, and then individually verify the sort order
+and hashing function for each database in the file using the
+DB_ORDERCHKONLY flag.
+<p><dt><a name="DB_ORDERCHKONLY">DB_ORDERCHKONLY</a><dd>Perform the database checks for btree and duplicate sort order and for
+hashing, skipped by DB_NOORDERCHK.
+<p>When this flag is specified, a <b>database</b> argument should also be
+specified, indicating the database in the physical file which is to be
+checked. This flag is only safe to use on databases that have already
+successfully been verified using Db::verify with the
+DB_NOORDERCHK flag set.
+</dl>
+<p>The database argument must be set to NULL except when the
+DB_ORDERCHKONLY flag is set.
+<p>The Db::verify method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, 0 on success, and <a href="../ref/program/errorret.html#DB_VERIFY_BAD">DB_VERIFY_BAD</a> if a database is corrupted. When the
+DB_SALVAGE flag is specified, the <a href="../ref/program/errorret.html#DB_VERIFY_BAD">DB_VERIFY_BAD</a> return
+means that some key/data pairs in the file may not have been successfully
+output.
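+<p>A minimal sketch that verifies a file without salvaging it (so no output
+stream is needed) follows; the helper name and the standalone, unopened
+handle are assumptions of the example.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+verify_file(const char *path)
+{
+        Db db(NULL, 0);                 /* handle must not be open */
+
+        /* database is NULL because DB_ORDERCHKONLY is not specified. */
+        return (db.verify(path, NULL, NULL, 0));
+}
+</pre>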
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_cxx/env_open.html">DbEnv::open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db::verify
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The Db::verify method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db::verify method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::verify method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>,
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_malloc.html">Db::set_malloc</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_realloc.html">Db::set_realloc</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbc_class.html b/bdb/docs/api_cxx/dbc_class.html
new file mode 100644
index 00000000000..ac8081d4ab3
--- /dev/null
+++ b/bdb/docs/api_cxx/dbc_class.html
@@ -0,0 +1,49 @@
+<!--$Id: dbc_class.so,v 10.12 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class Dbc { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the Dbc class,
+which provides cursor support for the access methods in Db.
+<p>The Dbc functions are the library interface supporting sequential
+access to the records stored by the access methods of the Berkeley DB library.
+Cursors are created by calling the <a href="../api_cxx/db_cursor.html">Db::cursor</a> method which returns a
+pointer to a Dbc object.
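+<p>For example, a cursor is obtained from an open Db handle and later
+discarded; the helper name in this sketch is illustrative only.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+void
+with_cursor(Db *dbp)                    /* dbp: an open database handle */
+{
+        Dbc *dbcp;
+
+        /* Cursors are created through Db::cursor, not constructed directly. */
+        if (dbp->cursor(NULL, &amp;dbcp, 0) == 0) {
+                /* ... use Dbc::get, Dbc::put, and so on ... */
+                (void)dbcp->close();
+        }
+}
+</pre>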
+<h3>Class</h3>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_close.html">Dbc::close</a>,
+<a href="../api_cxx/dbc_count.html">Dbc::count</a>,
+<a href="../api_cxx/dbc_del.html">Dbc::del</a>,
+<a href="../api_cxx/dbc_dup.html">Dbc::dup</a>,
+<a href="../api_cxx/dbc_get.html">Dbc::get</a>
+and
+<a href="../api_cxx/dbc_put.html">Dbc::put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbc_close.html b/bdb/docs/api_cxx/dbc_close.html
new file mode 100644
index 00000000000..881bc3f7de8
--- /dev/null
+++ b/bdb/docs/api_cxx/dbc_close.html
@@ -0,0 +1,68 @@
+<!--$Id: dbc_close.so,v 10.20 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc::close</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::close(void);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::close method discards the cursor.
+<p>It is possible for the Dbc::close method to return
+<a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, signaling that any enclosing transaction should
+be aborted. If the application is already intending to abort the
+transaction, this error should be ignored, and the application should
+proceed.
+<p>Once Dbc::close has been called, regardless of its return, the
+cursor handle may not be used again.
+<p>The Dbc::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
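+<p>A short sketch of the pattern described above, assuming a handle that
+reports errors by return value and an enclosing transaction that the
+application will abort if a deadlock is reported:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+finish_cursor(Dbc *dbcp)                /* dbcp: an open cursor */
+{
+        int ret = dbcp->close();
+        if (ret == DB_LOCK_DEADLOCK)
+                ret = 0;        /* the transaction will be aborted anyway */
+        /* The cursor handle may not be used again, regardless of ret. */
+        return (ret);
+}
+</pre>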
+<h1>Errors</h1>
+<p>The Dbc::close method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The Dbc::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::close method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_close.html">Dbc::close</a>,
+<a href="../api_cxx/dbc_count.html">Dbc::count</a>,
+<a href="../api_cxx/dbc_del.html">Dbc::del</a>,
+<a href="../api_cxx/dbc_dup.html">Dbc::dup</a>,
+<a href="../api_cxx/dbc_get.html">Dbc::get</a>
+and
+<a href="../api_cxx/dbc_put.html">Dbc::put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbc_count.html b/bdb/docs/api_cxx/dbc_count.html
new file mode 100644
index 00000000000..be8f6b8e601
--- /dev/null
+++ b/bdb/docs/api_cxx/dbc_count.html
@@ -0,0 +1,59 @@
+<!--$Id: dbc_count.so,v 10.4 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc::count</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::count(db_recno_t *countp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::count method returns a count of the number of duplicate data
+items for the key referenced by the
+cursor into the memory location referenced by <b>countp</b>.
+If the underlying database does not support duplicate data items, the call
+will still succeed and a count of 1 will be returned.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>If the <b>cursor</b> argument is not yet initialized, the Dbc::count method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p>Otherwise, the Dbc::count method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
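+<p>For illustration, assuming a cursor already positioned on a key (the
+helper name is an assumption of this sketch):
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+
+void
+show_dup_count(Dbc *dbcp)               /* dbcp: a positioned cursor */
+{
+        db_recno_t count;
+
+        /* flags must be 0; a database without duplicates reports 1. */
+        if (dbcp->count(&amp;count, 0) == 0)
+                printf("duplicates for the current key: %lu\n",
+                    (unsigned long)count);
+}
+</pre>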
+<h1>Errors</h1>
+<p>The Dbc::count method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::count method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_close.html">Dbc::close</a>,
+<a href="../api_cxx/dbc_count.html">Dbc::count</a>,
+<a href="../api_cxx/dbc_del.html">Dbc::del</a>,
+<a href="../api_cxx/dbc_dup.html">Dbc::dup</a>,
+<a href="../api_cxx/dbc_get.html">Dbc::get</a>
+and
+<a href="../api_cxx/dbc_put.html">Dbc::put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbc_del.html b/bdb/docs/api_cxx/dbc_del.html
new file mode 100644
index 00000000000..18f6959a8b3
--- /dev/null
+++ b/bdb/docs/api_cxx/dbc_del.html
@@ -0,0 +1,72 @@
+<!--$Id: dbc_del.so,v 10.23 2000/05/22 20:51:46 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc::del</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::del(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::del method deletes the key/data pair currently referenced by
+the cursor.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The cursor position is unchanged after a delete, and subsequent calls to
+cursor functions expecting the cursor to reference an existing key will
+fail.
+<p>If the element has already been deleted, Dbc::del will return
+<a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If the cursor is not yet initialized, the Dbc::del method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p>Otherwise, the Dbc::del method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
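+<p>A small sketch, assuming a positioned cursor; whether
+<a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a> is an
+error is the application's decision, and this example treats it as success.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+delete_current(Dbc *dbcp)               /* dbcp: a positioned cursor */
+{
+        /* The flags argument must be 0. */
+        int ret = dbcp->del(0);
+        return (ret == DB_KEYEMPTY ? 0 : ret);
+}
+</pre>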
+<h1>Errors</h1>
+<p>The Dbc::del method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_cxx/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag was
+specified to <a href="../api_cxx/env_open.html">DbEnv::open</a>.
+</dl>
+<p>The Dbc::del method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::del method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_close.html">Dbc::close</a>,
+<a href="../api_cxx/dbc_count.html">Dbc::count</a>,
+<a href="../api_cxx/dbc_del.html">Dbc::del</a>,
+<a href="../api_cxx/dbc_dup.html">Dbc::dup</a>,
+<a href="../api_cxx/dbc_get.html">Dbc::get</a>
+and
+<a href="../api_cxx/dbc_put.html">Dbc::put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbc_dup.html b/bdb/docs/api_cxx/dbc_dup.html
new file mode 100644
index 00000000000..5dec52e46a6
--- /dev/null
+++ b/bdb/docs/api_cxx/dbc_dup.html
@@ -0,0 +1,76 @@
+<!--$Id: dbc_dup.so,v 10.8 2000/03/17 01:53:58 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::dup</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc::dup</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::dup(Dbc **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::dup method creates a new cursor that uses the same transaction
+and locker ID as the original cursor. This is useful when an application
+is using locking and requires two or more cursors in the same thread of
+control.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_POSITION">DB_POSITION</a><dd>The newly created cursor is initialized to reference the same position
+in the database as the original cursor and hold the same locks. If the
+DB_POSITION flag is not specified, then the created cursor is
+uninitialized and will behave like a cursor newly created using
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>.
+</dl>
+<p>When using the Berkeley DB Concurrent Data Store product, there can be only one active write cursor
+at a time. For this reason, attempting to duplicate a cursor for which
+the <a href="../api_cxx/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag was specified during creation will return
+an error.
+<p>If the <b>cursor</b> argument is not yet initialized, the Dbc::dup method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p>Otherwise, the Dbc::dup method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
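+<p>For example, to clone an initialized cursor at its current position
+(the helper name is illustrative only):
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+clone_cursor(Dbc *dbcp, Dbc **newp)     /* dbcp: an initialized cursor */
+{
+        /* DB_POSITION: the copy starts at the same key/data pair and locks. */
+        return (dbcp->dup(newp, DB_POSITION));
+}
+</pre>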
+<h1>Errors</h1>
+<p>The Dbc::dup method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>cursor</b> argument was created using the
+<a href="../api_cxx/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag in the Berkeley DB Concurrent Data Store product.
+</dl>
+<p>The Dbc::dup method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::dup method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_close.html">Dbc::close</a>,
+<a href="../api_cxx/dbc_count.html">Dbc::count</a>,
+<a href="../api_cxx/dbc_del.html">Dbc::del</a>,
+<a href="../api_cxx/dbc_dup.html">Dbc::dup</a>,
+<a href="../api_cxx/dbc_get.html">Dbc::get</a>
+and
+<a href="../api_cxx/dbc_put.html">Dbc::put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbc_get.html b/bdb/docs/api_cxx/dbc_get.html
new file mode 100644
index 00000000000..d42a194e514
--- /dev/null
+++ b/bdb/docs/api_cxx/dbc_get.html
@@ -0,0 +1,170 @@
+<!--$Id: dbc_get.so,v 10.46 2001/01/19 17:29:46 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc::get</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::get(Dbt *key, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::get method retrieves key/data pairs from the database. The
+address and length of the key
+are returned in the object referenced by <b>key</b> (except for the case
+of the DB_SET flag where the <b>key</b> object is unchanged),
+and the address and length of
+the data are returned in the object referenced by <b>data</b>.
+<p>Modifications to the database during a sequential scan will be reflected
+in the scan; that is, records inserted behind a cursor will not be returned,
+while records inserted in front of a cursor will be returned.
+<p>In Queue and Recno databases, missing entries (i.e., entries that were
+never explicitly created or that were created and then deleted), will be
+skipped during a sequential scan.
+<p>If multiple threads or processes insert items into the same database file
+without using locking, the results are undefined.
+For more detail,
+see <a href="../ref/am/stability.html">Cursor stability</a>.
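+<p>As an illustration, a complete sequential scan using the DB_NEXT flag
+(described below) might look like the following sketch; the helper name
+and the assumption that keys are printable text are illustrative only.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+
+void
+dump_keys(Db *dbp)                      /* dbp: an open database handle */
+{
+        Dbc *dbcp;
+        Dbt key, data;
+
+        if (dbp->cursor(NULL, &amp;dbcp, 0) != 0)
+                return;
+        /* DB_NEXT starts at the first pair and walks the database in order. */
+        while (dbcp->get(&amp;key, &amp;data, DB_NEXT) == 0)
+                printf("%.*s\n", (int)key.get_size(), (char *)key.get_data());
+        /* The loop ends at DB_NOTFOUND (end of database). */
+        (void)dbcp->close();
+}
+</pre>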
+<p>The <b>flags</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the key/data pair currently referenced by the cursor.
+<p>If the cursor key/data pair was deleted, Dbc::get will return
+<a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If the cursor is not yet initialized, the Dbc::get method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p><dt><a name="DB_FIRST">DB_FIRST</a>, <a name="DB_LAST">DB_LAST</a><dd>The cursor is set to reference the first (last) key/data pair of the
+database, and that pair is returned. In the presence of duplicate key
+values, the first (last) data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, Dbc::get using the
+DB_FIRST (DB_LAST) flags will ignore any keys that exist
+but were never explicitly created by the application or were created and
+later deleted.
+<p>If the database is empty, Dbc::get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_GET_BOTH">DB_GET_BOTH</a><dd>The DB_GET_BOTH flag is identical to the DB_SET flag,
+except that both the key and the data arguments must be matched by the
+key and data item in the database.
+<p><dt><a name="DB_GET_RECNO">DB_GET_RECNO</a><dd>Return the record number associated with the cursor. The record number
+will be returned in <b>data</b> as described in <a href="../api_cxx/dbt_class.html">Dbt</a>. The
+<b>key</b> parameter is ignored.
+<p>For DB_GET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the <a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+<p><dt><a name="DB_JOIN_ITEM">DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup key for
+the primary database, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+<p>For DB_JOIN_ITEM to be specified, the underlying cursor must have
+been returned from the <a href="../api_cxx/db_join.html">Db::join</a> method.
+<p><dt><a name="DB_NEXT">DB_NEXT</a>, <a name="DB_PREV">DB_PREV</a><dd>If the cursor is not yet initialized, DB_NEXT (DB_PREV)
+is identical to DB_FIRST (DB_LAST). Otherwise, the cursor
+is moved to the next (previous) key/data pair of the database, and that
+pair is returned. In the presence of duplicate key values, the value of
+the key may not change.
+<p>If the database is a Queue or Recno database, Dbc::get using the
+DB_NEXT (DB_PREV) flag will skip any keys that exist but
+were never explicitly created by the application or were created and later
+deleted.
+<p>If the cursor is already on the last (first) record in the database,
+Dbc::get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_NEXT_DUP">DB_NEXT_DUP</a><dd>If the next key/data pair of the database is a duplicate record for the
+current key/data pair, the cursor is moved to the next key/data pair of
+the database, and that pair is returned. Otherwise, Dbc::get will
+return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>If the cursor is not yet initialized, the Dbc::get method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p><dt><a name="DB_NEXT_NODUP">DB_NEXT_NODUP</a>, <a name="DB_PREV_NODUP">DB_PREV_NODUP</a><dd>If the cursor is not yet initialized, DB_NEXT_NODUP
+(DB_PREV_NODUP) is identical to DB_FIRST
+(DB_LAST). Otherwise, the cursor is moved to the next (previous)
+non-duplicate key/data pair of the database, and that pair is returned.
+<p>If the database is a Queue or Recno database, Dbc::get using the
+DB_NEXT_NODUP (DB_PREV_NODUP) flags will ignore any keys
+that exist but were never explicitly created by the application or were
+created and later deleted.
+<p>If no non-duplicate key/data pairs occur after (before) the cursor
+position in the database, Dbc::get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Move the cursor to the specified key/data pair of the database, and
+return the datum associated with the given key.
+<p>In the presence of duplicate key values, Dbc::get will return the
+first data item for the given key.
+<p>If the database is a Queue or Recno database and the requested key exists,
+but was never explicitly created by the application or was later deleted,
+Dbc::get will return <a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If no matching keys are found, Dbc::get will return
+<a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_SET_RANGE">DB_SET_RANGE</a><dd>The DB_SET_RANGE flag is identical to the DB_SET flag,
+except that the key is returned as well as the data item, and, in the case
+of the Btree access method, the returned key/data pair is the smallest
+key greater than or equal to the specified key (as determined by the
+comparison method), permitting partial key matches and range
+searches.
+<p><dt><a name="DB_SET_RECNO">DB_SET_RECNO</a><dd>Move the cursor to the specific numbered record of the database, and
+return the associated key/data pair. The <b>data</b> field of the
+specified <b>key</b>
+must be a pointer to a memory location from which a <a href="../api_cxx/dbt_class.html#db_recno_t">db_recno_t</a>
+may be read, as described in <a href="../api_cxx/dbt_class.html">Dbt</a>. This memory location will be
+read to determine the record to be retrieved.
+<p>For DB_SET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the <a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+</dl>
+<p>Otherwise, the Dbc::get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>If Dbc::get fails for any reason, the state of the cursor will be
+unchanged.
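+<p>The following fragment is an informal sketch, not part of the original
+manual page: it shows one common use of Dbc::get, a sequential scan with the
+DB_NEXT flag. The helper name and the assumption that an already-open
+Db handle is passed in are illustrative only, and exact signatures may
+differ between Berkeley DB releases.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+/* Hypothetical helper: walk every key/data pair in the database. */
+int
+dump_all(Db *db)
+{
+    Dbc *cursorp;
+    Dbt key, data;
+    int ret;
+
+    if ((ret = db->cursor(NULL, &cursorp, 0)) != 0)
+        return (ret);
+
+    while ((ret = cursorp->get(&key, &data, DB_NEXT)) == 0) {
+        /*
+         * key.get_data()/key.get_size() and data.get_data()/data.get_size()
+         * reference memory that remains valid only until the next call
+         * using this cursor.
+         */
+    }
+    /* DB_NOTFOUND marks the normal end of the scan. */
+    if (ret == DB_NOTFOUND)
+        ret = 0;
+    (void)cursorp->close();
+    return (ret);
+}
+</pre>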
+<h1>Errors</h1>
+<p>The Dbc::get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>There was insufficient memory to return the requested item.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cursor was not currently initialized.
+</dl>
+<p>The Dbc::get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::get method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_close.html">Dbc::close</a>,
+<a href="../api_cxx/dbc_count.html">Dbc::count</a>,
+<a href="../api_cxx/dbc_del.html">Dbc::del</a>,
+<a href="../api_cxx/dbc_dup.html">Dbc::dup</a>,
+<a href="../api_cxx/dbc_get.html">Dbc::get</a>
+and
+<a href="../api_cxx/dbc_put.html">Dbc::put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbc_put.html b/bdb/docs/api_cxx/dbc_put.html
new file mode 100644
index 00000000000..05a95cd36bc
--- /dev/null
+++ b/bdb/docs/api_cxx/dbc_put.html
@@ -0,0 +1,158 @@
+<!--$Id: dbc_put.so,v 10.33 2000/12/04 17:02:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc::put</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::put(Dbt *key, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::put method stores key/data pairs into the database.
+<p>The <b>flags</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_AFTER">DB_AFTER</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key referenced by the cursor.
+The new element appears immediately after the current cursor position.
+It is an error to specify DB_AFTER if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+DB_AFTER if the underlying Recno database was not created with
+the <a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag. If the <a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag was
+specified, a new key is created, all records after the inserted item
+are automatically renumbered, and the key of the new record is returned
+in the structure referenced by the parameter <b>key</b>. The initial
+value of the <b>key</b> parameter is ignored. See <a href="../api_cxx/db_open.html">Db::open</a>
+for more information.
+<p>The DB_AFTER flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc::put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or a duplicate sort function has been
+specified, the Dbc::put function will return EINVAL.
+<p><dt><a name="DB_BEFORE">DB_BEFORE</a><dd>In the case of the Btree and Hash access methods, insert the data element
+as a duplicate element of the key referenced by the cursor. The new
+element appears immediately before the current cursor position. It is
+an error to specify DB_BEFORE if the underlying Btree or Hash
+database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+DB_BEFORE if the underlying Recno database was not created with
+the <a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag. If the <a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag was
+specified, a new key is created, the current record and all records
+after it are automatically renumbered, and the key of the new record is
+returned in the structure referenced by the parameter <b>key</b>. The
+initial value of the <b>key</b> parameter is ignored. See
+<a href="../api_cxx/db_open.html">Db::open</a> for more information.
+<p>The DB_BEFORE flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc::put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or a duplicate sort function has been
+specified, Dbc::put will return EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Overwrite the data of the key/data pair referenced by the cursor with the
+specified data item. The <b>key</b> parameter is ignored.
+<p>If a duplicate sort function has been specified and the data item of the
+current referenced key/data pair does not compare equally to the <b>data</b>
+parameter, Dbc::put will return EINVAL.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc::put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree, Queue or Recno, the operation
+will succeed.
+<p>If the cursor is not yet initialized, Dbc::put will return EINVAL.
+<p><dt><a name="DB_KEYFIRST">DB_KEYFIRST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database and no duplicate sort function
+has been specified, the inserted data item is added as the first of the
+data items for that key.
+<p>The DB_KEYFIRST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_KEYLAST">DB_KEYLAST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database, and no duplicate sort
+function has been specified, the inserted data item is added as the last
+of the data items for that key.
+<p>The DB_KEYLAST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_NODUPDATA">DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database unless it already exists in the database.
+If the key/data pair already appears in the database, <a href="../api_cxx/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a>
+is returned. The DB_NODUPDATA flag may only be specified if
+the underlying database has been configured to support sorted duplicate
+data items.
+<p>The DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+</dl>
+<p>Otherwise, the Dbc::put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>If Dbc::put fails for any reason, the state of the cursor will be
+unchanged. If Dbc::put succeeds and an item is inserted into the
+database, the cursor is always positioned to reference the newly inserted
+item.
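+<p>The following fragment is an informal sketch, not part of the original
+manual page: it inserts a single key/data pair through a cursor using the
+DB_KEYFIRST flag. The <b>cursorp</b> handle and the key and data values
+are illustrative assumptions.
+<p><pre>
+/* Hypothetical fragment: cursor insert with Dbc::put(DB_KEYFIRST). */
+char keybuf[] = "fruit";
+char databuf[] = "apple";
+
+Dbt key(keybuf, sizeof(keybuf));
+Dbt data(databuf, sizeof(databuf));
+
+int ret = cursorp->put(&key, &data, DB_KEYFIRST);
+if (ret != 0) {
+    /*
+     * Handle the error here; note that with the default configuration
+     * most errors are thrown as exceptions rather than returned, while
+     * conditions such as DB_KEYEXIST are returned.
+     */
+}
+</pre>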
+<h1>Errors</h1>
+<p>The Dbc::put method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_BEFORE or DB_AFTER flags were specified, and the
+underlying access method is Queue.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_cxx/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag was
+specified to <a href="../api_cxx/env_open.html">DbEnv::open</a>.
+</dl>
+<p>The Dbc::put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::put method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_close.html">Dbc::close</a>,
+<a href="../api_cxx/dbc_count.html">Dbc::count</a>,
+<a href="../api_cxx/dbc_del.html">Dbc::del</a>,
+<a href="../api_cxx/dbc_dup.html">Dbc::dup</a>,
+<a href="../api_cxx/dbc_get.html">Dbc::get</a>
+and
+<a href="../api_cxx/dbc_put.html">Dbc::put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbenv_class.html b/bdb/docs/api_cxx/dbenv_class.html
new file mode 100644
index 00000000000..1c59dcbf1a1
--- /dev/null
+++ b/bdb/docs/api_cxx/dbenv_class.html
@@ -0,0 +1,76 @@
+<!--$Id: dbenv_class.so,v 10.20 2000/07/27 13:10:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbEnv {
+public:
+    DbEnv(u_int32_t flags);
+ ~DbEnv();
+ ...
+};
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the DbEnv
+class, which is the center of the Berkeley DB environment.
+<p>The following <b>flags</b> value may be specified:
+<p><dl compact>
+<p><dt><a name="DB_CLIENT">DB_CLIENT</a><dd>Create a client environment to connect to a server.
+<p>The DB_CLIENT flag indicates to the system that this environment
+is remote on a server. The use of this flag causes the environment
+methods to use functions that call a server instead of local functions.
+Prior to making any environment or database method calls, the
+application must call the <a href="../api_cxx/env_set_server.html">DbEnv::set_server</a> function to establish
+the connection to the server.
+<p><dt><a name="DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a><dd>The Berkeley DB C++ API supports two different error behaviors. By default,
+whenever an error occurs an exception is thrown that encapsulates the
+error information. This generally allows for cleaner logic for
+transaction processing, as a try block can surround a single
+transaction. However, if DB_CXX_NO_EXCEPTIONS is specified,
+exceptions are not thrown, instead each individual function returns an
+error code. (A brief sketch contrasting the two models follows this list.)
+</dl>
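+<p>The following fragment is an informal sketch, not part of the original
+manual page: it contrasts the two error models described above. The home
+directory string and the open flags are placeholders.
+<p><pre>
+/* Hypothetical fragment: default (throwing) versus non-throwing handles. */
+DbEnv env_throwing(0);                     /* errors throw DbException */
+
+DbEnv env_returning(DB_CXX_NO_EXCEPTIONS); /* errors are return values */
+int ret = env_returning.open("/path/to/db_home",
+    DB_CREATE | DB_INIT_MPOOL, 0);
+if (ret != 0) {
+    /* Inspect ret; this handle does not throw. */
+}
+</pre>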
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/dbt_class.html b/bdb/docs/api_cxx/dbt_class.html
new file mode 100644
index 00000000000..24d18c60e50
--- /dev/null
+++ b/bdb/docs/api_cxx/dbt_class.html
@@ -0,0 +1,230 @@
+<!--$Id: dbt_class.so,v 10.33 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbt</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class Dbt {
+public:
+ void *get_data() const;
+ void set_data(void *);
+<p>
+ u_int32_t get_size() const;
+ void set_size(u_int32_t);
+<p>
+ u_int32_t get_ulen() const;
+ void set_ulen(u_int32_t);
+<p>
+ u_int32_t get_dlen() const;
+ void set_dlen(u_int32_t);
+<p>
+ u_int32_t get_doff() const;
+ void set_doff(u_int32_t);
+<p>
+ u_int32_t get_flags() const;
+ void set_flags(u_int32_t);
+<p>
+ Dbt(void *data, size_t size);
+ Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+ ~Dbt();
+};
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the Dbt class,
+used to encode keys and data items in a database.
+ <a name="3"><!--meow--></a>
+<h3>Key/Data Pairs</h3>
+<p>Storage and retrieval for the Db access methods are based on
+key/data pairs. Both key and data items are represented by Dbt
+objects. Key and data byte strings may reference strings of zero length
+up to strings of essentially unlimited length. See
+<a href="../ref/program/dbsizes.html">Database limits</a> for more
+information.
+<p>The Dbt class provides simple access to an underlying data structure,
+whose elements can be examined or changed using the <b>set_</b> or
+<b>get_</b> methods. The remainder of the manual page sometimes refers
+to these accesses using the underlying name, e.g., simply <b>ulen</b>
+instead of Dbt::get_ulen and Dbt::set_ulen.
+Dbt can be subclassed, providing a way to associate
+with it additional data, or references to other structures.
+<p>The constructors set all elements of the underlying structure to zero.
+The constructor with two arguments has the effect of setting all elements
+to zero except for the specified <b>data</b> and <b>size</b> elements.
+<p>In the case where the <b>flags</b> structure element is 0, when the
+application is providing Berkeley DB a key or data item to store into the
+database, Berkeley DB expects the <b>data</b> object to point to a byte string
+of <b>size</b> bytes. When returning a key/data item to the application,
+Berkeley DB will store into the <b>data</b> object a pointer to a byte string
+of <b>size</b> bytes, and the memory referenced by the pointer will be
+allocated and managed by Berkeley DB.
+<p>The elements of the structure underlying the Dbt class are defined as follows:
+<p><dl compact>
+<p><dt>void *<a name="data">data</a>;<dd>A pointer to a byte string.
+This element is accessed using Dbt::get_data and
+Dbt::set_data, and may be initialized using one
+of the constructors.
+<p><dt>int offset;<dd>The number of bytes offset into the <b>data</b> array to determine the
+portion of the array actually used.
+This element is accessed using Dbt::get_offset and
+Dbt::set_offset.
+<p><dt>u_int32_t size;<dd>The length of <b>data</b>, in bytes.
+This element is accessed using Dbt::get_size and
+Dbt::set_size, and may be initialized
+using the constructor with two arguments.
+<p><dt>u_int32_t ulen;<dd>The size of the user's buffer (referenced by <b>data</b>), in bytes.
+This location is not written by the Db methods.
+<p>Note that applications can determine the length of a record by setting
+the <b>ulen</b> to 0 and checking the return value found in <b>size</b>.
+See the DB_DBT_USERMEM flag for more information.
+<p>This element is accessed using
+Dbt::get_ulen and Dbt::set_ulen.
+<p><dt>u_int32_t dlen;<dd>The length of the partial record being read or written by the application,
+in bytes.
+See the DB_DBT_PARTIAL flag for more information.
+This element is accessed using
+Dbt::get_dlen, and Dbt::set_dlen.
+<p><dt>u_int32_t doff;<dd>The offset of the partial record being read or written by the application,
+in bytes.
+See the DB_DBT_PARTIAL flag for more information.
+This element is accessed using
+Dbt::get_doff and Dbt::set_doff.
+<p><dt>u_int32_t flags;<dd>This element is accessed using Dbt::get_flags and
+Dbt::set_flags.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_DBT_MALLOC">DB_DBT_MALLOC</a><dd>When this flag is set Berkeley DB will allocate memory for the returned key
+or data item
+(using <b>malloc</b>(3) or the user-specified malloc method) and
+return a pointer to it in the <b>data</b> field of the key or data
+Dbt object. As any allocated memory becomes the responsibility
+of the calling application, the caller must be able to determine if
+memory was allocated.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_REALLOC">DB_DBT_REALLOC</a><dd>When this flag is set Berkeley DB
+will allocate memory for the returned key or data item (using
+<b>realloc</b>(3) or the user-specified realloc method) and return
+a pointer to it in the <b>data</b> field of the key or data Dbt
+object. As any allocated memory becomes the responsibility of the
+calling application, the caller must be able to determine if memory was
+allocated.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_USERMEM">DB_DBT_USERMEM</a><dd>The <b>data</b> field of the key or data object must reference memory
+that is at least <b>ulen</b> bytes in length. If the length of the
+requested item is less than or equal to that number of bytes, the item
+is copied into the memory referenced by the <b>data</b> field.
+Otherwise, the <b>size</b> field is set to the length needed for the
+requested item, and the error ENOMEM is returned.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC and DB_DBT_USERMEM.
+</dl>
+<p>If DB_DBT_MALLOC or DB_DBT_REALLOC is specified, Berkeley DB
+allocates a properly sized byte array to contain the data. This can be
+convenient if you know little about the nature of the data, specifically
+the size of data in the database. However, if your application makes
+repeated calls to retrieve keys or data, you may notice increased garbage
+collection due to this allocation. If you know the maximum size of data
+you are retrieving, you might decrease the memory burden and speed your
+application by allocating your own byte array and using
+DB_DBT_USERMEM. Even if you don't know the maximum size, you can
+use this option and reallocate your array whenever your retrieval API call
+returns an ENOMEM error, or throws an exception encapsulating an ENOMEM
+(see the sketch following this list).
+<p><dl compact>
+<p><dt><a name="DB_DBT_PARTIAL">DB_DBT_PARTIAL</a><dd>Do partial retrieval or storage of an item. If the calling application
+is doing a get, the <b>dlen</b> bytes starting <b>doff</b> bytes from
+the beginning of the retrieved data record are returned as if they
+comprised the entire record. If any or all of the specified bytes do
+not exist in the record, the get is successful and the existing bytes
+or nul bytes are returned.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial retrieval was done using a Dbt having a <b>dlen</b>
+field of 20 and a <b>doff</b> field of 85, the get call would succeed,
+the <b>data</b> field would reference the last 15 bytes of the record,
+and the <b>size</b> field would be set to 15.
+<p>If the calling application is doing a put, the <b>dlen</b> bytes starting
+<b>doff</b> bytes from the beginning of the specified key's data record
+are replaced by the data specified by the <b>data</b> and <b>size</b>
+objects.
+If <b>dlen</b> is smaller than <b>size</b>, the record will grow, and if
+<b>dlen</b> is larger than <b>size</b>, the record will shrink.
+If the specified bytes do not exist, the record will be extended using nul
+bytes as necessary, and the put call will succeed.
+<p>It is an error to attempt a partial put using the <a href="../api_cxx/db_put.html">Db::put</a>
+method in a database that supports duplicate records.
+Partial puts in databases supporting duplicate records must be done
+using a <a href="../api_cxx/dbc_class.html">Dbc</a> method.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+<b>size</b> values in Queue or Recno databases with fixed-length records.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial put was done using a Dbt having a <b>dlen</b>
+field of 20, a <b>doff</b> field of 85, and a <b>size</b> field of 30,
+the resulting record would be 115 bytes in length, where the last 30
+bytes would be those specified by the put call.
+</dl>
+</dl>
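+<p>The following fragment is an informal sketch, not part of the original
+manual page: it retrieves an item into a caller-supplied buffer using the
+DB_DBT_USERMEM flag. The <b>db</b> handle, the key value and the buffer
+size are illustrative assumptions.
+<p><pre>
+/* Hypothetical fragment: retrieval into user-supplied memory. */
+char buf[1024];
+
+Dbt key((void *)"some key", 8);
+Dbt data;
+data.set_data(buf);
+data.set_ulen(sizeof(buf));       /* size of the user's buffer */
+data.set_flags(DB_DBT_USERMEM);
+
+int ret = db->get(NULL, &key, &data, 0);
+/*
+ * If the item does not fit, ENOMEM is returned (or an exception
+ * encapsulating ENOMEM is thrown) and data.get_size() reports the
+ * length needed, so the buffer can be enlarged and the call retried.
+ */
+</pre>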
+ <a name="4"><!--meow--></a> <a name="5"><!--meow--></a>
+<h3>Retrieved key/data permanence</h3>
+<p>When using the non-cursor Berkeley DB calls to retrieve key/data items (e.g.,
+<a href="../api_cxx/db_get.html">Db::get</a>), the memory referenced by the pointer stored into the
+Dbt is only valid until the next call to Berkeley DB using the
+Db handle returned by <a href="../api_cxx/db_open.html">Db::open</a>. (This includes
+<b>any</b> use of the returned Db handle, including by another
+thread of control within the process. For this reason, when multiple
+threads are using the returned DB handle concurrently, one of the
+DB_DBT_MALLOC, DB_DBT_REALLOC or DB_DBT_USERMEM
+flags must be specified for any non-cursor Dbt used for key or
+data retrieval.)
+<p>When using the cursor Berkeley DB calls to retrieve key/data items (e.g.,
+<a href="../api_cxx/dbc_get.html">Dbc::get</a>), the memory referenced by the pointer into the
+Dbt is only valid until the next call to Berkeley DB using the
+<a href="../api_cxx/dbc_class.html">Dbc</a> handle returned by <a href="../api_cxx/db_cursor.html">Db::cursor</a>.
+ <a name="6"><!--meow--></a>
+<h3>Data alignment</h3>
+<p>The Berkeley DB access methods provide no guarantees about key/data byte string
+alignment, and applications are responsible for arranging any necessary
+alignment. The DB_DBT_MALLOC, DB_DBT_REALLOC and
+DB_DBT_USERMEM flags may be used to store returned items in memory
+of arbitrary alignment.
+ <a name="7"><!--meow--></a>
+<h3>Logical Record Numbers</h3>
+<p>In all cases for the Queue and Recno access methods, and when calling the
+<a href="../api_cxx/db_get.html">Db::get</a> and <a href="../api_cxx/dbc_get.html">Dbc::get</a> functions with the
+<a href="../api_cxx/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a> flag specified, the <b>data</b>
+field of the key must be a pointer to a memory location of type
+<b>db_recno_t</b>, as typedef'd in the #include &lt;db_cxx.h&gt; include file.
+This type is a 32-bit unsigned type,
+(which limits the number of logical records in a Queue or Recno database,
+and the maximum logical record which may be directly retrieved from a
+Btree database, to 4,294,967,296). The <b>size</b> field of the key
+should be the size of that type, i.e.,
+in the C programming language, <b>sizeof(db_recno_t)</b>.
+<p>Logical record numbers are 1-based, not 0-based, i.e., the first record
+in the database is record number 1.
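+<p>The following fragment is an informal sketch, not part of the original
+manual page: it builds a key Dbt referencing a db_recno_t, as required for
+logical record number operations such as <a href="../api_cxx/dbc_get.html">Dbc::get</a> with the
+DB_SET_RECNO flag. The <b>cursorp</b> handle and the record number are
+illustrative assumptions.
+<p><pre>
+/* Hypothetical fragment: position a cursor on logical record 42. */
+db_recno_t recno = 42;
+
+Dbt key(&recno, sizeof(recno));   /* key data points at a db_recno_t */
+Dbt data;
+
+int ret = cursorp->get(&key, &data, DB_SET_RECNO);
+</pre>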
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_close.html b/bdb/docs/api_cxx/env_close.html
new file mode 100644
index 00000000000..fc1ae2573a4
--- /dev/null
+++ b/bdb/docs/api_cxx/env_close.html
@@ -0,0 +1,87 @@
+<!--$Id: env_close.so,v 10.21 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::close</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+DbEnv::close(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::close method closes the Berkeley DB environment, freeing any
+allocated resources and closing any underlying subsystems.
+<p>Calling DbEnv::close does not imply closing any databases that were
+opened in the environment.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Where the environment was initialized with the <a href="../api_cxx/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> flag,
+calling DbEnv::close does not release any locks still held by the
+closing process, providing functionality for long-lived locks.
+Processes that wish to have all their locks
+released can do so by issuing the appropriate <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> call.
+<p>Where the environment was initialized with the <a href="../api_cxx/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>
+flag, calling DbEnv::close implies calls to <a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a> for
+any remaining open files in the memory pool that were returned to this
+process by calls to <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>. It does not imply a call to
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a> for those files.
+<p>Where the environment was initialized with the <a href="../api_cxx/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> flag,
+calling DbEnv::close aborts any uncommitted transactions.
+(Applications should not depend on this behavior. If the process has
+already closed a database handle that is necessary to abort an
+uncommitted transaction, the Berkeley DB environment must then require that
+recovery be run before further operations are done, since once a
+transaction exists that cannot be committed or aborted, no future
+checkpoint can ever succeed.)
+<p>In multi-threaded applications, only a single thread may call
+DbEnv::close.
+<p>Once DbEnv::close has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DbEnv::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
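+<p>The following fragment is an informal sketch, not part of the original
+manual page: it reflects the shutdown order implied above, closing database
+handles before the environment. The <b>db</b> and <b>dbenv</b> names are
+illustrative assumptions for already-open handles.
+<p><pre>
+/* Hypothetical fragment: DbEnv::close does not close open databases. */
+(void)db->close(0);
+(void)dbenv->close(0);    /* flags must be 0; handle is unusable afterward */
+</pre>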
+<h1>Errors</h1>
+<p>The DbEnv::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::close method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_open.html b/bdb/docs/api_cxx/env_open.html
new file mode 100644
index 00000000000..53c9908dcb8
--- /dev/null
+++ b/bdb/docs/api_cxx/env_open.html
@@ -0,0 +1,209 @@
+<!--$Id: env_open.so,v 10.61 2000/12/01 15:50:31 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::open</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::open(const char *db_home, u_int32_t flags, int mode);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::open method is the interface for opening the Berkeley DB
+environment. It provides a structure for creating a consistent
+environment for processes using one or more of the features of Berkeley DB.
+<p>The <b>db_home</b> argument to DbEnv::open (and file name
+resolution in general) is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>The <b>flags</b> argument specifies the subsystems that are initialized
+and how the application's environment affects Berkeley DB file naming, among
+other things.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p>As there are a large number of flags that can be specified, they have been
+grouped together by functionality. The first group of flags indicate
+which of the Berkeley DB subsystems should be initialized:
+<p><dl compact>
+<p><dt><a name="DB_JOINENV">DB_JOINENV</a><dd>Join an existing environment. This option allows applications to
+join an existing environment without knowing which Berkeley DB subsystems
+the environment supports.
+<p><dt><a name="DB_INIT_CDB">DB_INIT_CDB</a><dd>Initialize locking for the <a href="../ref/cam/intro.html">Berkeley DB Concurrent Data Store</a>
+product. In this mode, Berkeley DB provides multiple reader/single writer
+access. The only other subsystem that should be specified with the
+DB_INIT_CDB flag is DB_INIT_MPOOL.
+<p>Access method calls are largely unchanged when using this flag, although
+any cursors through which update operations (e.g., <a href="../api_cxx/dbc_put.html">Dbc::put</a>,
+<a href="../api_cxx/dbc_del.html">Dbc::del</a>) will be made must have the <a href="../api_cxx/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> value
+set in the flags parameter to the cursor call that creates the cursor.
+See <a href="../api_cxx/db_cursor.html">Db::cursor</a> for more information.
+<p><dt><a name="DB_INIT_LOCK">DB_INIT_LOCK</a><dd>Initialize the locking subsystem. This subsystem should be used when
+multiple processes or threads are going to be reading and writing a Berkeley DB
+database, so that they do not interfere with each other. If all threads
+are accessing the database(s) read-only, then locking is unnecessary.
+When the DB_INIT_LOCK flag is specified, it is usually necessary to run
+the deadlock detector, as well. See <a href="../utility/db_deadlock.html">db_deadlock</a> and
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a> for more information.
+<p><dt><a name="DB_INIT_LOG">DB_INIT_LOG</a><dd>Initialize the logging subsystem. This subsystem is used when recovery
+from application or system failure is necessary.
+<p>The log is stored in one or more files in the environment directory.
+Each file is named using the format <i>log.NNNNNNNNNN</i>, where
+<i>NNNNNNNNNN</i> is the sequence number of the file within the log.
+For further information, see
+<a href="../ref/log/limits.html">Log File Limits</a>.
+<p>If the log region is being created and log files are already present, the
+log files are reviewed and subsequent log writes are appended
+to the end of the log, rather than overwriting current log entries.
+<p><dt><a name="DB_INIT_MPOOL">DB_INIT_MPOOL</a><dd>Initialize the shared memory buffer pool subsystem. This subsystem is
+used whenever the application is using any Berkeley DB access method.
+<p><dt><a name="DB_INIT_TXN">DB_INIT_TXN</a><dd>Initialize the transaction subsystem. This subsystem is used when
+recovery and atomicity of multiple operations and recovery are important.
+The DB_INIT_TXN flag implies the DB_INIT_LOG flag.
+</dl>
+<p>The second group of flags govern what recovery, if any, is performed when
+the environment is initialized:
+<p><dl compact>
+<p><dt><a name="DB_RECOVER">DB_RECOVER</a><dd>Run normal recovery on this environment before opening it for normal use.
+If this flag is set, the DB_CREATE flag must also be set since the regions
+will be removed and recreated.
+<p><dt><a name="DB_RECOVER_FATAL">DB_RECOVER_FATAL</a><dd>Run catastrophic recovery on this environment before opening it for normal
+use. If this flag is set, the DB_CREATE flag must also be set since the
+regions will be removed and recreated.
+</dl>
+<p>A standard part of the recovery process is to remove the existing Berkeley DB
+environment and create a new one in which to perform recovery. If the
+thread of control performing recovery does not specify the correct region
+initialization information (e.g., the correct memory pool cache size),
+the result can be an application running in an environment with incorrect
+cache and other subsystem sizes. For this reason, the thread of control
+performing recovery should either specify correct configuration
+information before calling the DbEnv::open method, or it should remove
+the environment after recovery is completed, leaving creation of the
+correctly sized environment to a subsequent call to DbEnv::open.
+<p>All Berkeley DB recovery processing must be single-threaded, that is, only a
+single thread of control may perform recovery or access a Berkeley DB
+environment while recovery is being performed. As it is not an error to
+specify DB_RECOVER for an environment for which no recovery is
+required, it is reasonable programming practice for the thread of control
+responsible for performing recovery and creating the environment to always
+specify the DB_RECOVER flag during startup.
+<p>The DbEnv::open function returns successfully if DB_RECOVER
+or DB_RECOVER_FATAL is specified and no log files exist, so it is
+necessary to ensure all necessary log files are present before running
+recovery. For further information, consult <a href="../utility/db_archive.html">db_archive</a> and
+<a href="../utility/db_recover.html">db_recover</a>.
+<p>The third group of flags govern file naming extensions in the environment:
+<p><dl compact>
+<!--$Id: m4.env_flags,v 10.9 2000/06/29 22:54:10 bostic Exp $-->
+<p><dt><a name="DB_USE_ENVIRON">DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, environment information will be used in file
+naming for all users only if the DB_USE_ENVIRON flag is set.
+<p><dt><a name="DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, if the DB_USE_ENVIRON_ROOT flag is set,
+environment information will be used for file naming only for users with
+appropriate permissions (e.g., on UNIX systems, users with a user-ID of 0).
+</dl>
+<p>Finally, there are a few additional, unrelated flags:
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Cause Berkeley DB subsystems to create any underlying files, as necessary.
+<p><dt><a name="DB_LOCKDOWN">DB_LOCKDOWN</a><dd>Lock shared Berkeley DB environment files and memory mapped databases into memory.
+<p><dt><a name="DB_PRIVATE">DB_PRIVATE</a><dd>Specify that the environment will only be accessed by a single process
+(although that process may be multi-threaded). This flag has two effects
+on the Berkeley DB environment. First, all underlying data structures are
+allocated from per-process memory instead of from shared memory that is
+potentially accessible to more than a single process. Second, mutexes
+are only configured to work between threads.
+<p>This flag should not be specified if more than a single process is
+accessing the environment, as it is likely to cause database corruption
+and unpredictable behavior. For example, if both a server application and the
+Berkeley DB utility <a href="../utility/db_stat.html">db_stat</a> will access the environment, the
+DB_PRIVATE flag should not be specified.
+<p><dt><a name="DB_SYSTEM_MEM">DB_SYSTEM_MEM</a><dd>Allocate memory from system shared memory instead of from memory backed
+by the filesystem. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p><dt><a name="DB_THREAD">DB_THREAD</a><dd>Cause the <a href="../api_cxx/dbenv_class.html">DbEnv</a> handle returned by DbEnv::open to be
+<i>free-threaded</i>, that is, useable by multiple threads within a
+single address space.
+</dl>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by Berkeley DB
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p>The DbEnv::open method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
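+<p>The following fragment is an informal sketch, not part of the original
+manual page: it opens (creating if necessary) a transactional environment
+and runs normal recovery first, combining several of the flags described
+above. The helper name and home directory argument are illustrative, and
+exact signatures may differ between Berkeley DB releases.
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+/* Hypothetical helper: open a transactional environment. */
+int
+open_tx_env(DbEnv &dbenv, const char *db_home)
+{
+    u_int32_t flags = DB_CREATE | DB_RECOVER |
+        DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN;
+
+    try {
+        dbenv.open(db_home, flags, 0);
+    } catch (DbException &e) {
+        /* By default, errors are reported by throwing DbException. */
+        return (e.get_errno());
+    }
+    return (0);
+}
+</pre>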
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The environment variable <b>DB_HOME</b> may be used as the path of
+the database home as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+</dl>
+<h1>Errors</h1>
+<p>The DbEnv::open method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The shared memory region was locked and (repeatedly) unavailable.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>
+The DB_THREAD flag was specified and spinlocks are not
+implemented for this architecture.
+<p>The DB_HOME or TMPDIR environment variables were set but empty.
+<p>An incorrectly formatted <b>NAME VALUE</b> entry or line was found.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>HP-UX only: a previously created Berkeley DB environment for this process still
+exists.
+</dl>
+<p>The DbEnv::open method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::open method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_remove.html b/bdb/docs/api_cxx/env_remove.html
new file mode 100644
index 00000000000..58c3ff5de0c
--- /dev/null
+++ b/bdb/docs/api_cxx/env_remove.html
@@ -0,0 +1,129 @@
+<!--$Id: env_remove.so,v 10.24 2000/12/06 14:40:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::remove</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::remove(const char *db_home, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::remove method destroys a Berkeley DB environment, if it is not
+currently in use. The environment regions, including any backing files,
+are removed. Any log or database files and the environment directory are
+not removed.
+<p>The <b>db_home</b> argument to DbEnv::remove is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>If there are processes that have called <a href="../api_cxx/env_open.html">DbEnv::open</a> without
+calling <a href="../api_cxx/env_close.html">DbEnv::close</a> (i.e., there are processes currently using
+the environment), DbEnv::remove will fail without further action,
+unless the <a href="../api_cxx/env_remove.html#DB_FORCE">DB_FORCE</a> flag is set, in which case
+DbEnv::remove will attempt to remove the environment regardless
+of any processes still using it.
+<p>The result of attempting to forcibly destroy the environment when it is
+in use is unspecified. Processes using an environment often maintain open
+file descriptors for shared regions within it. On UNIX systems, the
+environment removal will usually succeed and processes that have already
+joined the region will continue to run in that region without change;
+however, processes attempting to join the environment will either fail or
+create new regions. On other systems (e.g., Windows/NT), where the
+<b>unlink</b>(2) system call will fail if any process has an open
+file descriptor for the file, the region removal will fail.
+<p>Calling DbEnv::remove should not be necessary for most applications,
+as the Berkeley DB environment is cleaned up as part of normal database recovery
+procedures; however, applications may wish to call DbEnv::remove
+as part of application shutdown to free up system resources.
+Specifically, when the <a href="../api_cxx/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag was specified to
+<a href="../api_cxx/env_open.html">DbEnv::open</a>, it may be useful to call DbEnv::remove in order
+to release system shared memory segments that have been allocated.
+<p>In the case of catastrophic or system failure, database recovery must be
+performed (see <a href="../utility/db_recover.html">db_recover</a>), or the <a href="../api_cxx/env_open.html#DB_RECOVER">DB_RECOVER</a> and
+<a href="../api_cxx/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a> flags to <a href="../api_cxx/env_open.html">DbEnv::open</a> must be specified
+when the environment is re-opened. Alternatively, if recovery is not
+required because no database state is maintained across failures, and
+the <a href="../api_cxx/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag was not specified when the environment
+was created, it is possible to clean up an environment by removing all
+of the files in the environment directory that begin with the string
+prefix "__db", as no backing files are created in any other directory.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_FORCE">DB_FORCE</a><dd>If the <a href="../api_cxx/env_remove.html#DB_FORCE">DB_FORCE</a> flag is set, the environment is removed regardless
+of any processes that may still be using it, and no locks are acquired
+during this process. (Generally, the <a href="../api_cxx/env_remove.html#DB_FORCE">DB_FORCE</a> flag is only
+specified when applications were unable to shut down cleanly, and there
+is a risk that an application may have died holding a Berkeley DB lock.)
+<!--$Id: m4.env_flags,v 10.9 2000/06/29 22:54:10 bostic Exp $-->
+<p><dt><a name="DB_USE_ENVIRON">DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, environment information will be used in file
+naming for all users only if the DB_USE_ENVIRON flag is set.
+<p><dt><a name="DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, if the DB_USE_ENVIRON_ROOT flag is set,
+environment information will be used for file naming only for users with
+appropriate permissions (e.g., on UNIX systems, users with a user-ID of 0).
+</dl>
+<p>In multi-threaded applications, only a single thread may call
+DbEnv::remove.
+<p>A <a href="../api_cxx/dbenv_class.html">DbEnv</a> handle which has already been used to open an
+environment should not be used to call the DbEnv::remove method; a new
+<a href="../api_cxx/dbenv_class.html">DbEnv</a> handle should be created for that purpose.
+<p>Once DbEnv::remove has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DbEnv::remove method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
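+<p>As a minimal sketch (the environment home "/database/home" and the
+error handling shown are hypothetical, and handle lifetime details are
+simplified), an application might remove an environment at shutdown
+using a DbEnv handle created only for that purpose:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+
+void
+env_cleanup()
+{
+	// Use a fresh handle; a handle already used to open an
+	// environment should not be used to call DbEnv::remove.
+	DbEnv dbenv(0);
+	try {
+		dbenv.remove("/database/home", 0);
+	} catch (DbException &e) {
+		// For example, EBUSY if the region is still in use
+		// and DB_FORCE was not specified.
+		fprintf(stderr, "DbEnv::remove: %s\n", e.what());
+	}
+	// Regardless of the outcome, the handle is not used again.
+}</pre></blockquote>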
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EBUSY<dd>The shared memory region was in use and the force flag was not set.
+</dl>
+<p>The DbEnv::remove method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::remove method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_cachesize.html b/bdb/docs/api_cxx/env_set_cachesize.html
new file mode 100644
index 00000000000..57ad573cb3f
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_cachesize.html
@@ -0,0 +1,89 @@
+<!--$Id: env_set_cachesize.so,v 10.19 2000/05/20 16:29:11 bostic Exp $-->
+<!--$Id: m4.cachesize,v 10.7 2000/02/11 18:54:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_cachesize</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the database's shared memory buffer pool, i.e., the cache,
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be the
+size of the normal working data set of the application, with some small
+amount of additional memory for unusual situations. (Note, the working
+set is not the same as the number of simultaneously referenced pages, and
+should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures, e.g., some
+releases of Solaris limit the amount of memory that may be allocated
+contiguously by a process. If <b>ncache</b> is 0 or 1, the cache will
+be allocated contiguously in memory. If it is greater than 1, the cache
+will be broken up into <b>ncache</b> equally sized separate pieces of
+memory.
+<p>The DbEnv::set_cachesize interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_cachesize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
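+<p>For example (a sketch; the 10MB figure and the environment home are
+arbitrary), an application with a working set of roughly 10MB might
+configure its cache before opening the environment:
+<p><blockquote><pre>DbEnv dbenv(0);
+
+// 0GB + 10MB, allocated as a single contiguous cache.
+dbenv.set_cachesize(0, 10 * 1024 * 1024, 1);
+
+dbenv.open("/database/home", DB_CREATE | DB_INIT_MPOOL, 0);</pre></blockquote>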
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+<p>The specified cache size was impossibly small.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_data_dir.html b/bdb/docs/api_cxx/env_set_data_dir.html
new file mode 100644
index 00000000000..7c8bd44ff3d
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_data_dir.html
@@ -0,0 +1,80 @@
+<!--$Id: env_set_data_dir.so,v 10.3 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_data_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_data_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_data_dir(const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of the access
+method database files. Paths specified to the <a href="../api_cxx/db_open.html">Db::open</a> function
+will be searched relative to this path. Paths set using this interface
+are additive, and specifying more than one will result in each specified
+directory being searched for database files. If any directories are
+specified, created database files will always be created in the first path
+specified.
+<p>If no database directories are specified, database files can only exist
+in the environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DbEnv::set_data_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_data_dir method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's data directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_data_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
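+<p>As a sketch (the directory names are hypothetical), an application
+keeping its database files under a "data" subdirectory of the
+environment home might configure the environment as follows:
+<p><blockquote><pre>DbEnv dbenv(0);
+
+// Database files are created in, and searched for in, "data".
+dbenv.set_data_dir("data");
+
+dbenv.open("/database/home", DB_CREATE | DB_INIT_MPOOL, 0);</pre></blockquote>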
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_errcall.html b/bdb/docs/api_cxx/env_set_errcall.html
new file mode 100644
index 00000000000..8c59632c684
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_errcall.html
@@ -0,0 +1,76 @@
+<!--$Id: env_set_errcall.so,v 10.16 1999/12/20 08:52:29 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_errcall</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void DbEnv::set_errcall(
+ void (*db_errcall_fcn)(const char *errpfx, char *msg));
+</pre></h3>
+<h1>Description</h1>
+When an error occurs in the Berkeley DB library, an exception is thrown or an
+error value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The DbEnv::set_errcall method is used to enhance the mechanism for reporting error
+messages to the application. In some cases, when an error occurs, Berkeley DB
+will call <b>db_errcall_fcn</b> with additional error information. The
+function must be defined with two arguments; the first will be the prefix
+string (as previously set by <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> or
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), the second will be the error message string.
+It is up to the <b>db_errcall_fcn</b> function to display the error
+message in an appropriate manner.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> method to display
+the additional information via an output stream, or the <a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>
+or <a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> methods to display the additional information via a C
+library FILE *. You should not mix these approaches.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DbEnv::set_errcall interface may be used to configure Berkeley DB at any time
+during the life of the application.
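+<p>A minimal sketch of such a callback (the function name is arbitrary)
+might simply write the prefix and message to the standard error output:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+
+void
+my_errcall(const char *errpfx, char *msg)
+{
+	// Write the prefix (if any) and the error message to stderr.
+	fprintf(stderr, "%s: %s\n", errpfx == NULL ? "db" : errpfx, msg);
+}</pre></blockquote>
+<p>The callback would then be registered with
+"dbenv.set_errcall(my_errcall)".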
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_errfile.html b/bdb/docs/api_cxx/env_set_errfile.html
new file mode 100644
index 00000000000..e9658cd18ec
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_errfile.html
@@ -0,0 +1,77 @@
+<!--$Id: env_set_errfile.so,v 10.17 1999/12/20 08:52:29 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_errfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_errfile</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void DbEnv::set_errfile(FILE *errfile);
+</pre></h3>
+<h1>Description</h1>
+When an error occurs in the Berkeley DB library, an exception is thrown or an
+error value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The DbEnv::set_errfile method is used to enhance the mechanism for reporting error
+messages to the application by setting a C library FILE * to be used for
+displaying additional Berkeley DB error messages. In some cases, when an error
+occurs, Berkeley DB will output an additional error message to the specified
+file reference.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> method to display
+the additional information via an output stream, or the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method to capture the additional error information in
+a way that does not use either output streams or C library FILE *'s. You
+should not mix these approaches.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> or <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), an error string, and
+a trailing &lt;newline&gt; character.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DbEnv::set_errfile interface may be used to configure Berkeley DB at any time
+during the life of the application.
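+<p>For example (a sketch; the file name is arbitrary), an application
+might direct additional error messages to its own log file:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+
+FILE *errfp = fopen("/var/tmp/myapp.errors", "a");
+
+DbEnv dbenv(0);
+dbenv.set_errpfx("myapp");
+dbenv.set_errfile(errfp);</pre></blockquote>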
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_error_stream.html b/bdb/docs/api_cxx/env_set_error_stream.html
new file mode 100644
index 00000000000..18dc192cc77
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_error_stream.html
@@ -0,0 +1,74 @@
+<!--$Id: env_set_error_stream.so,v 10.13 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_error_stream</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_error_stream</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void DbEnv::set_error_stream(class ostream*);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown or an
+<b>errno</b> value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The DbEnv::set_error_stream method is used to enhance the mechanism for
+reporting error messages to the application by setting the C++ ostream
+used for displaying additional Berkeley DB error messages. In some cases,
+when an error occurs, Berkeley DB will output an additional error message to
+the specified stream.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), an error string, and a trailing
+&lt;newline&gt; character.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> method to display
+the additional information via a C library FILE *, or the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method to capture the additional error information in
+a way that does not use either output streams or C library FILE *'s. You
+should not mix these approaches.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
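+<p>A common arrangement (a sketch) is to direct the additional error
+messages to the standard error stream:
+<p><blockquote><pre>#include &lt;iostream.h&gt;
+
+DbEnv dbenv(0);
+dbenv.set_errpfx("myapp");
+dbenv.set_error_stream(&cerr);</pre></blockquote>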
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_errpfx.html b/bdb/docs/api_cxx/env_set_errpfx.html
new file mode 100644
index 00000000000..62167d96ed4
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_errpfx.html
@@ -0,0 +1,60 @@
+<!--$Id: env_set_errpfx.so,v 10.12 1999/12/20 08:52:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_errpfx</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void DbEnv::set_errpfx(const char *errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The DbEnv::set_errpfx method does not copy the memory referenced by the
+<b>errpfx</b> argument; rather, it maintains a reference to it. This
+allows applications to modify the error message prefix at any time,
+without repeatedly calling DbEnv::set_errpfx, but means that the
+memory must be maintained until the handle is closed.
+<p>The DbEnv::set_errpfx interface may be used to configure Berkeley DB at any time
+during the life of the application.
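+<p>For example (a sketch), because the prefix memory is referenced rather
+than copied, it is usually passed as a string constant or other storage
+that remains valid for the life of the handle:
+<p><blockquote><pre>static const char *progname = "myapp";
+
+DbEnv dbenv(0);
+dbenv.set_errpfx(progname);</pre></blockquote>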
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_feedback.html b/bdb/docs/api_cxx/env_set_feedback.html
new file mode 100644
index 00000000000..147a5dc5930
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_feedback.html
@@ -0,0 +1,72 @@
+<!--$Id: env_set_feedback.so,v 10.19 2000/07/09 19:12:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_feedback</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_feedback(
+ void (*db_feedback_fcn)(DbEnv *, int opcode, int pct));
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The DbEnv::set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback method. This method must be declared with
+three arguments: the first will be a reference to the enclosing
+environment, the second a flag value, and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback method to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="DB_RECOVER">DB_RECOVER</a><dd>The environment is being recovered.
+</dl>
+<p>The DbEnv::set_feedback interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DbEnv::set_feedback method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
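+<p>A minimal sketch of such a callback (the function name is arbitrary)
+might report recovery progress on the standard error output:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+
+void
+my_feedback(DbEnv *dbenv, int opcode, int pct)
+{
+	if (opcode == DB_RECOVER)
+		fprintf(stderr, "recovery %d%% complete\n", pct);
+}</pre></blockquote>
+<p>The callback would then be registered with
+"dbenv.set_feedback(my_feedback)".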
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_flags.html b/bdb/docs/api_cxx/env_set_flags.html
new file mode 100644
index 00000000000..ad8f4fc1ce2
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_flags.html
@@ -0,0 +1,87 @@
+<!--$Id-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_flags</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_flags(u_int32_t flags, int onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+If <b>onoff</b> is zero, the specified flags are cleared; otherwise, they
+are set.
+<p><dl compact>
+<p><dt><a name="DB_CDB_ALLDB">DB_CDB_ALLDB</a><dd>For Berkeley DB Concurrent Data Store applications, perform locking on an environment-wide basis
+rather than per-database. This flag may only be used to configure Berkeley DB
+before the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Copy read-only database files in this environment into the local cache
+instead of potentially mapping them into process memory (see the
+description of the <a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a> method for further information).
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log on transaction commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency and
+isolation) properties, but not D (durability); that is, database integrity
+will be maintained, but it is possible that some number of the most
+recently committed transactions may be undone during recovery instead of
+being redone.
+<p>The number of transactions that are potentially at risk is governed by
+how often the log is checkpointed (see <a href="../utility/db_checkpoint.html">db_checkpoint</a> for more
+information) and how many log updates can fit on a single log page.
+</dl>
+<p>The DbEnv::set_flags method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's flag values may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_flags", one or more whitespace characters,
+and the interface flag argument as a string, for example, "set_flags
+DB_TXN_NOSYNC". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
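+<p>For example (a sketch), an application willing to lose the most
+recently committed transactions in exchange for faster commits might
+configure the environment as follows:
+<p><blockquote><pre>DbEnv dbenv(0);
+
+// Trade durability of recent commits for commit throughput.
+dbenv.set_flags(DB_TXN_NOSYNC, 1);</pre></blockquote>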
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lg_bsize.html b/bdb/docs/api_cxx/env_set_lg_bsize.html
new file mode 100644
index 00000000000..fb9efecef3f
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lg_bsize.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_lg_bsize.so,v 10.10 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lg_bsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lg_bsize</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lg_bsize(u_int32_t lg_bsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the in-memory log buffer, in bytes. By default, or if
+the value is set to 0, a size of 32K is used.
+<p>Log information is stored in-memory until the storage space fills up
+or transaction commit forces the information to be flushed to stable
+storage. In the presence of long-running transactions or transactions
+producing large amounts of data, larger buffer sizes can increase
+throughput.
+<p>The DbEnv::set_lg_bsize interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lg_bsize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's log buffer size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_bsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
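+<p>For example (a sketch; the 256KB figure is arbitrary), an application
+generating large transactions might increase the log buffer before
+opening the environment:
+<p><blockquote><pre>DbEnv dbenv(0);
+
+// Use a 256KB in-memory log buffer instead of the 32KB default.
+dbenv.set_lg_bsize(256 * 1024);</pre></blockquote>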
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lg_dir.html b/bdb/docs/api_cxx/env_set_lg_dir.html
new file mode 100644
index 00000000000..9a97eb3fe00
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lg_dir.html
@@ -0,0 +1,76 @@
+<!--$Id: env_set_lg_dir.so,v 10.3 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lg_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lg_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lg_dir(const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>The path of a directory to be used as the location of logging files.
+Log files created by the Log Manager subsystem will be created in this
+directory.
+<p>If no logging directory is specified, log files are created in the
+environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DbEnv::set_lg_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lg_dir method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's logging directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
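+<p>As a sketch (the directory name is hypothetical), an application
+placing its log files on a device separate from its database files
+might configure the environment as follows:
+<p><blockquote><pre>DbEnv dbenv(0);
+
+// Create log files on a separate device.
+dbenv.set_lg_dir("/logdisk/myapp");</pre></blockquote>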
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lg_max.html b/bdb/docs/api_cxx/env_set_lg_max.html
new file mode 100644
index 00000000000..c0f27d19b20
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lg_max.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_lg_max.so,v 10.20 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lg_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lg_max</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lg_max(u_int32_t lg_max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum size of a single file in the log, in bytes. Because
+<a href="../api_cxx/lsn_class.html">DbLsn</a> file offsets are unsigned 4-byte values, the set value may
+not be larger than the maximum unsigned 4-byte value. By default, or if
+the value is set to 0, a size of 10MB is used.
+<p>See <a href="../ref/log/limits.html">Log File Limits</a>
+for more information.
+<p>The DbEnv::set_lg_max interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lg_max method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's log file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_max", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
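+<p>For example (a sketch; the 5MB figure is arbitrary), an application
+wanting smaller individual log files might configure the environment
+as follows:
+<p><blockquote><pre>DbEnv dbenv(0);
+
+// Limit each log file to 5MB instead of the 10MB default.
+dbenv.set_lg_max(5 * 1024 * 1024);</pre></blockquote>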
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+<p>The specified log file size was too large.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lk_conflicts.html b/bdb/docs/api_cxx/env_set_lk_conflicts.html
new file mode 100644
index 00000000000..9ef5e8c7802
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lk_conflicts.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_lk_conflicts.so,v 10.22 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_conflicts</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_conflicts</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_conflicts(u_int8_t *conflicts, int nmodes);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the locking conflicts matrix.
+The <b>conflicts</b> argument
+is an <b>nmodes</b> by <b>nmodes</b> array.
+A non-0 value for the array element:
+<p><blockquote><pre>conflicts[requested_mode][held_mode]</pre></blockquote>
+<p>indicates that requested_mode and held_mode conflict. The
+<i>not-granted</i> mode must be represented by 0.
+<p>If no <b>conflicts</b> value is specified, the conflicts array
+<b>db_rw_conflicts</b> is used; see <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for a description of that array.
+<p>The DbEnv::set_lk_conflicts interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lk_conflicts method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
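+<p>As a sketch, a two-mode system with conventional read/write semantics
+(reads conflict with writes, writes conflict with everything, and mode 0
+is the not-granted mode) could be described as follows:
+<p><blockquote><pre>#define	MY_NGRANT	0	/* Not granted. */
+#define	MY_READ		1	/* Shared. */
+#define	MY_WRITE	2	/* Exclusive. */
+
+static u_int8_t my_conflicts[3][3] = {
+	/*		N  R  W */
+	/* N */	{	0, 0, 0 },
+	/* R */	{	0, 0, 1 },
+	/* W */	{	0, 1, 1 }
+};
+
+DbEnv dbenv(0);
+dbenv.set_lk_conflicts((u_int8_t *)my_conflicts, 3);</pre></blockquote>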
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>No memory was available to copy the conflicts array.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lk_detect.html b/bdb/docs/api_cxx/env_set_lk_detect.html
new file mode 100644
index 00000000000..ee17ce2a46c
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lk_detect.html
@@ -0,0 +1,75 @@
+<!--$Id: env_set_lk_detect.so,v 10.19 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_detect</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_detect(u_int32_t detect);
+</pre></h3>
+<h1>Description</h1>
+<p>Set if the deadlock detector is to be run whenever a lock conflict occurs,
+and specify which transaction should be aborted in the case of a deadlock.
+The specified value must be one of the following:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a><dd>Use the default policy as specified by <a href="../utility/db_deadlock.html">db_deadlock</a>.
+<dt><a name="DB_LOCK_OLDEST">DB_LOCK_OLDEST</a><dd>Abort the oldest transaction.
+<dt><a name="DB_LOCK_RANDOM">DB_LOCK_RANDOM</a><dd>Abort a random transaction involved in the deadlock.
+<dt><a name="DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a><dd>Abort the youngest transaction.
+</dl>
+<p>The DbEnv::set_lk_detect interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lk_detect method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's deadlock detector configuration may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_detect", one or more whitespace characters,
+and the interface <b>detect</b> argument as a string, for example,
+"set_lk_detect DB_LOCK_OLDEST". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
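+<p>For example (a sketch), an application wanting deadlocks resolved as
+soon as they occur, at the expense of the youngest transaction involved,
+might configure the environment as follows:
+<p><blockquote><pre>DbEnv dbenv(0);
+
+// Run the deadlock detector on every lock conflict; abort the
+// youngest transaction involved in any deadlock.
+dbenv.set_lk_detect(DB_LOCK_YOUNGEST);</pre></blockquote>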
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lk_max.html b/bdb/docs/api_cxx/env_set_lk_max.html
new file mode 100644
index 00000000000..7e614d4ac6f
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lk_max.html
@@ -0,0 +1,75 @@
+<!--$Id: env_set_lk_max.so,v 10.21 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_max</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_max(u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p><b>The DbEnv::set_lk_max interface has been deprecated in favor of
+the <a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>, <a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+and <a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a> methods. Please update your applications.</b>
+<p>Set each of the maximum number of locks, lockers and lock objects
+supported by the Berkeley DB lock subsystem to <b>max</b>. This value is
+used by <a href="../api_cxx/env_open.html">DbEnv::open</a> to estimate how much space to allocate for
+various lock-table data structures. For specific information on
+configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DbEnv::set_lk_max interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lk_max method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
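+<p>As a sketch of the suggested update only (the count of 5000 is an
+arbitrary, illustrative value), a call to the deprecated interface can be
+replaced by sizing each lock resource individually:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+void
+size_lock_subsystem(DbEnv &amp;env)
+{
+	// Previously: env.set_lk_max(5000);
+	// Each resource may now be sized independently; identical values
+	// are used here only for illustration.
+	(void)env.set_lk_max_locks(5000);
+	(void)env.set_lk_max_lockers(5000);
+	(void)env.set_lk_max_objects(5000);
+}
+</pre>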
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lk_max_lockers.html b/bdb/docs/api_cxx/env_set_lk_max_lockers.html
new file mode 100644
index 00000000000..9e84c0150fb
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lk_max_lockers.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_lk_max_lockers.so,v 1.2 2000/12/08 22:03:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_max_lockers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_max_lockers</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_max_lockers(u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneous locking entities supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_cxx/env_open.html">DbEnv::open</a> to
+estimate how much space to allocate for various lock-table data
+structures. For specific information on configuring the size of the
+lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DbEnv::set_lk_max_lockers interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lk_max_lockers method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of lockers may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_lockers", one or more whitespace characters,
+and the number of lockers. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
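+<p>For example, a <b>DB_CONFIG</b> file in the environment home directory
+might size the lock subsystem as follows (the counts shown are illustrative
+only):
+<p><pre>
+set_lk_max_lockers 2000
+set_lk_max_locks 5000
+set_lk_max_objects 5000
+</pre>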
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lk_max_locks.html b/bdb/docs/api_cxx/env_set_lk_max_locks.html
new file mode 100644
index 00000000000..4e296e97939
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lk_max_locks.html
@@ -0,0 +1,70 @@
+<!--$Id: env_set_lk_max_locks.so,v 10.1 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_max_locks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_max_locks</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_max_locks(u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of locks supported by the Berkeley DB lock subsystem.
+This value is used by <a href="../api_cxx/env_open.html">DbEnv::open</a> to estimate how much space to
+allocate for various lock-table data structures. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DbEnv::set_lk_max_locks interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lk_max_locks method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_locks", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_lk_max_objects.html b/bdb/docs/api_cxx/env_set_lk_max_objects.html
new file mode 100644
index 00000000000..b196cb92593
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_lk_max_objects.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_lk_max_objects.so,v 1.2 2000/12/08 22:03:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_max_objects</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_max_objects</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_max_objects(u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneously locked objects supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_cxx/env_open.html">DbEnv::open</a> to
+estimate how much space to allocate for various lock-table data
+structures. For specific information on configuring the size of the
+lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DbEnv::set_lk_max_objects interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_lk_max_objects method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of objects may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_objects", one or more whitespace characters,
+and the number of objects. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_mp_mmapsize.html b/bdb/docs/api_cxx/env_set_mp_mmapsize.html
new file mode 100644
index 00000000000..da7b3b5a698
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_mp_mmapsize.html
@@ -0,0 +1,74 @@
+<!--$Id: env_set_mp_mmapsize.so,v 10.18 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_mp_mmapsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_mp_mmapsize</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_mp_mmapsize(size_t mp_mmapsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Files that are opened read-only in the pool (and that satisfy a few other
+criteria) are, by default, mapped into the process address space instead
+of being copied into the local cache. This can result in better-than-usual
+performance, as available virtual memory is normally much larger than the
+local cache, and page faults are faster than page copying on many systems.
+However, in the presence of limited virtual memory it can cause resource
+starvation, and in the presence of large databases, it can result in immense
+process sizes.
+<p>Set the maximum file size, in bytes, for a file to be mapped into the
+process address space. If no value is specified, it defaults to 10MB.
+<p>The DbEnv::set_mp_mmapsize interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_mp_mmapsize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum mapped file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_mp_mmapsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
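+<p>For illustration only, the following sketch (with a hypothetical
+environment home and an arbitrary 8MB limit) lowers the mapped-file
+threshold before the environment is opened:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+open_env_small_mmap()
+{
+	DbEnv env(0);
+
+	// Only map read-only files of 8MB or less into the process
+	// address space; larger files are copied into the cache instead.
+	(void)env.set_mp_mmapsize(8 * 1024 * 1024);
+
+	// Hypothetical environment home directory.
+	return (env.open("/path/to/env", DB_CREATE | DB_INIT_MPOOL, 0));
+}
+</pre>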
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_mutexlocks.html b/bdb/docs/api_cxx/env_set_mutexlocks.html
new file mode 100644
index 00000000000..b728927a2b4
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_mutexlocks.html
@@ -0,0 +1,62 @@
+<!--$Id: env_set_mutexlocks.so,v 10.9 2000/11/17 19:56:52 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_mutexlocks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_mutexlocks</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_mutexlocks(int do_lock);
+</pre></h3>
+<h1>Description</h1>
+<p>Toggle mutex locks. Setting <b>do_lock</b> to a zero value causes
+Berkeley DB to grant all requested mutual exclusion mutexes without regard
+for their availability.
+<p>This functionality should never be used for any other purpose than
+debugging.
+<p>The DbEnv::set_mutexlocks interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DbEnv::set_mutexlocks method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_pageyield.html b/bdb/docs/api_cxx/env_set_pageyield.html
new file mode 100644
index 00000000000..01247edc50b
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_pageyield.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_pageyield.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_pageyield</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_pageyield</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static int
+DbEnv::set_pageyield(int pageyield);
+</pre></h3>
+<h1>Description</h1>
+<p>Yield the processor whenever requesting a page from the cache. Setting
+<b>pageyield</b> to a non-zero value causes Berkeley DB to yield the processor
+any time a thread requests a page from the cache.
+<p>The DbEnv::set_pageyield interface affects the entire application, not a single
+database or database environment.
+<p>While the DbEnv::set_pageyield interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>This functionality should never be used for any other purpose than stress
+testing.
+<p>The DbEnv::set_pageyield method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_paniccall.html b/bdb/docs/api_cxx/env_set_paniccall.html
new file mode 100644
index 00000000000..61950ad0417
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_paniccall.html
@@ -0,0 +1,72 @@
+<!--$Id: env_set_paniccall.so,v 10.14 2000/07/09 19:12:56 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_paniccall</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_paniccall(void (*)(DbEnv *, int));
+</pre></h3>
+<h1>Description</h1>
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery. (For example, if Berkeley DB is unable
+to write log records to disk because there is insufficient disk space.)
+In these cases, when the C++ error model has been configured so that the
+individual Berkeley DB methods return error codes (see <a href="../api_cxx/except_class.html">DbException</a> for
+more information), the value <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is returned by Berkeley DB
+methods.
+<p>In such cases, it is often simpler to shut down the application when
+these errors occur rather than attempting to return gracefully up the stack.
+The DbEnv::set_paniccall method is used to specify a method to be called when
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is about to be returned from a Berkeley DB method. When
+called, the <b>dbenv</b> argument will be a reference to the current
+environment, and the <b>errval</b> argument will be the error value that
+would have been returned to the calling method.
+<p>The DbEnv::set_paniccall interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DbEnv::set_paniccall method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
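+<p>The following is a sketch only (the message text and the handler's
+behavior are illustrative) of installing a panic callback that reports the
+error and terminates the application:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+
+// Called when DB_RUNRECOVERY is about to be returned to the application.
+void
+panic_handler(DbEnv *dbenv, int errval)
+{
+	(void)dbenv;	// Environment handle unused in this sketch.
+	fprintf(stderr, "Berkeley DB panic: %s; shutting down\n",
+	    DbEnv::strerror(errval));
+	exit(1);
+}
+
+void
+install_panic_handler(DbEnv &amp;env)
+{
+	(void)env.set_paniccall(panic_handler);
+}
+</pre>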
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_panicstate.html b/bdb/docs/api_cxx/env_set_panicstate.html
new file mode 100644
index 00000000000..6655003ccc4
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_panicstate.html
@@ -0,0 +1,67 @@
+<!--$Id: env_set_panicstate.so,v 10.2 2001/01/17 15:32:34 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_panicstate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_panicstate</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static int
+DbEnv::set_panicstate(int panic);
+</pre></h3>
+<h1>Description</h1>
+<p>Toggle the Berkeley DB panic state. Setting <b>panic</b> to a non-zero value
+causes Berkeley DB to refuse attempts to call Berkeley DB functions, returning the
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> error.
+<p>The DbEnv::set_panicstate interface affects the entire application, not a single
+database or database environment.
+<p>While the DbEnv::set_panicstate interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The DbEnv::set_panicstate method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_rec_init.html b/bdb/docs/api_cxx/env_set_rec_init.html
new file mode 100644
index 00000000000..96af5948541
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_rec_init.html
@@ -0,0 +1,73 @@
+<!--$Id: env_set_rec_init.so,v 10.9 2000/05/01 21:57:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_recovery_init</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_recovery_init</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_recovery_init(int (*db_recovery_init_fcn)(DbEnv *));
+</pre></h3>
+<h1>Description</h1>
+<p>Applications installing application-specific recovery methods need a way
+to be called before Berkeley DB performs recovery, so that they can add their
+recovery methods to Berkeley DB's own set.
+<p>The DbEnv::set_recovery_init method supports this functionality. The
+<b>db_recovery_init_fcn</b> method must be declared with one
+argument, a reference to the enclosing Berkeley DB environment. This
+method will be called after <a href="../api_cxx/env_open.html">DbEnv::open</a> has been called,
+but before recovery is started.
+<p>If the <b>db_recovery_init_fcn</b> method returns a non-zero value,
+no recovery will be performed and <a href="../api_cxx/env_open.html">DbEnv::open</a> will return the same
+value to its caller.
+<p>The DbEnv::set_recovery_init interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_recovery_init method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
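+<p>For illustration only, a sketch (the callback body is hypothetical) of
+registering a recovery-initialization callback:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+// Hypothetical application-specific recovery setup; called after
+// DbEnv::open but before recovery is started.  Returning non-zero
+// causes DbEnv::open to return that value without performing recovery.
+int
+app_recovery_init(DbEnv *dbenv)
+{
+	(void)dbenv;	// Add application-specific recovery methods here.
+	return (0);
+}
+
+void
+install_recovery_init(DbEnv &amp;env)
+{
+	(void)env.set_recovery_init(app_recovery_init);
+}
+</pre>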
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_region_init.html b/bdb/docs/api_cxx/env_set_region_init.html
new file mode 100644
index 00000000000..f052adaf69e
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_region_init.html
@@ -0,0 +1,80 @@
+<!--$Id: env_set_region_init.so,v 10.10 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_region_init</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_region_init</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static int
+DbEnv::set_region_init(int region_init);
+</pre></h3>
+<h1>Description</h1>
+<p>Page-fault shared regions into memory when initially creating or joining
+a Berkeley DB environment. In some applications, the expense of page-faulting
+the shared memory regions can affect performance; for example, if the
+page-fault occurs while a lock is held, other lock requests can convoy
+and overall throughput may decrease. Setting <b>region_init</b> to a
+non-zero value specifies that shared regions be read or written, as
+appropriate, when the region is joined by the application. This forces
+the underlying virtual memory and file systems to instantiate both the
+necessary memory and the necessary disk space. This can also avoid
+out-of-disk-space failures later on.
+<p>The DbEnv::set_region_init interface affects the entire application, not a single
+database or database environment.
+<p>While the DbEnv::set_region_init interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The DbEnv::set_region_init method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's initial behavior with respect to shared memory regions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_region_init", one or more whitespace characters,
+and the string "1". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
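+<p>Because this is a static, application-wide setting, the sketch below (for
+illustration only; the environment home is hypothetical) enables region
+initialization before any environment handles are created:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+create_env_with_region_init()
+{
+	// Application-wide setting; call before creating any DbEnv or Db
+	// handles.
+	(void)DbEnv::set_region_init(1);
+
+	DbEnv env(0);
+	return (env.open("/path/to/env",
+	    DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0));
+}
+</pre>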
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_server.html b/bdb/docs/api_cxx/env_set_server.html
new file mode 100644
index 00000000000..208c9cc9c3a
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_server.html
@@ -0,0 +1,80 @@
+<!--"@(#)env_set_server.so 10.13 (Sleepycat) 8/25/99"-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_server</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_server(char *host,
+ long cl_timeout, long sv_timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Connects to the DB server on the indicated hostname and sets up a channel
+for communication.
+<p>The <b>cl_timeout</b> argument specifies the number of seconds the client
+should wait for results to come back from the server. Once the timeout
+has expired on any communication with the server, DB_NOSERVER will
+be returned. If this value is zero, a default timeout is used.
+<p>The <b>sv_timeout</b> argument specifies the number of seconds the server
+should allow a client connection to remain idle before assuming that
+client is gone. Once that timeout has been reached, the server releases
+all resources associated with that client connection. Subsequent attempts
+by that client to communicate with the server result in
+DB_NOSERVER_ID, indicating that an invalid identifier has been
+given to the server. This value can be considered a hint to the server.
+The server may alter this value based on its own policies or allowed
+values. If this value is zero, a default timeout is used.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>When the DbEnv::set_server method has been called, any subsequent calls
+to Berkeley DB library interfaces may return either <a name="DB_NOSERVER">DB_NOSERVER</a> or
+<a name="DB_NOSERVER_ID">DB_NOSERVER_ID</a>.
+<p>The DbEnv::set_server method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
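+<p>As a sketch only (the host name is hypothetical, and the environment is
+assumed to have been created as an RPC client, e.g., with the DB_CLIENT
+flag), a client might connect to a server using the default timeouts:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+connect_to_server(DbEnv &amp;env)
+{
+	// Hypothetical server host; zero timeouts select the defaults.
+	char host[] = "db.example.com";
+
+	return (env.set_server(host, 0L, 0L, 0));
+}
+</pre>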
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_shm_key.html b/bdb/docs/api_cxx/env_set_shm_key.html
new file mode 100644
index 00000000000..643bc1afdb3
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_shm_key.html
@@ -0,0 +1,90 @@
+<!--$Id: env_set_shm_key.so,v 10.5 2000/08/09 15:45:52 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_shm_key</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_shm_key</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_shm_key(long shm_key);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify a base segment ID for Berkeley DB environment shared memory regions
+created in system memory on VxWorks or systems supporting X/Open-style
+shared memory interfaces, e.g., UNIX systems supporting
+<b>shmget</b>(2) and related System V IPC interfaces.
+<p>This base segment ID will be used when Berkeley DB shared memory regions are
+first created. It will be incremented by a small integer value each time
+a new shared memory region is created; that is, if the base ID is 35,
+the first shared memory region created will have a segment ID of 35 and
+the next one a segment ID between 36 and 40 or so. A Berkeley DB environment
+always creates a master shared memory region, plus an additional shared
+memory region for each of the subsystems supported by the environment
+(locking, logging, memory pool and transaction), plus an additional
+shared memory region for each additional memory pool cache that is
+supported. Already existing regions with the same segment IDs will be
+removed. See <a href="../ref/env/region.html">Shared Memory Regions</a>
+for more information.
+<p>The intent behind this interface is twofold: first, without it, applications
+have no way to ensure that two Berkeley DB applications don't attempt to use
+the same segment IDs when creating different Berkeley DB environments; second,
+by using the same segment IDs each time the environment is
+created, previously created segments will be removed, and the set of
+segments on the system will not grow without bound.
+<p>The DbEnv::set_shm_key interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_shm_key method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's base segment ID may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_shm_key", one or more whitespace characters,
+and the ID. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
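+<p>For illustration only (the base segment ID of 35 matches the example
+above; the environment home and open flags are hypothetical), an application
+creating its regions in system memory might do the following:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+open_system_mem_env()
+{
+	DbEnv env(0);
+
+	// Base segment ID for the environment's shared memory regions.
+	(void)env.set_shm_key(35);
+
+	// DB_SYSTEM_MEM requests that regions be allocated from system
+	// shared memory rather than from filesystem-backed files.
+	return (env.open("/path/to/env",
+	    DB_CREATE | DB_SYSTEM_MEM | DB_INIT_LOCK | DB_INIT_MPOOL, 0));
+}
+</pre>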
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_tas_spins.html b/bdb/docs/api_cxx/env_set_tas_spins.html
new file mode 100644
index 00000000000..fd21f03d341
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_tas_spins.html
@@ -0,0 +1,73 @@
+<!--$Id: env_set_tas_spins.so,v 10.9 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tas_spins</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_tas_spins</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static int
+DbEnv::set_tas_spins(u_int32_t tas_spins);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify that test-and-set mutexes should spin <b>tas_spins</b> times
+without blocking. The value defaults to 1 on uniprocessor systems and
+to 50 times the number of processors on multiprocessor systems.
+<p>The DbEnv::set_tas_spins interface affects the entire application, not a single
+database or database environment.
+<p>While the DbEnv::set_tas_spins interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The DbEnv::set_tas_spins method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's test-and-set spin count may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tas_spins", one or more whitespace characters,
+and the number of spins. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
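+<p>As an illustrative sketch only (the spin count of 100 is arbitrary), the
+default may be overridden before any handles are created:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+void
+configure_spins()
+{
+	// Application-wide setting; call before creating any DbEnv or Db
+	// handles.
+	(void)DbEnv::set_tas_spins(100);
+}
+</pre>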
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_tmp_dir.html b/bdb/docs/api_cxx/env_set_tmp_dir.html
new file mode 100644
index 00000000000..5993fe8a84a
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_tmp_dir.html
@@ -0,0 +1,92 @@
+<!--$Id: env_set_tmp_dir.so,v 10.3 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tmp_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_tmp_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_tmp_dir(const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of temporary files.
+The files created to back in-memory access method databases will be
+created relative to this path. These temporary files can be quite large,
+depending on the size of the database.
+<p>If no directories are specified, the following alternatives are checked
+in the specified order. The first existing directory path is used for
+all temporary files.
+<p><ol>
+<p><li>The value of the environment variable <b>TMPDIR</b>.
+<li>The value of the environment variable <b>TEMP</b>.
+<li>The value of the environment variable <b>TMP</b>.
+<li>The value of the environment variable <b>TempFolder</b>.
+<li>The value returned by the GetTempPath interface.
+<li>The directory <b>/var/tmp</b>.
+<li>The directory <b>/usr/tmp</b>.
+<li>The directory <b>/temp</b>.
+<li>The directory <b>/tmp</b>.
+<li>The directory <b>C:/temp</b>.
+<li>The directory <b>C:/tmp</b>.
+</ol>
+<p>Note: environment variables are only checked if one of the
+<a href="../api_cxx/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> or <a href="../api_cxx/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags were
+specified.
+<p>Note: the GetTempPath interface is only checked on Win/32 platforms.
+<p>The DbEnv::set_tmp_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_tmp_dir method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's temporary file directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tmp_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
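+<p>For example, the following fragment (added here for illustration; the
+temporary directory, home path, and open flags are placeholders, and error
+handling is abbreviated) configures the directory before the environment is
+opened:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+configure_tmp_dir(DbEnv *dbenv, const char *home)
+{
+    /* Must be called before DbEnv::open. */
+    int ret = dbenv->set_tmp_dir("/var/tmp/bdb-tmp");
+    if (ret != 0)
+        return (ret);
+
+    /* The open flags shown are only an example. */
+    return (dbenv->open(home, DB_CREATE | DB_INIT_MPOOL, 0));
+}
+</pre></blockquote>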
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_tx_max.html b/bdb/docs/api_cxx/env_set_tx_max.html
new file mode 100644
index 00000000000..9189528948c
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_tx_max.html
@@ -0,0 +1,70 @@
+<!--$Id: env_set_tx_max.so,v 10.21 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tx_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_tx_max</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_tx_max(u_int32_t tx_max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of active transactions that are supported by the
+environment. This value bounds the size of backing shared memory regions.
+Note that child transactions must be counted as active until their
+ultimate parent commits or aborts.
+<p>When there are more than the specified number of concurrent transactions,
+calls to <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a> will fail (until some active transactions
+complete). If no value is specified, a default value of 20 is used.
+<p>The DbEnv::set_tx_max interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_tx_max method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of active transactions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tx_max", one or more whitespace characters,
+and the number of transactions. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
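+<p>As an illustrative sketch (not part of the original manual page; the limit
+and open flags are arbitrary examples), the value is raised before the
+environment is opened:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+open_with_tx_max(DbEnv *dbenv, const char *home)
+{
+    /* Allow up to 100 concurrent transactions; must precede DbEnv::open. */
+    int ret = dbenv->set_tx_max(100);
+    if (ret != 0)
+        return (ret);
+
+    return (dbenv->open(home, DB_CREATE | DB_INIT_LOCK |
+        DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}
+</pre></blockquote>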
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_tx_recover.html b/bdb/docs/api_cxx/env_set_tx_recover.html
new file mode 100644
index 00000000000..08ceec64d47
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_tx_recover.html
@@ -0,0 +1,77 @@
+<!--$Id: env_set_tx_recover.so,v 10.26 2000/07/09 19:13:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tx_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_tx_recover</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_tx_recover(int (*)(DbEnv *dbenv,
+ Dbt *log_rec, DbLsn *lsn, db_recops op));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the application's method to be called during transaction abort
+and recovery. This method must return 0 on success and either
+<b>errno</b> or a value outside of the Berkeley DB error name space on
+failure. It takes four arguments:
+<p><dl compact>
+<p><dt>dbenv <dd>A Berkeley DB environment.
+<p><dt>log_rec<dd>A log record.
+<p><dt>lsn<dd>A log sequence number.
+<p><dt>op<dd>One of the following values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a><dd>The log is being read backward to determine which transactions have been
+committed and to abort those operations that were not, undo the operation
+described by the log record.
+<p><dt><a name="DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a><dd>The log is being played forward, redo the operation described by the log
+record.
+<p><dt><a name="DB_TXN_ABORT">DB_TXN_ABORT</a><dd>The log is being read backwards during a transaction abort, undo the
+operation described by the log record.
+</dl>
+</dl>
+<p>The DbEnv::set_tx_recover interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_tx_recover method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
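+<p>The following sketch is added for illustration only; <b>my_recover</b> is a
+hypothetical application callback showing the shape such a method might take
+and how it is installed before <a href="../api_cxx/env_open.html">DbEnv::open</a>:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+my_recover(DbEnv *dbenv, Dbt *log_rec, DbLsn *lsn, db_recops op)
+{
+    switch (op) {
+    case DB_TXN_ABORT:
+    case DB_TXN_BACKWARD_ROLL:
+        /* Undo the operation described by log_rec. */
+        break;
+    case DB_TXN_FORWARD_ROLL:
+        /* Redo the operation described by log_rec. */
+        break;
+    default:
+        break;
+    }
+    return (0);     /* 0 on success; errno or an out-of-range value on failure. */
+}
+
+int
+install_recover(DbEnv *dbenv)
+{
+    /* Must be called before DbEnv::open. */
+    return (dbenv->set_tx_recover(my_recover));
+}
+</pre></blockquote>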
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_tx_timestamp.html b/bdb/docs/api_cxx/env_set_tx_timestamp.html
new file mode 100644
index 00000000000..fa793324ca9
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_tx_timestamp.html
@@ -0,0 +1,66 @@
+<!--$Id: env_set_tx_timestamp.so,v 10.6 2000/12/21 18:33:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tx_timestamp</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_tx_timestamp</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_tx_timestamp(time_t *timestamp);
+</pre></h3>
+<h1>Description</h1>
+<p>Recover to the time specified by <b>timestamp</b> rather than to the most
+current possible date.
+The <b>timestamp</b> argument should be the number of seconds since 0
+hours, 0 minutes, 0 seconds, January 1, 1970, Coordinated Universal Time,
+i.e., the Epoch.
+<p>Once a database environment has been upgraded to a new version of Berkeley DB
+involving a log format change (see <a href="../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>), it is no longer possible to recover
+to a specific time before that upgrade.
+<p>The DbEnv::set_tx_timestamp interface may only be used to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_tx_timestamp method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
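+<p>As a sketch added for illustration (the timestamp value and open flags are
+placeholders), an application recovering to a fixed point in time sets the
+timestamp and then opens the environment with recovery:
+<p><blockquote><pre>
+#include &lt;time.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+recover_to_time(DbEnv *dbenv, const char *home)
+{
+    time_t when = (time_t)978307200;    /* hypothetical: seconds since the Epoch */
+
+    int ret = dbenv->set_tx_timestamp(&when);   /* must precede DbEnv::open */
+    if (ret != 0)
+        return (ret);
+
+    return (dbenv->open(home, DB_CREATE | DB_RECOVER | DB_INIT_LOCK |
+        DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}
+</pre></blockquote>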
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>It is not possible to recover to the specified time using the
+log files currently present in the environment.
+</dl>
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_set_verbose.html b/bdb/docs/api_cxx/env_set_verbose.html
new file mode 100644
index 00000000000..48b2809645e
--- /dev/null
+++ b/bdb/docs/api_cxx/env_set_verbose.html
@@ -0,0 +1,81 @@
+<!--$Id: env_set_verbose.so,v 10.23 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_verbose</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::set_verbose</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_verbose(u_int32_t which, int onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::set_verbose method turns additional informational and
+debugging messages in the Berkeley DB message output on and off. If
+<b>onoff</b> is set to
+non-zero,
+the additional messages are output.
+<p>The <b>which</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_VERB_CHKPOINT">DB_VERB_CHKPOINT</a><dd>Display checkpoint location information when searching the log for
+checkpoints.
+<p><dt><a name="DB_VERB_DEADLOCK">DB_VERB_DEADLOCK</a><dd>Display additional information when doing deadlock detection.
+<p><dt><a name="DB_VERB_RECOVERY">DB_VERB_RECOVERY</a><dd>Display additional information when performing recovery.
+<p><dt><a name="DB_VERB_WAITSFOR">DB_VERB_WAITSFOR</a><dd>Display the waits-for table when doing deadlock detection.
+</dl>
+<p>The DbEnv::set_verbose interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DbEnv::set_verbose method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's verbosity may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_verbose", one or more whitespace characters,
+and the interface <b>which</b> argument as a string, for example,
+"set_verbose DB_VERB_CHKPOINT". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
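+<p>For example (an illustrative fragment, not part of the original page),
+deadlock-detection messages can be toggled at any point in the application:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+void
+toggle_deadlock_messages(DbEnv *dbenv, int onoff)
+{
+    /* May be called at any time during the life of the application. */
+    (void)dbenv->set_verbose(DB_VERB_DEADLOCK, onoff);
+}
+</pre></blockquote>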
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_strerror.html b/bdb/docs/api_cxx/env_strerror.html
new file mode 100644
index 00000000000..e1572018be3
--- /dev/null
+++ b/bdb/docs/api_cxx/env_strerror.html
@@ -0,0 +1,62 @@
+<!--$Id: env_strerror.so,v 8.4 2000/07/30 17:59:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::strerror</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::strerror</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static char *
+DbEnv::strerror(int error);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::strerror method returns an error message string corresponding
+to the error number <b>error</b>. This interface is a superset of the
+ANSI C X3.159-1989 (ANSI C) <b>strerror</b>(3) interface. If the error number
+<b>error</b> is greater than or equal to 0, then the string returned by
+the system interface <b>strerror</b>(3) is returned. If the error
+number is less than 0, an error string appropriate to the corresponding
+Berkeley DB library error is returned. See
+<a href="../ref/program/errorret.html">Error returns to applications</a>
+for more information.
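+<p>A small example, added here for illustration:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+report(int ret)
+{
+    /*
+     * Works for system errors (>= 0) and for Berkeley DB's own
+     * negative error values such as DB_NOTFOUND.
+     */
+    printf("error %d: %s\n", ret, DbEnv::strerror(ret));
+}
+</pre></blockquote>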
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/env_version.html b/bdb/docs/api_cxx/env_version.html
new file mode 100644
index 00000000000..8d40aa8c5df
--- /dev/null
+++ b/bdb/docs/api_cxx/env_version.html
@@ -0,0 +1,59 @@
+<!--$Id: env_version.so,v 10.13 1999/12/20 08:52:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::version</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::version</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static char *
+DbEnv::version(int *major, int *minor, int *patch);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::version method returns a pointer to a string containing Berkeley DB
+version information. If <b>major</b> is non-NULL, the major version
+of the Berkeley DB release is stored in the memory it references. If
+<b>minor</b> is non-NULL, the minor version of the Berkeley DB release is
+stored in the memory it references. If <b>patch</b> is non-NULL, the
+patch version of the Berkeley DB release is stored in the memory it references.
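+<p>An illustrative fragment (added here, not part of the original page) that
+prints the library version:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+print_version()
+{
+    int major, minor, patch;
+    char *ver = DbEnv::version(&major, &minor, &patch);
+
+    printf("%s (%d.%d.%d)\n", ver, major, minor, patch);
+}
+</pre></blockquote>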
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/db_err.html">DbEnv::err</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>,
+<a href="../api_cxx/env_version.html">DbEnv::version</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_mutexlocks.html">DbEnv::set_mutexlocks</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+and
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/except_class.html b/bdb/docs/api_cxx/except_class.html
new file mode 100644
index 00000000000..063bede9530
--- /dev/null
+++ b/bdb/docs/api_cxx/except_class.html
@@ -0,0 +1,64 @@
+<!--$Id: except_class.so,v 10.15 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbException</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbException {
+ DbException(int err);
+ DbException(const char *description);
+ DbException(const char *prefix, int err);
+ DbException(const char *prefix1, const char *prefix2, int err);
+};
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbException class and how it is used
+by the various Berkeley DB classes.
+<p>Most methods in the Berkeley DB classes return an int but can also throw an
+exception. This allows for two different error behaviors. By default,
+the Berkeley DB C++ API is configured to throw an exception whenever a serious
+error occurs. This generally allows for cleaner logic for transaction
+processing, as a try block can surround a single transaction.
+Alternatively, Berkeley DB can be configured to not throw exceptions, and
+instead have the individual function return an error code, by setting
+the constructor flags for the Db and <a href="../api_cxx/dbenv_class.html">DbEnv</a> objects.
+<p>A DbException object contains an informational string and an errno.
+The errno can be obtained by using <a href="../api_cxx/get_errno.html">DbException::get_errno</a>.
+The informational string can be obtained by using <a href="../api_cxx/what.html">DbException::what</a>.
+<p>We expect in the future that this class will inherit from the standard
+class exception, but certain language implementation bugs currently
+prevent this on some platforms.
+<p>Some methods may return non-zero values without issuing an exception.
+This occurs in situations that are not normally considered an error, but
+when some informational status is returned. For example, <a href="../api_cxx/db_get.html">Db::get</a>
+returns DB_NOTFOUND when a requested key does not appear in the database.
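+<p>The following sketch is added for illustration; <b>db</b> is assumed to be
+an already-opened Db handle and the key is a placeholder. It shows the usual
+try/catch pattern and the informational DB_NOTFOUND return described above:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+lookup(Db *db, const char *keystr)
+{
+    try {
+        Dbt key((void *)keystr, (u_int32_t)strlen(keystr));
+        Dbt data;
+
+        int ret = db->get(NULL, &key, &data, 0);
+        if (ret == DB_NOTFOUND)
+            printf("key not found (no exception thrown)\n");
+    } catch (DbException &e) {
+        /* Serious errors arrive here when exceptions are enabled. */
+        printf("Db::get: %s (errno %d)\n", e.what(), e.get_errno());
+    }
+}
+</pre></blockquote>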
+<h3>Class</h3>
+<a href="../api_cxx/except_class.html">DbException</a>
+<h1>See Also</h1>
+<a href="../api_cxx/get_errno.html">DbException::get_errno</a>
+and
+<a href="../api_cxx/what.html">DbException::what</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/get_errno.html b/bdb/docs/api_cxx/get_errno.html
new file mode 100644
index 00000000000..25c639ac2d6
--- /dev/null
+++ b/bdb/docs/api_cxx/get_errno.html
@@ -0,0 +1,43 @@
+<!--$Id: get_errno.so,v 10.8 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbException::get_errno</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbException::get_errno</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+const int
+DbException::get_errno();
+</pre></h3>
+<h1>Description</h1>
+<p>A DbException object contains an informational string and an errno.
+The errno can be obtained by using DbException::get_errno.
+The informational string can be obtained by using <a href="../api_cxx/what.html">DbException::what</a>.
+<h3>Class</h3>
+<a href="../api_cxx/except_class.html">DbException</a>
+<h1>See Also</h1>
+<a href="../api_cxx/get_errno.html">DbException::get_errno</a>
+and
+<a href="../api_cxx/what.html">DbException::what</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/lock_class.html b/bdb/docs/api_cxx/lock_class.html
new file mode 100644
index 00000000000..c0aa324d9f0
--- /dev/null
+++ b/bdb/docs/api_cxx/lock_class.html
@@ -0,0 +1,61 @@
+<!--$Id: lock_class.so,v 10.13 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLock</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbLock</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbLock {
+public:
+ DbLock();
+ DbLock(const DbLock &);
+ DbLock &operator = (const DbLock &);
+ ~DbLock();
+};
+</pre></h3>
+<h1>Description</h1>
+<p>The <a href="../api_cxx/dbenv_class.html">DbEnv</a> lock methods and the DbLock class are used
+to provide general-purpose locking. While designed to work with the
+other Db classes, they are also useful for more general locking
+purposes. Locks can be shared between processes.
+<p>In most cases, when multiple threads or processes are using locking, the
+deadlock detector, <a href="../utility/db_deadlock.html">db_deadlock</a> should be run.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/lock_detect.html b/bdb/docs/api_cxx/lock_detect.html
new file mode 100644
index 00000000000..889d0f52048
--- /dev/null
+++ b/bdb/docs/api_cxx/lock_detect.html
@@ -0,0 +1,73 @@
+<!--$Id: lock_detect.so,v 10.26 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::lock_detect</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_detect method runs one iteration of the deadlock detector.
+The deadlock detector traverses the lock table, and for each deadlock
+it finds, marks one of the participating transactions for abort.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_LOCK_CONFLICT">DB_LOCK_CONFLICT</a><dd>Only run the deadlock detector if a lock conflict has occurred since
+the last time that the deadlock detector was run.
+</dl>
+<p>The <b>atype</b> parameter specifies which transaction to abort in the
+case of deadlock. It must be set to one of the possible arguments listed for
+the <a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a> interface.
+<p>If the <b>aborted</b> parameter is non-NULL, the memory location it
+references will be set to the number of transactions aborted by the
+DbEnv::lock_detect method.
+<p>The DbEnv::lock_detect method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
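+<p>An illustrative fragment, not part of the original page (DB_LOCK_DEFAULT is
+one of the abort policies listed for <a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>):
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+run_detector(DbEnv *dbenv)
+{
+    int aborted = 0;
+
+    int ret = dbenv->lock_detect(0, DB_LOCK_DEFAULT, &aborted);
+    if (ret == 0)
+        printf("deadlock detector aborted %d transaction(s)\n", aborted);
+    return (ret);
+}
+</pre></blockquote>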
+<h1>Errors</h1>
+<p>The DbEnv::lock_detect method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_detect method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/lock_get.html b/bdb/docs/api_cxx/lock_get.html
new file mode 100644
index 00000000000..4dae9f5dc67
--- /dev/null
+++ b/bdb/docs/api_cxx/lock_get.html
@@ -0,0 +1,94 @@
+<!--$Id: lock_get.so,v 10.28 2000/04/24 16:33:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::lock_get</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_get(u_int32_t locker, u_int32_t flags,
+ const Dbt *obj, const db_lockmode_t lock_mode, DbLock *lock);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_get method acquires a lock from the lock table, returning
+information about it in
+the <b>lock</b> argument.
+<p>The <b>locker</b> argument specified to DbEnv::lock_get is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the lock.
+<p>The <b>flags</b> value must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with an
+existing lock, return immediately instead of waiting for the lock to
+become available.
+</dl>
+<p>The <b>obj</b> argument is an untyped byte string that specifies the
+object to be locked or released.
+<p>The <b>lock_mode</b> argument is an index into the environment's lock conflict
+array. See <a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a> and
+<a href="../ref/lock/stdmode.html">Standard Lock Modes</a>
+for a description of that array.
+<p>The DbEnv::lock_get method may
+return or throw an exception encapsulating
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a><dd>A lock was requested that could not be immediately granted and the
+<b>flags</b> parameter was set to DB_LOCK_NOWAIT.
+</dl>
+<p>Otherwise, the DbEnv::lock_get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
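+<p>A sketch added for illustration (the object name is a placeholder and
+DB_LOCK_WRITE assumes the standard lock modes): it obtains a locker ID,
+acquires a write lock without waiting, and then releases it:
+<p><blockquote><pre>
+#include &lt;string.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+lock_example(DbEnv *dbenv)
+{
+    u_int32_t locker;
+    DbLock lock;
+    int ret;
+
+    if ((ret = dbenv->lock_id(&locker)) != 0)
+        return (ret);
+
+    const char *name = "example-object";    /* hypothetical lock object */
+    Dbt obj((void *)name, (u_int32_t)strlen(name));
+
+    ret = dbenv->lock_get(locker, DB_LOCK_NOWAIT, &obj, DB_LOCK_WRITE, &lock);
+    if (ret == 0)
+        ret = lock.put(dbenv);              /* release the lock */
+    return (ret);
+}
+</pre></blockquote>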
+<h1>Errors</h1>
+<p>The DbEnv::lock_get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>The DbEnv::lock_get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_get method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/lock_id.html b/bdb/docs/api_cxx/lock_id.html
new file mode 100644
index 00000000000..72ab2a274db
--- /dev/null
+++ b/bdb/docs/api_cxx/lock_id.html
@@ -0,0 +1,61 @@
+<!--$Id: lock_id.so,v 10.19 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::lock_id</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_id(u_int32_t *idp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_id method
+copies a locker ID, which is guaranteed to be unique in the specified lock
+table, into the memory location referenced by <b>idp</b>.
+<p>The DbEnv::lock_id method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::lock_id method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_id method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/lock_put.html b/bdb/docs/api_cxx/lock_put.html
new file mode 100644
index 00000000000..2875e4cfed4
--- /dev/null
+++ b/bdb/docs/api_cxx/lock_put.html
@@ -0,0 +1,63 @@
+<!--$Id: lock_put.so,v 10.21 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLock::put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbLock::put</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbLock::put(DbEnv *env);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLock::put method releases <b>lock</b> from the lock table.
+<p>The DbLock::put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbLock::put method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbLock::put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLock::put method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/lock_stat.html b/bdb/docs/api_cxx/lock_stat.html
new file mode 100644
index 00000000000..87bdc9d75a4
--- /dev/null
+++ b/bdb/docs/api_cxx/lock_stat.html
@@ -0,0 +1,98 @@
+<!--$Id: lock_stat.so,v 10.30 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::lock_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+};
+int
+DbEnv::lock_stat(DB_LOCK_STAT **statp, db_malloc_fcn_type db_malloc);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_stat method
+creates a statistical structure and copies a pointer to it into a
+user-specified memory location.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The lock region statistics are stored in a structure of type
+DB_LOCK_STAT. The following DB_LOCK_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_lastid;<dd>The last allocated lock ID.
+<dt>u_int32_t st_nmodes;<dd>The number of lock modes.
+<dt>u_int32_t st_maxlocks;<dd>The maximum number of locks possible.
+<dt>u_int32_t st_maxlockers;<dd>The maximum number of lockers possible.
+<dt>u_int32_t st_maxobjects;<dd>The maximum number of objects possible.
+<dt>u_int32_t st_nlocks;<dd>The number of current locks.
+<dt>u_int32_t st_maxnlocks;<dd>The maximum number of locks at any one time.
+<dt>u_int32_t st_nlockers;<dd>The number of current lockers.
+<dt>u_int32_t st_maxnlockers;<dd>The maximum number of lockers at any one time.
+<dt>u_int32_t st_nobjects;<dd>The number of current objects.
+<dt>u_int32_t st_maxnobjects;<dd>The maximum number of objects at any one time.
+<dt>u_int32_t st_nrequests;<dd>The total number of locks requested.
+<dt>u_int32_t st_nreleases;<dd>The total number of locks released.
+<dt>u_int32_t st_nnowaits;<dd>The total number of lock requests that failed because
+<a href="../api_cxx/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a> was set.
+<dt>u_int32_t st_nconflicts;<dd>The total number of locks not immediately available due to conflicts.
+<dt>u_int32_t st_ndeadlocks;<dd>The number of deadlocks detected.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DbEnv::lock_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
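+<p>An illustrative fragment (not part of the original page) that lets the
+library allocate the structure with <b>malloc</b>(3) and frees it afterward:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+print_lock_stats(DbEnv *dbenv)
+{
+    DB_LOCK_STAT *sp;
+    int ret;
+
+    /* Pass NULL so the library allocates the structure with malloc(3). */
+    if ((ret = dbenv->lock_stat(&sp, NULL)) != 0)
+        return (ret);
+
+    printf("current locks: %lu, deadlocks detected: %lu\n",
+        (unsigned long)sp->st_nlocks, (unsigned long)sp->st_ndeadlocks);
+
+    free(sp);       /* a single free releases the whole structure */
+    return (0);
+}
+</pre></blockquote>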
+<h1>Errors</h1>
+<p>The DbEnv::lock_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_stat method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/lock_vec.html b/bdb/docs/api_cxx/lock_vec.html
new file mode 100644
index 00000000000..46180f2cee8
--- /dev/null
+++ b/bdb/docs/api_cxx/lock_vec.html
@@ -0,0 +1,127 @@
+<!--$Id: lock_vec.so,v 10.31 2000/12/04 18:05:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_vec</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::lock_vec</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_vec(u_int32_t locker, u_int32_t flags,
+ DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elistp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_vec method atomically obtains and releases one or more locks
+from the lock table. The DbEnv::lock_vec method is intended to support
+acquisition or trading of multiple locks under one lock table semaphore,
+as is needed for lock coupling or in multigranularity locking for lock
+escalation.
+<p>The <b>locker</b> argument specified to DbEnv::lock_vec is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the lock.
+<p>The <b>flags</b> value must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a><dd>If a lock cannot be immediately granted because the requested lock
+conflicts with an existing lock, return instead of waiting for the lock
+to become available.
+</dl>
+<p>The <b>list</b> array provided to DbEnv::lock_vec is typedef'd as
+DB_LOCKREQ. A DB_LOCKREQ structure has at least the following fields,
+which must be initialized before calling DbEnv::lock_vec:
+<p><dl compact>
+<p><dt>lockop_t <a name="op">op</a>;<dd>The operation to be performed, which must be set to one of the
+following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_GET">DB_LOCK_GET</a><dd>Get a lock, as defined by the values of <b>locker</b>, <b>obj</b>,
+and <b>mode</b>. Upon return from DbEnv::lock_vec, if the
+<b>lock</b> field is non-NULL, a reference to the acquired lock is
+stored there. (This reference is invalidated by any call to
+DbEnv::lock_vec or <a href="../api_cxx/lock_put.html">DbLock::put</a> that releases the lock.)
+<p><dt><a name="DB_LOCK_PUT">DB_LOCK_PUT</a><dd>The lock referenced by the contents of the <b>lock</b> field is released.
+<p><dt><a name="DB_LOCK_PUT_ALL">DB_LOCK_PUT_ALL</a><dd>All locks held by the <b>locker</b> are released. (Any locks acquired
+as a part of the current call to DbEnv::lock_vec that appear after the
+DB_LOCK_PUT_ALL entry are not considered for this
+operation).
+<p><dt><a name="DB_LOCK_PUT_OBJ">DB_LOCK_PUT_OBJ</a><dd>All locks held on the object <b>obj</b> are released. The <b>mode</b>
+and <b>locker</b> parameters are ignored. Note that any locks acquired
+as a part of the current call to DbEnv::lock_vec that occur before the
+DB_LOCK_PUT_OBJ will also be released; those acquired afterwards
+will not be released.
+</dl>
+<p><dt>const Dbt <a name="obj">obj</a>;<dd>An untyped byte string that specifies the object to be locked or released.
+<p><dt>const lockmode_t <a name="mode">mode</a>;<dd>The lock mode, used as an index into the environment's lock conflict array.
+See <a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a> and <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for a description of that array.
+<p><dt>DB_LOCK <a name="lock">lock</a>;<dd>A lock reference.
+</dl>
+<p>The <b>nlist</b> argument specifies the number of elements in the
+<b>list</b> array.
+<p>If any of the requested locks cannot be acquired, or any of the locks to
+be released cannot be released, the operations before the failing
+operation are guaranteed to have completed successfully, and
+DbEnv::lock_vec returns a non-zero value. In addition, if <b>elistp</b>
+is not NULL, it is set to point to the DB_LOCKREQ entry that was being
+processed when the error occurred.
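+<p>For example, a minimal sketch (assuming an already-open environment
+handle <b>dbenv</b>) that atomically releases every lock held by a single
+locker using a one-element request list; the locker ID is allocated with
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>:
+<p><pre>
+u_int32_t locker;
+DB_LOCKREQ req, *failed_req;
+
+(void)dbenv-&gt;lock_id(&amp;locker);
+
+/* ... locks would be acquired here, e.g., by DB_LOCK_GET requests ... */
+
+memset(&amp;req, 0, sizeof(req));
+req.op = DB_LOCK_PUT_ALL;       /* release all locks held by the locker */
+
+int ret = dbenv-&gt;lock_vec(locker, 0, &amp;req, 1, &amp;failed_req);
+</pre>
+<p>If the call fails, <b>failed_req</b> is set to the request that was being
+processed when the error occurred.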
+<p>The DbEnv::lock_vec method may
+return or throw an exception encapsulating
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a><dd>A lock was requested that could not be immediately granted and the
+<b>flags</b> parameter was set to DB_LOCK_NOWAIT. In this case, if
+non-NULL, <b>elistp</b> identifies the request that was not granted.
+</dl>
+<p>Otherwise, the DbEnv::lock_vec method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::lock_vec method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>The DbEnv::lock_vec method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_vec method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/env_set_lk_max.html">DbEnv::set_lk_max</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_put.html">DbLock::put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_archive.html b/bdb/docs/api_cxx/log_archive.html
new file mode 100644
index 00000000000..e5436431a02
--- /dev/null
+++ b/bdb/docs/api_cxx/log_archive.html
@@ -0,0 +1,106 @@
+<!--$Id: log_archive.so,v 10.26 2000/05/25 13:47:07 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_archive</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_archive</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+};
+int
+DbEnv::log_archive(char *(*listp)[],
+ u_int32_t flags, db_malloc_fcn_type db_malloc);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_archive method
+creates a NULL-terminated array of log or database file names and copies
+a pointer to them into the user-specified memory location <b>listp</b>.
+<p>By default, DbEnv::log_archive returns the names of all of the log files
+that are no longer in use (e.g., no longer involved in active transactions),
+and that may safely be archived for catastrophic recovery and then removed
+from the system. If there were no file names to return, the memory location
+referenced by <b>listp</b> will be set to NULL.
+<p>Arrays of log file names are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_ARCH_ABS">DB_ARCH_ABS</a><dd>All pathnames are returned as absolute pathnames,
+instead of relative to the database home directory.
+<p><dt><a name="DB_ARCH_DATA">DB_ARCH_DATA</a><dd>Return the database files that need to be archived in order to recover
+the database from catastrophic failure. If any of the database files
+have not been accessed during the lifetime of the current log files,
+DbEnv::log_archive will not include them in this list. It is also
+possible that some of the files referenced in the log have since been
+deleted from the system.
+<p><dt><a name="DB_ARCH_LOG">DB_ARCH_LOG</a><dd>Return all the log file names regardless of whether or not they are in
+use.
+</dl>
+<p>The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually
+exclusive.
+<p>See the <a href="../utility/db_archive.html">db_archive</a> manual page for more information on database
+archival procedures.
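+<p>For example, a minimal sketch, assuming an open environment handle
+<b>dbenv</b> and that the address of a <b>char **</b> variable is an
+acceptable <b>listp</b> argument:
+<p><pre>
+char **list, **p;
+
+/* A NULL db_malloc means the list is allocated with malloc(3). */
+if (dbenv-&gt;log_archive(&amp;list, DB_ARCH_ABS, NULL) == 0 &amp;&amp; list != NULL) {
+    for (p = list; *p != NULL; ++p)
+        printf("removable log file: %s\n", *p);
+    free(list);                 /* free only the list reference itself */
+}
+</pre>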
+<p>The DbEnv::log_archive method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Bugs</h1>
+<p>In a threaded application (i.e., one where the environment was created
+with the DB_THREAD flag specified), calling DbEnv::log_archive with the
+DB_ARCH_DATA flag will fail, returning EINVAL. To work around this
+problem, re-open the log explicitly without specifying DB_THREAD. This
+restriction is expected to be removed in a future version of Berkeley DB.
+<h1>Errors</h1>
+<p>The DbEnv::log_archive method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The log was corrupted.
+</dl>
+<p>The DbEnv::log_archive method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_archive method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_compare.html b/bdb/docs/api_cxx/log_compare.html
new file mode 100644
index 00000000000..7d1b7ebb9d6
--- /dev/null
+++ b/bdb/docs/api_cxx/log_compare.html
@@ -0,0 +1,53 @@
+<!--$Id: log_compare.so,v 10.12 1999/12/20 08:52:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static int
+DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_compare method allows the caller to compare two
+<a href="../api_cxx/lsn_class.html">DbLsn</a> objects,
+returning 0 if they are equal, 1 if <b>lsn0</b> is greater than
+<b>lsn1</b>, and -1 if <b>lsn0</b> is less than <b>lsn1</b>.
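+<p>For example, a minimal sketch that orders two log sequence numbers
+previously filled in by other log calls:
+<p><pre>
+DbLsn first_lsn, second_lsn;
+
+/* first_lsn and second_lsn were filled in by earlier log operations. */
+if (DbEnv::log_compare(&amp;first_lsn, &amp;second_lsn) &lt; 0)
+    printf("first_lsn precedes second_lsn in the log\n");
+</pre>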
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_file.html b/bdb/docs/api_cxx/log_file.html
new file mode 100644
index 00000000000..fa0ed4e5332
--- /dev/null
+++ b/bdb/docs/api_cxx/log_file.html
@@ -0,0 +1,79 @@
+<!--$Id: log_file.so,v 10.18 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_file</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_file(const DbLsn *lsn, char *namep, size_t len);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_file method maps
+<a href="../api_cxx/lsn_class.html">DbLsn</a> objects
+to file names,
+copying the name of the file containing the record named by <b>lsn</b>
+into the memory location referenced by <b>namep</b>.
+<p>The <b>len</b> argument is the length of the <b>namep</b> buffer in bytes.
+If <b>namep</b> is too short to hold the file name, DbEnv::log_file will
+return ENOMEM.
+(Log file names are normally quite short, on the order of 10 characters.)
+<p>This mapping of
+<a href="../api_cxx/lsn_class.html">DbLsn</a> objects
+to files is needed for database administration. For example, a
+transaction manager typically records the earliest
+<a href="../api_cxx/lsn_class.html">DbLsn</a>
+needed for restart, and the database administrator may want to archive
+log files to tape when they contain only
+<a href="../api_cxx/lsn_class.html">DbLsn</a>
+entries before the earliest one needed for restart.
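+<p>For example, a minimal sketch, assuming an open environment handle
+<b>dbenv</b> and a <a href="../api_cxx/lsn_class.html">DbLsn</a> named
+<b>lsn</b> filled in by an earlier log call:
+<p><pre>
+char fname[256];                /* buffer size chosen arbitrarily */
+
+if (dbenv-&gt;log_file(&amp;lsn, fname, sizeof(fname)) == 0)
+    printf("the record is stored in log file %s\n", fname);
+</pre>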
+<p>The DbEnv::log_file method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::log_file method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The supplied buffer was too small to hold the log file name.
+</dl>
+<p>The DbEnv::log_file method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_file method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_flush.html b/bdb/docs/api_cxx/log_flush.html
new file mode 100644
index 00000000000..ecb13b9c0c6
--- /dev/null
+++ b/bdb/docs/api_cxx/log_flush.html
@@ -0,0 +1,66 @@
+<!--$Id: log_flush.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_flush</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_flush</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_flush(const DbLsn *lsn);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_flush method guarantees that all log records whose
+<a href="../api_cxx/lsn_class.html">DbLsn</a> values
+are less than or equal to the <b>lsn</b> argument have been
+written to disk. If <b>lsn</b> is NULL, all records in the
+log are flushed.
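+<p>For example, a minimal sketch, assuming an open environment handle
+<b>dbenv</b>:
+<p><pre>
+/* A NULL lsn argument flushes every record in the log to disk. */
+int ret = dbenv-&gt;log_flush(NULL);
+</pre>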
+<p>The DbEnv::log_flush method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::log_flush method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::log_flush method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_flush method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_get.html b/bdb/docs/api_cxx/log_get.html
new file mode 100644
index 00000000000..37a8c497bbc
--- /dev/null
+++ b/bdb/docs/api_cxx/log_get.html
@@ -0,0 +1,118 @@
+<!--$Id: log_get.so,v 10.22 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_get</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_get(DbLsn *lsn, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_get method implements a cursor inside of the log,
+retrieving records from the log according to the <b>lsn</b> and
+<b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved and the size field indicates the number of bytes in the record.
+See <a href="../api_cxx/dbt_class.html">Dbt</a> for a description of other fields in the <b>data</b>
+structure. When multiple threads are using the returned log handle
+concurrently, one of the <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or
+<a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags must be specified for any <a href="../api_cxx/dbt_class.html">Dbt</a> used
+for data retrieval.
+<p>The <b>flags</b> argument must be set to exactly one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CHECKPOINT">DB_CHECKPOINT</a><dd>The last record written with the DB_CHECKPOINT flag specified to the
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a> method is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the record
+returned. If no record has been previously written with the DB_CHECKPOINT
+flag specified, the first record in the log is returned.
+<p>If the log is empty, the DbEnv::log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_FIRST">DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the log is empty, the DbEnv::log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_LAST">DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the log is empty, the DbEnv::log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_NEXT">DB_NEXT</a><dd>The current log position is advanced to the next record in the log and that
+record is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, DbEnv::log_get will return the first record in the log.
+If the last log record has already been returned or the log is empty, the
+DbEnv::log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+DbEnv::log_get with the DB_NEXT flag set will return EINVAL.
+<p><dt><a name="DB_PREV">DB_PREV</a><dd>The current log position is moved to the previous record in the log and that
+record is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV,
+DbEnv::log_get will return the last record in the log.
+If the first log record has already been returned or the log is empty, the
+DbEnv::log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+DbEnv::log_get with the DB_PREV flag set will return EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the log record currently referenced by the log.
+<p>If the log pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD flag set,
+DbEnv::log_get will return EINVAL.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument. If the
+specified <a href="../api_cxx/lsn_class.html">DbLsn</a> is invalid (e.g., does not appear in the log)
+DbEnv::log_get will return EINVAL.
+</dl>
+<p>Otherwise, the DbEnv::log_get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
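+<p>For example, a minimal sketch that walks the log from oldest to newest
+record, assuming an open, non-threaded environment handle <b>dbenv</b>:
+<p><pre>
+DbLsn lsn;
+Dbt data;
+int ret;
+
+data.set_flags(DB_DBT_MALLOC);  /* the library allocates each record */
+for (u_int32_t flag = DB_FIRST;
+    (ret = dbenv-&gt;log_get(&amp;lsn, &amp;data, flag)) == 0; flag = DB_NEXT) {
+    /* Process data.get_data() and data.get_size() here. */
+    free(data.get_data());      /* release the DB_DBT_MALLOC'd record */
+}
+/* ret is DB_NOTFOUND once the last record has been returned. */
+</pre>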
+<h1>Errors</h1>
+<p>The DbEnv::log_get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_FIRST flag was specified and no log files were found.
+</dl>
+<p>The DbEnv::log_get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_get method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_put.html b/bdb/docs/api_cxx/log_put.html
new file mode 100644
index 00000000000..ecd84e33c78
--- /dev/null
+++ b/bdb/docs/api_cxx/log_put.html
@@ -0,0 +1,84 @@
+<!--$Id: log_put.so,v 10.21 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_put</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_put method appends records to the log. The <a href="../api_cxx/lsn_class.html">DbLsn</a> of
+the put record is returned in the <b>lsn</b> argument. The <b>flags</b>
+argument may be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CHECKPOINT">DB_CHECKPOINT</a><dd>The log should write a checkpoint record, recording any information
+necessary to make the log structures recoverable after a crash.
+<p><dt><a name="DB_CURLSN">DB_CURLSN</a><dd>The <a href="../api_cxx/lsn_class.html">DbLsn</a> of the next record to be put is returned in the
+<b>lsn</b> argument.
+<p><dt><a name="DB_FLUSH">DB_FLUSH</a><dd>The log is forced to disk after this record is written, guaranteeing
+that all records with <a href="../api_cxx/lsn_class.html">DbLsn</a> values less than or equal to the
+one being put are on disk before this function returns (this function
+is most often used for a transaction commit, see <a href="../api_cxx/txn_commit.html">DbTxn::commit</a> for
+more information).
+<p>The caller is responsible for providing any necessary structure to
+<b>data</b>. (For example, in a write-ahead logging protocol, the
+application must understand what part of <b>data</b> is an operation
+code, what part is redo information, and what part is undo information.
+In addition, most transaction managers will store in <b>data</b> the
+<a href="../api_cxx/lsn_class.html">DbLsn</a> of the previous log record for the same transaction, to
+support chaining back through the transaction's log records during
+undo.)
+</dl>
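+<p>For example, a minimal sketch that appends an application-specific record
+and forces it to disk, assuming an open environment handle <b>dbenv</b>:
+<p><pre>
+char rec[] = "application-specific undo/redo information";
+Dbt data(rec, sizeof(rec));
+DbLsn lsn;
+
+/* DB_FLUSH guarantees the record is on disk before the call returns. */
+int ret = dbenv-&gt;log_put(&amp;lsn, &amp;data, DB_FLUSH);
+</pre>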
+<p>The DbEnv::log_put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The <a href="../api_cxx/log_flush.html">DbEnv::log_flush</a> method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The record to be logged is larger than the maximum log record.
+</dl>
+<p>The DbEnv::log_put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_put method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_register.html b/bdb/docs/api_cxx/log_register.html
new file mode 100644
index 00000000000..b837a60b352
--- /dev/null
+++ b/bdb/docs/api_cxx/log_register.html
@@ -0,0 +1,68 @@
+<!--$Id: log_register.so,v 10.27 2000/05/09 14:46:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_register</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_register(Db *dbp, const char *name);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_register method registers a file name with the specified Berkeley DB
+environment's log manager. The log manager records all file name mappings
+at each checkpoint so that a recovery process can identify the file to
+which a record in the log refers.
+<p>The <b>dbp</b> argument should be a reference to the <a href="../api_cxx/db_class.html">Db</a> object being
+registered. The <b>name</b> argument should be a file name appropriate
+for opening the file in the environment during recovery.
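+<p>For example, a minimal sketch, assuming an open environment handle
+<b>dbenv</b> and an open <a href="../api_cxx/db_class.html">Db</a> handle
+<b>dbp</b>; the file name is purely illustrative:
+<p><pre>
+/* "a.db" must be resolvable in the environment during recovery. */
+int ret = dbenv-&gt;log_register(dbp, "a.db");
+</pre>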
+<p>The DbEnv::log_register method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::log_register method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::log_register method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_register method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_stat.html b/bdb/docs/api_cxx/log_stat.html
new file mode 100644
index 00000000000..061685ab497
--- /dev/null
+++ b/bdb/docs/api_cxx/log_stat.html
@@ -0,0 +1,96 @@
+<!--$Id: log_stat.so,v 10.23 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+};
+int
+DbEnv::log_stat(DB_LOG_STAT **spp, db_malloc_fcn_type db_malloc);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_stat method
+creates a statistical structure and copies a pointer to it into a
+user-specified memory location.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The log region statistics are stored in a structure of type DB_LOG_STAT.
+The following DB_LOG_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_magic;<dd>The magic number that identifies a file as a log file.
+<dt>u_int32_t st_version;<dd>The version of the log file type.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>int st_mode;<dd>The mode of any created log files.
+<dt>u_int32_t st_lg_bsize;<dd>The in-memory log record cache size.
+<dt>u_int32_t st_lg_max;<dd>The maximum size of any individual file comprising the log.
+<dt>u_int32_t st_w_mbytes;<dd>The number of megabytes written to this log.
+<dt>u_int32_t st_w_bytes;<dd>The number of bytes over and above <b>st_w_mbytes</b> written to this log.
+<dt>u_int32_t st_wc_mbytes;<dd>The number of megabytes written to this log since the last checkpoint.
+<dt>u_int32_t st_wc_bytes;<dd>The number of bytes over and above <b>st_wc_mbytes</b> written to this log
+since the last checkpoint.
+<dt>u_int32_t st_wcount;<dd>The number of times the log has been written to disk.
+<dt>u_int32_t st_wcount_fill;<dd>The number of times the log has been written to disk because the
+in-memory log record cache filled up.
+<dt>u_int32_t st_scount;<dd>The number of times the log has been flushed to disk.
+<dt>u_int32_t st_cur_file;<dd>The current log file number.
+<dt>u_int32_t st_cur_offset;<dd>The byte offset in the current log file.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
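+<p>For example, a minimal sketch that prints the current log position and
+then releases the statistics structure, assuming an open environment handle
+<b>dbenv</b>:
+<p><pre>
+DB_LOG_STAT *sp;
+
+if (dbenv-&gt;log_stat(&amp;sp, NULL) == 0) {
+    printf("current log file/offset: %lu/%lu\n",
+        (unsigned long)sp-&gt;st_cur_file, (unsigned long)sp-&gt;st_cur_offset);
+    free(sp);                   /* allocated with malloc(3), so free(3) it */
+}
+</pre>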
+<p>The DbEnv::log_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::log_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_stat method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/log_unregister.html b/bdb/docs/api_cxx/log_unregister.html
new file mode 100644
index 00000000000..364e62259b5
--- /dev/null
+++ b/bdb/docs/api_cxx/log_unregister.html
@@ -0,0 +1,63 @@
+<!--$Id: log_unregister.so,v 10.21 2000/05/03 22:39:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_unregister</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::log_unregister</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_unregister(Db *dbp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_unregister method unregisters the file represented by
+the <b>dbp</b> parameter from the Berkeley DB environment's log manager.
+<p>The DbEnv::log_unregister method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::log_unregister method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::log_unregister method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_unregister method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Class</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_get.html">DbEnv::log_get</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>
+and
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/lsn_class.html b/bdb/docs/api_cxx/lsn_class.html
new file mode 100644
index 00000000000..db4d5656794
--- /dev/null
+++ b/bdb/docs/api_cxx/lsn_class.html
@@ -0,0 +1,38 @@
+<!--$Id: lsn_class.so,v 10.11 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLsn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbLsn</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbLsn { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>A DbLsn is a <b>log sequence number</b> that is fully
+encapsulated. The class itself has no methods, other than a default
+constructor, so there is no way for the user to manipulate its data
+directly.
+Methods always take a pointer to a DbLsn as an argument.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_fclose.html b/bdb/docs/api_cxx/memp_fclose.html
new file mode 100644
index 00000000000..0906388e3ee
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_fclose.html
@@ -0,0 +1,65 @@
+<!--$Id: memp_fclose.so,v 10.20 2000/06/13 13:55:49 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile::close</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::close();
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::close method closes the source file indicated by the
+<a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> object. Calling DbMpoolFile::close does not imply
+a call to <a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>, i.e. no pages are written to the source
+file as as a result of calling DbMpoolFile::close.
+<p>In addition, if the <b>file</b> argument to <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a> was NULL,
+any underlying files created for this <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> will be removed.
+<p>Once DbMpoolFile::close has been called, regardless of its return, the
+<a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle may not be accessed again.
+<p>The DbMpoolFile::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::close method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_fget.html b/bdb/docs/api_cxx/memp_fget.html
new file mode 100644
index 00000000000..c8067603c77
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_fget.html
@@ -0,0 +1,101 @@
+<!--$Id: memp_fget.so,v 10.23 2000/12/04 18:05:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile::get</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::get(db_pgno_t *pgnoaddr, u_int32_t flags, void **pagep);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::get method copies a pointer to the page with the page
+number specified by <b>pgnoaddr</b>, from the source file in the
+<a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>, into the memory location referenced by <b>pagep</b>.
+If the page does not exist or cannot be retrieved, DbMpoolFile::get will
+fail.
+<p><b>Page numbers begin at 0, i.e., the first page in the file is page number
+0, not page number 1.</b>
+<p>The returned page is <b>size_t</b> type aligned.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CREATE">DB_MPOOL_CREATE</a><dd>If the specified page does not exist, create it. In this case, the
+<a href="memp_register.html#pgin">pgin</a> method, if specified, is
+called.
+<p><dt><a name="DB_MPOOL_LAST">DB_MPOOL_LAST</a><dd>Return the last page of the source file and copy its page number
+to the location referenced by <b>pgnoaddr</b>.
+<p><dt><a name="DB_MPOOL_NEW">DB_MPOOL_NEW</a><dd>Create a new page in the file and copy its page number to the location
+referenced by <b>pgnoaddr</b>. In this case, the
+<a href="memp_register.html#pgin">pgin</a> method, if specified, is
+<b>not</b> called.
+</dl>
+<p>The DB_MPOOL_CREATE, DB_MPOOL_LAST and
+DB_MPOOL_NEW flags are mutually exclusive.
+<p>Created pages have all their bytes set to 0, unless otherwise specified
+when the file was opened.
+<p>All pages returned by DbMpoolFile::get will be retained (i.e.,
+<i>pinned</i>) in the pool until a subsequent call to
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>.
+<p>The DbMpoolFile::get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The page reference count has overflowed. (This should never happen unless
+there's a bug in the application.)
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_MPOOL_NEW flag was set and the source file was not opened for writing.
+<p>More than one of DB_MPOOL_CREATE, DB_MPOOL_LAST and DB_MPOOL_NEW was set.
+</dl>
+<p><dl compact>
+<p><dt>EIO<dd>The requested page does not exist and DB_MPOOL_CREATE was not set.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The cache is full and no more pages will fit in the pool.
+</dl>
+<p>The DbMpoolFile::get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::get method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_fopen.html b/bdb/docs/api_cxx/memp_fopen.html
new file mode 100644
index 00000000000..c993ee6f11d
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_fopen.html
@@ -0,0 +1,160 @@
+<!--$Id: memp_fopen.so,v 10.28 2000/12/18 21:05:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile::open</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static int
+DbMpoolFile::open(DbEnv *env, const char *file, u_int32_t flags, int mode,
+ size_t pagesize, DB_MPOOL_FINFO *finfop, DbMpoolFile **mpf);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::open method opens a file in the pool specified by the
+<a href="../api_cxx/dbenv_class.html">DbEnv</a> <b>env</b>, copying the <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> pointer
+representing it into the memory location referenced by <b>mpf</b>.
+<p>The <b>file</b> argument is the name of the file to be opened.
+If <b>file</b> is NULL, a private file is created that cannot be
+shared with any other process (although it may be shared with
+other threads).
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Create any underlying files, as necessary. If the files do not already
+exist and the DB_CREATE flag is not specified, the call will fail.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Always copy this file into the local cache instead of potentially mapping
+it into process memory (see the description of the
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a> method for further information).
+<p><dt><a name="DB_RDONLY">DB_RDONLY</a><dd>Open any underlying files for reading only. Any attempt to write the file
+using the pool functions will fail, regardless of the actual permissions
+of the file.
+</dl>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the DbMpoolFile::open method
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p>The <b>pagesize</b> argument is the size, in bytes, of the unit of transfer
+between the application and the pool, although it is not necessarily the
+unit of transfer between the pool and the source file.
+<p>Files opened in the pool may be further configured based on the
+<b>finfop</b> argument to DbMpoolFile::open (which is a pointer to a
+structure of type DB_MPOOL_FINFO). No references to the <b>finfop</b>
+structure are maintained by Berkeley DB, so it may be discarded when the
+DbMpoolFile::open function returns. In order to ensure compatibility
+with future releases of Berkeley DB, all fields of the DB_MPOOL_FINFO structure
+that are not explicitly set should be initialized to 0 before the first
+time the structure is used. Do this by declaring the structure external
+or static, or by calling the C library routine <b>bzero</b>(3) or
+<b>memset</b>(3).
+<p>The fields of the DB_MPOOL_FINFO structure used by DbMpoolFile::open are
+described below. If <b>finfop</b> is NULL or any of its fields are
+set to their default value, defaults appropriate for the system are used.
+<p><dl compact>
+<p><dt>int <a name="ftype">ftype</a>;<dd>The <b>ftype</b> field should be the same as a <b>ftype</b> argument
+previously specified to the <a href="../api_cxx/memp_register.html">DbEnv::memp_register</a> function, unless no
+input or output processing of the file's pages is necessary, in which
+case it should be 0. (See the description of the <a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>
+function for more information.)
+<p><dt>DBT *<a name="pgcookie">pgcookie</a>;<dd>The <b>pgcookie</b> field contains the byte string that is passed to the
+<b>pgin</b> and <b>pgout</b> functions for this file, if any. If no
+<b>pgin</b> or <b>pgout</b> functions are specified, the
+<b>pgcookie</b> field should be NULL. (See the description of the
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a> function for more information.)
+<p><dt>u_int8_t *<a name="fileid">fileid</a>;<dd>The <b>fileid</b> field is a unique identifier for the file. If the
+<b>fileid</b> field is non-NULL, it must reference a DB_FILE_ID_LEN
+length array of bytes that will be used to uniquely identify the file.
+<p>The mpool functions must be able to uniquely identify files in order that
+multiple processes wanting to share a file will correctly identify it in
+the pool.
+<p>On most UNIX/POSIX systems, the <b>fileid</b> field will not need to be
+set and the mpool functions will simply use the file's device and inode
+numbers for this purpose. On Windows systems, the mpool functions use
+the values returned by GetFileInformationByHandle() by default -- these
+values are known to be constant between processes and over reboot in the
+case of NTFS (where they are the NTFS MFT indexes).
+<p>On other filesystems, (e.g., FAT or NFS) these default values are not
+necessarily unique between processes or across system reboots.
+<b>Applications wanting to maintain a shared memory buffer pool
+between processes or across system reboots, where the pool contains pages
+from files stored on such filesystems, must specify a unique file
+identifier to the DbMpoolFile::open call and each process opening or
+registering the file must provide the same unique identifier.</b>
+<p>This should not be necessary for most applications. Specifically, it is
+not necessary if the memory pool is not shared between processes and is
+re-instantiated after each system reboot, or the application is using the
+Berkeley DB access methods instead of calling the pool functions explicitly, or
+the files in the memory pool are stored on filesystems where the default
+values as described above are invariant between processes and across system
+reboots.
+<p><dt>int32_t <a name="lsn_offset">lsn_offset</a>;<dd>The <b>lsn_offset</b> field is the zero-based byte offset in the page of
+the page's log sequence number (LSN), or -1 if no LSN offset is specified.
+(See the description of the <a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a> function for more
+information.)
+<p><dt>u_int32_t <a name="clear_len">clear_len</a>;<dd>The <b>clear_len</b> field is the number of initial bytes in a page
+that should be set to zero when the page is created as a result of the
+DB_MPOOL_CREATE or DB_MPOOL_NEW flags being specified to <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>.
+If <b>finfop</b> is NULL or <b>clear_len</b> is 0, the entire page is
+cleared.
+</dl>
+<p>The DbMpoolFile::open method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
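+<p>The following is a minimal sketch of a typical call, assuming a <a href="../api_cxx/dbenv_class.html">DbEnv</a> handle <b>dbenv</b> that has already been opened with the DB_CXX_NO_EXCEPTIONS flag (so errors are returned rather than thrown); the file name, page size and <b>clear_len</b> value are illustrative only:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;string.h&gt;
+
+int
+open_in_pool(DbEnv *dbenv, DbMpoolFile **mpfp)
+{
+    DB_MPOOL_FINFO finfo;
+
+    /* All DB_MPOOL_FINFO fields not explicitly set must be 0. */
+    memset(&amp;finfo, 0, sizeof(finfo));
+    finfo.clear_len = 32;    /* Zero the first 32 bytes of new pages. */
+
+    /* Create the file if necessary, using 8KB pages. */
+    return (DbMpoolFile::open(dbenv,
+        "database.pgs", DB_CREATE, 0, 8 * 1024, &amp;finfo, mpfp));
+}
+</pre>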
+<h1>Errors</h1>
+<p>The DbMpoolFile::open method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The file has already been entered into the pool, and the <b>pagesize</b>
+value is not the same as when the file was entered into the pool, or the
+length of the file is not zero or a multiple of the <b>pagesize</b>.
+<p>The DB_RDONLY flag was specified for an in-memory pool.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of open files has been reached.
+</dl>
+<p>The DbMpoolFile::open method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::open method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_fput.html b/bdb/docs/api_cxx/memp_fput.html
new file mode 100644
index 00000000000..f49c809c093
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_fput.html
@@ -0,0 +1,83 @@
+<!--$Id: memp_fput.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile::put</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::put(void *pgaddr, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::put method indicates that the page referenced by
+<b>pgaddr</b> can be evicted from the pool. The <b>pgaddr</b>
+argument must be an address previously returned by <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a><dd>Clear any previously set modification information (i.e., don't bother
+writing the page back to the source file).
+<p><dt><a name="DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a><dd>The page has been modified and must be written to the source file
+before being evicted from the pool.
+<p><dt><a name="DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a><dd>The page is unlikely to be useful in the near future,
+and should be discarded before other pages in the pool.
+</dl>
+<p>The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are
+mutually exclusive.
+<p>The DbMpoolFile::put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
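+<p>A minimal sketch of returning a modified page to the pool follows, assuming a <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle <b>mpf</b>, a page address previously obtained from <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>, and an environment created with DB_CXX_NO_EXCEPTIONS so that errors are returned:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+release_page(DbMpoolFile *mpf, void *pgaddr)
+{
+    /* Mark the page dirty so it is written back before being evicted. */
+    return (mpf-&gt;put(pgaddr, DB_MPOOL_DIRTY));
+}
+</pre>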
+<h1>Errors</h1>
+<p>The DbMpoolFile::put method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>The DB_MPOOL_DIRTY flag was set and the source file was not opened for
+writing.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>pgaddr</b> parameter does not reference a page returned by
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>.
+<p>More than one of DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags was set.
+</dl>
+<p>The DbMpoolFile::put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::put method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_fset.html b/bdb/docs/api_cxx/memp_fset.html
new file mode 100644
index 00000000000..6e46d45c1f4
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_fset.html
@@ -0,0 +1,76 @@
+<!--$Id: memp_fset.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile::set</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set(void *pgaddr, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set method sets the flags associated with the page referenced
+by <b>pgaddr</b> without unpinning it from the pool. The <b>pgaddr</b>
+argument must be an address previously returned by <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a><dd>Clear any previously set modification information (i.e., don't bother
+writing the page back to the source file).
+<p><dt><a name="DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a><dd>The page has been modified and must be written to the source file
+before being evicted from the pool.
+<p><dt><a name="DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a><dd>The page is unlikely to be useful in the near future,
+and should be discarded before other pages in the pool.
+</dl>
+<p>The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are
+mutually exclusive.
+<p>The DbMpoolFile::set method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
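+<p>The following sketch contrasts DbMpoolFile::set with <a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>: the page is marked dirty while it remains pinned and is only released later. It assumes a page address previously obtained from <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a> and an environment created with DB_CXX_NO_EXCEPTIONS:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+update_and_release(DbMpoolFile *mpf, void *pgaddr)
+{
+    int ret;
+
+    /* Record the modification without unpinning the page. */
+    if ((ret = mpf-&gt;set(pgaddr, DB_MPOOL_DIRTY)) != 0)
+        return (ret);
+
+    /* ... the still-pinned page may continue to be used here ... */
+
+    /* Release the page; no additional flags are needed. */
+    return (mpf-&gt;put(pgaddr, 0));
+}
+</pre>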
+<h1>Errors</h1>
+<p>The DbMpoolFile::set method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbMpoolFile::set method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_fsync.html b/bdb/docs/api_cxx/memp_fsync.html
new file mode 100644
index 00000000000..a38366f9e18
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_fsync.html
@@ -0,0 +1,63 @@
+<!--$Id: memp_fsync.so,v 10.22 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile::sync</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::sync();
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::sync method writes all pages associated with the
+<a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>, that were marked as modified using <a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>
+or <a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>, back to the source file. If any of the modified
+pages are also <i>pinned</i> (i.e., currently referenced by this or
+another process) DbMpoolFile::sync will ignore them.
+<p>The DbMpoolFile::sync method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if there were pages which were
+modified but which DbMpoolFile::sync was unable to write immediately.
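+<p>An application that must have every modified page on disk can retry the call, as in the following sketch; it assumes an environment created with DB_CXX_NO_EXCEPTIONS so that <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> is seen as a return value, and the one-second wait is illustrative only:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;unistd.h&gt;
+
+int
+flush_file(DbMpoolFile *mpf)
+{
+    int ret;
+
+    /* Retry while pinned pages prevent an immediate write. */
+    while ((ret = mpf-&gt;sync()) == DB_INCOMPLETE)
+        (void)sleep(1);
+    return (ret);
+}
+</pre>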
+<h1>Errors</h1>
+<p>The DbMpoolFile::sync method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::sync method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_register.html b/bdb/docs/api_cxx/memp_register.html
new file mode 100644
index 00000000000..4c5f0748e98
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_register.html
@@ -0,0 +1,102 @@
+<!--$Id: memp_register.so,v 10.23 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::memp_register</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef int (*pgin_fcn_type)(DB_ENV *dbenv,
+ db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+ typedef int (*pgout_fcn_type)(DB_ENV *dbenv,
+ db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+};
+int
+DbEnv::memp_register(int ftype,
+ pgin_fcn_type pgin_fcn, pgout_fcn_type pgout_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_register method registers page-in and page-out
+functions for files of type <b>ftype</b> in the specified pool.
+<p>If the <b>pgin_fcn</b> function is non-NULL, it is called each time
+a page is read into the memory pool from a file of type <b>ftype</b>, or
+a page is created for a file of type <b>ftype</b> (see the
+DB_MPOOL_CREATE flag for the <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a> method).
+<p>If the <b>pgout_fcn</b> function is non-NULL, it is called each time
+a page is written to a file of type <b>ftype</b>.
+<p>Both the <b>pgin_fcn</b> and <b>pgout_fcn</b> functions are called with
+a reference to the current environment, the page number, a pointer to the
+page being read or written, and any argument <b>pgcookie</b> that was
+specified to the <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a> function when the file was opened.
+The <b>pgin_fcn</b> and <b>pgout_fcn</b> functions should return 0 on
+success, and an applicable non-zero <b>errno</b> value on failure, in
+which case the shared memory pool interface routine (and, by extension,
+any Berkeley DB library function) calling it will also fail, returning that
+<b>errno</b> value.
+<p>The purpose of the DbEnv::memp_register function is to support processing
+when pages are entered into, or flushed from, the pool. A file type must
+be specified to make it possible for unrelated threads or processes that
+are sharing a pool to evict each other's pages from the pool.
+Applications should call DbEnv::memp_register, during initialization,
+for each type of file requiring input or output processing that will be
+sharing the underlying pool. (No registration is necessary for the standard
+Berkeley DB access method types, as <a href="../api_cxx/db_open.html">Db::open</a> registers them
+separately.)
+<p>If a thread or process does not call DbEnv::memp_register for a file
+type, it is impossible for it to evict pages for any file requiring input
+or output processing from the pool. For this reason,
+DbEnv::memp_register should always be called by each application sharing
+a pool for each type of file included in the pool, regardless of whether
+or not the application itself uses files of that type.
+<p>There are no standard values for <b>ftype</b>, <b>pgin_fcn</b>,
+<b>pgout_fcn</b> and <b>pgcookie</b>, except that the <b>ftype</b>
+value for a file must be a non-zero positive number, as negative numbers
+are reserved for internal use by the Berkeley DB library. For this reason,
+applications sharing a pool must coordinate their values amongst
+themselves.
+<p>The DbEnv::memp_register method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
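+<p>A minimal sketch of registering input and output functions for an application-defined file type follows; the function bodies, the file type number and the handle name <b>dbenv</b> are illustrative only:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+#define APP_FILE_TYPE 1    /* Application-chosen; must be a positive, non-zero number. */
+
+extern "C" {
+int
+app_pgin(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
+{
+    /* ... convert the page at pgaddr to its in-memory format ... */
+    return (0);
+}
+int
+app_pgout(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
+{
+    /* ... convert the page at pgaddr back to its on-disk format ... */
+    return (0);
+}
+}
+
+int
+register_app_type(DbEnv *dbenv)
+{
+    return (dbenv-&gt;memp_register(APP_FILE_TYPE, app_pgin, app_pgout));
+}
+</pre>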
+<h1>Errors</h1>
+<p>The DbEnv::memp_register method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_register method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_stat.html b/bdb/docs/api_cxx/memp_stat.html
new file mode 100644
index 00000000000..1c7f16a2a98
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_stat.html
@@ -0,0 +1,125 @@
+<!--$Id: memp_stat.so,v 10.28 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::memp_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+};
+int
+DbEnv::memp_stat(DB_MPOOL_STAT **gsp,
+ DB_MPOOL_FSTAT *(*fsp)[], db_malloc_fcn_type db_malloc);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_stat method creates statistical structures and copies
+pointers to them into user-specified memory locations. The statistics
+include the number of files participating in the pool, the active pages
+in the pool, and information as to how effective the cache has been.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>If <b>gsp</b> is non-NULL, the global statistics for the memory pool
+are copied into the memory location it references. The
+global statistics are stored in a structure of type DB_MPOOL_STAT.
+<p>The following DB_MPOOL_STAT fields will be filled in:
+<p><dl compact>
+<dt>size_t st_gbytes;<dd>Gigabytes of cache (total cache size is st_gbytes + st_bytes)
+<dt>size_t st_bytes;<dd>Bytes of cache (total cache size is st_gbytes + st_bytes)
+<dt>u_int32_t st_ncache;<dd>Number of caches.
+<dt>u_int32_t st_regsize;<dd>Individual cache size.
+<dt>u_int32_t st_cache_hit;<dd>Requested pages found in the cache.
+<dt>u_int32_t st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>u_int32_t st_map;<dd>Requested pages mapped into the process' address space (there is no
+available information as to whether or not this request caused disk I/O,
+although examining the application page fault rate may be helpful).
+<dt>u_int32_t st_page_create;<dd>Pages created in the cache.
+<dt>u_int32_t st_page_in;<dd>Pages read into the cache.
+<dt>u_int32_t st_page_out;<dd>Pages written from the cache to the backing file.
+<dt>u_int32_t st_ro_evict;<dd>Clean pages forced from the cache.
+<dt>u_int32_t st_rw_evict;<dd>Dirty pages forced from the cache.
+<dt>u_int32_t st_hash_buckets;<dd>Number of hash buckets in buffer hash table.
+<dt>u_int32_t st_hash_searches;<dd>Total number of buffer hash table lookups.
+<dt>u_int32_t st_hash_longest;<dd>The longest chain ever encountered in buffer hash table lookups.
+<dt>u_int32_t st_hash_examined;<dd>Total number of hash elements traversed during hash table lookups.
+<dt>u_int32_t st_page_clean;<dd>Clean pages currently in the cache.
+<dt>u_int32_t st_page_dirty;<dd>Dirty pages currently in the cache.
+<dt>u_int32_t st_page_trickle;<dd>Dirty pages written using the <a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a> interface.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>If <b>fsp</b> is non-NULL, a pointer to a NULL-terminated variable
+length array of statistics for individual files in the memory pool
+is copied into the memory location it references. If no individual files
+currently exist in the memory pool, <b>fsp</b> will be set to NULL.
+<p>The per-file statistics are stored in structures of type DB_MPOOL_FSTAT.
+The following DB_MPOOL_FSTAT fields will be filled in for each file in
+the pool, i.e., each element of the array:
+<p><dl compact>
+<dt>char *file_name;<dd>The name of the file.
+<dt>size_t st_pagesize;<dd>Page size in bytes.
+<dt>u_int32_t st_cache_hit;<dd>Requested pages found in the cache.
+<dt>u_int32_t st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>u_int32_t st_map;<dd>Requested pages mapped into the process' address space.
+<dt>u_int32_t st_page_create;<dd>Pages created in the cache.
+<dt>u_int32_t st_page_in;<dd>Pages read into the cache.
+<dt>u_int32_t st_page_out;<dd>Pages written from the cache to the backing file.
+</dl>
+<p>The DbEnv::memp_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
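+<p>The following sketch prints the global cache hit and miss counts and releases the returned structure with a single call to <b>free</b>(3); the handle name <b>dbenv</b> is illustrative and the environment is assumed to have been created with DB_CXX_NO_EXCEPTIONS:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+
+int
+print_pool_stats(DbEnv *dbenv)
+{
+    DB_MPOOL_STAT *gsp;
+    int ret;
+
+    /* Request global statistics only; the library allocates with malloc(3). */
+    if ((ret = dbenv-&gt;memp_stat(&amp;gsp, NULL, NULL)) != 0)
+        return (ret);
+
+    printf("cache hits: %lu, cache misses: %lu\n",
+        (unsigned long)gsp-&gt;st_cache_hit, (unsigned long)gsp-&gt;st_cache_miss);
+
+    free(gsp);    /* A single free releases the returned structure. */
+    return (0);
+}
+</pre>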
+<h1>Errors</h1>
+<p>The DbEnv::memp_stat method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::memp_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_stat method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_sync.html b/bdb/docs/api_cxx/memp_sync.html
new file mode 100644
index 00000000000..fe63f1dffc4
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_sync.html
@@ -0,0 +1,87 @@
+<!--$Id: memp_sync.so,v 10.25 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::memp_sync</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::memp_sync(DbLsn *lsn);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_sync method ensures that any modified pages in the pool with
+log sequence numbers less than the <b>lsn</b> argument are written to
+disk. If <b>lsn</b> is NULL, all modified pages in the pool are
+flushed.
+<p>The primary purpose of the DbEnv::memp_sync function is to enable a
+transaction manager to ensure, as part of a checkpoint, that all pages
+modified by a certain time have been written to disk. Pages in the pool
+that cannot be written back to disk immediately (e.g., that are currently
+pinned) are written to disk as soon as it is possible to do so. The
+expected behavior of the Berkeley DB or other transaction subsystem is to call
+the DbEnv::memp_sync function and then, if the return indicates that some
+pages could not be written immediately, to wait briefly and retry
+with the same log sequence number until the DbEnv::memp_sync function
+returns that all pages have been written.
+<p>To support the DbEnv::memp_sync functionality, it is necessary that the
+pool functions know the location of the log sequence number on the page
+for each file type. This location should be specified when the file is
+opened using the <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a> function. It is not required that
+the log sequence number be aligned on the page in any way.
+<p>The DbEnv::memp_sync method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if there were pages which need to be
+written but which DbEnv::memp_sync was unable to write immediately.
+In addition, if DbEnv::memp_sync returns success, the value of
+<b>lsn</b> will be overwritten with the largest log sequence number
+from any page which was written by DbEnv::memp_sync to satisfy this
+request.
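+<p>A minimal checkpoint-style sketch that retries with the same log sequence number follows; it assumes an environment created with DB_CXX_NO_EXCEPTIONS so that <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> is returned rather than thrown, and the one-second wait is illustrative only:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;unistd.h&gt;
+
+int
+flush_to_lsn(DbEnv *dbenv, DbLsn *lsn)
+{
+    int ret;
+
+    /* Retry until all pages modified before *lsn have been written. */
+    while ((ret = dbenv-&gt;memp_sync(lsn)) == DB_INCOMPLETE)
+        (void)sleep(1);
+    return (ret);
+}
+</pre>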
+<h1>Errors</h1>
+<p>The DbEnv::memp_sync method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DbEnv::memp_sync function was called without logging having been
+initialized in the environment.
+</dl>
+<p>The DbEnv::memp_sync method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_sync method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/memp_trickle.html b/bdb/docs/api_cxx/memp_trickle.html
new file mode 100644
index 00000000000..185bc5481a4
--- /dev/null
+++ b/bdb/docs/api_cxx/memp_trickle.html
@@ -0,0 +1,70 @@
+<!--$Id: memp_trickle.so,v 10.21 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_trickle</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::memp_trickle</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::memp_trickle(int pct, int *nwrotep);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_trickle method ensures that at least <b>pct</b> percent of
+the pages in the shared memory pool are clean by writing dirty pages to
+their backing files.
+If the <b>nwrotep</b> argument is non-NULL, the number of pages that
+were written to reach the correct percentage is returned in the memory
+location it references.
+<p>The purpose of the DbEnv::memp_trickle function is to enable a memory
+pool manager to ensure that a page is always available for reading in new
+information without having to wait for a write.
+<p>The DbEnv::memp_trickle method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
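+<p>A minimal sketch follows; the 20 percent target and the handle name <b>dbenv</b> are illustrative only:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+trickle_cache(DbEnv *dbenv)
+{
+    int nwrote;
+
+    /* Ensure at least 20 percent of the cached pages are clean. */
+    return (dbenv-&gt;memp_trickle(20, &amp;nwrote));
+}
+</pre>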
+<h1>Errors</h1>
+<p>The DbEnv::memp_trickle method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::memp_trickle method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_trickle method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/mempfile_class.html b/bdb/docs/api_cxx/mempfile_class.html
new file mode 100644
index 00000000000..ce10974d14d
--- /dev/null
+++ b/bdb/docs/api_cxx/mempfile_class.html
@@ -0,0 +1,62 @@
+<!--$Id: mempfile_class.so,v 10.10 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbMpoolFile { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the DbMpoolFile
+class.
+<p>The <a href="../api_cxx/dbenv_class.html">DbEnv</a> memory pool methods and the DbMpoolFile class
+are the library interface that provide general-purpose, page-oriented
+buffer management of one or more files. While designed to work with the
+other Db classes, they are also useful for more general purposes.
+The memory pools are referred to in this document as simply <i>pools</i>.
+<p>Pools may be shared between processes. Pools are usually filled by pages
+from one or more files. Pages in the pool are replaced in LRU
+(least-recently-used) order, with each new page replacing the page that
+has been unused the longest. Pages retrieved from the pool using
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a> are <i>pinned</i> in the pool until they are
+returned to the control of the buffer pool using the <a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>
+method.
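+<p>The following sketch outlines the typical life cycle of a pool file: it is opened in an existing environment, pages are retrieved and returned, and modified pages are written back. The handle name <b>dbenv</b>, the file name and the page size are illustrative, the flags are described under <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>, and the environment is assumed to have been created with DB_CXX_NO_EXCEPTIONS:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+use_pool(DbEnv *dbenv)
+{
+    DbMpoolFile *mpf;
+    int ret;
+
+    /* Open (and if necessary create) the file in the environment's pool. */
+    ret = DbMpoolFile::open(dbenv,
+        "database.pgs", DB_CREATE, 0, 8 * 1024, NULL, &amp;mpf);
+    if (ret != 0)
+        return (ret);
+
+    /* ... DbMpoolFile::get and DbMpoolFile::put calls using mpf ... */
+
+    /* Write any modified pages back to the source file. */
+    return (mpf-&gt;sync());
+}
+</pre>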
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_cxx/pindex.src b/bdb/docs/api_cxx/pindex.src
new file mode 100644
index 00000000000..cf4a58836d0
--- /dev/null
+++ b/bdb/docs/api_cxx/pindex.src
@@ -0,0 +1,287 @@
+__APIREL__/api_cxx/db_class.html#2 @Db
+__APIREL__/api_cxx/db_class.html#DB_CXX_NO_EXCEPTIONS Db@DB_CXX_NO_EXCEPTIONS
+__APIREL__/api_cxx/db_class.html#DB_XA_CREATE Db@DB_XA_CREATE
+__APIREL__/api_cxx/db_err.html#2 @DbEnv::err
+__APIREL__/api_cxx/db_set_errfile.html#2 @Db::set_errfile
+__APIREL__/api_cxx/db_set_malloc.html#2 @Db::set_malloc
+__APIREL__/api_cxx/db_set_paniccall.html#2 @Db::set_paniccall
+__APIREL__/api_cxx/db_set_realloc.html#2 @Db::set_realloc
+__APIREL__/api_cxx/dbc_class.html#2 @Dbc
+__APIREL__/api_cxx/dbenv_class.html#2 @DbEnv
+__APIREL__/api_cxx/dbenv_class.html#DB_CLIENT DbEnv@DB_CLIENT
+__APIREL__/api_cxx/dbenv_class.html#DB_CXX_NO_EXCEPTIONS DbEnv@DB_CXX_NO_EXCEPTIONS
+__APIREL__/api_cxx/dbt_class.html#2 @Dbt
+__APIREL__/api_cxx/dbt_class.html#3 @key/data pairs
+__APIREL__/api_cxx/dbt_class.html#data Dbt@data
+__APIREL__/api_cxx/dbt_class.html#DB_DBT_MALLOC Dbt@DB_DBT_MALLOC
+__APIREL__/api_cxx/dbt_class.html#DB_DBT_REALLOC Dbt@DB_DBT_REALLOC
+__APIREL__/api_cxx/dbt_class.html#DB_DBT_USERMEM Dbt@DB_DBT_USERMEM
+__APIREL__/api_cxx/dbt_class.html#DB_DBT_PARTIAL Dbt@DB_DBT_PARTIAL
+__APIREL__/api_cxx/dbt_class.html#4 retrieved key/data @permanence
+__APIREL__/api_cxx/dbt_class.html#5 retrieved @key/data permanence
+__APIREL__/api_cxx/dbt_class.html#6 data @alignment
+__APIREL__/api_cxx/dbt_class.html#7 logical @record number format
+__APIREL__/api_cxx/env_set_errfile.html#2 @DbEnv::set_errfile
+__APIREL__/api_cxx/env_set_error_stream.html#2 @DbEnv::set_error_stream
+__APIREL__/api_cxx/env_set_paniccall.html#2 @DbEnv::set_paniccall
+__APIREL__/api_cxx/except_class.html#2 @DbException
+__APIREL__/api_cxx/get_errno.html#2 @DbException::get_errno
+__APIREL__/api_cxx/lock_class.html#2 @DbLock
+__APIREL__/api_cxx/lsn_class.html#2 @DbLsn
+__APIREL__/api_cxx/mempfile_class.html#2 @DbMpoolFile
+__APIREL__/api_cxx/txn_class.html#2 @DbTxn
+__APIREL__/api_cxx/what.html#2 @DbException::what
+__APIREL__/api_cxx/db_close.html#2 @Db::close
+__APIREL__/api_cxx/db_close.html#DB_NOSYNC Db::close@DB_NOSYNC
+__APIREL__/api_cxx/db_close.html#3 Db::close @DB_INCOMPLETE
+__APIREL__/api_cxx/db_cursor.html#2 @Db::cursor
+__APIREL__/api_cxx/db_cursor.html#DB_WRITECURSOR Db::cursor@DB_WRITECURSOR
+__APIREL__/api_cxx/db_del.html#2 @Db::del
+__APIREL__/api_cxx/db_fd.html#2 @Db::fd
+__APIREL__/api_cxx/db_get.html#2 @Db::get
+__APIREL__/api_cxx/db_get.html#DB_CONSUME Db::get@DB_CONSUME
+__APIREL__/api_cxx/db_get.html#DB_CONSUME_WAIT Db::get@DB_CONSUME_WAIT
+__APIREL__/api_cxx/db_get.html#DB_GET_BOTH Db::get@DB_GET_BOTH
+__APIREL__/api_cxx/db_get.html#DB_SET_RECNO Db::get@DB_SET_RECNO
+__APIREL__/api_cxx/db_get.html#DB_RMW Db::get@DB_RMW
+__APIREL__/api_cxx/db_get_byteswapped.html#2 @Db::get_byteswapped
+__APIREL__/api_cxx/db_get_type.html#2 @Db::get_type
+__APIREL__/api_cxx/db_join.html#2 @Db::join
+__APIREL__/api_cxx/db_join.html#DB_JOIN_NOSORT Db::join@DB_JOIN_NOSORT
+__APIREL__/api_cxx/db_join.html#DB_JOIN_ITEM Db::join@DB_JOIN_ITEM
+__APIREL__/api_cxx/db_join.html#DB_RMW Db::join@DB_RMW
+__APIREL__/api_cxx/db_key_range.html#2 @Db::key_range
+__APIREL__/api_cxx/db_open.html#2 @Db::open
+__APIREL__/api_cxx/db_open.html#DB_CREATE Db::open@DB_CREATE
+__APIREL__/api_cxx/db_open.html#DB_EXCL Db::open@DB_EXCL
+__APIREL__/api_cxx/db_open.html#DB_NOMMAP Db::open@DB_NOMMAP
+__APIREL__/api_cxx/db_open.html#DB_RDONLY Db::open@DB_RDONLY
+__APIREL__/api_cxx/db_open.html#DB_THREAD Db::open@DB_THREAD
+__APIREL__/api_cxx/db_open.html#DB_TRUNCATE Db::open@DB_TRUNCATE
+__APIREL__/api_cxx/db_open.html#DB_OLD_VERSION Db::open@DB_OLD_VERSION
+__APIREL__/api_cxx/db_put.html#2 @Db::put
+__APIREL__/api_cxx/db_put.html#DB_APPEND Db::put@DB_APPEND
+__APIREL__/api_cxx/db_put.html#DB_NODUPDATA Db::put@DB_NODUPDATA
+__APIREL__/api_cxx/db_put.html#DB_NOOVERWRITE Db::put@DB_NOOVERWRITE
+__APIREL__/api_cxx/db_remove.html#2 @Db::remove
+__APIREL__/api_cxx/db_rename.html#2 @Db::rename
+__APIREL__/api_cxx/db_set_append_recno.html#2 @Db::set_append_recno
+__APIREL__/api_cxx/db_set_bt_compare.html#2 @Db::set_bt_compare
+__APIREL__/api_cxx/db_set_bt_minkey.html#2 @Db::set_bt_minkey
+__APIREL__/api_cxx/db_set_bt_prefix.html#2 @Db::set_bt_prefix
+__APIREL__/api_cxx/db_set_cachesize.html#2 @Db::set_cachesize
+__APIREL__/api_cxx/db_set_dup_compare.html#2 @Db::set_dup_compare
+__APIREL__/api_cxx/db_set_errcall.html#2 @Db::set_errcall
+__APIREL__/api_cxx/db_set_errpfx.html#2 @Db::set_errpfx
+__APIREL__/api_cxx/db_set_feedback.html#2 @Db::set_feedback
+__APIREL__/api_cxx/db_set_feedback.html#DB_UPGRADE Db::set_feedback@DB_UPGRADE
+__APIREL__/api_cxx/db_set_feedback.html#DB_VERIFY Db::set_feedback@DB_VERIFY
+__APIREL__/api_cxx/db_set_flags.html#2 @Db::set_flags
+__APIREL__/api_cxx/db_set_flags.html#DB_DUP Db::set_flags@DB_DUP
+__APIREL__/api_cxx/db_set_flags.html#DB_DUPSORT Db::set_flags@DB_DUPSORT
+__APIREL__/api_cxx/db_set_flags.html#DB_RECNUM Db::set_flags@DB_RECNUM
+__APIREL__/api_cxx/db_set_flags.html#DB_REVSPLITOFF Db::set_flags@DB_REVSPLITOFF
+__APIREL__/api_cxx/db_set_flags.html#DB_DUP Db::set_flags@DB_DUP
+__APIREL__/api_cxx/db_set_flags.html#DB_DUPSORT Db::set_flags@DB_DUPSORT
+__APIREL__/api_cxx/db_set_flags.html#DB_RENUMBER Db::set_flags@DB_RENUMBER
+__APIREL__/api_cxx/db_set_flags.html#DB_SNAPSHOT Db::set_flags@DB_SNAPSHOT
+__APIREL__/api_cxx/db_set_h_ffactor.html#2 @Db::set_h_ffactor
+__APIREL__/api_cxx/db_set_h_hash.html#2 @Db::set_h_hash
+__APIREL__/api_cxx/db_set_h_nelem.html#2 @Db::set_h_nelem
+__APIREL__/api_cxx/db_set_lorder.html#2 @Db::set_lorder
+__APIREL__/api_cxx/db_set_pagesize.html#2 @Db::set_pagesize
+__APIREL__/api_cxx/db_set_q_extentsize.html#2 @Db::set_q_extentsize
+__APIREL__/api_cxx/db_set_re_delim.html#2 @Db::set_re_delim
+__APIREL__/api_cxx/db_set_re_len.html#2 @Db::set_re_len
+__APIREL__/api_cxx/db_set_re_pad.html#2 @Db::set_re_pad
+__APIREL__/api_cxx/db_set_re_source.html#2 @Db::set_re_source
+__APIREL__/api_cxx/db_stat.html#2 @Db::stat
+__APIREL__/api_cxx/db_stat.html#DB_CACHED_COUNTS Db::stat@DB_CACHED_COUNTS
+__APIREL__/api_cxx/db_stat.html#DB_RECORDCOUNT Db::stat@DB_RECORDCOUNT
+__APIREL__/api_cxx/db_sync.html#2 @Db::sync
+__APIREL__/api_cxx/db_upgrade.html#2 @Db::upgrade
+__APIREL__/api_cxx/db_upgrade.html#DB_DUPSORT Db::upgrade@DB_DUPSORT
+__APIREL__/api_cxx/db_upgrade.html#DB_OLD_VERSION Db::upgrade@DB_OLD_VERSION
+__APIREL__/api_cxx/db_verify.html#2 @Db::verify
+__APIREL__/api_cxx/db_verify.html#DB_SALVAGE Db::verify@DB_SALVAGE
+__APIREL__/api_cxx/db_verify.html#DB_AGGRESSIVE Db::verify@DB_AGGRESSIVE
+__APIREL__/api_cxx/db_verify.html#DB_NOORDERCHK Db::verify@DB_NOORDERCHK
+__APIREL__/api_cxx/db_verify.html#DB_ORDERCHKONLY Db::verify@DB_ORDERCHKONLY
+__APIREL__/api_cxx/dbc_close.html#2 @Dbc::close
+__APIREL__/api_cxx/dbc_count.html#2 @Dbc::count
+__APIREL__/api_cxx/dbc_del.html#2 @Dbc::del
+__APIREL__/api_cxx/dbc_dup.html#2 @Dbc::dup
+__APIREL__/api_cxx/dbc_dup.html#DB_POSITION Dbc::dup@DB_POSITION
+__APIREL__/api_cxx/dbc_get.html#2 @Dbc::get
+__APIREL__/api_cxx/dbc_get.html#DB_CURRENT Dbc::get@DB_CURRENT
+__APIREL__/api_cxx/dbc_get.html#DB_FIRST Dbc::get@DB_FIRST
+__APIREL__/api_cxx/dbc_get.html#DB_LAST Dbc::get@DB_LAST
+__APIREL__/api_cxx/dbc_get.html#DB_GET_BOTH Dbc::get@DB_GET_BOTH
+__APIREL__/api_cxx/dbc_get.html#DB_GET_RECNO Dbc::get@DB_GET_RECNO
+__APIREL__/api_cxx/dbc_get.html#DB_JOIN_ITEM Dbc::get@DB_JOIN_ITEM
+__APIREL__/api_cxx/dbc_get.html#DB_NEXT Dbc::get@DB_NEXT
+__APIREL__/api_cxx/dbc_get.html#DB_PREV Dbc::get@DB_PREV
+__APIREL__/api_cxx/dbc_get.html#DB_NEXT_DUP Dbc::get@DB_NEXT_DUP
+__APIREL__/api_cxx/dbc_get.html#DB_NEXT_NODUP Dbc::get@DB_NEXT_NODUP
+__APIREL__/api_cxx/dbc_get.html#DB_PREV_NODUP Dbc::get@DB_PREV_NODUP
+__APIREL__/api_cxx/dbc_get.html#DB_SET Dbc::get@DB_SET
+__APIREL__/api_cxx/dbc_get.html#DB_SET_RANGE Dbc::get@DB_SET_RANGE
+__APIREL__/api_cxx/dbc_get.html#DB_SET_RECNO Dbc::get@DB_SET_RECNO
+__APIREL__/api_cxx/dbc_get.html#DB_RMW Dbc::get@DB_RMW
+__APIREL__/api_cxx/dbc_put.html#2 @Dbc::put
+__APIREL__/api_cxx/dbc_put.html#DB_AFTER Dbc::put@DB_AFTER
+__APIREL__/api_cxx/dbc_put.html#DB_BEFORE Dbc::put@DB_BEFORE
+__APIREL__/api_cxx/dbc_put.html#DB_CURRENT Dbc::put@DB_CURRENT
+__APIREL__/api_cxx/dbc_put.html#DB_KEYFIRST Dbc::put@DB_KEYFIRST
+__APIREL__/api_cxx/dbc_put.html#DB_KEYLAST Dbc::put@DB_KEYLAST
+__APIREL__/api_cxx/dbc_put.html#DB_NODUPDATA Dbc::put@DB_NODUPDATA
+__APIREL__/api_cxx/env_close.html#2 @DbEnv::close
+__APIREL__/api_cxx/env_open.html#2 @DbEnv::open
+__APIREL__/api_cxx/env_open.html#DB_JOINENV DbEnv::open@DB_JOINENV
+__APIREL__/api_cxx/env_open.html#DB_INIT_CDB DbEnv::open@DB_INIT_CDB
+__APIREL__/api_cxx/env_open.html#DB_INIT_LOCK DbEnv::open@DB_INIT_LOCK
+__APIREL__/api_cxx/env_open.html#DB_INIT_LOG DbEnv::open@DB_INIT_LOG
+__APIREL__/api_cxx/env_open.html#DB_INIT_MPOOL DbEnv::open@DB_INIT_MPOOL
+__APIREL__/api_cxx/env_open.html#DB_INIT_TXN DbEnv::open@DB_INIT_TXN
+__APIREL__/api_cxx/env_open.html#DB_RECOVER DbEnv::open@DB_RECOVER
+__APIREL__/api_cxx/env_open.html#DB_RECOVER_FATAL DbEnv::open@DB_RECOVER_FATAL
+__APIREL__/api_cxx/env_open.html#DB_USE_ENVIRON DbEnv::open@DB_USE_ENVIRON
+__APIREL__/api_cxx/env_open.html#DB_USE_ENVIRON_ROOT DbEnv::open@DB_USE_ENVIRON_ROOT
+__APIREL__/api_cxx/env_open.html#DB_CREATE DbEnv::open@DB_CREATE
+__APIREL__/api_cxx/env_open.html#DB_LOCKDOWN DbEnv::open@DB_LOCKDOWN
+__APIREL__/api_cxx/env_open.html#DB_PRIVATE DbEnv::open@DB_PRIVATE
+__APIREL__/api_cxx/env_open.html#DB_SYSTEM_MEM DbEnv::open@DB_SYSTEM_MEM
+__APIREL__/api_cxx/env_open.html#DB_THREAD DbEnv::open@DB_THREAD
+__APIREL__/api_cxx/env_remove.html#2 @DbEnv::remove
+__APIREL__/api_cxx/env_remove.html#DB_FORCE DbEnv::remove@DB_FORCE
+__APIREL__/api_cxx/env_remove.html#DB_USE_ENVIRON DbEnv::remove@DB_USE_ENVIRON
+__APIREL__/api_cxx/env_remove.html#DB_USE_ENVIRON_ROOT DbEnv::remove@DB_USE_ENVIRON_ROOT
+__APIREL__/api_cxx/env_set_cachesize.html#2 @DbEnv::set_cachesize
+__APIREL__/api_cxx/env_set_data_dir.html#2 @DbEnv::set_data_dir
+__APIREL__/api_cxx/env_set_errcall.html#2 @DbEnv::set_errcall
+__APIREL__/api_cxx/env_set_errpfx.html#2 @DbEnv::set_errpfx
+__APIREL__/api_cxx/env_set_feedback.html#2 @DbEnv::set_feedback
+__APIREL__/api_cxx/env_set_feedback.html#DB_RECOVER DbEnv::set_feedback@DB_RECOVER
+__APIREL__/api_cxx/env_set_flags.html#2 @DbEnv::set_flags
+__APIREL__/api_cxx/env_set_flags.html#DB_CDB_ALLDB DbEnv::set_flags@DB_CDB_ALLDB
+__APIREL__/api_cxx/env_set_flags.html#DB_NOMMAP DbEnv::set_flags@DB_NOMMAP
+__APIREL__/api_cxx/env_set_flags.html#DB_TXN_NOSYNC DbEnv::set_flags@DB_TXN_NOSYNC
+__APIREL__/api_cxx/env_set_lg_bsize.html#2 @DbEnv::set_lg_bsize
+__APIREL__/api_cxx/env_set_lg_dir.html#2 @DbEnv::set_lg_dir
+__APIREL__/api_cxx/env_set_lg_max.html#2 @DbEnv::set_lg_max
+__APIREL__/api_cxx/env_set_lk_conflicts.html#2 @DbEnv::set_lk_conflicts
+__APIREL__/api_cxx/env_set_lk_detect.html#2 @DbEnv::set_lk_detect
+__APIREL__/api_cxx/env_set_lk_detect.html#DB_LOCK_DEFAULT DbEnv::set_lk_detect@DB_LOCK_DEFAULT
+__APIREL__/api_cxx/env_set_lk_detect.html#DB_LOCK_OLDEST DbEnv::set_lk_detect@DB_LOCK_OLDEST
+__APIREL__/api_cxx/env_set_lk_detect.html#DB_LOCK_RANDOM DbEnv::set_lk_detect@DB_LOCK_RANDOM
+__APIREL__/api_cxx/env_set_lk_detect.html#DB_LOCK_YOUNGEST DbEnv::set_lk_detect@DB_LOCK_YOUNGEST
+__APIREL__/api_cxx/env_set_lk_max.html#2 @DbEnv::set_lk_max
+__APIREL__/api_cxx/env_set_lk_max_locks.html#2 @DbEnv::set_lk_max_locks
+__APIREL__/api_cxx/env_set_lk_max_lockers.html#2 @DbEnv::set_lk_max_lockers
+__APIREL__/api_cxx/env_set_lk_max_objects.html#2 @DbEnv::set_lk_max_objects
+__APIREL__/api_cxx/env_set_mp_mmapsize.html#2 @DbEnv::set_mp_mmapsize
+__APIREL__/api_cxx/env_set_mutexlocks.html#2 @DbEnv::set_mutexlocks
+__APIREL__/api_cxx/env_set_pageyield.html#2 @DbEnv::set_pageyield
+__APIREL__/api_cxx/env_set_panicstate.html#2 @DbEnv::set_panicstate
+__APIREL__/api_cxx/env_set_rec_init.html#2 @DbEnv::set_recovery_init
+__APIREL__/api_cxx/env_set_region_init.html#2 @DbEnv::set_region_init
+__APIREL__/api_cxx/env_set_server.html#2 @DbEnv::set_server
+__APIREL__/api_cxx/env_set_server.html#DB_NOSERVER DbEnv::set_server@DB_NOSERVER
+__APIREL__/api_cxx/env_set_server.html#DB_NOSERVER_ID DbEnv::set_server@DB_NOSERVER_ID
+__APIREL__/api_cxx/env_set_shm_key.html#2 @DbEnv::set_shm_key
+__APIREL__/api_cxx/env_set_tas_spins.html#2 @DbEnv::set_tas_spins
+__APIREL__/api_cxx/env_set_tmp_dir.html#2 @DbEnv::set_tmp_dir
+__APIREL__/api_cxx/env_set_tx_max.html#2 @DbEnv::set_tx_max
+__APIREL__/api_cxx/env_set_tx_recover.html#2 @DbEnv::set_tx_recover
+__APIREL__/api_cxx/env_set_tx_recover.html#DB_TXN_BACKWARD_ROLL DbEnv::set_tx_recover@DB_TXN_BACKWARD_ROLL
+__APIREL__/api_cxx/env_set_tx_recover.html#DB_TXN_FORWARD_ROLL DbEnv::set_tx_recover@DB_TXN_FORWARD_ROLL
+__APIREL__/api_cxx/env_set_tx_recover.html#DB_TXN_ABORT DbEnv::set_tx_recover@DB_TXN_ABORT
+__APIREL__/api_cxx/env_set_tx_timestamp.html#2 @DbEnv::set_tx_timestamp
+__APIREL__/api_cxx/env_set_verbose.html#2 @DbEnv::set_verbose
+__APIREL__/api_cxx/env_set_verbose.html#DB_VERB_CHKPOINT DbEnv::set_verbose@DB_VERB_CHKPOINT
+__APIREL__/api_cxx/env_set_verbose.html#DB_VERB_DEADLOCK DbEnv::set_verbose@DB_VERB_DEADLOCK
+__APIREL__/api_cxx/env_set_verbose.html#DB_VERB_RECOVERY DbEnv::set_verbose@DB_VERB_RECOVERY
+__APIREL__/api_cxx/env_set_verbose.html#DB_VERB_WAITSFOR DbEnv::set_verbose@DB_VERB_WAITSFOR
+__APIREL__/api_cxx/env_strerror.html#2 @DbEnv::strerror
+__APIREL__/api_cxx/env_version.html#2 @DbEnv::version
+__APIREL__/api_cxx/lock_detect.html#2 @DbEnv::lock_detect
+__APIREL__/api_cxx/lock_detect.html#DB_LOCK_CONFLICT DbEnv::lock_detect@DB_LOCK_CONFLICT
+__APIREL__/api_cxx/lock_get.html#2 @DbEnv::lock_get
+__APIREL__/api_cxx/lock_get.html#DB_LOCK_NOWAIT DbEnv::lock_get@DB_LOCK_NOWAIT
+__APIREL__/api_cxx/lock_get.html#DB_LOCK_NOTGRANTED DbEnv::lock_get@DB_LOCK_NOTGRANTED
+__APIREL__/api_cxx/lock_id.html#2 @DbEnv::lock_id
+__APIREL__/api_cxx/lock_put.html#2 @DbLock::put
+__APIREL__/api_cxx/lock_stat.html#2 @DbEnv::lock_stat
+__APIREL__/api_cxx/lock_vec.html#2 @DbEnv::lock_vec
+__APIREL__/api_cxx/lock_vec.html#DB_LOCK_NOWAIT DbEnv::lock_vec@DB_LOCK_NOWAIT
+__APIREL__/api_cxx/lock_vec.html#op DbEnv::lock_vec@op
+__APIREL__/api_cxx/lock_vec.html#DB_LOCK_GET DbEnv::lock_vec@DB_LOCK_GET
+__APIREL__/api_cxx/lock_vec.html#DB_LOCK_PUT DbEnv::lock_vec@DB_LOCK_PUT
+__APIREL__/api_cxx/lock_vec.html#DB_LOCK_PUT_ALL DbEnv::lock_vec@DB_LOCK_PUT_ALL
+__APIREL__/api_cxx/lock_vec.html#DB_LOCK_PUT_OBJ DbEnv::lock_vec@DB_LOCK_PUT_OBJ
+__APIREL__/api_cxx/lock_vec.html#obj DbEnv::lock_vec@obj
+__APIREL__/api_cxx/lock_vec.html#mode DbEnv::lock_vec@mode
+__APIREL__/api_cxx/lock_vec.html#lock DbEnv::lock_vec@lock
+__APIREL__/api_cxx/lock_vec.html#DB_LOCK_NOTGRANTED DbEnv::lock_vec@DB_LOCK_NOTGRANTED
+__APIREL__/api_cxx/log_archive.html#2 @DbEnv::log_archive
+__APIREL__/api_cxx/log_archive.html#DB_ARCH_ABS DbEnv::log_archive@DB_ARCH_ABS
+__APIREL__/api_cxx/log_archive.html#DB_ARCH_DATA DbEnv::log_archive@DB_ARCH_DATA
+__APIREL__/api_cxx/log_archive.html#DB_ARCH_LOG DbEnv::log_archive@DB_ARCH_LOG
+__APIREL__/api_cxx/log_compare.html#2 @DbEnv::log_compare
+__APIREL__/api_cxx/log_file.html#2 @DbEnv::log_file
+__APIREL__/api_cxx/log_flush.html#2 @DbEnv::log_flush
+__APIREL__/api_cxx/log_get.html#2 @DbEnv::log_get
+__APIREL__/api_cxx/log_get.html#DB_CHECKPOINT DbEnv::log_get@DB_CHECKPOINT
+__APIREL__/api_cxx/log_get.html#DB_FIRST DbEnv::log_get@DB_FIRST
+__APIREL__/api_cxx/log_get.html#DB_LAST DbEnv::log_get@DB_LAST
+__APIREL__/api_cxx/log_get.html#DB_NEXT DbEnv::log_get@DB_NEXT
+__APIREL__/api_cxx/log_get.html#DB_PREV DbEnv::log_get@DB_PREV
+__APIREL__/api_cxx/log_get.html#DB_CURRENT DbEnv::log_get@DB_CURRENT
+__APIREL__/api_cxx/log_get.html#DB_SET DbEnv::log_get@DB_SET
+__APIREL__/api_cxx/log_put.html#2 @DbEnv::log_put
+__APIREL__/api_cxx/log_put.html#DB_CHECKPOINT DbEnv::log_put@DB_CHECKPOINT
+__APIREL__/api_cxx/log_put.html#DB_CURLSN DbEnv::log_put@DB_CURLSN
+__APIREL__/api_cxx/log_put.html#DB_FLUSH DbEnv::log_put@DB_FLUSH
+__APIREL__/api_cxx/log_register.html#2 @DbEnv::log_register
+__APIREL__/api_cxx/log_stat.html#2 @DbEnv::log_stat
+__APIREL__/api_cxx/log_unregister.html#2 @DbEnv::log_unregister
+__APIREL__/api_cxx/memp_fclose.html#2 @DbMpoolFile::close
+__APIREL__/api_cxx/memp_fget.html#2 @DbMpoolFile::get
+__APIREL__/api_cxx/memp_fget.html#DB_MPOOL_CREATE DbMpoolFile::get@DB_MPOOL_CREATE
+__APIREL__/api_cxx/memp_fget.html#DB_MPOOL_LAST DbMpoolFile::get@DB_MPOOL_LAST
+__APIREL__/api_cxx/memp_fget.html#DB_MPOOL_NEW DbMpoolFile::get@DB_MPOOL_NEW
+__APIREL__/api_cxx/memp_fopen.html#2 @DbMpoolFile::open
+__APIREL__/api_cxx/memp_fopen.html#DB_CREATE DbMpoolFile::open@DB_CREATE
+__APIREL__/api_cxx/memp_fopen.html#DB_NOMMAP DbMpoolFile::open@DB_NOMMAP
+__APIREL__/api_cxx/memp_fopen.html#DB_RDONLY DbMpoolFile::open@DB_RDONLY
+__APIREL__/api_cxx/memp_fopen.html#ftype DbMpoolFile::open@ftype
+__APIREL__/api_cxx/memp_fopen.html#pgcookie DbMpoolFile::open@pgcookie
+__APIREL__/api_cxx/memp_fopen.html#fileid DbMpoolFile::open@fileid
+__APIREL__/api_cxx/memp_fopen.html#lsn_offset DbMpoolFile::open@lsn_offset
+__APIREL__/api_cxx/memp_fopen.html#clear_len DbMpoolFile::open@clear_len
+__APIREL__/api_cxx/memp_fput.html#2 @DbMpoolFile::put
+__APIREL__/api_cxx/memp_fput.html#DB_MPOOL_CLEAN DbMpoolFile::put@DB_MPOOL_CLEAN
+__APIREL__/api_cxx/memp_fput.html#DB_MPOOL_DIRTY DbMpoolFile::put@DB_MPOOL_DIRTY
+__APIREL__/api_cxx/memp_fput.html#DB_MPOOL_DISCARD DbMpoolFile::put@DB_MPOOL_DISCARD
+__APIREL__/api_cxx/memp_fset.html#2 @DbMpoolFile::set
+__APIREL__/api_cxx/memp_fset.html#DB_MPOOL_CLEAN DbMpoolFile::set@DB_MPOOL_CLEAN
+__APIREL__/api_cxx/memp_fset.html#DB_MPOOL_DIRTY DbMpoolFile::set@DB_MPOOL_DIRTY
+__APIREL__/api_cxx/memp_fset.html#DB_MPOOL_DISCARD DbMpoolFile::set@DB_MPOOL_DISCARD
+__APIREL__/api_cxx/memp_fsync.html#2 @DbMpoolFile::sync
+__APIREL__/api_cxx/memp_register.html#2 @DbEnv::memp_register
+__APIREL__/api_cxx/memp_stat.html#2 @DbEnv::memp_stat
+__APIREL__/api_cxx/memp_sync.html#2 @DbEnv::memp_sync
+__APIREL__/api_cxx/memp_trickle.html#2 @DbEnv::memp_trickle
+__APIREL__/api_cxx/txn_abort.html#2 @DbTxn::abort
+__APIREL__/api_cxx/txn_begin.html#2 @DbEnv::txn_begin
+__APIREL__/api_cxx/txn_begin.html#DB_TXN_NOSYNC DbEnv::txn_begin@DB_TXN_NOSYNC
+__APIREL__/api_cxx/txn_begin.html#DB_TXN_NOWAIT DbEnv::txn_begin@DB_TXN_NOWAIT
+__APIREL__/api_cxx/txn_begin.html#DB_TXN_SYNC DbEnv::txn_begin@DB_TXN_SYNC
+__APIREL__/api_cxx/txn_checkpoint.html#2 @DbEnv::txn_checkpoint
+__APIREL__/api_cxx/txn_checkpoint.html#DB_FORCE DbEnv::txn_checkpoint@DB_FORCE
+__APIREL__/api_cxx/txn_commit.html#2 @DbTxn::commit
+__APIREL__/api_cxx/txn_commit.html#DB_TXN_NOSYNC DbTxn::commit@DB_TXN_NOSYNC
+__APIREL__/api_cxx/txn_commit.html#DB_TXN_SYNC DbTxn::commit@DB_TXN_SYNC
+__APIREL__/api_cxx/txn_id.html#2 @DbTxn::id
+__APIREL__/api_cxx/txn_prepare.html#2 @DbTxn::prepare
+__APIREL__/api_cxx/txn_stat.html#2 @DbEnv::txn_stat
diff --git a/bdb/docs/api_cxx/txn_abort.html b/bdb/docs/api_cxx/txn_abort.html
new file mode 100644
index 00000000000..f9c863b3e87
--- /dev/null
+++ b/bdb/docs/api_cxx/txn_abort.html
@@ -0,0 +1,67 @@
+<!--$Id: txn_abort.so,v 10.25 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::abort</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn::abort</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbTxn::abort();
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::abort method causes an abnormal termination of the
+transaction. The log is played backwards and any necessary recovery
+operations are initiated through the <b>recover</b> function specified
+to <a href="../api_cxx/env_open.html">DbEnv::open</a>. After the log processing is completed, all locks
+held by the transaction are released. As is the case for
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>, applications that require strict two-phase locking
+should not explicitly release any locks.
+<p>In the case of nested transactions, aborting a parent transaction causes
+all children (unresolved or not) of the parent transaction to be aborted.
+<p>Once the DbTxn::abort method returns, the <a href="../api_cxx/txn_class.html">DbTxn</a> handle may not
+be accessed again.
+<p>The DbTxn::abort method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbTxn::abort method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::abort method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
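
A minimal C++ sketch of the abort pattern the page above documents, assuming an already-opened transactional DbEnv named env and an open Db handle db (both names hypothetical):

    DbTxn *txn = NULL;
    env.txn_begin(NULL, &txn, 0);           // begin a top-level transaction
    try {
        Dbt key((void *)"fruit", 6), data((void *)"apple", 6);
        db.put(txn, &key, &data, 0);        // an access-method call made under txn
        txn->commit(0);                     // success: resolve the transaction
    } catch (DbException &e) {
        txn->abort();                       // failure: the log is played backward and locks are released
        // per the page above, the DbTxn handle must not be accessed again
    }
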
diff --git a/bdb/docs/api_cxx/txn_begin.html b/bdb/docs/api_cxx/txn_begin.html
new file mode 100644
index 00000000000..4cacec56088
--- /dev/null
+++ b/bdb/docs/api_cxx/txn_begin.html
@@ -0,0 +1,96 @@
+<!--$Id: txn_begin.so,v 10.37 2001/01/11 17:47:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::txn_begin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::txn_begin</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::txn_begin(DbTxn *parent, DbTxn **tid, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::txn_begin method creates a new transaction in the environment
+and copies a pointer to a <a href="../api_cxx/txn_class.html">DbTxn</a> that uniquely identifies it into
+the memory referenced by <b>tid</b>.
+<p>If the <b>parent</b> argument is non-NULL, the new transaction will
+be a nested transaction, with the transaction indicated by
+<b>parent</b> as its parent. Transactions may be
+nested to any level.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log when this transaction commits or
+prepares. This means the transaction will exhibit the ACI (atomicity,
+consistency and isolation) properties, but not D (durability), i.e.,
+database integrity will be maintained but it is possible that this
+transaction may be undone during recovery instead of being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of the
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a> interface.
+<p><dt><a name="DB_TXN_NOWAIT">DB_TXN_NOWAIT</a><dd>If a lock is unavailable for any Berkeley DB operation performed in the context
+of this transaction, return immediately instead of blocking on the lock.
+The error return in that case will be <a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a>.
+<p><dt><a name="DB_TXN_SYNC">DB_TXN_SYNC</a><dd>Synchronously flush the log when this transaction commits or prepares.
+This means the transaction will exhibit all of the ACID (atomicity,
+consistency, isolation and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_cxx/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>
+interface.
+</dl>
+<p><b>Note: A transaction may not span threads,
+i.e., each transaction must begin and end in the same thread, and each
+transaction may only be used by a single thread.</b>
+<p><b>Note: cursors may not span transactions, i.e., each cursor must be opened
+and closed within a single transaction.</b>
+<p><b>Note: a parent transaction may not issue any Berkeley DB operations, except for
+DbEnv::txn_begin, <a href="../api_cxx/txn_abort.html">DbTxn::abort</a> and <a href="../api_cxx/txn_commit.html">DbTxn::commit</a>, while it has
+active child transactions (child transactions that have not yet been
+committed or aborted).</b>
+<p>The DbEnv::txn_begin method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::txn_begin method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of concurrent transactions has been reached.
+</dl>
+<p>The DbEnv::txn_begin method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::txn_begin method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
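
For illustration, a short C++ fragment of the nesting and flag behavior described above, assuming an open transactional DbEnv named env (hypothetical):

    DbTxn *parent = NULL, *child = NULL;
    env.txn_begin(NULL, &parent, 0);                // top-level transaction
    env.txn_begin(parent, &child, DB_TXN_NOWAIT);   // nested child; lock waits return DB_LOCK_NOTGRANTED
    // ... Berkeley DB operations performed with child ...
    child->commit(0);     // resolution is deferred until the parent resolves
    parent->commit(0);    // committing the parent commits the already-committed child
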
diff --git a/bdb/docs/api_cxx/txn_checkpoint.html b/bdb/docs/api_cxx/txn_checkpoint.html
new file mode 100644
index 00000000000..3bac70bccbc
--- /dev/null
+++ b/bdb/docs/api_cxx/txn_checkpoint.html
@@ -0,0 +1,75 @@
+<!--$Id: txn_checkpoint.so,v 10.25 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::txn_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::txn_checkpoint</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags) const;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::txn_checkpoint method flushes the underlying memory pool,
+writes a checkpoint record to the log and then flushes the log.
+<p>If either <b>kbyte</b> or <b>min</b> is non-zero, the checkpoint is only
+done if there has been activity since the last checkpoint and either
+more than <b>min</b> minutes have passed since the last checkpoint,
+or more than <b>kbyte</b> kilobytes of log data have been written since
+the last checkpoint.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_FORCE">DB_FORCE</a><dd>Force a checkpoint record even if there has been no activity since the
+last checkpoint.
+</dl>
+<p>The DbEnv::txn_checkpoint method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, returns 0 on success, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> if there were pages that needed to be
+written to complete the checkpoint but that <a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a> was unable
+to write immediately.
+<h1>Errors</h1>
+<p>The DbEnv::txn_checkpoint method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::txn_checkpoint method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::txn_checkpoint method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
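
A hedged sketch of how the thresholds above might be used from C++, assuming an open transactional DbEnv named env (hypothetical):

    // Checkpoint only if at least 1024KB of log or 5 minutes have accumulated.
    int ret = env.txn_checkpoint(1024, 5, 0);
    if (ret == DB_INCOMPLETE) {
        // Some dirty pages could not be written immediately; a checkpoint
        // daemon would normally retry on its next pass.
    }
    // A forced checkpoint ignores both thresholds:
    (void)env.txn_checkpoint(0, 0, DB_FORCE);
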
diff --git a/bdb/docs/api_cxx/txn_class.html b/bdb/docs/api_cxx/txn_class.html
new file mode 100644
index 00000000000..7a335f92a1a
--- /dev/null
+++ b/bdb/docs/api_cxx/txn_class.html
@@ -0,0 +1,59 @@
+<!--$Id: txn_class.so,v 10.13 2000/12/04 18:05:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbTxn { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the DbTxn class.
+<p>The <a href="../api_cxx/dbenv_class.html">DbEnv</a> transaction methods and the DbTxn class provide
+transaction semantics. Full transaction support is provided by a
+collection of modules that provide interfaces to the services required
+for transaction processing. These services are recovery, concurrency
+control and the management of shared data.
+<p>Transaction semantics can be applied to the access methods described in
+Db through method call parameters.
+<p>The model intended for transactional use (and the one that is used by
+the access methods) is write-ahead logging to record both before- and
+after-images. Locking follows a two-phase protocol, with all locks being
+released at transaction commit.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
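
As a sketch of the subsystems this page refers to (recovery, concurrency control, shared data management), the corresponding DbEnv::open flags might be combined as follows; the home directory is hypothetical:

    DbEnv env(0);
    env.open("/var/dbhome",
        DB_CREATE | DB_INIT_TXN | DB_INIT_LOG |   // transactions and write-ahead logging
        DB_INIT_LOCK | DB_INIT_MPOOL |            // two-phase locking and the shared cache
        DB_RECOVER,                               // run normal recovery at startup
        0);
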
diff --git a/bdb/docs/api_cxx/txn_commit.html b/bdb/docs/api_cxx/txn_commit.html
new file mode 100644
index 00000000000..16e20c2535c
--- /dev/null
+++ b/bdb/docs/api_cxx/txn_commit.html
@@ -0,0 +1,87 @@
+<!--$Id: txn_commit.so,v 10.27 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn::commit</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbTxn::commit(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::commit method ends the transaction. In the case of nested
+transactions, if the transaction is a parent transaction, committing
+the parent transaction causes all unresolved children of the parent to
+be committed.
+<p>In the case of nested transactions, if the transaction is a child
+transaction, its locks are not released, but are acquired by its parent.
+While the commit of the child transaction will succeed, the actual
+resolution of the child transaction is postponed until the parent
+transaction is committed or aborted, i.e., if its parent transaction
+commits, it will be committed, and if its parent transaction aborts, it
+will be aborted.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log. This means the transaction will
+exhibit the ACI (atomicity, consistency and isolation) properties, but
+not D (durability), i.e., database integrity will be maintained but it is
+possible that this transaction may be undone during recovery instead of
+being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of the
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a> interface.
+<p><dt><a name="DB_TXN_SYNC">DB_TXN_SYNC</a><dd>Synchronously flush the log. This means the transaction will exhibit
+all of the ACID (atomicity, consistency, isolation and durability)
+properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_cxx/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>
+or <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a> interfaces.
+</dl>
+<p>Once the DbTxn::commit method returns, the <a href="../api_cxx/txn_class.html">DbTxn</a> handle may not
+be accessed again. If DbTxn::commit encounters an error, the
+transaction and all child transactions of the transaction are aborted.
+<p>The DbTxn::commit method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbTxn::commit method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::commit method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
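
For illustration, the per-commit durability choice described above as a C++ fragment, assuming an open transactional DbEnv named env (hypothetical):

    DbTxn *txn = NULL;
    env.txn_begin(NULL, &txn, 0);
    // ... updates made under txn ...
    txn->commit(DB_TXN_NOSYNC);   // ACI only: the log is not flushed synchronously,
                                  // so this commit may be undone by recovery
    // txn may not be accessed again, whether the commit succeeded or failed
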
diff --git a/bdb/docs/api_cxx/txn_id.html b/bdb/docs/api_cxx/txn_id.html
new file mode 100644
index 00000000000..9c14adf1c59
--- /dev/null
+++ b/bdb/docs/api_cxx/txn_id.html
@@ -0,0 +1,52 @@
+<!--$Id: txn_id.so,v 10.12 1999/12/20 08:52:32 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn::id</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+u_int32_t
+DbTxn::id();
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::id method returns the unique transaction id associated with the
+specified transaction. Locking calls made on behalf of this transaction
+should use the value returned from DbTxn::id as the locker parameter
+to the <a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> or <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> calls.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
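
A brief C++ sketch of the locker usage described above, assuming an open DbEnv named env with the lock subsystem initialized and an active DbTxn *txn (all names hypothetical):

    Dbt obj((void *)"my-object", 10);   // the object being locked (hypothetical)
    DbLock lock;
    int ret = env.lock_get(txn->id(), DB_LOCK_NOWAIT, &obj, DB_LOCK_WRITE, &lock);
    if (ret == 0)
        lock.put(&env);   // release explicitly only if not relying on strict two-phase locking
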
diff --git a/bdb/docs/api_cxx/txn_prepare.html b/bdb/docs/api_cxx/txn_prepare.html
new file mode 100644
index 00000000000..de7db8e7611
--- /dev/null
+++ b/bdb/docs/api_cxx/txn_prepare.html
@@ -0,0 +1,67 @@
+<!--$Id: txn_prepare.so,v 10.17 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::prepare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn::prepare</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbTxn::prepare();
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::prepare method initiates the first phase of a two-phase commit.
+<p>In a distributed transaction environment, Berkeley DB can be used as a local
+transaction manager. In this case, the distributed transaction manager
+must send <i>prepare</i> messages to each local manager. The local
+manager must then issue a DbTxn::prepare and await its successful
+return before responding to the distributed transaction manager. Only
+after the distributed transaction manager receives successful responses
+from all of its <i>prepare</i> messages should it issue any
+<i>commit</i> messages.
+<p>In the case of nested transactions, preparing a parent transaction
+causes all unresolved children of the parent transaction to be prepared.
+<p>The DbTxn::prepare method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbTxn::prepare method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::prepare method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
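
A C++ sketch of the local-manager side of the protocol above, assuming an open transactional DbEnv named env (hypothetical):

    DbTxn *txn = NULL;
    env.txn_begin(NULL, &txn, 0);
    // ... perform the work requested by the distributed transaction manager ...
    txn->prepare();    // phase 1: report success to the coordinator only if this returns
    // ... await the coordinator's decision ...
    txn->commit(0);    // phase 2: commit, or txn->abort() if told to roll back
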
diff --git a/bdb/docs/api_cxx/txn_stat.html b/bdb/docs/api_cxx/txn_stat.html
new file mode 100644
index 00000000000..9644a6ae889
--- /dev/null
+++ b/bdb/docs/api_cxx/txn_stat.html
@@ -0,0 +1,100 @@
+<!--$Id: txn_stat.so,v 10.27 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::txn_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv::txn_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+};
+int
+DbEnv::txn_stat(DB_TXN_STAT **statp, db_malloc_fcn_type db_malloc);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::txn_stat method
+creates a statistical structure and copies a pointer to it into a
+user-specified memory location.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference; references inside the returned memory do not need to be
+individually freed.
+<p>The transaction region statistics are stored in a structure of type
+DB_TXN_STAT. The following DB_TXN_STAT fields will be filled in:
+<p><dl compact>
+<dt><a href="../api_cxx/lsn_class.html">DbLsn</a> st_last_ckp;<dd>The LSN of the last checkpoint.
+<dt><a href="../api_cxx/lsn_class.html">DbLsn</a> st_pending_ckp;<dd>The LSN of any checkpoint that is currently in progress. If
+<b>st_pending_ckp</b> is the same as <b>st_last_ckp</b> there
+is no checkpoint in progress.
+<dt>time_t st_time_ckp;<dd>The time the last completed checkpoint finished (as the number of seconds
+since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) <b>time</b> interface).
+<dt>u_int32_t st_last_txnid;<dd>The last transaction ID allocated.
+<dt>u_int32_t st_maxtxns;<dd>The maximum number of active transactions possible.
+<dt>u_int32_t st_nactive;<dd>The number of transactions that are currently active.
+<dt>u_int32_t st_maxnactive;<dd>The maximum number of active transactions at any one time.
+<dt>u_int32_t st_nbegins;<dd>The number of transactions that have begun.
+<dt>u_int32_t st_naborts;<dd>The number of transactions that have aborted.
+<dt>u_int32_t st_ncommits;<dd>The number of transactions that have committed.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+<dt>DB_TXN_ACTIVE * st_txnarray;<dd>A pointer to an array of <b>st_nactive</b> DB_TXN_ACTIVE structures,
+describing the currently active transactions. The following fields of
+the DB_TXN_ACTIVE structure will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t txnid;<dd>The transaction ID as returned by <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>.
+<dt>u_int32_t parentid;<dd>The transaction ID of the parent transaction (or 0, if no parent).
+<dt><a href="../api_cxx/lsn_class.html">DbLsn</a> lsn;<dd>The log sequence number of the transaction-begin record.
+</dl>
+</dl>
+<p>The DbEnv::txn_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::txn_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::txn_stat method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h3>Classes</h3>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
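
For illustration, a C++ fragment that retrieves and releases the statistics described above; env is an assumed open transactional DbEnv, and <cstdio>/<cstdlib> are assumed to be included:

    DB_TXN_STAT *sp = NULL;
    env.txn_stat(&sp, NULL);    // NULL: allocate with the library malloc(3)
    printf("active %lu, begun %lu, committed %lu, aborted %lu\n",
        (unsigned long)sp->st_nactive, (unsigned long)sp->st_nbegins,
        (unsigned long)sp->st_ncommits, (unsigned long)sp->st_naborts);
    free(sp);                   // a single free() releases the whole structure
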
diff --git a/bdb/docs/api_cxx/what.html b/bdb/docs/api_cxx/what.html
new file mode 100644
index 00000000000..9e0410c7684
--- /dev/null
+++ b/bdb/docs/api_cxx/what.html
@@ -0,0 +1,43 @@
+<!--$Id: what.so,v 10.8 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbException::what</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbException::what</h1>
+</td>
+<td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+virtual const char *
+DbException::what() const;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbException object contains an informational string and an errno.
+The errno can be obtained by using <a href="../api_cxx/get_errno.html">DbException::get_errno</a>.
+The informational string can be obtained by using DbException::what.
+<h3>Class</h3>
+<a href="../api_cxx/except_class.html">DbException</a>
+<h1>See Also</h1>
+<a href="../api_cxx/get_errno.html">DbException::get_errno</a>
+and
+<a href="../api_cxx/what.html">DbException::what</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_cxx/cxx_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
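
A minimal C++ sketch of using the informational string, assuming an open DbEnv named env and <cstdio> included (hypothetical context):

    try {
        env.txn_checkpoint(0, 0, DB_FORCE);   // any call made through the C++ API may throw
    } catch (DbException &e) {
        fprintf(stderr, "Berkeley DB error: %s (errno %d)\n",
            e.what(), e.get_errno());
    }
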
diff --git a/bdb/docs/api_java/db_class.html b/bdb/docs/api_java/db_class.html
new file mode 100644
index 00000000000..b03e55c1f69
--- /dev/null
+++ b/bdb/docs/api_java/db_class.html
@@ -0,0 +1,92 @@
+<!--$Id: db_class.so,v 10.23 2000/03/17 01:54:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class Db extends Object
+{
+ Db(DbEnv dbenv, int flags)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the Db class,
+which is the center of access method activity.
+<p>If no <b>dbenv</b> value is specified, the database is standalone, i.e.,
+it is not part of any Berkeley DB environment.
+<p>If a <b>dbenv</b> value is specified, the database is created within the
+specified Berkeley DB environment. The database access methods automatically
+make calls to the other subsystems in Berkeley DB based on the enclosing
+environment. For example, if the environment has been configured to use
+locking, then the access methods will automatically acquire the correct
+locks when reading and writing pages of the database.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="Db.DB_XA_CREATE">Db.DB_XA_CREATE</a><dd>Instead of creating a standalone database, create a database intended to
+be accessed via applications running under an X/Open conformant Transaction
+Manager. The database will be opened in the environment specified by the
+OPENINFO parameter of the GROUPS section of the ubbconfig file. See the
+<a href="../ref/xa/intro.html">XA Resource Manager</a> chapter in the
+Reference Guide for more information.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
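
In C++ terms (the Java constructor above mirrors the C++ Db constructor), creating a handle within an environment versus a standalone one looks roughly like this; env is an assumed open DbEnv:

    Db db_in_env(&env, 0);    // created within the environment: its locking, logging, etc. apply
    Db standalone(NULL, 0);   // standalone database, outside any Berkeley DB environment
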
diff --git a/bdb/docs/api_java/db_close.html b/bdb/docs/api_java/db_close.html
new file mode 100644
index 00000000000..fcb8fde1dea
--- /dev/null
+++ b/bdb/docs/api_java/db_close.html
@@ -0,0 +1,113 @@
+<!--$Id: db_close.so,v 10.27 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.close</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int close(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.close method flushes any cached database information to disk,
+closes any open cursors, frees any allocated resources, and closes any
+underlying files. Since key/data pairs are cached in memory, failing to
+sync the file with the Db.close or <a href="../api_java/db_sync.html">Db.sync</a> method may result
+in inconsistent or lost information.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_NOSYNC">Db.DB_NOSYNC</a><dd>Do not flush cached information to disk.
+<p>The <a href="../api_java/db_close.html#DB_NOSYNC">Db.DB_NOSYNC</a> flag is a dangerous option. It should only be set
+if the application is doing logging (with transactions) so that the
+database is recoverable after a system or application crash, or if the
+database is always generated from scratch after any system or application
+crash.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b>
+While unlikely, it is possible for database corruption to happen if a
+system or application crash occurs while writing data to the database.
+To ensure that database corruption never occurs, applications must either:
+use transactions and logging with automatic recovery, use logging and
+application-specific recovery, or edit a copy of the database,
+and, once all applications using the database have successfully called
+Db.close, atomically replace the original database with the
+updated copy.
+</dl>
+<p>When multiple threads are using the Berkeley DB handle concurrently, only a single
+thread may call the Db.close method.
+<p>Once Db.close has been called, regardless of its return, the
+<a href="../api_java/db_class.html">Db</a> handle may not be accessed again.
+ <a name="3"><!--meow--></a>
+<p>The Db.close method throws an exception that encapsulates a non-zero error value on
+failure, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">Db.DB_INCOMPLETE</a> if the underlying database still has
+dirty pages in the cache. (The only reason to return
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">Db.DB_INCOMPLETE</a> is if another thread of control was writing pages
+in the underlying database file at the same time as the
+Db.close method was called. For this reason, a return of
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">Db.DB_INCOMPLETE</a> can normally be ignored, or, in cases where it is
+a possible return value, the <a href="../api_java/db_close.html#DB_NOSYNC">Db.DB_NOSYNC</a> option should probably
+have been specified.)
+<h1>Errors</h1>
+<p>The Db.close method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.close method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
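
In C++ terms (the Java method above mirrors the C++ Db::close), the sync-versus-nosync choice looks roughly like this; db is an assumed open Db handle:

    int ret = db.close(0);        // flush dirty pages, close cursors and underlying files
    if (ret == DB_INCOMPLETE) {
        // another thread was writing pages concurrently; normally ignorable
    }
    // Alternatively, when the database is recoverable by other means
    // (transactions and logging, or rebuilt from scratch), skip the flush:
    //     db.close(DB_NOSYNC);
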
diff --git a/bdb/docs/api_java/db_cursor.html b/bdb/docs/api_java/db_cursor.html
new file mode 100644
index 00000000000..2494aad58b7
--- /dev/null
+++ b/bdb/docs/api_java/db_cursor.html
@@ -0,0 +1,94 @@
+<!--$Id: db_cursor.so,v 10.25 2000/07/11 19:11:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.cursor</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public Dbc cursor(DbTxn txnid, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.cursor method
+creates a cursor.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>, otherwise, NULL.
+<p>If transaction protection is enabled, cursors must be opened and closed
+within the context of a transaction, and the <b>txnid</b> parameter
+specifies the transaction context in which the cursor may be used.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="Db.DB_WRITECURSOR">Db.DB_WRITECURSOR</a><dd>Specify that the cursor will be used to update the database. This
+flag should <b>only</b> be set when the <a href="../api_java/env_open.html#DB_INIT_CDB">Db.DB_INIT_CDB</a> flag
+was specified to <a href="../api_java/env_open.html">DbEnv.open</a>.
+</dl>
+<p>The Db.cursor method throws an exception that encapsulates a non-zero error value on
+failure.
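+<p>For illustration only, the following sketch walks every key/data pair in a
+database using a cursor. It assumes an already-open, non-transactional
+<a href="../api_java/db_class.html">Db</a> handle named <b>db</b>, the
+<a href="../api_java/dbt_class.html">Dbt</a> accessors documented on the Dbt page,
+and the Db.DB_NEXT flag documented for <a href="../api_java/dbc_get.html">Dbc.get</a>.
+<pre>
+// Sketch: iterate over all key/data pairs with a cursor.
+Dbc cursor = db.cursor(null, 0);
+try {
+    Dbt key = new Dbt();
+    Dbt data = new Dbt();
+    key.set_flags(Db.DB_DBT_MALLOC);    // have Berkeley DB allocate results
+    data.set_flags(Db.DB_DBT_MALLOC);
+    while (cursor.get(key, data, Db.DB_NEXT) == 0)
+        System.out.println(new String(key.get_data(), 0, key.get_size()));
+} finally {
+    cursor.close();
+}
+</pre>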
+<h1>Errors</h1>
+<p>The Db.cursor method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db.cursor method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.cursor method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_del.html b/bdb/docs/api_java/db_del.html
new file mode 100644
index 00000000000..0a44190dd93
--- /dev/null
+++ b/bdb/docs/api_java/db_del.html
@@ -0,0 +1,94 @@
+<!--$Id: db_del.so,v 10.23 2000/09/05 19:35:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.del</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int del(DbTxn txnid, Dbt key, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.del method removes key/data pairs from the database. The
+key/data pair associated with the specified <b>key</b> is discarded from
+the database. In the presence of duplicate key values, all records
+associated with the designated key will be discarded.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, it should be set to null.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The Db.del method throws an exception that encapsulates a non-zero error value on
+failure, and <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a> if the specified <b>key</b> did not exist in
+the file.
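+<p>For illustration only, the following sketch deletes a single key. It assumes
+an already-open, non-transactional <a href="../api_java/db_class.html">Db</a>
+handle named <b>db</b> and the byte-array <a href="../api_java/dbt_class.html">Dbt</a>
+constructor documented on the Dbt page.
+<pre>
+// Sketch: remove all records stored under one key.
+Dbt key = new Dbt("obsolete-key".getBytes());
+if (db.del(null, key, 0) == Db.DB_NOTFOUND)
+    System.err.println("key was not present");
+</pre>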
+<h1>Errors</h1>
+<p>The Db.del method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db.del method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db.del method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.del method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_fd.html b/bdb/docs/api_java/db_fd.html
new file mode 100644
index 00000000000..77342c2c9a9
--- /dev/null
+++ b/bdb/docs/api_java/db_fd.html
@@ -0,0 +1,79 @@
+<!--$Id: db_fd.so,v 10.21 2000/03/01 21:41:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.fd</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.fd</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int fd()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.fd method
+returns a file descriptor representative of the underlying database.
+This method does not fit well into the Java framework and may be removed
+in subsequent releases.
+<p>The Db.fd method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The Db.fd method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.fd method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_get.html b/bdb/docs/api_java/db_get.html
new file mode 100644
index 00000000000..8fd980e9260
--- /dev/null
+++ b/bdb/docs/api_java/db_get.html
@@ -0,0 +1,149 @@
+<!--$Id: db_get.so,v 10.31 2000/11/28 20:12:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.get</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get(DbTxn txnid, Dbt key, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.get method retrieves key/data pairs from the database. The
+byte array
+and length of the data associated with the specified <b>key</b> are
+returned in the structure referenced by <b>data</b>.
+<p>In the presence of duplicate key values, Db.get will return the
+first data item for the designated key. Duplicates are sorted by insert
+order except where this order has been overridden by cursor operations.
+<b>Retrieval of duplicates requires the use of cursor operations.</b>
+See <a href="../api_java/dbc_get.html">Dbc.get</a> for details.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, it should be set to null.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="Db.DB_CONSUME">Db.DB_CONSUME</a><dd>Return the record number and data from the available record closest to
+the head of the queue and delete the record. The cursor will be
+positioned on the deleted record. The record number will be returned
+in <b>key</b> as described in <a href="../api_java/dbt_class.html">Dbt</a>. The data will be returned
+in the <b>data</b> parameter. A record is available if it is not
+deleted and is not currently locked. The underlying database must be
+of type Queue for Db.DB_CONSUME to be specified.
+<p><dt><a name="Db.DB_CONSUME_WAIT">Db.DB_CONSUME_WAIT</a><dd>The Db.DB_CONSUME_WAIT flag is the same as the Db.DB_CONSUME
+flag except that if the Queue database is empty, the thread of control
+will wait until there is data in the queue before returning. The
+underlying database must be of type Queue for Db.DB_CONSUME_WAIT
+to be specified.
+<p><dt><a name="Db.DB_GET_BOTH">Db.DB_GET_BOTH</a><dd>Retrieve the key/data pair only if both the key and data match the
+arguments.
+<p><dt><a name="Db.DB_SET_RECNO">Db.DB_SET_RECNO</a><dd>Retrieve the specified numbered key/data pair from a database.
+Upon return, both the <b>key</b> and <b>data</b> items will have been
+filled in, not just the data item as is done for all other uses of the
+Db.get method.
+<p>The <b>data</b> field of the specified <b>key</b>
+must be a byte array large enough to hold a logical record
+number (i.e., an int).
+This record number determines the record to be retrieved.
+<p>For Db.DB_SET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the DB_RECNUM flag.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_RMW">Db.DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+<p>As the Db.get interface will not hold locks across
+Berkeley DB interface calls in non-transactional environments, the
+<a href="../api_java/dbc_get.html#DB_RMW">Db.DB_RMW</a> flag to the Db.get call is only meaningful in
+the presence of transactions.
+</dl>
+<p>If the database is a Queue or Recno database and the requested key exists,
+but was never explicitly created by the application or was later deleted,
+the Db.get method returns <a href="../ref/program/errorret.html#DB_KEYEMPTY">Db.DB_KEYEMPTY</a>.
+<p>Otherwise, if the requested key is not in the database, the
+Db.get function returns <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p>Otherwise, the Db.get method throws an exception that encapsulates a non-zero error value on
+failure.
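+<p>For illustration only, the following sketch retrieves the data item stored
+under a single key. It assumes an already-open, non-transactional
+<a href="../api_java/db_class.html">Db</a> handle named <b>db</b> and the
+<a href="../api_java/dbt_class.html">Dbt</a> constructors and accessors documented
+on the Dbt page.
+<pre>
+// Sketch: simple keyed retrieval.
+Dbt key = new Dbt("fruit".getBytes());
+Dbt data = new Dbt();
+data.set_flags(Db.DB_DBT_MALLOC);       // have Berkeley DB allocate the result
+int ret = db.get(null, key, data, 0);
+if (ret == 0)
+    System.out.println(new String(data.get_data(), 0, data.get_size()));
+else if (ret == Db.DB_NOTFOUND)
+    System.out.println("no such key");
+</pre>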
+<h1>Errors</h1>
+<p>The Db.get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>The <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag was specified to the
+<a href="../api_java/db_open.html">Db.open</a> method and none of the <a href="../api_java/dbt_class.html#DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a>,
+<a href="../api_java/dbt_class.html#DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a> or <a href="../api_java/dbt_class.html#DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a> flags were set in the
+<a href="../api_java/dbt_class.html">Dbt</a>.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db.get method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>If the requested item could not be returned due to insufficient memory,
+the Db.get method will fail and
+throw a <a href="../api_java/mem_class.html">DbMemoryException</a> exception.
+<p>The Db.get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.get method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_get_byteswapped.html b/bdb/docs/api_java/db_get_byteswapped.html
new file mode 100644
index 00000000000..1ef15479d99
--- /dev/null
+++ b/bdb/docs/api_java/db_get_byteswapped.html
@@ -0,0 +1,75 @@
+<!--$Id: db_get_byteswapped.so,v 10.7 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.get_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.get_byteswapped</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public boolean get_byteswapped();
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.get_byteswapped method returns
+false
+if the underlying database files were created on an architecture
+of the same byte order as the current one, and
+true
+if they were not (i.e., big-endian on a little-endian machine or
+vice-versa). This return value may be used to determine whether application
+data needs to be adjusted for this architecture.
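+<p>For illustration only (assuming an already-open <a href="../api_java/db_class.html">Db</a>
+handle named <b>db</b>):
+<pre>
+// Sketch: warn when stored data may need byte-order conversion.
+if (db.get_byteswapped())
+    System.out.println("database byte order differs from this machine");
+</pre>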
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_get_type.html b/bdb/docs/api_java/db_get_type.html
new file mode 100644
index 00000000000..cc10556190d
--- /dev/null
+++ b/bdb/docs/api_java/db_get_type.html
@@ -0,0 +1,72 @@
+<!--$Id: db_get_type.so,v 10.10 1999/12/20 08:52:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.get_type</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get_type();
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.get_type method returns the type of the underlying access method
+(and file format). It returns one of Db.DB_BTREE,
+Db.DB_HASH, Db.DB_QUEUE or Db.DB_RECNO. This value may be used to
+determine the type of the database after a return from <a href="../api_java/db_open.html">Db.open</a>
+with the <b>type</b> argument set to Db.DB_UNKNOWN.
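+<p>For illustration only (assuming <b>db</b> is a <a href="../api_java/db_class.html">Db</a>
+handle that was opened with its type set to Db.DB_UNKNOWN):
+<pre>
+// Sketch: report the access method of an existing database.
+int type = db.get_type();
+if (type == Db.DB_BTREE)
+    System.out.println("Btree database");
+else if (type == Db.DB_HASH)
+    System.out.println("Hash database");
+else if (type == Db.DB_RECNO)
+    System.out.println("Recno database");
+else
+    System.out.println("other access method: " + type);
+</pre>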
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_join.html b/bdb/docs/api_java/db_join.html
new file mode 100644
index 00000000000..5bdd93fedde
--- /dev/null
+++ b/bdb/docs/api_java/db_join.html
@@ -0,0 +1,142 @@
+<!--$Id: db_join.so,v 10.30 2000/12/20 15:34:50 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.join</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public Dbc join(Dbc curslist[], int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.join method creates a specialized cursor for use in performing
+joins on secondary indexes. For information on how to organize your data
+to use this functionality, see <a href="../ref/am/join.html">Logical
+join</a>.
+<p>The <a href="../api_java/db_class.html">Db</a> handle on which Db.join is called is the <b>primary</b>
+database, which is keyed by the data values found in entries in the
+<b>curslist</b>.
+<p>The <b>curslist</b> argument contains a null terminated array of cursors.
+Each cursor must have been initialized to reference the key on which the
+underlying database should be joined. Typically, this initialization is done
+by a <a href="../api_java/dbc_get.html">Dbc.get</a> call with the <a href="../api_java/dbc_get.html#DB_SET">Db.DB_SET</a> flag specified. Once the
+cursors have been passed as part of a <b>curslist</b>, they should not
+be accessed or modified until the newly created join cursor has been closed,
+or else inconsistent results may be returned.
+<p>Joined values are retrieved by doing a sequential iteration over the first
+cursor in the <b>curslist</b> argument, and a nested iteration over each
+secondary cursor in the order they are specified in the <b>curslist</b>
+argument. This requires database traversals to search for the current
+datum in all the cursors after the first. For this reason, the best join
+performance normally results from sorting the cursors from the one that
+references the least number of data items to the one that references the
+most. By default, Db.join does this sort on behalf of its caller.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_JOIN_NOSORT">Db.DB_JOIN_NOSORT</a><dd>Do not sort the cursors based on the number of data items they reference.
+If the data are structured such that cursors with many data items also
+share many common elements, higher performance will result from listing
+those cursors before cursors with fewer data items, that is, a sort order
+other than the default. The Db.DB_JOIN_NOSORT flag permits
+applications to perform join optimization prior to calling Db.join.
+</dl>
+<p>The returned cursor has the standard cursor functions:
+<p><dl compact>
+<p><dt><a href="../api_java/dbc_get.html">Dbc.get</a><dd>Iterates over the values associated with the keys to which each item in
+<b>curslist</b> has been initialized. Any data value which appears in
+all items specified by the <b>curslist</b> argument is then used as a
+key into the <b>primary</b>, and the key/data pair found in the
+<b>primary</b> is returned.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_JOIN_ITEM">Db.DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup
+key for the <b>primary</b>, but simply return it in the key parameter
+instead. The data parameter is left unchanged.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_RMW">Db.DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+</dl>
+<p><dt><a href="../api_java/dbc_put.html">Dbc.put</a><dd>Returns EINVAL.
+<p><dt><a href="../api_java/dbc_del.html">Dbc.del</a><dd>Returns EINVAL.
+<p><dt><a href="../api_java/dbc_close.html">Dbc.close</a><dd>Close the returned cursor and release all resources. (Closing the cursors
+in <b>curslist</b> is the responsibility of the caller.)
+</dl>
+<p>For the returned join cursor to be used in a transaction protected manner,
+the cursors listed in <b>curslist</b> must have been created within the
+context of the same transaction.
+<p>The Db.join method throws an exception that encapsulates a non-zero error value on
+failure.
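+<p>For illustration only, the following sketch performs an equality join over
+two secondary indexes. It assumes three already-open
+<a href="../api_java/db_class.html">Db</a> handles in the same environment:
+<b>personnel</b> (the primary database), and <b>job</b> and <b>city</b>
+(secondary indexes whose data items are primary keys), as well as the
+<a href="../api_java/dbt_class.html">Dbt</a> accessors documented on the Dbt page.
+<pre>
+// Sketch: find personnel records matching both secondary-index keys.
+Dbt tmp = new Dbt();
+tmp.set_flags(Db.DB_DBT_MALLOC);
+Dbc jobc = job.cursor(null, 0);
+jobc.get(new Dbt("engineer".getBytes()), tmp, Db.DB_SET);
+Dbc cityc = city.cursor(null, 0);
+cityc.get(new Dbt("boston".getBytes()), tmp, Db.DB_SET);
+
+Dbc curslist[] = { jobc, cityc, null };      // null-terminated cursor array
+Dbc join = personnel.join(curslist, 0);
+Dbt pkey = new Dbt(), pdata = new Dbt();
+pkey.set_flags(Db.DB_DBT_MALLOC);
+pdata.set_flags(Db.DB_DBT_MALLOC);
+while (join.get(pkey, pdata, 0) == 0) {
+    // each pair returned is a record from the primary database
+}
+join.close();
+cityc.close();
+jobc.close();
+</pre>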
+<h1>Errors</h1>
+<p>The Db.join method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <a href="../api_java/dbc_put.html">Dbc.put</a> or <a href="../api_java/dbc_del.html">Dbc.del</a> functions were called.
+</dl>
+<p>The Db.join method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.join method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_key_range.html b/bdb/docs/api_java/db_key_range.html
new file mode 100644
index 00000000000..dd68e0e1acf
--- /dev/null
+++ b/bdb/docs/api_java/db_key_range.html
@@ -0,0 +1,99 @@
+<!--$Id: db_key_range.so,v 10.5 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.key_range</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.key_range</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void key_range(DbTxn txnid,
+ Dbt key, DbKeyRange key_range, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.key_range method returns an estimate of the proportion of keys
+that are less than, equal to and greater than the specified key. The
+underlying database must be of type Btree.
+<p>The information is returned in the <b>key_range</b> argument, which
+contains three elements of type double, <b>less</b>, <b>equal</b> and
+<b>greater</b>. Values are in the range of 0 to 1, e.g., if the field
+<b>less</b> is 0.05, that indicates that 5% of the keys in the database
+are less than the key argument. The value for <b>equal</b> will be zero
+if there is no matching key and non-zero otherwise.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, it should be set to null.
+The Db.key_range method does not retain the locks it acquires for the
+life of the transaction, so estimates may not be repeatable.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The Db.key_range method throws an exception that encapsulates a non-zero error value on
+failure.
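+<p>For illustration only (assuming an already-open, non-transactional Btree
+<a href="../api_java/db_class.html">Db</a> handle named <b>db</b>, the byte-array
+<a href="../api_java/dbt_class.html">Dbt</a> constructor, and a no-argument
+DbKeyRange constructor):
+<pre>
+// Sketch: estimate where the key "m" falls in the key space.
+DbKeyRange range = new DbKeyRange();
+db.key_range(null, new Dbt("m".getBytes()), range, 0);
+System.out.println((range.less * 100) + "% of keys sort before the key");
+</pre>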
+<h1>Errors</h1>
+<p>The Db.key_range method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The underlying database was not of type Btree.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db.key_range method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db.key_range method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.key_range method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_open.html b/bdb/docs/api_java/db_open.html
new file mode 100644
index 00000000000..5371e10bbc2
--- /dev/null
+++ b/bdb/docs/api_java/db_open.html
@@ -0,0 +1,179 @@
+<!--$Id: db_open.so,v 10.61 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.open</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void open(String file,
+ String database, int type, int flags, int mode)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The currently supported Berkeley DB file formats (or <i>access methods</i>)
+are Btree, Hash, Queue and Recno. The Btree format is a representation
+of a sorted, balanced tree structure. The Hash format is an extensible,
+dynamic hashing scheme. The Queue format supports fast access to
+fixed-length records, accessed sequentially or by logical record number.
+The Recno format supports fixed- or variable-length records, accessed
+sequentially or by logical record number, and optionally retrieved from
+a flat text file.
+<p>Storage and retrieval for the Berkeley DB access methods are based on key/data
+pairs, see <a href="../api_java/dbt_class.html">Dbt</a> for more information.
+<p>The Db.open interface opens the database represented by the
+<b>file</b> and <b>database</b> arguments for both reading and writing.
+The <b>file</b> argument is used as the name of a physical file on disk
+that will be used to back the database. The <b>database</b> argument is
+optional and allows applications to have multiple logical databases in a
+single physical file. While no <b>database</b> argument needs to be
+specified, it is an error to attempt to open a second database in a
+<b>file</b> that was not initially created using a <b>database</b> name.
+In-memory databases never intended to be preserved on disk may
+be created by setting both the <b>file</b> and <b>database</b> arguments
+to null. Note that in-memory databases can only ever be shared by
+sharing the single database handle that created them, in circumstances
+where doing so is safe.
+<p>The <b>type</b> argument is of type int
+and must be set to one of Db.DB_BTREE, Db.DB_HASH,
+Db.DB_QUEUE, Db.DB_RECNO or Db.DB_UNKNOWN, except
+that databases of type Db.DB_QUEUE are restricted to one per
+<b>file</b>. If <b>type</b> is Db.DB_UNKNOWN, the database must
+already exist and Db.open will automatically determine its type.
+The <a href="../api_java/db_get_type.html">Db.get_type</a> method may be used to determine the underlying type of
+databases opened using Db.DB_UNKNOWN.
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="Db.DB_CREATE">Db.DB_CREATE</a><dd>Create any underlying files, as necessary. If the files do not already
+exist and the DB_CREATE flag is not specified, the call will fail.
+<p><dt><a name="Db.DB_EXCL">Db.DB_EXCL</a><dd>Return an error if the file already exists. Underlying filesystem
+primitives are used to implement this flag. For this reason it is only
+applicable to the physical file and cannot be used to test if a database
+in a file already exists.
+<p>The Db.DB_EXCL flag is only meaningful when specified with the
+Db.DB_CREATE flag.
+<p><dt><a name="Db.DB_NOMMAP">Db.DB_NOMMAP</a><dd>Do not map this database into process memory (see the description of the
+<a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a> method for further information).
+<p><dt><a name="Db.DB_RDONLY">Db.DB_RDONLY</a><dd>Open the database for reading only. Any attempt to modify items in the
+database will fail regardless of the actual permissions of any underlying
+files.
+<p><dt><a name="Db.DB_THREAD">Db.DB_THREAD</a><dd>Cause the <a href="../api_java/db_class.html">Db</a> handle returned by Db.open to be
+<i>free-threaded</i>, that is, useable by multiple threads within a
+single address space.
+<p>Threading is always assumed in the Java API, so no special flags are
+required, and Berkeley DB functions will always behave as if the
+<a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag was specified.
+<p><dt><a name="Db.DB_TRUNCATE">Db.DB_TRUNCATE</a><dd>Physically truncate the underlying file, discarding all previous databases
+it might have held. Underlying filesystem primitives are used to
+implement this flag. For this reason it is only applicable to the
+physical file and cannot be used to discard databases within a file.
+<p>The Db.DB_TRUNCATE flag cannot be transaction protected, and it is
+an error to specify it in a transaction protected environment.
+</dl>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the access methods
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p>Calling Db.open is a reasonably expensive operation, and
+maintaining a set of open databases will normally be preferable to
+repeatedly opening and closing the database for each new query.
+<p>The Db.open method throws an exception that encapsulates a non-zero error value on
+failure.
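+<p>For illustration only, the following sketch creates and opens a Btree
+database in an existing environment. It assumes an already-open DbEnv handle
+named <b>dbenv</b> and the <a href="../api_java/db_class.html">Db</a> constructor
+documented on the Db page; the file name and mode are arbitrary.
+<pre>
+// Sketch: create (if necessary) and open a Btree database.
+Db db = new Db(dbenv, 0);
+db.open("inventory.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+// ... use the handle ...
+db.close(0);
+</pre>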
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_java/env_open.html">DbEnv.open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db.open
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<p><dl compact>
+<p><dt>TMPDIR<dd>If the <b>file</b> and <b>dbenv</b> arguments to Db.open are
+null, the environment variable <b>TMPDIR</b> may be used as a
+directory in which to create a temporary backing file.
+</dl>
+<h1>Errors</h1>
+<p>The Db.open method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt><a name="Db.DB_OLD_VERSION">Db.DB_OLD_VERSION</a><dd>The database cannot be opened without being first upgraded.
+<p><dt>EEXIST<dd>DB_CREATE and DB_EXCL were specified and the file exists.
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified (e.g., unknown database
+type, page size, hash function, pad byte, byte order) or a flag value
+or parameter that is incompatible with the specified database.
+<p>
+The <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag was specified and spinlocks are not
+implemented for this architecture.
+<p>The <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag was specified to Db.open, but was not
+specified to the <a href="../api_java/env_open.html">DbEnv.open</a> call for the environment in which the
+<a href="../api_java/db_class.html">Db</a> handle was created.
+<p>A <b>re_source</b> file was specified, and either the <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a>
+flag was set or the provided database environment supports transaction
+processing.
+<p><dt>ENOENT<dd>A non-existent <b>re_source</b> file was specified.
+</dl>
+<p>The Db.open method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.open method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_put.html b/bdb/docs/api_java/db_put.html
new file mode 100644
index 00000000000..41fe6dcff9e
--- /dev/null
+++ b/bdb/docs/api_java/db_put.html
@@ -0,0 +1,128 @@
+<!--$Id: db_put.so,v 10.34 2000/09/16 22:27:56 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.put</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int put(DbTxn txnid, Dbt key, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.put method stores key/data pairs in the database. The default
+behavior of the Db.put function is to enter the new key/data
+pair, replacing any previously existing key if duplicates are disallowed,
+or adding a duplicate data item if duplicates are allowed. If the database
+supports duplicates, the Db.put method adds the new data value at the
+end of the duplicate set. If the database supports sorted duplicates,
+the new data value is inserted at the correct sorted location.
+<p>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction ID returned from
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, it should be set to null.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="Db.DB_APPEND">Db.DB_APPEND</a><dd>Append the key/data pair to the end of the database. For the
+Db.DB_APPEND flag to be specified, the underlying database must be
+a Queue or Recno database. The record number allocated to the record is
+returned in the specified <b>key</b>.
+<p>There is a minor behavioral difference between the Recno and Queue access
+methods for the Db.DB_APPEND flag. If a transaction enclosing a
+Db.put operation with the Db.DB_APPEND flag aborts, the
+record number may be decremented (and later re-allocated by a subsequent
+Db.DB_APPEND operation) by the Recno access method, but will not be
+decremented or re-allocated by the Queue access method.
+<p><dt><a name="Db.DB_NODUPDATA">Db.DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, enter the new key/data
+pair only if it does not already appear in the database. If the
+key/data pair already appears in the database, <a href="../api_java/dbc_put.html#DB_KEYEXIST">Db.DB_KEYEXIST</a> is
+returned. The Db.DB_NODUPDATA flag may only be specified if the
+underlying database has been configured to support sorted duplicates.
+<p>The Db.DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="Db.DB_NOOVERWRITE">Db.DB_NOOVERWRITE</a><dd>Enter the new key/data pair only if the key does not already appear in
+the database. If the key already appears in the database,
+<a href="../api_java/dbc_put.html#DB_KEYEXIST">Db.DB_KEYEXIST</a> is returned. Even if the database allows duplicates,
+a call to Db.put with the Db.DB_NOOVERWRITE flag set will
+fail if the key already exists in the database.
+</dl>
+<p>Otherwise, the Db.put method throws an exception that encapsulates a non-zero error value on
+failure.
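+<p>For illustration only, the following sketch stores one key/data pair without
+overwriting an existing key. It assumes an already-open, non-transactional
+<a href="../api_java/db_class.html">Db</a> handle named <b>db</b> and the
+byte-array <a href="../api_java/dbt_class.html">Dbt</a> constructor documented on
+the Dbt page.
+<pre>
+// Sketch: insert only if the key is not already present.
+Dbt key = new Dbt("fruit".getBytes());
+Dbt data = new Dbt("apple".getBytes());
+if (db.put(null, key, data, Db.DB_NOOVERWRITE) == Db.DB_KEYEXIST)
+    System.out.println("the key already exists");
+</pre>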
+<h1>Errors</h1>
+<p>The Db.put method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to do a partial put.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>A btree exceeded the maximum btree depth (255).
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db.put method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db.put method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.put method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_remove.html b/bdb/docs/api_java/db_remove.html
new file mode 100644
index 00000000000..d1238451cc4
--- /dev/null
+++ b/bdb/docs/api_java/db_remove.html
@@ -0,0 +1,104 @@
+<!--$Id: db_remove.so,v 10.20 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.remove</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void remove(String file, String database, int flags)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.remove interface removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the physical file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>If a physical file is being removed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+Db.remove method is called. Otherwise, no reference count of database
+use is maintained by Berkeley DB. Applications should not remove databases that
+are currently in use. In particular, some architectures do not permit
+the removal of files with open handles. On these architectures, attempts
+to remove databases that are currently in use will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Once Db.remove has been called, regardless of its return, the
+<a href="../api_java/db_class.html">Db</a> handle may not be accessed again.
+<p>The Db.remove method throws an exception that encapsulates a non-zero error value on
+failure.
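+<p>As an illustrative sketch (the class and file names are hypothetical, and
+error handling is minimal), a database file may be removed using a handle
+that has never been opened:
+<p><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public class RemoveSketch
+{
+	public static void main(String argv[])
+	{
+		try {
+			// The handle must not be open and is unusable afterward.
+			Db db = new Db(null, 0);
+			db.remove("example.db", null, 0);
+		} catch (FileNotFoundException fnfe) {
+			System.err.println("example.db: no such file");
+		} catch (DbException dbe) {
+			System.err.println("Db.remove: " + dbe.toString());
+		}
+	}
+}
+</pre>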
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_java/env_open.html">DbEnv.open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db.remove
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The Db.remove method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>A database in the file is currently open.
+</dl>
+<p>If the file or directory does not exist, the Db.remove method will
+fail and
+throw a FileNotFoundException exception.
+<p>The Db.remove method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.remove method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_rename.html b/bdb/docs/api_java/db_rename.html
new file mode 100644
index 00000000000..b34f20a26a5
--- /dev/null
+++ b/bdb/docs/api_java/db_rename.html
@@ -0,0 +1,105 @@
+<!--$Id: db_rename.so,v 10.7 2000/10/25 15:24:44 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.rename</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void rename(String file, String database, String newname, int flags)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.rename interface renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the physical file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>If a physical file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+Db.rename method is called. Otherwise, no reference count of database
+use is maintained by Berkeley DB. Applications should not rename databases that
+are currently in use. In particular, some architectures do not permit
+renaming files with open handles. On these architectures, attempts to
+rename databases that are currently in use will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Once Db.rename has been called, regardless of its return, the
+<a href="../api_java/db_class.html">Db</a> handle may not be accessed again.
+<p>The Db.rename method throws an exception that encapsulates a non-zero error value on
+failure.
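+<p>As an illustrative sketch (the class and file names are hypothetical, and
+error handling is minimal), a database file may be renamed using a handle
+that has never been opened:
+<p><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public class RenameSketch
+{
+	public static void main(String argv[])
+	{
+		try {
+			// The handle must not be open and is unusable afterward.
+			Db db = new Db(null, 0);
+			db.rename("old.db", null, "new.db", 0);
+		} catch (FileNotFoundException fnfe) {
+			System.err.println("old.db: no such file");
+		} catch (DbException dbe) {
+			System.err.println("Db.rename: " + dbe.toString());
+		}
+	}
+}
+</pre>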
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_java/env_open.html">DbEnv.open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db.rename
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The Db.rename method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>A database in the file is currently open.
+</dl>
+<p>If the file or directory does not exist, the Db.rename method will
+fail and
+throw a FileNotFoundException exception.
+<p>The Db.rename method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.rename method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_append_recno.html b/bdb/docs/api_java/db_set_append_recno.html
new file mode 100644
index 00000000000..8a4d4a0df24
--- /dev/null
+++ b/bdb/docs/api_java/db_set_append_recno.html
@@ -0,0 +1,75 @@
+<!--$Id: db_set_append_recno.so,v 1.3 2000/07/18 16:19:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_append_recno</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_append_recno</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbAppendRecno
+{
+	public abstract void db_append_recno(Db db, Dbt data, int recno)
+		throws DbException;
+}
+public class Db
+{
+ public void set_append_recno(DbAppendRecno db_append_recno)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>When using the <a href="../api_java/db_put.html#DB_APPEND">Db.DB_APPEND</a> option of the <a href="../api_java/db_put.html">Db.put</a> method,
+it may be useful to modify the stored data based on the generated key.
+If a callback method is specified using the
+Db.set_append_recno method, it will be called after the record number
+has been selected but before the data has been stored.
+The callback function must throw a <a href="../api_java/except_class.html">DbException</a> object to
+encapsulate the error on failure. That object will be thrown to the
+caller of <a href="../api_java/db_put.html">Db.put</a>.
+<p>The called function must take three arguments: a reference to the
+enclosing database handle, the data <a href="../api_java/dbt_class.html">Dbt</a> to be stored and the
+selected record number. The called function may then modify the data
+<a href="../api_java/dbt_class.html">Dbt</a>.
+<p>The Db.set_append_recno interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_append_recno method throws an exception that encapsulates a non-zero error value on
+failure.
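+<p>The following class is an illustrative sketch only (its name and behavior
+are hypothetical): it replaces the stored data with a value derived from the
+record number chosen by <a href="../api_java/db_put.html#DB_APPEND">Db.DB_APPEND</a>.
+<p><pre>
+import com.sleepycat.db.*;
+<p>
+public class AppendRecnoSketch implements DbAppendRecno
+{
+	public void db_append_recno(Db db, Dbt data, int recno)
+		throws DbException
+	{
+		// For illustration, discard the supplied data and store a
+		// value recording the allocated record number instead.
+		byte[] value = ("record " + recno).getBytes();
+		data.set_data(value);
+		data.set_size(value.length);
+	}
+}
+</pre>
+<p>The callback would be registered with
+db.set_append_recno(new AppendRecnoSketch()) before <a href="../api_java/db_open.html">Db.open</a> is called.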
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_bt_compare.html b/bdb/docs/api_java/db_set_bt_compare.html
new file mode 100644
index 00000000000..2a2ea869b1e
--- /dev/null
+++ b/bdb/docs/api_java/db_set_bt_compare.html
@@ -0,0 +1,105 @@
+<!--$Id: db_set_bt_compare.so,v 10.24 2000/10/26 15:20:40 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_bt_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_bt_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbBtreeCompare
+{
+ public abstract int bt_compare(Db db, Dbt dbt1, Dbt dbt2);
+}
+public class Db
+{
+ public void set_bt_compare(DbBtreeCompare bt_compare)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree key comparison function. The comparison function is
+called when it is necessary to compare a key specified by the
+application with a key currently stored in the tree. The first argument
+to the comparison function is the <a href="../api_java/dbt_class.html">Dbt</a> representing the
+application supplied key, the second is the current tree's key.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first key argument is considered to be
+respectively less than, equal to, or greater than the second key
+argument. In addition, the comparison function must cause the keys in
+the database to be <i>well-ordered</i>. The comparison function
+must correctly handle any key values used by the application (possibly
+including zero-length keys). In addition, when Btree key prefix
+comparison is being performed (see <a href="../api_java/db_set_bt_prefix.html">Db.set_bt_prefix</a> for more
+information), the comparison routine may be passed a prefix of any
+database key. The <b>data</b> and <b>size</b> fields of the
+<a href="../api_java/dbt_class.html">Dbt</a> are the only fields that may be used for the purposes of
+this comparison.
+<p>If no comparison function is specified, the keys are compared lexically,
+with shorter keys collating before longer keys. The same comparison
+method must be used each time a particular Btree is opened.
+<p>The Db.set_bt_compare interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_bt_compare method throws an exception that encapsulates a non-zero error value on
+failure.
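+<p>As an illustrative sketch (the class name is hypothetical), the following
+comparison function orders keys as strings without regard to ASCII case;
+like any Btree comparison function, it must be registered before every
+<a href="../api_java/db_open.html">Db.open</a> of the database:
+<p><pre>
+import com.sleepycat.db.*;
+<p>
+public class CaseFoldCompare implements DbBtreeCompare
+{
+	public int bt_compare(Db db, Dbt dbt1, Dbt dbt2)
+	{
+		byte[] b1 = dbt1.get_data();
+		byte[] b2 = dbt2.get_data();
+		// Treat zero-length keys as empty strings.
+		String k1 = (b1 == null) ? "" : new String(b1, 0, dbt1.get_size());
+		String k2 = (b2 == null) ? "" : new String(b2, 0, dbt2.get_size());
+		return k1.compareToIgnoreCase(k2);
+	}
+}
+</pre>
+<p>It would be registered with db.set_bt_compare(new CaseFoldCompare()).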
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_bt_minkey.html b/bdb/docs/api_java/db_set_bt_minkey.html
new file mode 100644
index 00000000000..dc7c1745123
--- /dev/null
+++ b/bdb/docs/api_java/db_set_bt_minkey.html
@@ -0,0 +1,85 @@
+<!--$Id: db_set_bt_minkey.so,v 10.14 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_bt_minkey</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_bt_minkey</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int set_bt_minkey(int bt_minkey)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the minimum number of keys that will be stored on any single
+Btree page.
+<p>This value is used to determine which keys will be stored on overflow
+pages, i.e. if a key or data item is larger than the underlying database
+page size divided by the <b>bt_minkey</b> value, it will be stored on
+overflow pages instead of within the page itself. The <b>bt_minkey</b>
+value specified must be at least 2; if <b>bt_minkey</b> is not explicitly
+set, a value of 2 is used.
+<p>The Db.set_bt_minkey interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_bt_minkey method throws an exception that encapsulates a non-zero error value on
+failure.
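+<p>For illustration only (the class and file names are hypothetical), the
+following fragment asks for at least 4 keys per page, so that items larger
+than a quarter of the page size are moved to overflow pages:
+<p><pre>
+import com.sleepycat.db.*;
+<p>
+public class MinkeySketch
+{
+	public static void main(String argv[])
+		throws Exception
+	{
+		Db db = new Db(null, 0);
+		db.set_bt_minkey(4);
+		db.open("example.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+		db.close(0);
+	}
+}
+</pre>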
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_bt_prefix.html b/bdb/docs/api_java/db_set_bt_prefix.html
new file mode 100644
index 00000000000..a6e823969ca
--- /dev/null
+++ b/bdb/docs/api_java/db_set_bt_prefix.html
@@ -0,0 +1,106 @@
+<!--$Id: db_set_bt_prefix.so,v 10.25 2000/09/08 21:35:26 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_bt_prefix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_bt_prefix</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbBtreePrefix
+{
+ public abstract int bt_prefix(Db db, Dbt dbt1, Dbt dbt2);
+}
+public class Db
+{
+ public void set_bt_prefix(DbBtreePrefix bt_prefix)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree prefix function. The prefix function must return the
+number of bytes of the second key argument that would be required by
+the Btree key comparison function to determine the second key argument's
+ordering relationship with respect to the first key argument. If the
+two keys are equal, the key length should be returned. The prefix
+function must correctly handle any key values used by the application
+(possibly including zero-length keys). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_java/dbt_class.html">Dbt</a> are the only fields that may be
+used for the purposes of this determination.
+<p>The prefix function is used to determine the amount by which keys stored
+on the Btree internal pages can be safely truncated without losing their
+uniqueness. See the <a href="../ref/am_conf/bt_prefix.html">Btree
+prefix comparison</a> section of the Reference Guide for more details about
+how this works. The usefulness of this is data dependent, but in some
+data sets can produce significantly reduced tree sizes and search times.
+<p>If no prefix function or key comparison function is specified by the
+application, a default lexical comparison function is used as the prefix
+function. If no prefix function is specified and a key comparison
+function is specified, no prefix function is used. It is an error to
+specify a prefix function without also specifying a key comparison
+function.
+<p>The Db.set_bt_prefix interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_bt_prefix method throws an exception that encapsulates a non-zero error value on
+failure.
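+<p>As an illustrative sketch (the class name is hypothetical), the following
+prefix function returns one byte past the first byte at which the two keys
+differ, which is only appropriate alongside a byte-by-byte key comparison
+function:
+<p><pre>
+import com.sleepycat.db.*;
+<p>
+public class ByteWisePrefix implements DbBtreePrefix
+{
+	public int bt_prefix(Db db, Dbt dbt1, Dbt dbt2)
+	{
+		byte[] b1 = dbt1.get_data();
+		byte[] b2 = dbt2.get_data();
+		int len = Math.min(dbt1.get_size(), dbt2.get_size());
+		for (int i = 0; i != len; ++i)
+			if (b1[i] != b2[i])
+				return i + 1;
+		// One key is a prefix of the other, or the keys are equal.
+		return Math.min(dbt2.get_size(), len + 1);
+	}
+}
+</pre>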
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_cachesize.html b/bdb/docs/api_java/db_set_cachesize.html
new file mode 100644
index 00000000000..67313aa5dd7
--- /dev/null
+++ b/bdb/docs/api_java/db_set_cachesize.html
@@ -0,0 +1,99 @@
+<!--$Id: db_set_cachesize.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--$Id: m4.cachesize,v 10.7 2000/02/11 18:54:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_cachesize</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int set_cachesize(int gbytes, int bytes, int ncache)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the database's shared memory buffer pool, i.e., the cache,
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be the
+size of the normal working data set of the application, with some small
+amount of additional memory for unusual situations. (Note, the working
+set is not the same as the number of simultaneously referenced pages, and
+should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures, e.g., some
+releases of Solaris limit the amount of memory that may be allocated
+contiguously by a process. If <b>ncache</b> is 0 or 1, the cache will
+be allocated contiguously in memory. If it is greater than 1, the cache
+will be broken up into <b>ncache</b> equally sized separate pieces of
+memory.
+<p>As databases opened within Berkeley DB environments use the cache specified to
+the environment, it is an error to attempt to set a cache in a database
+created within an environment.
+<p>The Db.set_cachesize interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_cachesize method throws an exception that encapsulates a non-zero error value on
+failure.
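+<p>For illustration only (the class and file names are hypothetical), the
+following fragment configures a 64MB cache in a single contiguous region for
+a database opened outside of any environment:
+<p><pre>
+import com.sleepycat.db.*;
+<p>
+public class CachesizeSketch
+{
+	public static void main(String argv[])
+		throws Exception
+	{
+		Db db = new Db(null, 0);
+		// 0 gigabytes plus 64MB, in one contiguous piece of memory.
+		db.set_cachesize(0, 64 * 1024 * 1024, 1);
+		db.open("example.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+		db.close(0);
+	}
+}
+</pre>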
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_dup_compare.html b/bdb/docs/api_java/db_set_dup_compare.html
new file mode 100644
index 00000000000..ea12dda35bc
--- /dev/null
+++ b/bdb/docs/api_java/db_set_dup_compare.html
@@ -0,0 +1,102 @@
+<!--$Id: db_set_dup_compare.so,v 10.21 2000/10/26 15:20:40 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_dup_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_dup_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbDupCompare
+{
+ public abstract int dup_compare(Db db, Dbt dbt1, Dbt dbt2);
+}
+public class Db
+{
+ public void set_dup_compare(DbDupCompare dup_compare)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set the duplicate data item comparison function. The comparison function
+is called when it is necessary to compare a data item specified by the
+application with a data item currently stored in the tree. The first
+argument to the comparison function is the <a href="../api_java/dbt_class.html">Dbt</a> representing the
+application's data item, the second is the current tree's data item.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first data item argument is considered
+to be respectively less than, equal to, or greater than the second data
+item argument. In addition, the comparison function must cause the data
+items in the set to be <i>well-ordered</i>. The comparison function
+must correctly handle any data item values used by the application
+(possibly including zero-length data items). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_java/dbt_class.html">Dbt</a> are the only fields that may be
+used for the purposes of this comparison.
+<p>If no comparison function is specified, the data items are compared
+lexically, with shorter data items collating before longer data items.
+The same duplicate data item comparison method must be used each time
+a particular Btree is opened.
+<p>The Db.set_dup_compare interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_dup_compare method throws an exception that encapsulates a non-zero error value on
+failure.
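+<p>As an illustrative sketch (the class name is hypothetical), the following
+function sorts duplicate data items by length and resolves ties with a
+string comparison; the database must also be configured with
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>(Db.DB_DUPSORT):
+<p><pre>
+import com.sleepycat.db.*;
+<p>
+public class LengthFirstDupCompare implements DbDupCompare
+{
+	public int dup_compare(Db db, Dbt dbt1, Dbt dbt2)
+	{
+		// Shorter data items collate before longer ones.
+		int diff = dbt1.get_size() - dbt2.get_size();
+		if (diff != 0)
+			return diff;
+		if (dbt1.get_size() == 0)
+			return 0;
+		String s1 = new String(dbt1.get_data(), 0, dbt1.get_size());
+		String s2 = new String(dbt2.get_data(), 0, dbt2.get_size());
+		return s1.compareTo(s2);
+	}
+}
+</pre>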
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_errcall.html b/bdb/docs/api_java/db_set_errcall.html
new file mode 100644
index 00000000000..62f39f6b3ff
--- /dev/null
+++ b/bdb/docs/api_java/db_set_errcall.html
@@ -0,0 +1,81 @@
+<!--$Id: db_set_errcall.so,v 10.7 1999/12/20 08:52:28 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_errcall</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbErrcall
+{
+ public abstract void errcall(String errpfx, String msg);
+}
+public class Db
+{
+ public void set_errcall(DbErrcall errcall);
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown. In
+some cases, however, the <b>errno</b> value may be insufficient to
+completely describe the cause of the error, especially during initial
+application debugging.
+<p>The Db.set_errcall method is used to enhance the mechanism for reporting error
+messages to the application. The Db.set_errcall method must be
+called with a single object argument. The object's class must implement
+the DbErrcall interface. In some cases, when an error occurs, Berkeley DB will
+invoke the object's errcall() method with two arguments: the first is the
+prefix string (as previously set by <a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a> or
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>), and the second is the error message string.
+It is up to this method to display the message in an appropriate manner.
+<p>Alternatively, you can use the <a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a> method to display
+the additional information via an output stream. You should not mix these
+approaches.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_java/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db.set_errcall method affects the entire environment and is equivalent to calling
+the <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> method.
+<p>The Db.set_errcall interface may be used to configure Berkeley DB at any time
+during the life of the application.
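+<p>As an illustrative sketch (the class name is hypothetical), an application
+might simply echo each message, with its prefix, to the standard error
+stream:
+<p><pre>
+import com.sleepycat.db.*;
+<p>
+public class StderrErrcall implements DbErrcall
+{
+	public void errcall(String errpfx, String msg)
+	{
+		if (errpfx != null)
+			System.err.print(errpfx + ": ");
+		System.err.println(msg);
+	}
+}
+</pre>
+<p>It would be registered with db.set_errcall(new StderrErrcall()), typically
+after setting a prefix with <a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>.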
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_errpfx.html b/bdb/docs/api_java/db_set_errpfx.html
new file mode 100644
index 00000000000..36db5bd8af4
--- /dev/null
+++ b/bdb/docs/api_java/db_set_errpfx.html
@@ -0,0 +1,55 @@
+<!--$Id: db_set_errpfx.so,v 10.6 1999/12/20 08:52:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_errpfx</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_errpfx(String errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>For <a href="../api_java/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db.set_errpfx method affects the entire environment and is equivalent to calling
+the <a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a> method.
+<p>The Db.set_errpfx interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_feedback.html b/bdb/docs/api_java/db_set_feedback.html
new file mode 100644
index 00000000000..b6dc64fc220
--- /dev/null
+++ b/bdb/docs/api_java/db_set_feedback.html
@@ -0,0 +1,95 @@
+<!--$Id: db_set_feedback.so,v 10.16 2000/07/09 19:11:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_feedback</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbFeedback
+{
+ public abstract void db_feedback(Db db, int opcode, int pct);
+}
+public class Db
+{
+ public void set_feedback(DbFeedback db_feedback)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The Db.set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback method. This method must be declared with
+three arguments: the first will be a reference to the enclosing database
+handle, the second a flag value, and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback method to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_UPGRADE">Db.DB_UPGRADE</a><dd>The underlying database is being upgraded.
+<p><dt><a name="Db.DB_VERIFY">Db.DB_VERIFY</a><dd>The underlying database is being verified.
+</dl>
+<p>The Db.set_feedback interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The Db.set_feedback method throws an exception that encapsulates a non-zero error value on
+failure.
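+<p>As an illustrative sketch (the class name is hypothetical), a callback
+might report the progress of a verify or upgrade as a percentage:
+<p><pre>
+import com.sleepycat.db.*;
+<p>
+public class ProgressFeedback implements DbFeedback
+{
+	public void db_feedback(Db db, int opcode, int pct)
+	{
+		String op = (opcode == Db.DB_UPGRADE) ? "upgrade" :
+		    (opcode == Db.DB_VERIFY) ? "verify" : "operation";
+		System.out.println(op + ": " + pct + "% complete");
+	}
+}
+</pre>
+<p>It would be registered with db.set_feedback(new ProgressFeedback()).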
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_flags.html b/bdb/docs/api_java/db_set_flags.html
new file mode 100644
index 00000000000..2a79213ea45
--- /dev/null
+++ b/bdb/docs/api_java/db_set_flags.html
@@ -0,0 +1,170 @@
+<!--$Id: db_set_flags.so,v 10.26 2000/03/17 01:53:58 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_flags</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_flags(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Calling Db.set_flags is additive; there is no way to clear flags.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<h3>Btree</h3>
+<p>The following flags may be specified for the Btree access method:
+<p><dl compact>
+<p><dt><a name="Db.DB_DUP">Db.DB_DUP</a><dd>Permit duplicate data items in the tree, i.e. insertion when the key of
+the key/data pair being inserted already exists in the tree will be
+successful. The ordering of duplicates in the tree is determined by the
+order of insertion, unless the ordering is otherwise specified by use of
+a cursor operation. It is an error to specify both Db.DB_DUP and
+Db.DB_RECNUM.
+<p><dt><a name="Db.DB_DUPSORT">Db.DB_DUPSORT</a><dd>Permit duplicate data items in the tree, i.e. insertion when the key of
+the key/data pair being inserted already exists in the tree will be
+successful. The ordering of duplicates in the tree is determined by the
+duplicate comparison function.
+A default, lexical comparison will be used.
+It is an error to specify both Db.DB_DUPSORT and Db.DB_RECNUM.
+<p><dt><a name="Db.DB_RECNUM">Db.DB_RECNUM</a><dd>Support retrieval from the Btree using record numbers. For more
+information, see the DB_GET_RECNO flag to the <a href="../api_java/db_get.html">Db.get</a> and
+<a href="../api_java/dbc_get.html">Dbc.get</a> methods.
+<p>Logical record numbers in Btree databases are mutable in the face of
+record insertion or deletion. See the DB_RENUMBER flag in the Recno
+access method information for further discussion.
+<p>Maintaining record counts within a Btree introduces a serious point of
+contention, namely the page locations where the record counts are stored.
+In addition, the entire tree must be locked during both insertions and
+deletions, effectively single-threading the tree for those operations.
+Specifying DB_RECNUM can result in serious performance degradation for
+some applications and data sets.
+<p>It is an error to specify both DB_DUP and DB_RECNUM.
+<p><dt><a name="Db.DB_REVSPLITOFF">Db.DB_REVSPLITOFF</a><dd>Turn off reverse splitting in the Btree. As pages are emptied in a
+database, the Berkeley DB Btree implementation attempts to coalesce empty pages
+into higher-level pages in order to keep the tree as small as possible
+and minimize tree search time. This can hurt performance in applications
+with cyclical data demands, that is, applications where the database grows
+and shrinks repeatedly. For example, because Berkeley DB does page-level
+locking, the maximum level of concurrency in a database of 2 pages is far
+smaller than that in a database of 100 pages, and so a database that has
+shrunk to a minimal size can cause severe deadlocking when a new cycle of
+data insertion begins.
+</dl>
+<h3>Hash</h3>
+<p>The following flags may be specified for the Hash access method:
+<p><dl compact>
+<p><dt><a name="Db.DB_DUP">Db.DB_DUP</a><dd>Permit duplicate data items in the tree, i.e. insertion when the key of
+the key/data pair being inserted already exists in the tree will be
+successful. The ordering of duplicates in the tree is determined by the
+order of insertion, unless the ordering is otherwise specified by use of
+a cursor operation. It is an error to specify both Db.DB_DUP and
+Db.DB_RECNUM.
+<p><dt><a name="Db.DB_DUPSORT">Db.DB_DUPSORT</a><dd>Permit duplicate data items in the tree, i.e. insertion when the key of
+the key/data pair being inserted already exists in the tree will be
+successful. The ordering of duplicates in the tree is determined by the
+duplicate comparison function.
+A default, lexical comparison will be used.
+It is an error to specify both Db.DB_DUPSORT and Db.DB_RECNUM.
+</dl>
+<h3>Queue</h3>
+<p>There are no additional flags that may be specified for the Queue access
+method.
+<h3>Recno</h3>
+<p>The following flags may be specified for the Recno access method:
+<p><dl compact>
+<p><dt><a name="Db.DB_RENUMBER">Db.DB_RENUMBER</a><dd>Specifying the DB_RENUMBER flag causes the logical record numbers to be
+mutable, and change as records are added to and deleted from the database.
+For example, the deletion of record number 4 causes records numbered 5
+and greater to be renumbered downward by 1. If a cursor was positioned
+to record number 4 before the deletion, it will reference the new record
+number 4, if any such record exists, after the deletion. If a cursor was
+positioned after record number 4 before the deletion, it will be shifted
+downward 1 logical record, continuing to reference the same record as it
+did before.
+<p>Using the <a href="../api_java/db_put.html">Db.put</a> or <a href="../api_java/dbc_put.html">Dbc.put</a> interfaces to create new
+records will cause the creation of multiple records if the record number
+is more than one greater than the largest record currently in the
+database. For example, creating record 28, when record 25 was previously
+the last record in the database, will create records 26 and 27 as well as
+28. Attempts to retrieve records that were created in this manner will
+result in an error return of <a href="../ref/program/errorret.html#DB_KEYEMPTY">Db.DB_KEYEMPTY</a>.
+<p>If a created record is not at the end of the database, all records
+following the new record will be automatically renumbered upward by 1.
+For example, the creation of a new record numbered 8 causes records
+numbered 8 and greater to be renumbered upward by 1. If a cursor was
+positioned to record number 8 or greater before the insertion, it will be
+shifted upward 1 logical record, continuing to reference the same record
+as it did before.
+<p>For these reasons, concurrent access to a Recno database with the
+Db.DB_RENUMBER flag specified may be largely meaningless, although
+it is supported.
+<p><dt><a name="Db.DB_SNAPSHOT">Db.DB_SNAPSHOT</a><dd>This flag specifies that any specified <b>re_source</b> file be read in
+its entirety when <a href="../api_java/db_open.html">Db.open</a> is called. If this flag is not
+specified, the <b>re_source</b> file may be read lazily.
+</dl>
+<p>The Db.set_flags interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_flags method throws an exception that encapsulates a non-zero error value on
+failure.
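+<p>As an illustrative sketch (not part of the original page; the file name
+is hypothetical, and the Db.open signature shown assumes this release's
+Java API), a Btree database might be configured for sorted duplicates
+before it is opened:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class SetFlagsExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);          // no environment
+        // Flags are additive and must be set before Db.open is called.
+        db.set_flags(Db.DB_DUPSORT);      // sorted duplicate data items
+        db.open("access.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>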
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_h_ffactor.html b/bdb/docs/api_java/db_set_h_ffactor.html
new file mode 100644
index 00000000000..c5d10aab05c
--- /dev/null
+++ b/bdb/docs/api_java/db_set_h_ffactor.html
@@ -0,0 +1,86 @@
+<!--$Id: db_set_h_ffactor.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_h_ffactor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_h_ffactor</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_h_ffactor(int h_ffactor)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the desired density within the hash table.
+<p>The density is an approximation of the number of keys allowed to
+accumulate in any one bucket, determining when the hash table grows or
+shrinks. If you know the average sizes of the keys and data in your
+dataset, setting the fill factor can enhance performance. A reasonable
+rule for computing the fill factor is to set it to:
+<p><blockquote><pre>(pagesize - 32) / (average_key_size + average_data_size + 8)</pre></blockquote>
+<p>If no value is specified, the fill factor will be selected dynamically as
+pages are filled.
+<p>The Db.set_h_ffactor interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_h_ffactor method throws an exception that encapsulates a non-zero error value on
+failure.
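+<p>As an illustrative sketch (not part of the original page; the page, key
+and data sizes are hypothetical), the rule above can be applied directly
+when configuring a Hash database:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class FillFactorExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        int pagesize = 8 * 1024;          // hypothetical page size, in bytes
+        int avg_key = 10;                 // hypothetical average key size
+        int avg_data = 50;                // hypothetical average data size
+
+        // (pagesize - 32) / (average_key_size + average_data_size + 8)
+        int ffactor = (pagesize - 32) / (avg_key + avg_data + 8);
+
+        Db db = new Db(null, 0);
+        db.set_pagesize(pagesize);
+        db.set_h_ffactor(ffactor);        // 120 items per bucket in this case
+        db.open("hash.db", null, Db.DB_HASH, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>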
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_h_hash.html b/bdb/docs/api_java/db_set_h_hash.html
new file mode 100644
index 00000000000..89bccc1fbb9
--- /dev/null
+++ b/bdb/docs/api_java/db_set_h_hash.html
@@ -0,0 +1,97 @@
+<!--$Id: db_set_h_hash.so,v 10.18 2000/07/04 18:28:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_h_hash</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_h_hash</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbHash
+{
+ public abstract int hash(Db db, byte[] data, int len);
+}
+public class Db
+{
+ public void set_h_hash(DbHash h_hash)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set a user-defined hash method; if no hash method is specified, a default
+hash method is used. Since no hash method performs equally well on all
+possible data, the user may find that the built-in hash method performs
+poorly with a particular data set. User-specified hash methods must
+take a byte array and a length as arguments and return a
+value of type
+<b>int</b>.
+The hash function must handle any key values used by the application
+(possibly including zero-length keys).
+<p>If a hash method is specified, <a href="../api_java/db_open.html">Db.open</a> will attempt to determine
+if the hash method specified is the same as the one with which the database
+was created, and will fail if it detects that it is not.
+<p>The Db.set_h_hash interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_h_hash method throws an exception that encapsulates a non-zero error value on
+failure.
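+<p>As an illustrative sketch (not part of the original page; the hash shown
+is a simple example function, not the library's built-in method), a
+user-defined hash method might be installed as follows:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class HashExample
+{
+    // The method must handle any key the application uses,
+    // including zero-length keys.
+    static class ExampleHash implements DbHash
+    {
+        public int hash(Db db, byte[] data, int len)
+        {
+            int h = 0;
+            for (int i = 0; i &lt; len; i++)
+                h = h * 31 + (data[i] &amp; 0xff);
+            return h;
+        }
+    }
+
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.set_h_hash(new ExampleHash()); // must precede Db.open
+        db.open("hash.db", null, Db.DB_HASH, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>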
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_h_nelem.html b/bdb/docs/api_java/db_set_h_nelem.html
new file mode 100644
index 00000000000..279e109abf7
--- /dev/null
+++ b/bdb/docs/api_java/db_set_h_nelem.html
@@ -0,0 +1,81 @@
+<!--$Id: db_set_h_nelem.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_h_nelem</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_h_nelem</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_h_nelem(int h_nelem)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set an estimate of the final size of the hash table.
+<p>If not set or set too low, hash tables will still expand gracefully
+as keys are entered, although a slight performance degradation may be
+noticed.
+<p>The Db.set_h_nelem interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_h_nelem method throws an exception that encapsulates a non-zero error value on
+failure.
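+<p>As an illustrative sketch (not part of the original page; the element
+count and file name are hypothetical), an application expecting roughly
+one million elements might pre-size the table:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class NelemExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.set_h_nelem(1000000);          // estimated final table size
+        db.open("hash.db", null, Db.DB_HASH, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>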
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_lorder.html b/bdb/docs/api_java/db_set_lorder.html
new file mode 100644
index 00000000000..9f6ce37d996
--- /dev/null
+++ b/bdb/docs/api_java/db_set_lorder.html
@@ -0,0 +1,87 @@
+<!--$Id: db_set_lorder.so,v 10.15 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_lorder</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_lorder</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lorder(int lorder)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the byte order for integers in the stored database metadata. The
+number should represent the order as an integer; for example, big endian
+order is the number 4321, and little endian order is the number 1234.
+If <b>lorder</b> is not explicitly set, the host order of the machine
+where the Berkeley DB library was compiled is used.
+<p>The value of <b>lorder</b> is ignored except when databases are being
+created. If a database already exists, the byte order it uses is
+determined when the database is opened.
+<p><b>The access methods provide no guarantees about the byte ordering of the
+application data stored in the database, and applications are responsible
+for maintaining any necessary ordering.</b>
+<p>The Db.set_lorder interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_lorder method throws an exception that encapsulates a non-zero error value on
+failure.
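+<p>As an illustrative sketch (not part of the original page; the file name
+is hypothetical), a database whose metadata should always be stored in
+little endian order can be configured explicitly:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class LorderExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.set_lorder(1234);              // little endian; 4321 is big endian
+        db.open("data.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>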
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_pagesize.html b/bdb/docs/api_java/db_set_pagesize.html
new file mode 100644
index 00000000000..23c2462a0c0
--- /dev/null
+++ b/bdb/docs/api_java/db_set_pagesize.html
@@ -0,0 +1,83 @@
+<!--$Id: db_set_pagesize.so,v 10.16 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_pagesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_pagesize</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_pagesize(long pagesize)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the pages used to hold items in the database, in bytes.
+The minimum page size is 512 bytes and the maximum page size is 64K bytes.
+If the page size is not explicitly set, one is selected based on the
+underlying filesystem I/O block size. The automatically selected size
+has a lower limit of 512 bytes and an upper limit of 16K bytes.
+<p>For information on tuning the Berkeley DB page size, see
+<a href="../ref/am_conf/pagesize.html">Selecting a page size</a>.
+<p>The Db.set_pagesize interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_pagesize method throws an exception that encapsulates a non-zero error value on
+failure.
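+<p>As an illustrative sketch (not part of the original page; the 8KB value
+is an arbitrary choice within the 512 byte to 64K byte range), the page
+size is simply set before the database is created:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class PagesizeExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.set_pagesize(8 * 1024);        // 8KB pages; must be set before Db.open
+        db.open("data.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>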
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_q_extentsize.html b/bdb/docs/api_java/db_set_q_extentsize.html
new file mode 100644
index 00000000000..081c5b76c75
--- /dev/null
+++ b/bdb/docs/api_java/db_set_q_extentsize.html
@@ -0,0 +1,83 @@
+<!--$Id: db_set_q_extentsize.so,v 1.3 2000/11/21 19:25:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_q_extentsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_q_extentsize</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_q_extentsize(int extentsize)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the extents used to hold pages in a Queue database,
+specified as a number of pages. Each extent is created as a separate
+physical file. If no extent size is set, the default behavior is to
+create only a single underlying database file.
+<p>For information on tuning the extent size, see
+<a href="../ref/am_conf/extentsize.html">Selecting a extent size</a>.
+<p>The Db.set_q_extentsize interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_q_extentsize method throws an exception that encapsulates a non-zero error value on
+failure.
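+<p>As an illustrative sketch (not part of the original page; the sizes and
+file name are hypothetical), a Queue database can be configured to use
+16-page extents; because the Queue access method stores fixed-length
+records, a record length is set as well (see
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>):
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class ExtentExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.set_re_len(128);               // Queue records are fixed-length
+        db.set_q_extentsize(16);          // each extent file holds 16 pages
+        db.open("queue.db", null, Db.DB_QUEUE, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>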
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_re_delim.html b/bdb/docs/api_java/db_set_re_delim.html
new file mode 100644
index 00000000000..dfe6bb848de
--- /dev/null
+++ b/bdb/docs/api_java/db_set_re_delim.html
@@ -0,0 +1,83 @@
+<!--$Id: db_set_re_delim.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_re_delim</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_re_delim</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_re_delim(int re_delim)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the delimiting byte used to mark the end of a record in the backing
+source file for the Recno access method.
+<p>This byte is used for variable length records, if the <b>re_source</b>
+file is specified. If the <b>re_source</b> file is specified and no
+delimiting byte was specified, &lt;newline&gt; characters (i.e.
+ASCII 0x0a) are interpreted as end-of-record markers.
+<p>The Db.set_re_delim interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_re_delim method throws an exception that encapsulates a non-zero error value on
+failure.
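+<p>As an illustrative sketch (not part of the original page; the file names
+and the ':' delimiter are hypothetical), a Recno database backed by a
+colon-delimited flat-text file could be configured as follows:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class DelimExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.set_re_source("records.txt");  // backing source file (must exist)
+        db.set_re_delim(':');             // ':' marks the end of each record
+        db.open("records.db", null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>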
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_re_len.html b/bdb/docs/api_java/db_set_re_len.html
new file mode 100644
index 00000000000..34fa523b09a
--- /dev/null
+++ b/bdb/docs/api_java/db_set_re_len.html
@@ -0,0 +1,87 @@
+<!--$Id: db_set_re_len.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_re_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_re_len</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_re_len(int re_len)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>For the Queue access method, specify that the records are of length
+<b>re_len</b>.
+<p>For the Recno access method, specify that the records are fixed-length,
+not byte delimited, and are of length <b>re_len</b>.
+<p>Any records added to the database that are less than <b>re_len</b> bytes
+long are automatically padded (see <a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a> for more
+information).
+<p>Any attempt to insert records into the database that are greater than
+<b>re_len</b> bytes long will cause the call to fail immediately and
+return an error.
+<p>The Db.set_re_len interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_re_len method throws an exception that encapsulates a non-zero error value on
+failure.
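+<p>As an illustrative sketch (not part of the original page; the length, pad
+byte and file name are hypothetical), fixed-length Recno records of 64
+bytes, padded with '.' when shorter, could be configured as follows:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class FixedLengthExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.set_re_len(64);                // every record occupies 64 bytes
+        db.set_re_pad('.');               // shorter records are padded with '.'
+        db.open("fixed.db", null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}
+</pre></blockquote>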
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_re_pad.html b/bdb/docs/api_java/db_set_re_pad.html
new file mode 100644
index 00000000000..118130c54b3
--- /dev/null
+++ b/bdb/docs/api_java/db_set_re_pad.html
@@ -0,0 +1,81 @@
+<!--$Id: db_set_re_pad.so,v 10.16 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_re_pad</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_re_pad</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_re_pad(int re_pad)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the padding character for short, fixed-length records for the Queue
+and Recno access methods.
+<p>If no pad character is specified, &lt;space&gt; characters (i.e.,
+ASCII 0x20) are used for padding.
+<p>The Db.set_re_pad interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_re_pad method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_set_re_source.html b/bdb/docs/api_java/db_set_re_source.html
new file mode 100644
index 00000000000..7ff82a20480
--- /dev/null
+++ b/bdb/docs/api_java/db_set_re_source.html
@@ -0,0 +1,123 @@
+<!--$Id: db_set_re_source.so,v 10.17 2000/05/01 21:57:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_re_source</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.set_re_source</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_re_source(String re_source)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the underlying source file for the Recno access method. The purpose
+of the <b>re_source</b> value is to provide fast access and modification
+to databases that are normally stored as flat text files.
+<p>If the <b>re_source</b> field is set, it specifies an underlying flat
+text database file that is read to initialize a transient record number
+index. In the case of variable length records, the records are separated
+as specified by <a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>. For example, standard UNIX
+byte stream files can be interpreted as a sequence of variable length
+records separated by &lt;newline&gt; characters.
+<p>In addition, when cached data would normally be written back to the
+underlying database file (e.g., the <a href="../api_java/db_close.html">Db.close</a> or <a href="../api_java/db_sync.html">Db.sync</a>
+methods are called), the in-memory copy of the database will be written
+back to the <b>re_source</b> file.
+<p>By default, the backing source file is read lazily, i.e., records are not
+read from the file until they are requested by the application.
+<b>If multiple processes (not threads) are accessing a Recno database
+concurrently and either inserting or deleting records, the backing source
+file must be read in its entirety before more than a single process
+accesses the database, and only that process should specify the backing
+source file as part of the <a href="../api_java/db_open.html">Db.open</a> call. See the <a href="../api_java/db_set_flags.html#DB_SNAPSHOT">Db.DB_SNAPSHOT</a>
+flag for more information.</b>
+<p><b>Reading and writing the backing source file specified by <b>re_source</b>
+cannot be transactionally protected because it involves filesystem
+operations that are not part of the Db transaction methodology.</b>
+For this reason, if a temporary database is used to hold the records,
+i.e., a null was specified as the <b>file</b> argument to <a href="../api_java/db_open.html">Db.open</a>,
+it is possible to lose the contents of the <b>re_source</b> file, e.g.,
+if the system crashes at the right instant.
+If a file is used to hold the database, i.e., a file name was specified
+as the <b>file</b> argument to <a href="../api_java/db_open.html">Db.open</a>, normal database
+recovery on that file can be used to prevent information loss,
+although it is still possible that the contents of <b>re_source</b>
+will be lost if the system crashes.
+<p>The <b>re_source</b> file must already exist (but may be zero-length) when
+<a href="../api_java/db_open.html">Db.open</a> is called.
+<p>It is not an error to specify a read-only <b>re_source</b> file when
+creating a database, nor is it an error to modify the resulting database.
+However, any attempt to write the changes to the backing source file using
+either the <a href="../api_java/db_sync.html">Db.sync</a> or <a href="../api_java/db_close.html">Db.close</a> methods will fail, of course.
+Specify the <a href="../api_java/db_close.html#DB_NOSYNC">Db.DB_NOSYNC</a> flag to the <a href="../api_java/db_close.html">Db.close</a> method to stop it
+from attempting to write the changes to the backing file; instead, they
+will be silently discarded.
+<p>For all of the above reasons, the <b>re_source</b> field is generally
+used to specify databases that are read-only for <a href="../api_java/db_class.html">Db</a> applications,
+and that are either generated on the fly by software tools, or modified
+using a different mechanism, e.g., a text editor.
+<p>The Db.set_re_source interface may only be used to configure Berkeley DB before
+the <a href="../api_java/db_open.html">Db.open</a> interface is called.
+<p>The Db.set_re_source method throws an exception that encapsulates a non-zero error value on
+failure.
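+<p>As an illustrative sketch (not part of the original page; the file names
+are hypothetical), a Recno database backed by a newline-delimited text
+file, read in its entirety at open time, could be configured as follows:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class SourceExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.set_re_source("records.txt");  // must already exist (may be empty)
+        db.set_flags(Db.DB_SNAPSHOT);     // read the whole file at Db.open time
+        db.open("records.db", null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+        // ... read and modify records through the Db handle ...
+        db.close(0);                      // writes changes back to records.txt
+    }
+}
+</pre></blockquote>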
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_stat.html b/bdb/docs/api_java/db_stat.html
new file mode 100644
index 00000000000..197ba19138d
--- /dev/null
+++ b/bdb/docs/api_java/db_stat.html
@@ -0,0 +1,185 @@
+<!--$Id: db_stat.so,v 10.37 2000/10/03 21:55:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.stat</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public Object Db.stat(int flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.stat method creates a statistical structure and
+fills it with statistics for the database.
+<p>Statistical structures are created in allocated memory. If <b>db_malloc</b> is non-NULL, it
+is called to allocate the memory; otherwise, the library function
+<b>malloc</b>(3) is used. The function <b>db_malloc</b> must match
+the calling conventions of the <b>malloc</b>(3) library routine.
+Regardless, the caller is responsible for deallocating the returned
+memory. To deallocate returned memory, free the returned memory
+reference, references inside the returned memory do not need to be
+individually freed.
+<p>The <b>flags</b> parameter must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_CACHED_COUNTS">Db.DB_CACHED_COUNTS</a><dd>Return a cached count of the keys and records in a database. This flag
+makes it possible for applications to request a possibly approximate key
+and record count without incurring the performance penalty of traversing
+the entire database. The statistics information described for the access
+method <b>XX_nkeys</b> and <b>XX_ndata</b> fields below is filled in,
+but no other information is collected. If the cached information has
+never been set, the fields will be returned set to 0.
+<p><dt><a name="Db.DB_RECORDCOUNT">Db.DB_RECORDCOUNT</a><dd>Return a count of the records in a Btree or Recno Access Method database.
+This flag makes it possible for applications to request a record count
+without incurring the performance penalty of traversing the entire
+database. The statistics information described for the <b>bt_nkeys</b>
+field below is filled in, but no other information is collected.
+<p>This option is only available for Recno databases, or Btree databases
+where the underlying database was created with the <a href="../api_java/db_set_flags.html#DB_RECNUM">Db.DB_RECNUM</a>
+flag.
+</dl>
+<p>The Db.stat method may access all of the pages in the database,
+incurring a severe performance penalty as well as possibly flushing the
+underlying buffer pool.
+<p>In the presence of multiple threads or processes accessing an active
+database, the information returned by Db.stat may be out-of-date.
+<p>If the database was not opened readonly and the Db.DB_CACHED_COUNTS
+flag was not specified, the cached key and record numbers will be updated
+after the statistical information has been gathered.
+<p>The Db.stat method cannot be transaction protected. For this reason,
+it should be called in a thread of control that has no open cursors or
+active transactions.
+<p>The Db.stat method throws an exception that encapsulates a non-zero error value on
+failure.
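+<p>As an illustrative sketch (not part of the original page; the file name
+is hypothetical, and the Db.open signature shown assumes this release's
+Java API), the returned Object is cast to the statistics class matching
+the database's access method, as described below:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+public class StatExample
+{
+    public static void main(String[] argv)
+        throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.open("access.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+        // Btree and Recno databases return a DbBtreeStat instance.
+        DbBtreeStat stat = (DbBtreeStat)db.stat(0);
+        System.out.println("keys:       " + stat.bt_nkeys);
+        System.out.println("data items: " + stat.bt_ndata);
+        System.out.println("page size:  " + stat.bt_pagesize);
+
+        db.close(0);
+    }
+}
+</pre></blockquote>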
+<h3>Hash Statistics</h3>
+<p>In the case of a Hash database,
+the statistics are returned in an instance of DbHashStat. The data
+fields are available from DbHashStat:
+<p><dl compact>
+<p><dt>public int hash_magic;<dd>Magic number that identifies the file as a Hash file.
+<dt>public int hash_version;<dd>The version of the Hash database.
+<dt>public int hash_nkeys;<dd>The number of unique keys in the database.
+<dt>public int hash_ndata;<dd>The number of key/data pairs in the database.
+<dt>public int hash_pagesize;<dd>The underlying Hash database page (and bucket) size.
+<dt>public int hash_nelem;<dd>The estimated size of the hash table specified at database creation time.
+<dt>public int hash_ffactor;<dd>The desired fill factor (number of items per bucket) specified at database
+creation time.
+<dt>public int hash_buckets;<dd>The number of hash buckets.
+<dt>public int hash_free;<dd>The number of pages on the free list.
+<dt>public int hash_bfree;<dd>The number of bytes free on bucket pages.
+<dt>public int hash_bigpages;<dd>The number of big key/data pages.
+<dt>public int hash_big_bfree;<dd>The number of bytes free on big item pages.
+<dt>public int hash_overflows;<dd>The number of overflow pages (overflow pages are pages that contain items
+that did not fit in the main bucket page).
+<dt>public int hash_ovfl_free;<dd>The number of bytes free on overflow pages.
+<dt>public int hash_dup;<dd>The number of duplicate pages.
+<dt>public int hash_dup_free;<dd>The number of bytes free on duplicate pages.
+</dl>
+<h3>Btree and Recno Statistics</h3>
+<p>In the case of a Btree or Recno database,
+the statistics are returned in an instance of DbBtreeStat. The data
+fields are available from DbBtreeStat:
+<p><dl compact>
+<p><dt>public int bt_magic;<dd>Magic number that identifies the file as a Btree database.
+<dt>public int bt_version;<dd>The version of the Btree database.
+<dt>public int bt_nkeys;<dd>For the Btree Access Method, the number of unique keys in the database.
+<p>For the Recno Access Method, the number of records in the database.
+<dt>public int bt_ndata;<dd>For the Btree Access Method, the number of key/data pairs in the database.
+<p>For the Recno Access Method, the number of records in the database. If
+the database has been configured to not re-number records during
+deletion, the number of records will only reflect undeleted records.
+<dt>public int bt_pagesize;<dd>Underlying database page size.
+<dt>public int bt_minkey;<dd>The minimum keys per page.
+<dt>public int bt_re_len;<dd>The length of fixed-length records.
+<dt>public int bt_re_pad;<dd>The padding byte value for fixed-length records.
+<dt>public int bt_levels;<dd>Number of levels in the database.
+<dt>public int bt_int_pg;<dd>Number of database internal pages.
+<dt>public int bt_leaf_pg;<dd>Number of database leaf pages.
+<dt>public int bt_dup_pg;<dd>Number of database duplicate pages.
+<dt>public int bt_over_pg;<dd>Number of database overflow pages.
+<dt>public int bt_free;<dd>Number of pages on the free list.
+<dt>public int bt_int_pgfree;<dd>Number of bytes free in database internal pages.
+<dt>public int bt_leaf_pgfree;<dd>Number of bytes free in database leaf pages.
+<dt>public int bt_dup_pgfree;<dd>Number of bytes free in database duplicate pages.
+<dt>public int bt_over_pgfree;<dd>Number of bytes free in database overflow pages.
+</dl>
+<h3>Queue Statistics</h3>
+<p>In the case of a Queue database,
+the statistics are returned in an instance of DbQueueStat. The data
+fields are available from DbQueueStat:
+<p><dl compact>
+<p><dt>public int qs_magic;<dd>Magic number that identifies the file as a Queue file.
+<dt>public int qs_version;<dd>The version of the Queue file type.
+<dt>public int qs_nkeys;<dd>The number of records in the database.
+<dt>public int qs_ndata;<dd>The number of records in the database.
+<dt>public int qs_pagesize;<dd>Underlying database page size.
+<dt>public int qs_pages;<dd>Number of pages in the database.
+<dt>public int qs_re_len;<dd>The length of the records.
+<dt>public int qs_re_pad;<dd>The padding byte value for the records.
+<dt>public int qs_pgfree;<dd>Number of bytes free in database pages.
+<dt>public int qs_start;<dd>Start offset.
+<dt>public int qs_first_recno;<dd>First undeleted record in the database.
+<dt>public int qs_cur_recno;<dd>Last allocated record number in the database.
+</dl>
+<p>The Db.stat method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The Db.stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.stat method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_sync.html b/bdb/docs/api_java/db_sync.html
new file mode 100644
index 00000000000..5162bd13d55
--- /dev/null
+++ b/bdb/docs/api_java/db_sync.html
@@ -0,0 +1,91 @@
+<!--$Id: db_sync.so,v 10.20 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.sync</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int sync(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.sync method flushes any cached information to disk.
+<p>If the database is in memory only, the Db.sync method has no effect and
+will always succeed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>See <a href="../api_java/db_close.html">Db.close</a> for a discussion of Berkeley DB and cached data.
+<p>The Db.sync method throws an exception that encapsulates a non-zero error value on
+failure, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">Db.DB_INCOMPLETE</a> if the underlying database still has
+dirty pages in the cache. (The only reason to return
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">Db.DB_INCOMPLETE</a> is if another thread of control was writing pages
+in the underlying database file at the same time as the
+Db.sync method was being called. For this reason, a return of
+<a href="../api_c/memp_fsync.html#DB_INCOMPLETE">Db.DB_INCOMPLETE</a> can normally be ignored, or, in cases where it is
+a possible return value, there may be no reason to call
+Db.sync.)
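+<p>For example, a minimal sketch, assuming <b>db</b> is an already-open
+Db handle, might flush the database as follows:
+<p><pre>
+import com.sleepycat.db.*;
+
+class Flush {
+    // Flush any cached pages for an open database to disk.
+    static void flush(Db db) throws DbException {
+        if (db.sync(0) == Db.DB_INCOMPLETE) {
+            // Another thread of control was writing pages at the same
+            // time; this return can normally be ignored.
+            System.err.println("Db.sync: cache still contains dirty pages");
+        }
+    }
+}
+</pre>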
+<h1>Errors</h1>
+<p>The Db.sync method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db.sync method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.sync method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_upgrade.html b/bdb/docs/api_java/db_upgrade.html
new file mode 100644
index 00000000000..6f6da088c35
--- /dev/null
+++ b/bdb/docs/api_java/db_upgrade.html
@@ -0,0 +1,125 @@
+<!--$Id: db_upgrade.so,v 10.18 2000/05/01 15:58:04 krinsky Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.upgrade</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void upgrade(String file, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.upgrade method upgrades all of the databases included in the
+file <b>file</b>, if necessary. If no upgrade is necessary,
+Db.upgrade always returns success.
+<p><b>Database upgrades are done in place and are destructive; for example, if
+pages need to be allocated and no disk space is available, the database may
+be left corrupted. Backups should be made before databases are upgraded.
+See <a href="../ref/am/upgrade.html">Upgrading databases</a> for more
+information.</b>
+<p>Unlike all other database operations, Db.upgrade may only be done
+on a system with the same byte-order as the database.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="Db.DB_DUPSORT">Db.DB_DUPSORT</a><dd><b>This flag is only meaningful when upgrading databases from
+releases before the Berkeley DB 3.1 release.</b>
+<p>As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release, the
+on-disk format of duplicate data items changed. Correctly upgrading the
+format requires that applications specify whether duplicate data items in the
+database are sorted. Specifying the Db.DB_DUPSORT flag
+informs Db.upgrade that the duplicates are sorted, otherwise they
+are assumed to be unsorted. Incorrectly specifying the value of this flag
+may lead to database corruption.
+<p>Further, because the Db.upgrade method upgrades a physical file
+(including all of the databases it contains), it is not possible to use
+Db.upgrade to upgrade files in which some of the included databases
+have sorted duplicate data items and some have unsorted duplicate data
+items. If the file does not have
+more than a single database, or the databases do not support duplicate
+data items, or all of the databases that support duplicate data items
+support the same style of duplicates (either sorted or unsorted),
+Db.upgrade will work correctly as long as the Db.DB_DUPSORT
+flag is correctly specified. Otherwise, the file cannot be upgraded using
+Db.upgrade, and must be upgraded manually by dumping and
+re-loading the databases.
+</dl>
+<p>The Db.upgrade method throws an exception that encapsulates a non-zero error value on
+failure.
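+<p>For example, a minimal sketch, assuming <b>db</b> is a Db handle that has
+not yet been opened and that the hypothetical file <b>access.db</b> contains
+no sorted duplicates, might be:
+<p><pre>
+import com.sleepycat.db.*;
+
+class Upgrade {
+    // Upgrade, in place, all databases in the (hypothetical) file access.db.
+    // Back up the file first; upgrades are destructive.
+    static void upgrade(Db db) throws DbException {
+        // Pass Db.DB_DUPSORT instead of 0 if the file's databases were
+        // created with sorted duplicates.
+        db.upgrade("access.db", 0);
+    }
+}
+</pre>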
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_java/env_open.html">DbEnv.open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db.upgrade
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The Db.upgrade method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The database is not in the same byte-order as the system.
+</dl>
+<p><dl compact>
+<p><dt><a name="Db.DB_OLD_VERSION">Db.DB_OLD_VERSION</a><dd>The database cannot be upgraded by this version of the Berkeley DB software.
+</dl>
+<p>The Db.upgrade method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.upgrade method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/db_verify.html b/bdb/docs/api_java/db_verify.html
new file mode 100644
index 00000000000..e2702028305
--- /dev/null
+++ b/bdb/docs/api_java/db_verify.html
@@ -0,0 +1,140 @@
+<!--$Id: db_verify.so,v 10.3 2000/04/11 15:13:51 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Db.verify</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Db.verify</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void verify(String file,
+ String database, java.io.OutputStream outfile, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.verify method verifies the integrity of all databases in the
+file specified by the file argument, and optionally outputs the databases'
+key/data pairs to a file stream.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="Db.DB_SALVAGE">Db.DB_SALVAGE</a><dd>Write the key/data pairs from all databases in the file to the file stream
+named in
+the <b>outfile</b> argument. The output format is the same as that
+specified for the <a href="../utility/db_dump.html">db_dump</a> utility and can be used as input for
+the <a href="../utility/db_load.html">db_load</a> utility.
+<p>Because the key/data pairs are output in page order as opposed to the sort
+order used by <a href="../utility/db_dump.html">db_dump</a>, using Db.verify to dump key/data
+pairs normally produces less than optimal loads for Btree databases.
+</dl>
+<p>In addition, the following flags may be set by bitwise inclusively <b>OR</b>'ing them into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_AGGRESSIVE">Db.DB_AGGRESSIVE</a><dd>Output <b>all</b> the key/data pairs in the file that can be found.
+By default, Db.verify does not assume corruption. For example,
+if a key/data pair on a page is marked as deleted, it is not then written
+to the output file. When Db.DB_AGGRESSIVE is specified, corruption
+is assumed, and any key/data pair that can be found is written. In this
+case, key/data pairs that are corrupted or have been deleted may appear
+in the output (even if the file being salvaged is in no way corrupt), and
+the output will almost certainly require editing before being loaded into
+a database.
+<p><dt><a name="Db.DB_NOORDERCHK">Db.DB_NOORDERCHK</a><dd>Skip the database checks for btree and duplicate sort order and for
+hashing.
+<p>The Db.verify method normally verifies that btree keys and duplicate
+items are correctly sorted and hash keys are correctly hashed. If the
+file being verified contains multiple databases using differing sorting
+or hashing algorithms, some of them must necessarily fail database
+verification, as only one sort order or hash function can be specified
+before Db.verify is called. To verify files with multiple
+databases having differing sorting orders or hashing functions, first
+perform verification of the file as a whole by using the
+Db.DB_NOORDERCHK flag, and then individually verify the sort order
+and hashing function for each database in the file using the
+Db.DB_ORDERCHKONLY flag.
+<p><dt><a name="Db.DB_ORDERCHKONLY">Db.DB_ORDERCHKONLY</a><dd>Perform the database checks for btree and duplicate sort order and for
+hashing, skipped by Db.DB_NOORDERCHK.
+<p>When this flag is specified, a <b>database</b> argument should also be
+specified, indicating the database in the physical file which is to be
+checked. This flag is only safe to use on databases that have already
+successfully been verified using Db.verify with the
+Db.DB_NOORDERCHK flag set.
+</dl>
+<p>The database argument must be set to null except when the
+Db.DB_ORDERCHKONLY flag is set.
+<p>The Db.verify method throws an exception that encapsulates a non-zero error value on
+failure, and <a href="../ref/program/errorret.html#DB_VERIFY_BAD">Db.DB_VERIFY_BAD</a> if a database is corrupted. When the
+Db.DB_SALVAGE flag is specified, the <a href="../ref/program/errorret.html#DB_VERIFY_BAD">Db.DB_VERIFY_BAD</a> return
+means that some key/data pairs in the file may not have been successfully
+output.
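+<p>For example, a minimal sketch, assuming <b>db</b> is a Db handle that has
+not yet been opened and using the hypothetical file names <b>access.db</b>
+and <b>salvage.out</b>, might salvage a file's contents as follows:
+<p><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+
+class Salvage {
+    // Verify the databases in access.db and write whatever key/data pairs
+    // can be found to salvage.out in db_dump format.
+    static void salvage(Db db)
+        throws DbException, FileNotFoundException {
+        FileOutputStream out = new FileOutputStream("salvage.out");
+        // The database argument must be null unless Db.DB_ORDERCHKONLY is set.
+        db.verify("access.db", null, out, Db.DB_SALVAGE | Db.DB_AGGRESSIVE);
+    }
+}
+</pre>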
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was initialized using
+<a href="../api_java/env_open.html">DbEnv.open</a> the environment variable <b>DB_HOME</b> may be used
+as the path of the database environment home. Specifically, Db.verify
+is affected by the configuration value DB_DATA_DIR.
+</dl>
+<h1>Errors</h1>
+<p>The Db.verify method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db.verify method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.verify method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbc_class.html b/bdb/docs/api_java/dbc_class.html
new file mode 100644
index 00000000000..61f6b9ec2b6
--- /dev/null
+++ b/bdb/docs/api_java/dbc_class.html
@@ -0,0 +1,49 @@
+<!--$Id: dbc_class.so,v 10.12 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class Dbc extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the Dbc class,
+which provides cursor support for the access methods in Db.
+<p>The Dbc functions are the library interface supporting sequential
+access to the records stored by the access methods of the Berkeley DB library.
+Cursors are created by calling the <a href="../api_java/db_cursor.html">Db.cursor</a> method which returns a
+ Dbc object.
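+<p>A minimal sketch of the cursor life cycle, assuming <b>db</b> is an
+already-open Db handle (the Db.cursor arguments used here are described on
+its own manual page), might be:
+<p><pre>
+import com.sleepycat.db.*;
+
+class CursorLifeCycle {
+    static void example(Db db) throws DbException {
+        // Create a cursor over the open database; no transaction, no flags.
+        Dbc cursor = db.cursor(null, 0);
+        try {
+            // Use Dbc.get, Dbc.put, Dbc.del and so on through the cursor.
+        } finally {
+            // A cursor must be closed when it is no longer needed.
+            cursor.close();
+        }
+    }
+}
+</pre>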
+<h3>Class</h3>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_close.html">Dbc.close</a>,
+<a href="../api_java/dbc_count.html">Dbc.count</a>,
+<a href="../api_java/dbc_del.html">Dbc.del</a>,
+<a href="../api_java/dbc_dup.html">Dbc.dup</a>,
+<a href="../api_java/dbc_get.html">Dbc.get</a>
+and
+<a href="../api_java/dbc_put.html">Dbc.put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbc_close.html b/bdb/docs/api_java/dbc_close.html
new file mode 100644
index 00000000000..c8ad0570296
--- /dev/null
+++ b/bdb/docs/api_java/dbc_close.html
@@ -0,0 +1,67 @@
+<!--$Id: dbc_close.so,v 10.20 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc.close</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void close()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.close method discards the cursor.
+<p>It is possible for the Dbc.close method to return
+<a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">Db.DB_LOCK_DEADLOCK</a>, signaling that any enclosing transaction should
+be aborted. If the application is already intending to abort the
+transaction, this error should be ignored, and the application should
+proceed.
+<p>Once Dbc.close has been called, regardless of its return, the
+cursor handle may not be used again.
+<p>The Dbc.close method throws an exception that encapsulates a non-zero error value on
+failure.
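+<p>For example, a sketch of closing a cursor whose enclosing transaction the
+application already intends to abort (assuming <b>cursor</b> and <b>txn</b>
+are an open Dbc and its enclosing DbTxn) might be:
+<p><pre>
+import com.sleepycat.db.*;
+
+class CloseIgnoringDeadlock {
+    // Close a cursor when its enclosing transaction is about to be aborted
+    // anyway; a deadlock reported by Dbc.close can then be ignored.
+    static void closeAndAbort(Dbc cursor, DbTxn txn) throws DbException {
+        try {
+            cursor.close();
+        } catch (DbDeadlockException dde) {
+            // Ignored: the transaction is being aborted regardless.
+        }
+        txn.abort();
+    }
+}
+</pre>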
+<h1>Errors</h1>
+<p>The Dbc.close method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc.close method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc.close method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.close method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_close.html">Dbc.close</a>,
+<a href="../api_java/dbc_count.html">Dbc.count</a>,
+<a href="../api_java/dbc_del.html">Dbc.del</a>,
+<a href="../api_java/dbc_dup.html">Dbc.dup</a>,
+<a href="../api_java/dbc_get.html">Dbc.get</a>
+and
+<a href="../api_java/dbc_put.html">Dbc.put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbc_count.html b/bdb/docs/api_java/dbc_count.html
new file mode 100644
index 00000000000..324ee148550
--- /dev/null
+++ b/bdb/docs/api_java/dbc_count.html
@@ -0,0 +1,58 @@
+<!--$Id: dbc_count.so,v 10.4 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc.count</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int count(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.count method returns a count of the number of duplicate data
+items for the key referenced by the
+cursor.
+If the underlying database does not support duplicate data items, the call
+will still succeed and a count of 1 will be returned.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>If the <b>cursor</b> argument is not yet initialized, the Dbc.count method throws an exception that encapsulates EINVAL.
+<p>Otherwise, the Dbc.count method throws an exception that encapsulates a non-zero error value on
+failure.
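+<p>For example, a minimal sketch, assuming <b>cursor</b> is an initialized
+Dbc positioned on a key, might be:
+<p><pre>
+import com.sleepycat.db.*;
+
+class DuplicateCount {
+    // Report how many duplicate data items exist for the cursor's current key.
+    static void report(Dbc cursor) throws DbException {
+        int n = cursor.count(0);
+        System.out.println("duplicates for current key: " + n);
+    }
+}
+</pre>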
+<h1>Errors</h1>
+<p>The Dbc.count method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.count method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_close.html">Dbc.close</a>,
+<a href="../api_java/dbc_count.html">Dbc.count</a>,
+<a href="../api_java/dbc_del.html">Dbc.del</a>,
+<a href="../api_java/dbc_dup.html">Dbc.dup</a>,
+<a href="../api_java/dbc_get.html">Dbc.get</a>
+and
+<a href="../api_java/dbc_put.html">Dbc.put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbc_del.html b/bdb/docs/api_java/dbc_del.html
new file mode 100644
index 00000000000..eb4a32362cf
--- /dev/null
+++ b/bdb/docs/api_java/dbc_del.html
@@ -0,0 +1,71 @@
+<!--$Id: dbc_del.so,v 10.23 2000/05/22 20:51:46 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc.del</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int del(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.del method deletes the key/data pair currently referenced by
+the cursor.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The cursor position is unchanged after a delete, and subsequent calls to
+cursor functions expecting the cursor to reference an existing key will
+fail.
+<p>If the element has already been deleted, Dbc.del will return
+<a href="../ref/program/errorret.html#DB_KEYEMPTY">Db.DB_KEYEMPTY</a>.
+<p>If the cursor is not yet initialized, the Dbc.del method throws an exception that encapsulates EINVAL.
+<p>Otherwise, the Dbc.del method throws an exception that encapsulates a non-zero error value on
+failure.
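+<p>For example, a minimal sketch, assuming <b>cursor</b> is an initialized,
+writable Dbc, might delete the pair it currently references as follows:
+<p><pre>
+import com.sleepycat.db.*;
+
+class DeleteCurrent {
+    // Delete the key/data pair the cursor currently references.
+    static void deleteCurrent(Dbc cursor) throws DbException {
+        if (cursor.del(0) == Db.DB_KEYEMPTY) {
+            // The element had already been deleted.
+            System.err.println("current element was already deleted");
+        }
+    }
+}
+</pre>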
+<h1>Errors</h1>
+<p>The Dbc.del method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_java/env_open.html#DB_INIT_CDB">Db.DB_INIT_CDB</a> flag was
+specified to <a href="../api_java/env_open.html">DbEnv.open</a>.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc.del method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc.del method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.del method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_close.html">Dbc.close</a>,
+<a href="../api_java/dbc_count.html">Dbc.count</a>,
+<a href="../api_java/dbc_del.html">Dbc.del</a>,
+<a href="../api_java/dbc_dup.html">Dbc.dup</a>,
+<a href="../api_java/dbc_get.html">Dbc.get</a>
+and
+<a href="../api_java/dbc_put.html">Dbc.put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbc_dup.html b/bdb/docs/api_java/dbc_dup.html
new file mode 100644
index 00000000000..f02afbb7350
--- /dev/null
+++ b/bdb/docs/api_java/dbc_dup.html
@@ -0,0 +1,75 @@
+<!--$Id: dbc_dup.so,v 10.8 2000/03/17 01:53:58 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.dup</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc.dup</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public Dbc dup(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.dup method creates a new cursor that uses the same transaction
+and locker ID as the original cursor. This is useful when an application
+is using locking and requires two or more cursors in the same thread of
+control.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="Db.DB_POSITION">Db.DB_POSITION</a><dd>The newly created cursor is initialized to reference the same position
+in the database as the original cursor and hold the same locks. If the
+Db.DB_POSITION flag is not specified, then the created cursor is
+uninitialized and will behave like a cursor newly created using
+<a href="../api_java/db_cursor.html">Db.cursor</a>.
+</dl>
+<p>When using the Berkeley DB Concurrent Data Store product, there can be only one active write cursor
+at a time. For this reason, attempting to duplicate a cursor for which
+the <a href="../api_java/db_cursor.html#DB_WRITECURSOR">Db.DB_WRITECURSOR</a> flag was specified during creation will return
+an error.
+<p>If the <b>cursor</b> argument is not yet initialized, the Dbc.dup method throws an exception that encapsulates EINVAL.
+<p>Otherwise, the Dbc.dup method throws an exception that encapsulates a non-zero error value on
+failure.
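+<p>For example, a minimal sketch, assuming <b>cursor</b> is an initialized
+Dbc, might duplicate it at the same position as follows:
+<p><pre>
+import com.sleepycat.db.*;
+
+class DuplicateCursor {
+    // Create a second cursor at the same position, sharing the original
+    // cursor's transaction and locker ID.
+    static Dbc duplicateAtPosition(Dbc cursor) throws DbException {
+        return cursor.dup(Db.DB_POSITION);
+    }
+}
+</pre>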
+<h1>Errors</h1>
+<p>The Dbc.dup method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>cursor</b> argument was created using the
+<a href="../api_java/db_cursor.html#DB_WRITECURSOR">Db.DB_WRITECURSOR</a> flag in the Berkeley DB Concurrent Data Store product.
+</dl>
+<p>The Dbc.dup method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.dup method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_close.html">Dbc.close</a>,
+<a href="../api_java/dbc_count.html">Dbc.count</a>,
+<a href="../api_java/dbc_del.html">Dbc.del</a>,
+<a href="../api_java/dbc_dup.html">Dbc.dup</a>,
+<a href="../api_java/dbc_get.html">Dbc.get</a>
+and
+<a href="../api_java/dbc_put.html">Dbc.put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbc_get.html b/bdb/docs/api_java/dbc_get.html
new file mode 100644
index 00000000000..8213c5b51fc
--- /dev/null
+++ b/bdb/docs/api_java/dbc_get.html
@@ -0,0 +1,168 @@
+<!--$Id: dbc_get.so,v 10.46 2001/01/19 17:29:46 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc.get</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get(Dbt key, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.get method retrieves key/data pairs from the database. The
+byte array and length of the key
+are returned in the object referenced by <b>key</b> (except for the case
+of the Db.DB_SET flag where the <b>key</b> object is unchanged),
+and the byte array and length of
+the data are returned in the object referenced by <b>data</b>.
+<p>Modifications to the database during a sequential scan will be reflected
+in the scan; that is, records inserted behind a cursor will not be returned,
+while records inserted in front of a cursor will be returned.
+<p>In Queue and Recno databases, missing entries (i.e., entries that were
+never explicitly created or that were created and then deleted), will be
+skipped during a sequential scan.
+<p>If multiple threads or processes insert items into the same database file
+without using locking, the results are undefined.
+For more detail,
+see <a href="../ref/am/stability.html">Cursor stability</a>.
+<p>The <b>flags</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_CURRENT">Db.DB_CURRENT</a><dd>Return the key/data pair currently referenced by the cursor.
+<p>If the cursor key/data pair was deleted, Dbc.get will return
+<a href="../ref/program/errorret.html#DB_KEYEMPTY">Db.DB_KEYEMPTY</a>.
+<p>If the cursor is not yet initialized, the Dbc.get method throws an exception that encapsulates EINVAL.
+<p><dt><a name="Db.DB_FIRST">Db.DB_FIRST</a>, <a name="Db.DB_LAST">Db.DB_LAST</a><dd>The cursor is set to reference the first (last) key/data pair of the
+database, and that pair is returned. In the presence of duplicate key
+values, the first (last) data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, Dbc.get using the
+Db.DB_FIRST (Db.DB_LAST) flags will ignore any keys that exist
+but were never explicitly created by the application or were created and
+later deleted.
+<p>If the database is empty, Dbc.get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_GET_BOTH">Db.DB_GET_BOTH</a><dd>The Db.DB_GET_BOTH flag is identical to the Db.DB_SET flag,
+except that both the key and the data arguments must be matched by the
+key and data item in the database.
+<p><dt><a name="Db.DB_GET_RECNO">Db.DB_GET_RECNO</a><dd>Return the record number associated with the cursor. The record number
+will be returned in <b>data</b> as described in <a href="../api_java/dbt_class.html">Dbt</a>. The
+<b>key</b> parameter is ignored.
+<p>For Db.DB_GET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the <a href="../api_java/db_set_flags.html#DB_RECNUM">Db.DB_RECNUM</a>
+flag.
+<p><dt><a name="Db.DB_JOIN_ITEM">Db.DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup key for
+the primary database, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+<p>For Db.DB_JOIN_ITEM to be specified, the underlying cursor must have
+been returned from the <a href="../api_java/db_join.html">Db.join</a> method.
+<p><dt><a name="Db.DB_NEXT">Db.DB_NEXT</a>, <a name="Db.DB_PREV">Db.DB_PREV</a><dd>If the cursor is not yet initialized, Db.DB_NEXT (Db.DB_PREV)
+is identical to Db.DB_FIRST (Db.DB_LAST). Otherwise, the cursor
+is moved to the next (previous) key/data pair of the database, and that
+pair is returned. In the presence of duplicate key values, the value of
+the key may not change.
+<p>If the database is a Queue or Recno database, Dbc.get using the
+Db.DB_NEXT (Db.DB_PREV) flag will skip any keys that exist but
+were never explicitly created by the application or were created and later
+deleted.
+<p>If the cursor is already on the last (first) record in the database,
+Dbc.get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_NEXT_DUP">Db.DB_NEXT_DUP</a><dd>If the next key/data pair of the database is a duplicate record for the
+current key/data pair, the cursor is moved to the next key/data pair of
+the database, and that pair is returned. Otherwise, Dbc.get will
+return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p>If the cursor is not yet initialized, the Dbc.get method throws an exception that encapsulates EINVAL.
+<p><dt><a name="Db.DB_NEXT_NODUP">Db.DB_NEXT_NODUP</a>, <a name="Db.DB_PREV_NODUP">Db.DB_PREV_NODUP</a><dd>If the cursor is not yet initialized, Db.DB_NEXT_NODUP
+(Db.DB_PREV_NODUP) is identical to Db.DB_FIRST
+(Db.DB_LAST). Otherwise, the cursor is moved to the next (previous)
+non-duplicate key/data pair of the database, and that pair is returned.
+<p>If the database is a Queue or Recno database, Dbc.get using the
+Db.DB_NEXT_NODUP (Db.DB_PREV_NODUP) flags will ignore any keys
+that exist but were never explicitly created by the application or were
+created and later deleted.
+<p>If no non-duplicate key/data pairs occur after (before) the cursor
+position in the database, Dbc.get will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_SET">Db.DB_SET</a><dd>Move the cursor to the specified key/data pair of the database, and
+return the datum associated with the given key.
+<p>In the presence of duplicate key values, Dbc.get will return the
+first data item for the given key.
+<p>If the database is a Queue or Recno database and the requested key exists,
+but was never explicitly created by the application or was later deleted,
+Dbc.get will return <a href="../ref/program/errorret.html#DB_KEYEMPTY">Db.DB_KEYEMPTY</a>.
+<p>If no matching keys are found, Dbc.get will return
+<a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_SET_RANGE">Db.DB_SET_RANGE</a><dd>The Db.DB_SET_RANGE flag is identical to the Db.DB_SET flag,
+except that the key is returned as well as the data item, and, in the case
+of the Btree access method, the returned key/data pair is the smallest
+key greater than or equal to the specified key (as determined by the
+comparison method), permitting partial key matches and range
+searches.
+<p><dt><a name="Db.DB_SET_RECNO">Db.DB_SET_RECNO</a><dd>Move the cursor to the specific numbered record of the database, and
+return the associated key/data pair. The <b>data</b> field of the
+specified <b>key</b>
+must be a byte array containing a record number, as described in
+<a href="../api_java/dbt_class.html">Dbt</a>. This determines the record to be retrieved.
+<p>For Db.DB_SET_RECNO to be specified, the underlying database must be
+of type Btree and it must have been created with the <a href="../api_java/db_set_flags.html#DB_RECNUM">Db.DB_RECNUM</a>
+flag.
+</dl>
+<p>In addition, the following flag may be set by bitwise inclusively <b>OR</b>'ing it into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_RMW">Db.DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring
+a read lock for the same item, in its own read-modify-write cycle, will
+not result in deadlock.
+</dl>
+<p>Otherwise, the Dbc.get method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>If Dbc.get fails for any reason, the state of the cursor will be
+unchanged.
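+<p>For example, a minimal sketch of a sequential scan, assuming <b>cursor</b>
+is a newly created Dbc (Dbt construction is described on the Dbt manual
+page), might be:
+<p><pre>
+import com.sleepycat.db.*;
+
+class SequentialScan {
+    // Walk every key/data pair in the database and count the pairs.
+    static int countPairs(Dbc cursor) throws DbException {
+        Dbt key = new Dbt();
+        Dbt data = new Dbt();
+        int pairs = 0;
+        // Db.DB_NEXT behaves like Db.DB_FIRST for an uninitialized cursor,
+        // and Dbc.get returns Db.DB_NOTFOUND once the scan is complete.
+        while (cursor.get(key, data, Db.DB_NEXT) != Db.DB_NOTFOUND)
+            ++pairs;
+        return pairs;
+    }
+}
+</pre>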
+<h1>Errors</h1>
+<p>The Dbc.get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cursor was not currently initialized.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc.get method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>If the requested item could not be returned due to insufficient memory,
+the Dbc.get method will fail and
+throw a <a href="../api_java/mem_class.html">DbMemoryException</a> exception.
+<p>The Dbc.get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.get method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_close.html">Dbc.close</a>,
+<a href="../api_java/dbc_count.html">Dbc.count</a>,
+<a href="../api_java/dbc_del.html">Dbc.del</a>,
+<a href="../api_java/dbc_dup.html">Dbc.dup</a>,
+<a href="../api_java/dbc_get.html">Dbc.get</a>
+and
+<a href="../api_java/dbc_put.html">Dbc.put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbc_put.html b/bdb/docs/api_java/dbc_put.html
new file mode 100644
index 00000000000..a2969e15956
--- /dev/null
+++ b/bdb/docs/api_java/dbc_put.html
@@ -0,0 +1,157 @@
+<!--$Id: dbc_put.so,v 10.33 2000/12/04 17:02:01 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbc.put</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void put(Dbt key, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.put method stores key/data pairs into the database.
+<p>The <b>flags</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_AFTER">Db.DB_AFTER</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key referenced by the cursor.
+The new element appears immediately after the current cursor position.
+It is an error to specify Db.DB_AFTER if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+Db.DB_AFTER if the underlying Recno database was not created with
+the <a href="../api_java/db_set_flags.html#DB_RENUMBER">Db.DB_RENUMBER</a> flag. If the <a href="../api_java/db_set_flags.html#DB_RENUMBER">Db.DB_RENUMBER</a> flag was
+specified, a new key is created, all records after the inserted item
+are automatically renumbered, and the key of the new record is returned
+in the structure referenced by the parameter <b>key</b>. The initial
+value of the <b>key</b> parameter is ignored. See <a href="../api_java/db_open.html">Db.open</a>
+for more information.
+<p>The Db.DB_AFTER flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc.put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or a duplicate sort function has been
+specified, the Dbc.put function will return EINVAL.
+<p><dt><a name="Db.DB_BEFORE">Db.DB_BEFORE</a><dd>In the case of the Btree and Hash access methods, insert the data element
+as a duplicate element of the key referenced by the cursor. The new
+element appears immediately before the current cursor position. It is
+an error to specify Db.DB_BEFORE if the underlying Btree or Hash
+database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+Db.DB_BEFORE if the underlying Recno database was not created with
+the <a href="../api_java/db_set_flags.html#DB_RENUMBER">Db.DB_RENUMBER</a> flag. If the <a href="../api_java/db_set_flags.html#DB_RENUMBER">Db.DB_RENUMBER</a> flag was
+specified, a new key is created, the current record and all records
+after it are automatically renumbered, and the key of the new record is
+returned in the structure referenced by the parameter <b>key</b>. The
+initial value of the <b>key</b> parameter is ignored. See
+<a href="../api_java/db_open.html">Db.open</a> for more information.
+<p>The Db.DB_BEFORE flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc.put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or a duplicate sort function has been
+specified, Dbc.put will return EINVAL.
+<p><dt><a name="Db.DB_CURRENT">Db.DB_CURRENT</a><dd>Overwrite the data of the key/data pair referenced by the cursor with the
+specified data item. The <b>key</b> parameter is ignored.
+<p>If a duplicate sort function has been specified and the data item of the
+current referenced key/data pair does not compare equally to the <b>data</b>
+parameter, Dbc.put will return EINVAL.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc.put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+If the underlying access method is Btree, Queue or Recno, the operation
+will succeed.
+<p>If the cursor is not yet initialized, Dbc.put will return EINVAL.
+<p><dt><a name="Db.DB_KEYFIRST">Db.DB_KEYFIRST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database and no duplicate sort function
+has been specified, the inserted data item is added as the first of the
+data items for that key.
+<p>The Db.DB_KEYFIRST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="Db.DB_KEYLAST">Db.DB_KEYLAST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database, and no duplicate sort
+function has been specified, the inserted data item is added as the last
+of the data items for that key.
+<p>The Db.DB_KEYLAST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="Db.DB_NODUPDATA">Db.DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database unless it already exists in the database.
+If the key/data pair already appears in the database, <a href="../api_java/dbc_put.html#DB_KEYEXIST">Db.DB_KEYEXIST</a>
+is returned. The Db.DB_NODUPDATA flag may only be specified if
+the underlying database has been configured to support sorted duplicate
+data items.
+<p>The Db.DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+</dl>
+<p>Otherwise, the Dbc.put method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>If Dbc.put fails for any reason, the state of the cursor will be
+unchanged. If Dbc.put succeeds and an item is inserted into the
+database, the cursor is always positioned to reference the newly inserted
+item.
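+<p>As a brief, hypothetical sketch (the <b>db</b> handle, the Db.cursor
+call, and the Dbc.close call are assumed from the companion Db and Dbc
+manual pages, not defined here), the following code inserts a key/data
+pair through a cursor using the Db.DB_KEYFIRST flag:
+<pre>
+import com.sleepycat.db.*;
+
+public class DbcPutExample
+{
+    // Insert a key/data pair through a cursor.  With sorted duplicates
+    // configured, Db.DB_KEYFIRST places the data item in its sorted
+    // location among the existing duplicates for the key.
+    static void addPair(Db db, byte[] key, byte[] data)
+        throws DbException
+    {
+        Dbc cursor = db.cursor(null, 0);    // assumed Db.cursor signature
+        try {
+            cursor.put(new Dbt(key), new Dbt(data), Db.DB_KEYFIRST);
+        } finally {
+            cursor.close();                 // assumed Dbc.close signature
+        }
+    }
+}
+</pre>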
+<h1>Errors</h1>
+<p>The Dbc.put method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The Db.DB_BEFORE or Db.DB_AFTER flags were specified, and the
+underlying access method is Queue.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_java/env_open.html#DB_INIT_CDB">Db.DB_INIT_CDB</a> flag was
+specified to <a href="../api_java/env_open.html">DbEnv.open</a>.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc.put method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc.put method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.put method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_close.html">Dbc.close</a>,
+<a href="../api_java/dbc_count.html">Dbc.count</a>,
+<a href="../api_java/dbc_del.html">Dbc.del</a>,
+<a href="../api_java/dbc_dup.html">Dbc.dup</a>,
+<a href="../api_java/dbc_get.html">Dbc.get</a>
+and
+<a href="../api_java/dbc_put.html">Dbc.put</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbenv_class.html b/bdb/docs/api_java/dbenv_class.html
new file mode 100644
index 00000000000..f610cf67015
--- /dev/null
+++ b/bdb/docs/api_java/dbenv_class.html
@@ -0,0 +1,65 @@
+<!--$Id: dbenv_class.so,v 10.20 2000/07/27 13:10:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbEnv extends Object
+{
+ public DbEnv(int flags);
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the DbEnv
+class, which is the center of the Berkeley DB environment.
+<p>The following <b>flags</b> value may be specified:
+<p><dl compact>
+<p><dt><a name="Db.DB_CLIENT">Db.DB_CLIENT</a><dd>Create a client environment to connect to a server.
+<p>The Db.DB_CLIENT flag indicates to the system that this environment
+is remote on a server. The use of this flag causes the environment
+methods to use functions that call a server instead of local functions.
+Prior to making any environment or database method calls, the
+application must call the <a href="../api_java/env_set_server.html">DbEnv.set_server</a> function to establish
+the connection to the server.
+</dl>
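+<p>As a minimal, hypothetical sketch, the following program creates a
+DbEnv handle, configures it, opens a local (non-client) environment, and
+closes it; the home directory and cache size are placeholders:
+<pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class DbEnvExample
+{
+    public static void main(String[] args)
+        throws DbException, FileNotFoundException
+    {
+        // Create the handle first, configure it, then open the environment.
+        DbEnv dbenv = new DbEnv(0);
+        dbenv.set_cachesize(0, 64 * 1024, 1);   // placeholder cache size
+        dbenv.open("/tmp/dbenv_example",        // placeholder home directory
+            Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+        dbenv.close(0);
+    }
+}
+</pre>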
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/dbt_class.html b/bdb/docs/api_java/dbt_class.html
new file mode 100644
index 00000000000..1df9cbb59d1
--- /dev/null
+++ b/bdb/docs/api_java/dbt_class.html
@@ -0,0 +1,227 @@
+<!--$Id: dbt_class.so,v 10.33 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Dbt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>Dbt</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class Dbt extends Object
+{
+ public Dbt(byte[] data);
+ public Dbt(byte[] data, int off, int len);
+<p>
+ public void set_data(byte[] data);
+ public byte[] get_data();
+<p>
+ public void set_offset(int off);
+ public int get_offset();
+<p>
+ public int get_size();
+ public void set_size(int size);
+<p>
+ public int get_ulen();
+ public void set_ulen(int ulen);
+<p>
+ public int get_dlen();
+ public void set_dlen(int dlen);
+<p>
+ public int get_doff();
+ public void set_doff(int doff);
+<p>
+ public int get_flags();
+ public void set_flags(int flags);
+<p>
+ public void set_recno_key_data(int recno);
+ public int get_recno_key_data();
+}
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the Dbt class,
+used to encode keys and data items in a database.
+ <a name="3"><!--meow--></a>
+<h3>Key/Data Pairs</h3>
+<p>Storage and retrieval for the Db access methods are based on
+key/data pairs. Both key and data items are represented by Dbt
+objects. Key and data byte strings may reference strings of zero length
+up to strings of essentially unlimited length. See
+<a href="../ref/program/dbsizes.html">Database limits</a> for more
+information.
+<p>The Dbt class provides simple access to an underlying data structure,
+whose elements can be examined or changed using the <b>set_</b> or
+<b>get_</b> methods. The remainder of the manual page sometimes refers
+to these accesses using the underlying name, e.g., simply <b>ulen</b>
+instead of Dbt.get_ulen and Dbt.set_ulen.
+Dbt can be subclassed, providing a way to associate
+with it additional data, or references to other structures.
+<p>The constructors set all elements of the underlying structure to zero.
+The constructor with one argument has the effect of setting all elements
+to zero except for the specified <b>data</b> and <b>size</b> elements.
+The constructor with three arguments has the additional effect of
+using only the portion of the array specified by the offset and size.
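+<p>For illustration, a brief sketch of the two constructors (the byte
+values are arbitrary placeholders):
+<pre>
+import com.sleepycat.db.*;
+
+public class DbtConstructorExample
+{
+    public static void main(String[] args)
+    {
+        byte[] bytes = "some application data".getBytes();
+
+        // One-argument constructor: data and size are set from the array.
+        Dbt whole = new Dbt(bytes);
+
+        // Three-argument constructor: only 10 bytes, starting at offset 5,
+        // of the array are used.
+        Dbt part = new Dbt(bytes, 5, 10);
+
+        System.out.println(whole.get_size());      // length of the array
+        System.out.println(part.get_offset());     // 5
+        System.out.println(part.get_size());       // 10
+    }
+}
+</pre>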
+<p>In the case where the <b>flags</b> structure element is 0, when being
+provided a key or data item by the application, the Berkeley DB package expects
+the <b>data</b> object to be set to a byte array of <b>size</b> bytes.
+When returning a key/data item to the application, the Berkeley DB package will
+store into the <b>data</b> object a byte array of <b>size</b> bytes.
+During a get operation, one of the Db.DB_DBT_MALLOC,
+Db.DB_DBT_REALLOC or Db.DB_DBT_USERMEM flags must be
+specified.
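+<p>As a hypothetical sketch of a get operation (the open <b>db</b> handle
+and the Db.get(DbTxn, Dbt, Dbt, int) call, assumed here to return zero on
+success, are taken from the companion Db.get manual page):
+<pre>
+import com.sleepycat.db.*;
+
+public class DbtGetExample
+{
+    // Retrieve the data item stored for key, letting Berkeley DB allocate
+    // the returned byte array.
+    static byte[] fetch(Db db, byte[] key)
+        throws DbException
+    {
+        Dbt keyDbt = new Dbt(key);
+        Dbt dataDbt = new Dbt(new byte[0]);
+        // One of DB_DBT_MALLOC, DB_DBT_REALLOC or DB_DBT_USERMEM must be
+        // set on a Dbt that Berkeley DB fills in during a get operation.
+        dataDbt.set_flags(Db.DB_DBT_MALLOC);
+        if (db.get(null, keyDbt, dataDbt, 0) != 0)  // assumed Db.get signature
+            return null;
+        return dataDbt.get_data();
+    }
+}
+</pre>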
+<p>The elements of the structure underlying the Dbt class are defined as follows:
+<p><dl compact>
+<p><dt>byte[] <a name="data">data</a>;<dd>A byte array containing the data.
+This element is accessed using Dbt.get_data and
+Dbt.set_data, and may be initialized using one
+of the constructors.
+Note that the array data is not copied immediately, but only when the
+Dbt is used.
+<p><dt>int offset;<dd>The number of bytes offset into the <b>data</b> array to determine the
+portion of the array actually used.
+This element is accessed using Dbt.get_offset and
+Dbt.set_offset.
+<p><dt>int size;<dd>The length of <b>data</b>, in bytes.
+This element is accessed using Dbt.get_size and
+Dbt.set_size, and may be initialized
+implicitly to the length of the data array with the constructor having
+one argument.
+<p><dt>int ulen;<dd>The size of the user's buffer (referenced by <b>data</b>), in bytes.
+This location is not written by the Db methods.
+<p>Note that applications can determine the length of a record by setting
+the <b>ulen</b> to 0 and checking the return value found in <b>size</b>.
+See the Db.DB_DBT_USERMEM flag for more information.
+<p>This element is accessed using
+Dbt.get_ulen and Dbt.set_ulen.
+<p><dt>int dlen;<dd>The length of the partial record being read or written by the application,
+in bytes.
+See the Db.DB_DBT_PARTIAL flag for more information.
+This element is accessed using
+Dbt.get_dlen, and Dbt.set_dlen.
+<p><dt>int doff;<dd>The offset of the partial record being read or written by the application,
+in bytes.
+See the Db.DB_DBT_PARTIAL flag for more information.
+This element is accessed using
+Dbt.get_doff and Dbt.set_doff.
+<p><dt>int flags;<dd>This element is accessed using Dbt.get_flags and
+Dbt.set_flags.
+<p>The <b>flags</b> value must be set by bitwise inclusively <b>OR</b>'ing together one or more of the
+following values.
+<p><dl compact>
+<p><dt><a name="Db.DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a><dd>When this flag is set Berkeley DB will allocate memory for the returned key
+or data item
+and return a byte array containing the data in the <b>data</b> field of
+the key or data Dbt object.
+<p>If Db.DB_DBT_MALLOC is specified, Berkeley DB allocates a properly sized
+byte array to contain the data. This can be convenient if you know little
+about the nature of the data, specifically the size of data in the
+database. However, if your application makes repeated calls to retrieve
+keys or data, you may notice increased garbage collection due to this
+allocation. If you know the maximum size of data you are retrieving, you
+might decrease the memory burden and speed your application by allocating
+your own byte array and using Db.DB_DBT_USERMEM. Even if you don't
+know the maximum size, you can use this option and reallocate your array
+whenever your retrieval API call
+throws a <a href="../api_java/mem_class.html">DbMemoryException</a>.
+<p>It is an error to specify more than one of Db.DB_DBT_MALLOC,
+Db.DB_DBT_REALLOC and Db.DB_DBT_USERMEM.
+<p><dt><a name="Db.DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a><dd>When this flag is set Berkeley DB
+will return the data in the <b>data</b> field of the key or data
+Dbt object, reusing the existing byte array if it is large
+enough, or allocating a new one of the appropriate size.
+<p>It is an error to specify more than one of Db.DB_DBT_MALLOC,
+Db.DB_DBT_REALLOC and Db.DB_DBT_USERMEM.
+<p><dt><a name="Db.DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a><dd>The <b>data</b> field of the key or data object must reference memory
+that is at least <b>ulen</b> bytes in length. If the length of the
+requested item is less than or equal to that number of bytes, the item
+is copied into the memory referenced by the <b>data</b> field.
+Otherwise, the <b>size</b> field is set to the length needed for the
+requested item, and the error ENOMEM is returned.
+<p>If Db.DB_DBT_USERMEM is specified, the data field of the Dbt
+must be set to an appropriately sized byte array.
+<p>It is an error to specify more than one of Db.DB_DBT_MALLOC,
+Db.DB_DBT_REALLOC and Db.DB_DBT_USERMEM.
+</dl>
+<p>If Db.DB_DBT_MALLOC or Db.DB_DBT_REALLOC is specified, Berkeley DB
+allocates a properly sized byte array to contain the data. This can be
+convenient if you know little about the nature of the data, specifically
+the size of data in the database. However, if your application makes
+repeated calls to retrieve keys or data, you may notice increased garbage
+collection due to this allocation. If you know the maximum size of data
+you are retrieving, you might decrease the memory burden and speed your
+application by allocating your own byte array and using
+Db.DB_DBT_USERMEM. Even if you don't know the maximum size, you can
+use this option and reallocate your array whenever your retrieval API call
+throws a <a href="../api_java/mem_class.html">DbMemoryException</a>.
+<p><dl compact>
+<p><dt><a name="Db.DB_DBT_PARTIAL">Db.DB_DBT_PARTIAL</a><dd>Do partial retrieval or storage of an item. If the calling application
+is doing a get, the <b>dlen</b> bytes starting <b>doff</b> bytes from
+the beginning of the retrieved data record are returned as if they
+comprised the entire record. If any or all of the specified bytes do
+not exist in the record, the get is successful and the existing bytes
+or nul bytes are returned.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial retrieval was done using a Dbt having a <b>dlen</b>
+field of 20 and a <b>doff</b> field of 85, the get call would succeed,
+the <b>data</b> field would reference the last 15 bytes of the record,
+and the <b>size</b> field would be set to 15.
+<p>If the calling application is doing a put, the <b>dlen</b> bytes starting
+<b>doff</b> bytes from the beginning of the specified key's data record
+are replaced by the data specified by the <b>data</b> and <b>size</b>
+objects.
+If <b>dlen</b> is smaller than <b>size</b>, the record will grow, and if
+<b>dlen</b> is larger than <b>size</b>, the record will shrink.
+If the specified bytes do not exist, the record will be extended using nul
+bytes as necessary, and the put call will succeed.
+<p>It is an error to attempt a partial put using the <a href="../api_java/db_put.html">Db.put</a>
+method in a database that supports duplicate records.
+Partial puts in databases supporting duplicate records must be done
+using a <a href="../api_java/dbc_class.html">Dbc</a> method.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+<b>size</b> values in Queue or Recno databases with fixed-length records.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial put was done using a Dbt having a <b>dlen</b>
+field of 20, a <b>doff</b> field of 85, and a <b>size</b> field of 30,
+the resulting record would be 115 bytes in length, where the last 30
+bytes would be those specified by the put call.
+</dl>
+</dl>
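+<p>The following hypothetical sketch retrieves only part of a stored data
+item using the Db.DB_DBT_PARTIAL flag described above (the open <b>db</b>
+handle and the Db.get call are assumed from the companion Db.get manual
+page):
+<pre>
+import com.sleepycat.db.*;
+
+public class DbtPartialExample
+{
+    // Return at most dlen bytes, starting doff bytes into the data item
+    // stored for key, e.g., doff 85 and dlen 20 in the example above.
+    static byte[] fetchSlice(Db db, byte[] key, int doff, int dlen)
+        throws DbException
+    {
+        Dbt keyDbt = new Dbt(key);
+        Dbt dataDbt = new Dbt(new byte[0]);
+        dataDbt.set_flags(Db.DB_DBT_MALLOC | Db.DB_DBT_PARTIAL);
+        dataDbt.set_doff(doff);
+        dataDbt.set_dlen(dlen);
+        db.get(null, keyDbt, dataDbt, 0);           // assumed Db.get signature
+        return dataDbt.get_data();
+    }
+}
+</pre>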
+<p>Although Java normally maintains proper alignment of byte arrays, the
+set_offset method can be used to specify unaligned addresses. Unaligned
+address accesses that are not supported by the underlying hardware may be
+reported as an exception, or may stop the running Java program.
+ <a name="4"><!--meow--></a>
+<h3>Logical Record Numbers</h3>
+<p>In all cases for the Queue and Recno access methods, and when calling the
+<a href="../api_java/db_get.html">Db.get</a> and <a href="../api_java/dbc_get.html">Dbc.get</a> functions with the
+<a href="../api_java/db_get.html#DB_SET_RECNO">Db.DB_SET_RECNO</a> flag specified, the <b>data</b>
+field of the key must be a four byte array, large enough to store an int.
+The Dbt.set_recno_key_data method can be used to set the value of
+the array. An int is a 32-bit type, which limits the number of logical
+records in a Queue or Recno database, and the maximum logical record
+which may be directly retrieved from a Btree database, to 4,294,967,296.
+The <b>size</b> field of the key should be the size of that type, i.e., 4.
+<p>Logical record numbers are 1-based, not 0-based, i.e., the first record
+in the database is record number 1.
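+<p>For illustration, a small sketch that builds a logical record number
+key as described above:
+<pre>
+import com.sleepycat.db.*;
+
+public class RecnoKeyExample
+{
+    // Build a key Dbt for the given logical record number: a four-byte
+    // array whose value is set with Dbt.set_recno_key_data.
+    static Dbt recnoKey(int recno)
+    {
+        Dbt key = new Dbt(new byte[4]);
+        key.set_recno_key_data(recno);      // record numbers are 1-based
+        key.set_size(4);                    // size of the 32-bit type
+        return key;
+    }
+}
+</pre>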
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/deadlock_class.html b/bdb/docs/api_java/deadlock_class.html
new file mode 100644
index 00000000000..b41c5649dcf
--- /dev/null
+++ b/bdb/docs/api_java/deadlock_class.html
@@ -0,0 +1,47 @@
+<!--$Id: deadlock_class.so,v 10.6 2000/09/21 19:58:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbDeadlockException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbDeadlockException</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbDeadlockException extends DbException { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbDeadlockException class and
+how it is used by the various Db* classes.
+<p>A DbDeadlockException is thrown when multiple threads competing
+for a lock are deadlocked. One of the threads' transactions is selected
+for termination, and a DbDeadlockException is thrown to that thread.
+<p>See <a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a> for more information.
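+<p>As a hypothetical sketch of the usual handling, the operation chosen to
+resolve the deadlock is simply retried; doWork() is a placeholder for the
+application's own database operations, and a transactional application
+would also abort its transaction before retrying:
+<pre>
+import com.sleepycat.db.*;
+
+public class DeadlockRetryExample
+{
+    static void runWithRetry()
+        throws DbException
+    {
+        while (true) {
+            try {
+                doWork();
+                return;
+            } catch (DbDeadlockException dde) {
+                // This thread was selected to break the deadlock; retry.
+            }
+        }
+    }
+
+    static void doWork()
+        throws DbException
+    {
+        // Placeholder for the application's database operations.
+    }
+}
+</pre>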
+<h3>Class</h3>
+<a href="../api_java/except_class.html">DbException</a>
+<h1>See Also</h1>
+<a href="../api_java/get_errno.html">DbException.get_errno</a>,
+<a href="../api_java/deadlock_class.html">DbDeadlockException</a>,
+<a href="../api_java/mem_class.html">DbMemoryException</a>
+and
+<a href="../api_java/runrec_class.html">DbRunRecoveryException</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_close.html b/bdb/docs/api_java/env_close.html
new file mode 100644
index 00000000000..9650c83ed85
--- /dev/null
+++ b/bdb/docs/api_java/env_close.html
@@ -0,0 +1,82 @@
+<!--$Id: env_close.so,v 10.21 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.close</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void close(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.close method closes the Berkeley DB environment, freeing any
+allocated resources and closing any underlying subsystems.
+<p>Calling DbEnv.close does not imply closing any databases that were
+opened in the environment.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Where the environment was initialized with the <a href="../api_java/env_open.html#DB_INIT_LOCK">Db.DB_INIT_LOCK</a> flag,
+calling DbEnv.close does not release any locks still held by the
+closing process, providing functionality for long-lived locks.
+<p>Where the environment was initialized with the <a href="../api_java/env_open.html#DB_INIT_MPOOL">Db.DB_INIT_MPOOL</a>
+flag, calling DbEnv.close implies calls to <a href="../api_java/memp_fclose.html">DbMpoolFile.close</a> for
+any remaining open files in the memory pool that were returned to this
+process by calls to <a href="../api_java/memp_fopen.html">DbMpoolFile.open</a>. It does not imply a call to
+<a href="../api_java/memp_fsync.html">DbMpoolFile.sync</a> for those files.
+<p>Where the environment was initialized with the <a href="../api_java/env_open.html#DB_INIT_TXN">Db.DB_INIT_TXN</a> flag,
+calling DbEnv.close aborts any uncommitted transactions.
+(Applications should not depend on this behavior. If the process has
+already closed a database handle that is necessary to abort an
+uncommitted transaction, the Berkeley DB environment must then require that
+recovery be run before further operations are done, since once a
+transaction exists that cannot be committed or aborted, no future
+checkpoint can ever succeed.)
+<p>In multi-threaded applications, only a single thread may call
+DbEnv.close.
+<p>Once DbEnv.close has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DbEnv.close method throws an exception that encapsulates a non-zero error value on
+failure.
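+<p>As a brief, hypothetical sketch (the home directory is a placeholder),
+the environment is closed in a finally block so it is released even if the
+application's work fails:
+<pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class EnvCloseExample
+{
+    public static void main(String[] args)
+        throws DbException, FileNotFoundException
+    {
+        DbEnv dbenv = new DbEnv(0);
+        dbenv.open("/tmp/env_close_example",    // placeholder home directory
+            Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+        try {
+            // ... application work using the environment ...
+        } finally {
+            dbenv.close(0);     // the flags parameter must be 0
+        }
+    }
+}
+</pre>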
+<h1>Errors</h1>
+<p>The DbEnv.close method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.close method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_open.html b/bdb/docs/api_java/env_open.html
new file mode 100644
index 00000000000..3a1c2503633
--- /dev/null
+++ b/bdb/docs/api_java/env_open.html
@@ -0,0 +1,212 @@
+<!--$Id: env_open.so,v 10.61 2000/12/01 15:50:31 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.open</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void open(String db_home, int flags, int mode)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.open method is the interface for opening the Berkeley DB
+environment. It provides a structure for creating a consistent
+environment for processes using one or more of the features of Berkeley DB.
+<p>The <b>db_home</b> argument to DbEnv.open (and file name
+resolution in general) is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>The <b>flags</b> argument specifies the subsystems that are initialized
+and how the application's environment affects Berkeley DB file naming, among
+other things.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p>As there are a large number of flags that can be specified, they have been
+grouped together by functionality. The first group of flags indicate
+which of the Berkeley DB subsystems should be initialized:
+<p><dl compact>
+<p><dt><a name="Db.DB_JOINENV">Db.DB_JOINENV</a><dd>Join an existing environment. This option allows applications to
+join an existing environment without knowing which Berkeley DB subsystems
+the environment supports.
+<p><dt><a name="Db.DB_INIT_CDB">Db.DB_INIT_CDB</a><dd>Initialize locking for the <a href="../ref/cam/intro.html">Berkeley DB Concurrent Data Store</a>
+product. In this mode, Berkeley DB provides multiple reader/single writer
+access. The only other subsystem that should be specified with the
+Db.DB_INIT_CDB flag is Db.DB_INIT_MPOOL.
+<p>Access method calls are largely unchanged when using this flag, although
+any cursors through which update operations (e.g., <a href="../api_java/dbc_put.html">Dbc.put</a>,
+<a href="../api_java/dbc_del.html">Dbc.del</a>) will be made must have the <a href="../api_java/db_cursor.html#DB_WRITECURSOR">Db.DB_WRITECURSOR</a> value
+set in the flags parameter to the cursor call that creates the cursor.
+See <a href="../api_java/db_cursor.html">Db.cursor</a> for more information.
+<p><dt><a name="Db.DB_INIT_LOCK">Db.DB_INIT_LOCK</a><dd>Initialize the locking subsystem. This subsystem should be used when
+multiple processes or threads are going to be reading and writing a Berkeley DB
+database, so that they do not interfere with each other. If all threads
+are accessing the database(s) read-only, then locking is unnecessary.
+When the DB_INIT_LOCK flag is specified, it is usually necessary to run
+the deadlock detector, as well. See <a href="../utility/db_deadlock.html">db_deadlock</a> and
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a> for more information.
+<p><dt><a name="Db.DB_INIT_LOG">Db.DB_INIT_LOG</a><dd>Initialize the logging subsystem. This subsystem is used when recovery
+from application or system failure is necessary.
+<p>The log is stored in one or more files in the environment directory.
+Each file is named using the format <i>log.NNNNNNNNNN</i>, where
+<i>NNNNNNNNNN</i> is the sequence number of the file within the log.
+For further information, see
+<a href="../ref/log/limits.html">Log File Limits</a>.
+<p>If the log region is being created and log files are already present, the
+log files are reviewed and subsequent log writes are appended
+to the end of the log, rather than overwriting current log entries.
+<p><dt><a name="Db.DB_INIT_MPOOL">Db.DB_INIT_MPOOL</a><dd>Initialize the shared memory buffer pool subsystem. This subsystem is
+used whenever the application is using any Berkeley DB access method.
+<p><dt><a name="Db.DB_INIT_TXN">Db.DB_INIT_TXN</a><dd>Initialize the transaction subsystem. This subsystem is used when
+recovery and atomicity of multiple operations are important.
+The DB_INIT_TXN flag implies the DB_INIT_LOG flag.
+</dl>
+<p>The second group of flags govern what recovery, if any, is performed when
+the environment is initialized:
+<p><dl compact>
+<p><dt><a name="Db.DB_RECOVER">Db.DB_RECOVER</a><dd>Run normal recovery on this environment before opening it for normal use.
+If this flag is set, the DB_CREATE flag must also be set since the regions
+will be removed and recreated.
+<p><dt><a name="Db.DB_RECOVER_FATAL">Db.DB_RECOVER_FATAL</a><dd>Run catastrophic recovery on this environment before opening it for normal
+use. If this flag is set, the DB_CREATE flag must also be set since the
+regions will be removed and recreated.
+</dl>
+<p>A standard part of the recovery process is to remove the existing Berkeley DB
+environment and create a new one in which to perform recovery. If the
+thread of control performing recovery does not specify the correct region
+initialization information (e.g., the correct memory pool cache size),
+the result can be an application running in an environment with incorrect
+cache and other subsystem sizes. For this reason, the thread of control
+performing recovery should either specify correct configuration
+information before calling the DbEnv.open method, or it should remove
+the environment after recovery is completed, leaving creation of the
+correctly sized environment to a subsequent call to DbEnv.open.
+<p>All Berkeley DB recovery processing must be single-threaded, that is, only a
+single thread of control may perform recovery or access a Berkeley DB
+environment while recovery is being performed. As it is not an error to
+specify Db.DB_RECOVER for an environment for which no recovery is
+required, it is reasonable programming practice for the thread of control
+responsible for performing recovery and creating the environment to always
+specify the Db.DB_RECOVER flag during startup.
+<p>The DbEnv.open function returns successfully if Db.DB_RECOVER
+or Db.DB_RECOVER_FATAL is specified and no log files exist, so it is
+necessary to ensure all necessary log files are present before running
+recovery. For further information, consult <a href="../utility/db_archive.html">db_archive</a> and
+<a href="../utility/db_recover.html">db_recover</a>.
+<p>The third group of flags govern file naming extensions in the environment:
+<p><dl compact>
+<!--$Id: m4.env_flags,v 10.9 2000/06/29 22:54:10 bostic Exp $-->
+<p><dt><a name="Db.DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, environment information will be used in file
+naming for all users only if the DB_USE_ENVIRON flag is set.
+<p><dt><a name="Db.DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, if the DB_USE_ENVIRON_ROOT flag is set,
+environment information will be used for file naming only for users with
+appropriate permissions (e.g., on UNIX systems, users with a user-ID of 0).
+</dl>
+<p>Finally, there are a few additional, unrelated flags:
+<p><dl compact>
+<p><dt><a name="Db.DB_CREATE">Db.DB_CREATE</a><dd>Cause Berkeley DB subsystems to create any underlying files, as necessary.
+<p><dt><a name="Db.DB_LOCKDOWN">Db.DB_LOCKDOWN</a><dd>Lock shared Berkeley DB environment files and memory mapped databases into memory.
+<p><dt><a name="Db.DB_PRIVATE">Db.DB_PRIVATE</a><dd>Specify that the environment will only be accessed by a single process
+(although that process may be multi-threaded). This flag has two effects
+on the Berkeley DB environment. First, all underlying data structures are
+allocated from per-process memory instead of from shared memory that is
+potentially accessible to more than a single process. Second, mutexes
+are only configured to work between threads.
+<p>This flag should not be specified if more than a single process is
+accessing the environment, as it is likely to cause database corruption
+and unpredictable behavior, e.g., if both a server application and the
+Berkeley DB utility <a href="../utility/db_stat.html">db_stat</a> will access the environment, the
+Db.DB_PRIVATE flag should not be specified.
+<p><dt><a name="Db.DB_SYSTEM_MEM">Db.DB_SYSTEM_MEM</a><dd>Allocate memory from system shared memory instead of from memory backed
+by the filesystem. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p><dt><a name="Db.DB_THREAD">Db.DB_THREAD</a><dd>Cause the <a href="../api_java/dbenv_class.html">DbEnv</a> handle returned by DbEnv.open to be
+<i>free-threaded</i>, that is, useable by multiple threads within a
+single address space.
+<p>Threading is always assumed in the Java API, so no special flags are
+required and Berkeley DB functions will always behave as if the Db.DB_THREAD
+flag was specified.
+</dl>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by Berkeley DB
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p>The DbEnv.open method throws an exception that encapsulates a non-zero error value on
+failure.
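+<p>As a hypothetical sketch (the home directory is a placeholder), the
+following program opens a transactional environment, creating it if
+necessary and running normal recovery first:
+<pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class EnvOpenExample
+{
+    public static void main(String[] args)
+        throws DbException, FileNotFoundException
+    {
+        DbEnv dbenv = new DbEnv(0);
+        // Initialize the cache, locking, logging and transaction
+        // subsystems, create the environment if necessary, and run
+        // normal recovery before opening it for use.
+        int flags = Db.DB_CREATE | Db.DB_RECOVER |
+            Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+            Db.DB_INIT_LOG | Db.DB_INIT_TXN;
+        dbenv.open("/tmp/env_open_example", flags, 0);
+        // ... application work using the environment ...
+        dbenv.close(0);
+    }
+}
+</pre>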
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The environment variable <b>DB_HOME</b> may be used as the path of
+the database home as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+</dl>
+<h1>Errors</h1>
+<p>The DbEnv.open method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The shared memory region was locked and (repeatedly) unavailable.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>
+The Db.DB_THREAD flag was specified and spinlocks are not
+implemented for this architecture.
+<p>The DB_HOME or TMPDIR environment variables were set but empty.
+<p>An incorrectly formatted <b>NAME VALUE</b> entry or line was found.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>HP-UX only: a previously created Berkeley DB environment for this process still
+exists.
+</dl>
+<p>If the file or directory does not exist, the DbEnv.open method will
+fail and
+throw a FileNotFoundException exception.
+<p>The DbEnv.open method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.open method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_remove.html b/bdb/docs/api_java/env_remove.html
new file mode 100644
index 00000000000..acfaf39761e
--- /dev/null
+++ b/bdb/docs/api_java/env_remove.html
@@ -0,0 +1,129 @@
+<!--$Id: env_remove.so,v 10.24 2000/12/06 14:40:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.remove</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void remove(String db_home, int flags)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.remove method destroys a Berkeley DB environment, if it is not
+currently in use. The environment regions, including any backing files,
+are removed. Any log or database files and the environment directory are
+not removed.
+<p>The <b>db_home</b> argument to DbEnv.remove is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>If there are processes that have called <a href="../api_java/env_open.html">DbEnv.open</a> without
+calling <a href="../api_java/env_close.html">DbEnv.close</a> (i.e., there are processes currently using
+the environment), DbEnv.remove will fail without further action,
+unless the <a href="../api_java/env_remove.html#DB_FORCE">Db.DB_FORCE</a> flag is set, in which case
+DbEnv.remove will attempt to remove the environment regardless
+of any processes still using it.
+<p>The result of attempting to forcibly destroy the environment when it is
+in use is unspecified. Processes using an environment often maintain open
+file descriptors for shared regions within it. On UNIX systems, the
+environment removal will usually succeed and processes that have already
+joined the region will continue to run in that region without change;
+however, processes attempting to join the environment will either fail or
+create new regions. On other systems (e.g., Windows/NT), where the
+<b>unlink</b>(2) system call will fail if any process has an open
+file descriptor for the file, the region removal will fail.
+<p>Calling DbEnv.remove should not be necessary for most applications,
+as the Berkeley DB environment is cleaned up as part of normal database recovery
+procedures; however, applications may wish to call DbEnv.remove
+as part of application shutdown to free up system resources.
+Specifically, when the <a href="../api_java/env_open.html#DB_SYSTEM_MEM">Db.DB_SYSTEM_MEM</a> flag was specified to
+<a href="../api_java/env_open.html">DbEnv.open</a>, it may be useful to call DbEnv.remove in order
+to release system shared memory segments that have been allocated.
+<p>In the case of catastrophic or system failure, database recovery must be
+performed (see <a href="../utility/db_recover.html">db_recover</a>), or the <a href="../api_java/env_open.html#DB_RECOVER">Db.DB_RECOVER</a> and
+<a href="../api_java/env_open.html#DB_RECOVER_FATAL">Db.DB_RECOVER_FATAL</a> flags to <a href="../api_java/env_open.html">DbEnv.open</a> must be specified
+when the environment is re-opened. Alternatively, if recovery is not
+required because no database state is maintained across failures, and
+the <a href="../api_java/env_open.html#DB_SYSTEM_MEM">Db.DB_SYSTEM_MEM</a> flag was not specified when the environment
+was created, it is possible to clean up an environment by removing all
+of the files in the environment directory that begin with the string
+prefix "__db", as no backing files are created in any other directory.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="Db.DB_FORCE">Db.DB_FORCE</a><dd>If the <a href="../api_java/env_remove.html#DB_FORCE">Db.DB_FORCE</a> flag is set, the environment is removed regardless
+of any processes that may still be using it, and no locks are acquired
+during this process. (Generally, the <a href="../api_java/env_remove.html#DB_FORCE">Db.DB_FORCE</a> flag is only
+specified when applications were unable to shut down cleanly, and there
+is a risk that an application may have died holding a Berkeley DB lock.)
+<!--$Id: m4.env_flags,v 10.9 2000/06/29 22:54:10 bostic Exp $-->
+<p><dt><a name="Db.DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, environment information will be used in file
+naming for all users only if the DB_USE_ENVIRON flag is set.
+<p><dt><a name="Db.DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, if the DB_USE_ENVIRON_ROOT flag is set,
+environment information will be used for file naming only for users with
+appropriate permissions (e.g., on UNIX systems, users with a user-ID of 0).
+</dl>
+<p>In multi-threaded applications, only a single thread may call
+DbEnv.remove.
+<p>A <a href="../api_java/dbenv_class.html">DbEnv</a> handle which has already been used to open an
+environment should not be used to call the DbEnv.remove method; a new
+<a href="../api_java/dbenv_class.html">DbEnv</a> handle should be created for that purpose.
+<p>Once DbEnv.remove has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DbEnv.remove method throws an exception that encapsulates a non-zero error value on
+failure.
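+<p>As a brief, hypothetical sketch (the home directory is a placeholder),
+a fresh handle is created solely to remove the environment:
+<pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class EnvRemoveExample
+{
+    public static void main(String[] args)
+        throws DbException
+    {
+        // A handle already used to open an environment should not be used
+        // here; create a new one for the removal.
+        DbEnv dbenv = new DbEnv(0);
+        try {
+            dbenv.remove("/tmp/env_remove_example", 0);
+        } catch (FileNotFoundException fnfe) {
+            // The environment directory or its files do not exist.
+        }
+    }
+}
+</pre>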
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EBUSY<dd>The shared memory region was in use and the force flag was not set.
+</dl>
+<p>If the file or directory does not exist, the DbEnv.remove method will
+fail and
+throw a FileNotFoundException exception.
+<p>The DbEnv.remove method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.remove method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_cachesize.html b/bdb/docs/api_java/env_set_cachesize.html
new file mode 100644
index 00000000000..af31e44f91d
--- /dev/null
+++ b/bdb/docs/api_java/env_set_cachesize.html
@@ -0,0 +1,86 @@
+<!--$Id: env_set_cachesize.so,v 10.19 2000/05/20 16:29:11 bostic Exp $-->
+<!--$Id: m4.cachesize,v 10.7 2000/02/11 18:54:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_cachesize</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_cachesize(int gbytes, int bytes, int ncache)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the database's shared memory buffer pool, i.e., the cache,
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be the
+size of the normal working data set of the application, with some small
+amount of additional memory for unusual situations. (Note, the working
+set is not the same as the number of simultaneously referenced pages, and
+should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures, e.g., some
+releases of Solaris limit the amount of memory that may be allocated
+contiguously by a process. If <b>ncache</b> is 0 or 1, the cache will
+be allocated contiguously in memory. If it is greater than 1, the cache
+will be broken up into <b>ncache</b> equally sized separate pieces of
+memory.
+<p>The DbEnv.set_cachesize interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_cachesize method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
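+<p>As a brief, hypothetical sketch (the home directory and cache size are
+placeholders), the cache is configured before the environment is opened;
+the equivalent <b>DB_CONFIG</b> entry would be "set_cachesize 0 10485760 1":
+<pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class CacheSizeExample
+{
+    public static void main(String[] args)
+        throws DbException, FileNotFoundException
+    {
+        DbEnv dbenv = new DbEnv(0);
+        // Request a single, contiguous 10MB cache; this must be done
+        // before DbEnv.open is called.
+        dbenv.set_cachesize(0, 10 * 1024 * 1024, 1);
+        dbenv.open("/tmp/cachesize_example",
+            Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+        dbenv.close(0);
+    }
+}
+</pre>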
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+<p>The specified cache size was impossibly small.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_data_dir.html b/bdb/docs/api_java/env_set_data_dir.html
new file mode 100644
index 00000000000..52fc159e77a
--- /dev/null
+++ b/bdb/docs/api_java/env_set_data_dir.html
@@ -0,0 +1,77 @@
+<!--$Id: env_set_data_dir.so,v 10.3 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_data_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_data_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_data_dir(String dir)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of the access
+method database files. Paths specified to the <a href="../api_java/db_open.html">Db.open</a> function
+will be searched relative to this path. Paths set using this interface
+are additive, and specifying more than one will result in each specified
+directory being searched for database files. If any directories are
+specified, created database files will always be created in the first path
+specified.
+<p>If no database directories are specified, database files can only exist
+in the environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DbEnv.set_data_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_data_dir method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's data directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_data_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
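+<p>As a brief, hypothetical sketch (the directory names and home directory
+are placeholders), two data directories are configured before the
+environment is opened:
+<pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class DataDirExample
+{
+    public static void main(String[] args)
+        throws DbException, FileNotFoundException
+    {
+        DbEnv dbenv = new DbEnv(0);
+        // Both directories are searched for database files; newly created
+        // database files go in "data1", the first directory specified.
+        dbenv.set_data_dir("data1");
+        dbenv.set_data_dir("data2");
+        dbenv.open("/tmp/data_dir_example",
+            Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+        dbenv.close(0);
+    }
+}
+</pre>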
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_errcall.html b/bdb/docs/api_java/env_set_errcall.html
new file mode 100644
index 00000000000..596de47137d
--- /dev/null
+++ b/bdb/docs/api_java/env_set_errcall.html
@@ -0,0 +1,78 @@
+<!--$Id: env_set_errcall.so,v 10.16 1999/12/20 08:52:29 bostic Exp $-->
+<!--$Id: m4.errset,v 10.8 2000/02/19 20:57:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_errcall</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbErrcall
+{
+ public abstract void errcall(String errpfx, String msg);
+}
+public class DbEnv
+{
+ public void set_errcall(DbErrcall errcall);
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown. In
+some cases, however, the <b>errno</b> value may be insufficient to
+completely describe the cause of the error, especially during initial
+application debugging.
+<p>The DbEnv.set_errcall method is used to enhance the mechanism for reporting error
+messages to the application. The DbEnv.set_errcall method must be
+called with a single object argument. The object's class must implement
+the DbErrcall interface. In some cases, when an error occurs, Berkeley DB will
+invoke the object's errcall() method with two arguments: the first is the
+prefix string (as previously set by <a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a> or
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>), and the second is the error message string.
+It is up to this method to display the message in an appropriate manner.
+<p>Alternatively, you can use the <a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a> method to display
+the additional information via an output stream. You should not mix these
+approaches.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DbEnv.set_errcall interface may be used to configure Berkeley DB at any time
+during the life of the application.
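+<p>For example, a minimal callback class might be written as follows; the
+class name, prefix string, and use of System.err are illustrative only:
+<p><blockquote><pre>import com.sleepycat.db.*;
+<p>
+public class StderrErrcall implements DbErrcall
+{
+    // Write the prefix (if any) and the error message to standard error.
+    public void errcall(String errpfx, String msg)
+    {
+        if (errpfx != null)
+            System.err.print(errpfx + ": ");
+        System.err.println(msg);
+    }
+}
+<p>
+// Elsewhere, register the callback on an existing DbEnv handle:
+dbenv.set_errpfx("myapp");
+dbenv.set_errcall(new StderrErrcall());</pre></blockquote>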
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_error_stream.html b/bdb/docs/api_java/env_set_error_stream.html
new file mode 100644
index 00000000000..18f44c08d7d
--- /dev/null
+++ b/bdb/docs/api_java/env_set_error_stream.html
@@ -0,0 +1,69 @@
+<!--$Id: env_set_error_stream.so,v 10.13 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_error_stream</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_error_stream</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_error_stream(OutputStream s)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown. In
+some cases, however, the <b>errno</b> value may be insufficient to
+completely describe the cause of the error, especially during initial
+application debugging.
+<p>The DbEnv.set_error_stream method is used to enhance the mechanism for
+reporting error messages to the application by setting an OutputStream
+to be used for displaying additional Berkeley DB error messages. In some cases,
+when an error occurs, Berkeley DB will output an additional error message to
+the specified stream.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>), an error string, and a trailing
+&lt;newline&gt; character.
+<p>Alternatively, you can use the <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> method to capture the
+additional error information in a way that does not use output streams.
+You should not mix these approaches.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
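+<p>For example, to have additional Berkeley DB error messages written to
+standard error (the prefix string is illustrative only, and <b>dbenv</b> is
+assumed to be an existing DbEnv handle):
+<p><blockquote><pre>// System.err is a java.io.PrintStream, which is an OutputStream,
+// so it can be passed directly.
+dbenv.set_errpfx("myapp");
+dbenv.set_error_stream(System.err);</pre></blockquote>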
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_errpfx.html b/bdb/docs/api_java/env_set_errpfx.html
new file mode 100644
index 00000000000..aca43448c8b
--- /dev/null
+++ b/bdb/docs/api_java/env_set_errpfx.html
@@ -0,0 +1,52 @@
+<!--$Id: env_set_errpfx.so,v 10.12 1999/12/20 08:52:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_errpfx</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_errpfx(String errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The DbEnv.set_errpfx interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_feedback.html b/bdb/docs/api_java/env_set_feedback.html
new file mode 100644
index 00000000000..1b5310e2537
--- /dev/null
+++ b/bdb/docs/api_java/env_set_feedback.html
@@ -0,0 +1,76 @@
+<!--$Id: env_set_feedback.so,v 10.19 2000/07/09 19:12:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_feedback</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbEnvFeedback
+{
+ public abstract void db_feedback(DbEnv dbenv, int opcode, int pct);
+}
+public class DbEnv
+{
+ public void set_feedback(DbEnvFeedback db_feedback)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The DbEnv.set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback method. This method must be declared with
+three arguments: the first will be a reference to the enclosing
+environment, the second an operation code (<b>opcode</b>), and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback method to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_RECOVER">Db.DB_RECOVER</a><dd>The environment is being recovered.
+</dl>
+<p>The DbEnv.set_feedback interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DbEnv.set_feedback method throws an exception that encapsulates a non-zero error value on
+failure.
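+<p>For example, a minimal progress callback might be written as follows; the
+class name and output format are illustrative only:
+<p><blockquote><pre>import com.sleepycat.db.*;
+<p>
+public class RecoveryFeedback implements DbEnvFeedback
+{
+    // Report recovery progress as a percentage.
+    public void db_feedback(DbEnv dbenv, int opcode, int pct)
+    {
+        if (opcode == Db.DB_RECOVER)
+            System.out.println("recovery " + pct + "% complete");
+    }
+}
+<p>
+// Elsewhere, register the callback on an existing DbEnv handle:
+dbenv.set_feedback(new RecoveryFeedback());</pre></blockquote>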
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_flags.html b/bdb/docs/api_java/env_set_flags.html
new file mode 100644
index 00000000000..d371429e0ae
--- /dev/null
+++ b/bdb/docs/api_java/env_set_flags.html
@@ -0,0 +1,84 @@
+<!--$Id-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_flags</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_flags(int flags, int onoff)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or more
+of the following values.
+If <b>onoff</b> is zero, the specified flags are cleared, otherwise they
+are set.
+<p><dl compact>
+<p><dt><a name="Db.DB_CDB_ALLDB">Db.DB_CDB_ALLDB</a><dd>For Berkeley DB Concurrent Data Store applications, perform locking on an environment-wide basis
+rather than per-database. This flag may only be used to configure Berkeley DB
+before the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p><dt><a name="Db.DB_NOMMAP">Db.DB_NOMMAP</a><dd>Copy read-only database files in this environment into the local cache
+instead of potentially mapping them into process memory (see the
+description of the <a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a> method for further information).
+<p><dt><a name="Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log on transaction commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency and
+isolation) properties, but not D (durability), i.e., database integrity
+will be maintained but it is possible that some number of the most
+recently committed transactions may be undone during recovery instead of
+being redone.
+<p>The number of transactions that are potentially at risk is governed by
+how often the log is checkpointed (see <a href="../utility/db_checkpoint.html">db_checkpoint</a> for more
+information) and how many log updates can fit on a single log page.
+</dl>
+<p>The DbEnv.set_flags method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's flag values may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_flags", one or more whitespace characters,
+and the interface flag argument as a string, for example, "set_flags
+DB_TXN_NOSYNC". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
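+<p>For example, to trade durability for commit throughput by enabling
+DB_TXN_NOSYNC on an existing DbEnv handle (a sketch only; whether this
+trade-off is acceptable depends on the application):
+<p><blockquote><pre>// A non-zero onoff value sets the flag; zero would clear it again.
+dbenv.set_flags(Db.DB_TXN_NOSYNC, 1);</pre></blockquote>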
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lg_bsize.html b/bdb/docs/api_java/env_set_lg_bsize.html
new file mode 100644
index 00000000000..1f3725f6eb5
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lg_bsize.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_lg_bsize.so,v 10.10 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lg_bsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lg_bsize</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lg_bsize(int lg_bsize)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the in-memory log buffer, in bytes. By default, or if
+the value is set to 0, a size of 32K is used.
+<p>Log information is stored in-memory until the storage space fills up
+or transaction commit forces the information to be flushed to stable
+storage. In the presence of long-running transactions or transactions
+producing large amounts of data, larger buffer sizes can increase
+throughput.
+<p>The DbEnv.set_lg_bsize interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lg_bsize method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's log buffer size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_bsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
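+<p>For example, to configure a larger in-memory log buffer (the 256KB figure
+is illustrative only, and <b>dbenv</b> is assumed to be an unopened DbEnv
+handle):
+<p><blockquote><pre>// Must be called before DbEnv.open; 0 would select the 32KB default.
+dbenv.set_lg_bsize(256 * 1024);</pre></blockquote>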
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lg_dir.html b/bdb/docs/api_java/env_set_lg_dir.html
new file mode 100644
index 00000000000..6633ee5d820
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lg_dir.html
@@ -0,0 +1,73 @@
+<!--$Id: env_set_lg_dir.so,v 10.3 2000/05/20 16:29:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lg_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lg_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lg_dir(String dir)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The path of a directory to be used as the location of logging files.
+Log files created by the Log Manager subsystem will be created in this
+directory.
+<p>If no logging directory is specified, log files are created in the
+environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DbEnv.set_lg_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lg_dir method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's logging directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
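+<p>For example, to place log files on a device separate from the database
+files (the path is illustrative only, and <b>dbenv</b> is assumed to be an
+unopened DbEnv handle):
+<p><blockquote><pre>// Must be called before DbEnv.open.  A relative name is interpreted
+// as described in Berkeley DB File Naming.
+dbenv.set_lg_dir("/logdisk/myapp-logs");</pre></blockquote>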
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lg_max.html b/bdb/docs/api_java/env_set_lg_max.html
new file mode 100644
index 00000000000..fea4163bec0
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lg_max.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_lg_max.so,v 10.20 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lg_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lg_max</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lg_max(int lg_max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum size of a single file in the log, in bytes. Because
+<a href="../api_java/lsn_class.html">DbLsn</a> file offsets are unsigned 4-byte values, the set value may
+not be larger than the maximum unsigned 4-byte value. By default, or if
+the value is set to 0, a size of 10MB is used.
+<p>See <a href="../ref/log/limits.html">Log File Limits</a>
+for more information.
+<p>The DbEnv.set_lg_max interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lg_max method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's log file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_max", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
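+<p>For example, to use smaller individual log files (the 5MB figure is
+illustrative only, and <b>dbenv</b> is assumed to be an unopened DbEnv
+handle):
+<p><blockquote><pre>// Must be called before DbEnv.open; 0 would select the 10MB default.
+dbenv.set_lg_max(5 * 1024 * 1024);</pre></blockquote>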
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+<p>The specified log file size was too large.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lk_conflicts.html b/bdb/docs/api_java/env_set_lk_conflicts.html
new file mode 100644
index 00000000000..3ad5c6173c9
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lk_conflicts.html
@@ -0,0 +1,68 @@
+<!--$Id: env_set_lk_conflicts.so,v 10.22 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_conflicts</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_conflicts</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_conflicts(byte[][] conflicts)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the locking conflicts matrix.
+A non-0 value for the array element:
+<p><blockquote><pre>conflicts[requested_mode][held_mode]</pre></blockquote>
+<p>indicates that requested_mode and held_mode conflict. The
+<i>not-granted</i> mode must be represented by 0.
+<p>If no <b>conflicts</b> value is specified, the conflicts array
+<b>db_rw_conflicts</b> is used; see <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for a description of that array.
+<p>The DbEnv.set_lk_conflicts interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lk_conflicts method throws an exception that encapsulates a non-zero error value on
+failure.
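+<p>For example, an application using the lock subsystem directly with three
+lock modes of its own (0 = not granted, 1 = read, 2 = write) might install
+the following conflicts matrix; the mode numbering and matrix contents are
+illustrative only, and <b>dbenv</b> is assumed to be an unopened DbEnv handle:
+<p><blockquote><pre>byte[][] conflicts = {
+    /* requested mode:    held: N  R  W */
+    /* not granted */         { 0, 0, 0 },
+    /* read        */         { 0, 0, 1 },
+    /* write       */         { 0, 1, 1 }
+};
+// Must be called before DbEnv.open.
+dbenv.set_lk_conflicts(conflicts);</pre></blockquote>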
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>No memory was available to copy the conflicts array.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lk_detect.html b/bdb/docs/api_java/env_set_lk_detect.html
new file mode 100644
index 00000000000..cf3dd087177
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lk_detect.html
@@ -0,0 +1,74 @@
+<!--$Id: env_set_lk_detect.so,v 10.19 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_detect</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_detect(int detect)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set if the deadlock detector is to be run whenever a lock conflict occurs,
+and specify which transaction should be aborted in the case of a deadlock.
+The specified value must be one of the following list:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a><dd>Use the default policy as specified by <a href="../utility/db_deadlock.html">db_deadlock</a>.
+<dt><a name="DB_LOCK_OLDEST">DB_LOCK_OLDEST</a><dd>Abort the oldest transaction.
+<dt><a name="DB_LOCK_RANDOM">DB_LOCK_RANDOM</a><dd>Abort a random transaction involved in the deadlock.
+<dt><a name="DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a><dd>Abort the youngest transaction.
+</dl>
+<p>The DbEnv.set_lk_detect interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lk_detect method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's deadlock detector configuration may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_detect", one or more whitespace characters,
+and the interface <b>detect</b> argument as a string, for example,
+"set_lk_detect DB_LOCK_OLDEST". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
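+<p>For example, to run the deadlock detector whenever a lock conflict occurs,
+aborting a random transaction from the deadlock cycle (<b>dbenv</b> is
+assumed to be an unopened DbEnv handle):
+<p><blockquote><pre>// Must be called before DbEnv.open.
+dbenv.set_lk_detect(Db.DB_LOCK_RANDOM);</pre></blockquote>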
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lk_max.html b/bdb/docs/api_java/env_set_lk_max.html
new file mode 100644
index 00000000000..1e2a928901b
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lk_max.html
@@ -0,0 +1,74 @@
+<!--$Id: env_set_lk_max.so,v 10.21 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_max</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_max(int max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p><b>The DbEnv.set_lk_max method interface has been deprecated in favor of
+the <a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>, <a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+and <a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a> methods. Please update your applications.</b>
+<p>Set each of the maximum number of locks, lockers and lock objects
+supported by the Berkeley DB lock subsystem to <b>max</b>. This value is
+used by <a href="../api_java/env_open.html">DbEnv.open</a> to estimate how much space to allocate for
+various lock-table data structures. For specific information on
+configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DbEnv.set_lk_max interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lk_max method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
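+<p>For example, instead of calling DbEnv.set_lk_max, an updated application
+sizes the three lock-table resources separately; the figures and the
+<b>dbenv</b> handle are illustrative only:
+<p><blockquote><pre>// Must be called before DbEnv.open.
+dbenv.set_lk_max_locks(5000);
+dbenv.set_lk_max_lockers(1000);
+dbenv.set_lk_max_objects(5000);</pre></blockquote>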
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lk_max_lockers.html b/bdb/docs/api_java/env_set_lk_max_lockers.html
new file mode 100644
index 00000000000..500244beee5
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lk_max_lockers.html
@@ -0,0 +1,70 @@
+<!--$Id: env_set_lk_max_lockers.so,v 1.2 2000/12/08 22:03:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_max_lockers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_max_lockers</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_max_lockers(int max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneous locking entities supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_java/env_open.html">DbEnv.open</a> to
+estimate how much space to allocate for various lock-table data
+structures. For specific information on configuring the size of the
+lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DbEnv.set_lk_max_lockers interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lk_max_lockers method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of lockers may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_lockers", one or more whitespace characters,
+and the number of lockers. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lk_max_locks.html b/bdb/docs/api_java/env_set_lk_max_locks.html
new file mode 100644
index 00000000000..88a5100aaef
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lk_max_locks.html
@@ -0,0 +1,69 @@
+<!--$Id: env_set_lk_max_locks.so,v 10.1 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_max_locks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_max_locks</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_max_locks(int max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of locks supported by the Berkeley DB lock subsystem.
+This value is used by <a href="../api_java/env_open.html">DbEnv.open</a> to estimate how much space to
+allocate for various lock-table data structures. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DbEnv.set_lk_max_locks interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lk_max_locks method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_locks", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_lk_max_objects.html b/bdb/docs/api_java/env_set_lk_max_objects.html
new file mode 100644
index 00000000000..b31ebefbf21
--- /dev/null
+++ b/bdb/docs/api_java/env_set_lk_max_objects.html
@@ -0,0 +1,70 @@
+<!--$Id: env_set_lk_max_objects.so,v 1.2 2000/12/08 22:03:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_max_objects</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_max_objects</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_max_objects(int max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneously locked objects supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_java/env_open.html">DbEnv.open</a> to
+estimate how much space to allocate for various lock-table data
+structures. For specific information on configuring the size of the
+lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the system</a>.
+<p>The DbEnv.set_lk_max_objects interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_lk_max_objects method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of objects may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_objects", one or more whitespace characters,
+and the number of objects. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_mp_mmapsize.html b/bdb/docs/api_java/env_set_mp_mmapsize.html
new file mode 100644
index 00000000000..ef1d5b14ef1
--- /dev/null
+++ b/bdb/docs/api_java/env_set_mp_mmapsize.html
@@ -0,0 +1,66 @@
+<!--$Id: env_set_mp_mmapsize.so,v 10.18 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_mp_mmapsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_mp_mmapsize</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_mp_mmapsize(long mmapsize)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Files that are opened read-only in the pool (and that satisfy a few other
+criteria) are, by default, mapped into the process address space instead
+of being copied into the local cache. This can result in better-than-usual
+performance, as available virtual memory is normally much larger than the
+local cache, and page faults are faster than page copying on many systems.
+However, in the presence of limited virtual memory it can cause resource
+starvation, and in the presence of large databases, it can result in immense
+process sizes.
+<p>Set the maximum file size, in bytes, for a file to be mapped into the
+process address space. If no value is specified, it defaults to 10MB.
+<p>The DbEnv.set_mp_mmapsize interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_mp_mmapsize method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum mapped file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_mp_mmapsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
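+<p>For example, to lower the threshold so that only small read-only files are
+mapped into the process address space (the 4MB figure is illustrative only,
+and <b>dbenv</b> is assumed to be an unopened DbEnv handle):
+<p><blockquote><pre>// Must be called before DbEnv.open; files larger than this are copied
+// into the cache rather than mapped.
+dbenv.set_mp_mmapsize(4 * 1024 * 1024);</pre></blockquote>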
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a>,
+<a href="../api_java/memp_stat.html">DbEnv.memp_stat</a>
+and
+<a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_mutexlocks.html b/bdb/docs/api_java/env_set_mutexlocks.html
new file mode 100644
index 00000000000..e098987701e
--- /dev/null
+++ b/bdb/docs/api_java/env_set_mutexlocks.html
@@ -0,0 +1,59 @@
+<!--$Id: env_set_mutexlocks.so,v 10.9 2000/11/17 19:56:52 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_mutexlocks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_mutexlocks</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_mutexlocks(int do_lock)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Toggle mutex locks. Setting <b>do_lock</b> to a false value causes
+Berkeley DB to grant all requested mutual exclusion mutexes without regard
+for their availability.
+<p>This functionality should never be used for any purpose other than
+debugging.
+<p>The DbEnv.set_mutexlocks interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DbEnv.set_mutexlocks method throws an exception that encapsulates a non-zero error value on
+failure.
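+<p>As an illustrative sketch only (intended for debugging sessions, and
+assuming a DbEnv handle created elsewhere), mutex waiting might be
+disabled as follows:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class MutexLocksExample
+{
+    // dbenv is assumed to be a DbEnv handle created elsewhere.
+    static void disableMutexWaits(DbEnv dbenv) throws DbException
+    {
+        // A false (zero) value grants every mutex request immediately;
+        // use only while debugging.
+        dbenv.set_mutexlocks(0);
+    }
+}
+</pre>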
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_pageyield.html b/bdb/docs/api_java/env_set_pageyield.html
new file mode 100644
index 00000000000..f67f3a88805
--- /dev/null
+++ b/bdb/docs/api_java/env_set_pageyield.html
@@ -0,0 +1,69 @@
+<!--$Id: env_set_pageyield.so,v 10.6 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_pageyield</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_pageyield</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static void set_pageyield(int pageyield)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Yield the processor whenever requesting a page from the cache. Setting
+<b>pageyield</b> to a true value causes Berkeley DB to yield the processor
+any time a thread requests a page from the cache.
+<p>The DbEnv.set_pageyield interface affects the entire application, not a single
+database or database environment.
+<p>While the DbEnv.set_pageyield interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>This functionality should never be used for any purpose other than
+stress testing.
+<p>The DbEnv.set_pageyield method throws an exception that encapsulates a non-zero error value on
+failure.
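+<p>Because this is a static, application-wide setting, a stress-testing
+harness would normally enable it before creating any handles. A minimal,
+illustrative sketch:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class PageYieldExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        // Yield the processor on every cache page request
+        // (stress testing only).
+        DbEnv.set_pageyield(1);
+        DbEnv dbenv = new DbEnv(0);
+        // ... configure and open the environment as usual ...
+    }
+}
+</pre>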
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_panicstate.html b/bdb/docs/api_java/env_set_panicstate.html
new file mode 100644
index 00000000000..d8b38ee2f6d
--- /dev/null
+++ b/bdb/docs/api_java/env_set_panicstate.html
@@ -0,0 +1,65 @@
+<!--$Id: env_set_panicstate.so,v 10.2 2001/01/17 15:32:34 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_panicstate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_panicstate</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static void set_panicstate(int panic)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Toggle the Berkeley DB panic state. Setting <b>panic</b> to a true value
+causes Berkeley DB to refuse attempts to call Berkeley DB functions with the
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">Db.DB_RUNRECOVERY</a> error return.
+<p>The DbEnv.set_panicstate interface affects the entire application, not a single
+database or database environment.
+<p>While the DbEnv.set_panicstate interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The DbEnv.set_panicstate method throws an exception that encapsulates a non-zero error value on
+failure.
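+<p>For illustration, a failure-testing harness might force the panic
+state on and later clear it; the sketch below is an assumed usage
+pattern, not a recommendation:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class PanicStateExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        // Subsequent Berkeley DB calls will fail with Db.DB_RUNRECOVERY.
+        DbEnv.set_panicstate(1);
+        // ... exercise the application's error handling ...
+        // Clear the panic state again.
+        DbEnv.set_panicstate(0);
+    }
+}
+</pre>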
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_rec_init.html b/bdb/docs/api_java/env_set_rec_init.html
new file mode 100644
index 00000000000..430c33a02af
--- /dev/null
+++ b/bdb/docs/api_java/env_set_rec_init.html
@@ -0,0 +1,78 @@
+<!--$Id: env_set_rec_init.so,v 10.9 2000/05/01 21:57:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_recovery_init</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_recovery_init</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbRecoveryInit
+{
+ public abstract int db_recovery_init_fcn(DbEnv dbenv);
+}
+public class DbEnv
+{
+ public void set_recovery_init(DbRecoveryInit db_recovery_init_fcn)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Applications installing application-specific recovery methods need
+those methods to be called before Berkeley DB performs recovery so they can
+add their recovery methods to Berkeley DB's.
+<p>The DbEnv.set_recovery_init method supports this functionality. The
+<b>db_recovery_init_fcn</b> method must be declared with one
+argument, a reference to the enclosing Berkeley DB environment. This
+method will be called after the <a href="../api_java/env_open.html">DbEnv.open</a> has been called,
+but before recovery is started.
+<p>If the <b>db_recovery_init_fcn</b> method returns a non-zero value,
+no recovery will be performed and <a href="../api_java/env_open.html">DbEnv.open</a> will return the same
+value to its caller.
+<p>The DbEnv.set_recovery_init interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_recovery_init method throws an exception that encapsulates a non-zero error value on
+failure.
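+<p>An illustrative sketch of installing a recovery-initialization
+callback follows; the callback body, environment home directory, and
+open flags are hypothetical:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class RecoveryInitExample implements DbRecoveryInit
+{
+    public int db_recovery_init_fcn(DbEnv dbenv)
+    {
+        // Register application-specific recovery functions here;
+        // a non-zero return aborts recovery and DbEnv.open.
+        return 0;
+    }
+
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = new DbEnv(0);
+        dbenv.set_recovery_init(new RecoveryInitExample());
+        dbenv.open("/var/dbhome",
+            Db.DB_CREATE | Db.DB_INIT_TXN | Db.DB_INIT_LOG |
+            Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL | Db.DB_RECOVER, 0);
+        dbenv.close(0);
+    }
+}
+</pre>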
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_region_init.html b/bdb/docs/api_java/env_set_region_init.html
new file mode 100644
index 00000000000..f4f5d256278
--- /dev/null
+++ b/bdb/docs/api_java/env_set_region_init.html
@@ -0,0 +1,78 @@
+<!--$Id: env_set_region_init.so,v 10.10 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_region_init</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_region_init</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static void set_region_init(int region_init)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Page-fault shared regions into memory when initially creating or joining
+a Berkeley DB environment. In some applications, the expense of page-faulting
+the shared memory regions can affect performance, e.g., when the
+page-fault occurs while holding a lock, other lock requests can convoy
+and overall throughput may decrease. Setting <b>region_init</b> to a
+true value specifies that shared regions be read or written, as
+appropriate, when the region is joined by the application. This forces
+the underlying virtual memory and file systems to instantiate both the
+necessary memory and the necessary disk space. This can also avoid
+out-of-disk space failures later on.
+<p>The DbEnv.set_region_init interface affects the entire application, not a single
+database or database environment.
+<p>While the DbEnv.set_region_init interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The DbEnv.set_region_init method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's initial behavior with respect to shared memory regions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_region_init", one or more whitespace characters,
+and the string "1". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
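+<p>As with the other static configuration interfaces, an application
+would normally make this call before creating any Berkeley DB handles. A
+minimal, illustrative sketch:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class RegionInitExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        // Fault shared regions in when the environment is created or
+        // joined, before any DbEnv or Db handles exist.
+        DbEnv.set_region_init(1);
+        DbEnv dbenv = new DbEnv(0);
+        // ... configure and open the environment as usual ...
+    }
+}
+</pre>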
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_server.html b/bdb/docs/api_java/env_set_server.html
new file mode 100644
index 00000000000..a0ffeefe35f
--- /dev/null
+++ b/bdb/docs/api_java/env_set_server.html
@@ -0,0 +1,77 @@
+<!--"@(#)env_set_server.so 10.13 (Sleepycat) 8/25/99"-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_server</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_server(String host,
+ long cl_timeout, long sv_timeout, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Connects to the DB server on the indicated hostname and sets up a channel
+for communication.
+<p>The <b>cl_timeout</b> argument specifies the number of seconds the client
+should wait for results to come back from the server. Once the timeout
+has expired on any communication with the server, Db.DB_NOSERVER will
+be returned. If this value is zero, a default timeout is used.
+<p>The <b>sv_timeout</b> argument specifies the number of seconds the server
+should allow a client connection to remain idle before assuming that
+client is gone. Once that timeout has been reached, the server releases
+all resources associated with that client connection. Subsequent attempts
+by that client to communicate with the server result in
+Db.DB_NOSERVER_ID indicating that an invalid identifier has been
+given to the server. This value can be considered a hint to the server.
+The server may alter this value based on its own policies or allowed
+values. If this value is zero, a default timeout is used.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>When the DbEnv.set_server method has been called, any subsequent calls
+to Berkeley DB library interfaces may return either <a name="DB_NOSERVER">Db.DB_NOSERVER</a> or
+<a name="DB_NOSERVER_ID">Db.DB_NOSERVER_ID</a>.
+<p>The DbEnv.set_server method throws an exception that encapsulates a non-zero error value on
+failure.
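+<p>The following client-side sketch is illustrative only: the host name
+and environment home directory are hypothetical, and the Db.DB_CLIENT
+flag passed to the DbEnv constructor is an assumption that should be
+checked against the DbEnv class documentation for your release:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class SetServerExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        // Db.DB_CLIENT selects an RPC client environment (assumed flag).
+        DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
+        // Use the default client and server timeouts; flags must be 0.
+        dbenv.set_server("dbserver.example.com", 0, 0, 0);
+        dbenv.open("/var/dbhome", Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+        dbenv.close(0);
+    }
+}
+</pre>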
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_shm_key.html b/bdb/docs/api_java/env_set_shm_key.html
new file mode 100644
index 00000000000..12a65c92251
--- /dev/null
+++ b/bdb/docs/api_java/env_set_shm_key.html
@@ -0,0 +1,87 @@
+<!--$Id: env_set_shm_key.so,v 10.5 2000/08/09 15:45:52 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_shm_key</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_shm_key</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_shm_key(long shm_key)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Specify a base segment ID for Berkeley DB environment shared memory regions
+created in system memory on VxWorks or systems supporting X/Open-style
+shared memory interfaces, e.g., UNIX systems supporting
+<b>shmget</b>(2) and related System V IPC interfaces.
+<p>This base segment ID will be used when Berkeley DB shared memory regions are
+first created. It will be incremented by a small integer value each time
+a new shared memory region is created; that is, if the base ID is 35,
+the first shared memory region created will have a segment ID of 35 and
+the next one a segment ID between 36 and 40 or so. A Berkeley DB environment
+always creates a master shared memory region, plus an additional shared
+memory region for each of the subsystems supported by the environment
+(locking, logging, memory pool and transaction), plus an additional
+shared memory region for each additional memory pool cache that is
+supported. Already existing regions with the same segment IDs will be
+removed. See <a href="../ref/env/region.html">Shared Memory Regions</a>
+for more information.
+<p>The intent behind this interface is two-fold: without it, applications
+have no way to ensure that two Berkeley DB applications don't attempt to use
+the same segment IDs when creating different Berkeley DB environments. In
+addition, by using the same segment IDs each time the environment is
+created, previously created segments will be removed, and the set of
+segments on the system will not grow without bound.
+<p>The DbEnv.set_shm_key interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_shm_key method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's base segment ID may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_shm_key", one or more whitespace characters,
+and the ID. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
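+<p>Continuing the example above, a sketch using a base segment ID of 35
+might look as follows; the home directory is hypothetical, and the
+Db.DB_SYSTEM_MEM open flag (which places regions in system memory) is an
+assumption documented with DbEnv.open:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class ShmKeyExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = new DbEnv(0);
+        // Use 35 as the base segment ID, as in the description above.
+        dbenv.set_shm_key(35);
+        dbenv.open("/var/dbhome",
+            Db.DB_CREATE | Db.DB_INIT_MPOOL | Db.DB_SYSTEM_MEM, 0);
+        dbenv.close(0);
+    }
+}
+</pre>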
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_tas_spins.html b/bdb/docs/api_java/env_set_tas_spins.html
new file mode 100644
index 00000000000..64654ebc96a
--- /dev/null
+++ b/bdb/docs/api_java/env_set_tas_spins.html
@@ -0,0 +1,71 @@
+<!--$Id: env_set_tas_spins.so,v 10.9 2000/05/31 15:10:00 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tas_spins</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_tas_spins</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static void set_tas_spins(int tas_spins)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Specify that test-and-set mutexes should spin <b>tas_spins</b> times
+without blocking. The value defaults to 1 on uniprocessor systems and
+to 50 times the number of processors on multiprocessor systems.
+<p>The DbEnv.set_tas_spins interface affects the entire application, not a single
+database or database environment.
+<p>While the DbEnv.set_tas_spins interface may be used to configure Berkeley DB at any time
+during the life of the application, it should normally be called before
+making any calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The DbEnv.set_tas_spins method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's test-and-set spin count may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tas_spins", one or more whitespace characters,
+and the number of spins. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
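+<p>A minimal, illustrative sketch (the spin count of 100 is an arbitrary
+example value):
+<p><pre>
+import com.sleepycat.db.*;
+
+public class TasSpinsExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        // Spin 100 times before blocking; call before creating
+        // any other Berkeley DB handles.
+        DbEnv.set_tas_spins(100);
+        DbEnv dbenv = new DbEnv(0);
+        // ... configure and open the environment as usual ...
+    }
+}
+</pre>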
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_tmp_dir.html b/bdb/docs/api_java/env_set_tmp_dir.html
new file mode 100644
index 00000000000..8c3c4b899c0
--- /dev/null
+++ b/bdb/docs/api_java/env_set_tmp_dir.html
@@ -0,0 +1,89 @@
+<!--$Id: env_set_tmp_dir.so,v 10.3 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tmp_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_tmp_dir</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_tmp_dir(String dir)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The path of a directory to be used as the location of temporary files.
+The files created to back in-memory access method databases will be
+created relative to this path. These temporary files can be quite large,
+depending on the size of the database.
+<p>If no directories are specified, the following alternatives are checked
+in the specified order. The first existing directory path is used for
+all temporary files.
+<p><ol>
+<p><li>The value of the environment variable <b>TMPDIR</b>.
+<li>The value of the environment variable <b>TEMP</b>.
+<li>The value of the environment variable <b>TMP</b>.
+<li>The value of the environment variable <b>TempFolder</b>.
+<li>The value returned by the GetTempPath interface.
+<li>The directory <b>/var/tmp</b>.
+<li>The directory <b>/usr/tmp</b>.
+<li>The directory <b>/temp</b>.
+<li>The directory <b>/tmp</b>.
+<li>The directory <b>C:/temp</b>.
+<li>The directory <b>C:/tmp</b>.
+</ol>
+<p>Note: environment variables are only checked if one of the
+<a href="../api_java/env_open.html#DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a> or <a href="../api_java/env_open.html#DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a> flags was
+specified.
+<p>Note: the GetTempPath interface is only checked on Win32 platforms.
+<p>The DbEnv.set_tmp_dir interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_tmp_dir method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's temporary file directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tmp_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
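+<p>For example (both directory paths below are hypothetical):
+<p><pre>
+import com.sleepycat.db.*;
+
+public class TmpDirExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = new DbEnv(0);
+        // Back in-memory databases with files in this directory.
+        dbenv.set_tmp_dir("/var/tmp/dbtmp");
+        dbenv.open("/var/dbhome", Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+        dbenv.close(0);
+    }
+}
+</pre>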
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_tx_max.html b/bdb/docs/api_java/env_set_tx_max.html
new file mode 100644
index 00000000000..d1a57f5c856
--- /dev/null
+++ b/bdb/docs/api_java/env_set_tx_max.html
@@ -0,0 +1,69 @@
+<!--$Id: env_set_tx_max.so,v 10.21 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tx_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_tx_max</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_tx_max(int tx_max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of active transactions that are supported by the
+environment. This value bounds the size of backing shared memory regions.
+Note that child transactions must be counted as active until their
+ultimate parent commits or aborts.
+<p>When there are more than the specified number of concurrent transactions,
+calls to <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a> will fail (until some active transactions
+complete). If no value is specified, a default value of 20 is used.
+<p>The DbEnv.set_tx_max interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_tx_max method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of active transactions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tx_max", one or more whitespace characters,
+and the number of transactions. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
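+<p>For example, the sketch below raises the limit to 100 concurrent
+transactions before opening a transactional environment; the value and
+home directory are illustrative, and the brief transaction shown relies
+on the DbEnv.txn_begin and DbTxn.commit interfaces documented elsewhere
+in this manual:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class TxMaxExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = new DbEnv(0);
+        dbenv.set_tx_max(100);
+        dbenv.open("/var/dbhome",
+            Db.DB_CREATE | Db.DB_INIT_TXN | Db.DB_INIT_LOG |
+            Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+        DbTxn txn = dbenv.txn_begin(null, 0);
+        // ... transactional work ...
+        txn.commit(0);
+        dbenv.close(0);
+    }
+}
+</pre>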
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_tx_recover.html b/bdb/docs/api_java/env_set_tx_recover.html
new file mode 100644
index 00000000000..c3c71e9ba0f
--- /dev/null
+++ b/bdb/docs/api_java/env_set_tx_recover.html
@@ -0,0 +1,84 @@
+<!--$Id: env_set_tx_recover.so,v 10.26 2000/07/09 19:13:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tx_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_tx_recover</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbTxnRecover
+{
+ public abstract int
+ tx_recover(DbEnv dbenv, Dbt log_rec, DbLsn lsn, int op);
+}
+public class DbEnv
+{
+ public void set_tx_recover(DbTxnRecover tx_recover)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set the application's method to be called during transaction abort
+and recovery. This method must return 0 on success and either
+<b>errno</b> or a value outside of the Berkeley DB error name space on
+failure. It takes four arguments:
+<p><dl compact>
+<p><dt>dbenv <dd>A Berkeley DB environment.
+<p><dt>log_rec<dd>A log record.
+<p><dt>lsn<dd>A log sequence number.
+<p><dt>op<dd>One of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_TXN_BACKWARD_ROLL">Db.DB_TXN_BACKWARD_ROLL</a><dd>The log is being read backward to determine which transactions have been
+committed and to abort those operations that were not, undo the operation
+described by the log record.
+<p><dt><a name="Db.DB_TXN_FORWARD_ROLL">Db.DB_TXN_FORWARD_ROLL</a><dd>The log is being played forward, redo the operation described by the log
+record.
+<p><dt><a name="Db.DB_TXN_ABORT">Db.DB_TXN_ABORT</a><dd>The log is being read backwards during a transaction abort, undo the
+operation described by the log record.
+</dl>
+</dl>
+<p>The DbEnv.set_tx_recover interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_tx_recover method throws an exception that encapsulates a non-zero error value on
+failure.
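+<p>An illustrative sketch of installing such a method follows; the
+callback body is hypothetical and simply accepts every record, and the
+open flags and home directory are assumptions:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class TxRecoverExample implements DbTxnRecover
+{
+    public int tx_recover(DbEnv dbenv, Dbt log_rec, DbLsn lsn, int op)
+    {
+        // Dispatch application-specific log records here, based on op.
+        return 0;
+    }
+
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = new DbEnv(0);
+        dbenv.set_tx_recover(new TxRecoverExample());
+        dbenv.open("/var/dbhome",
+            Db.DB_CREATE | Db.DB_INIT_TXN | Db.DB_INIT_LOG |
+            Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL | Db.DB_RECOVER, 0);
+        dbenv.close(0);
+    }
+}
+</pre>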
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_tx_timestamp.html b/bdb/docs/api_java/env_set_tx_timestamp.html
new file mode 100644
index 00000000000..93ae153a7e4
--- /dev/null
+++ b/bdb/docs/api_java/env_set_tx_timestamp.html
@@ -0,0 +1,64 @@
+<!--$Id: env_set_tx_timestamp.so,v 10.6 2000/12/21 18:33:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tx_timestamp</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_tx_timestamp</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_tx_timestamp(java.util.Date timestamp)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Recover to the time specified by <b>timestamp</b> rather than to the most
+current possible date.
+Note that only the seconds (not the milliseconds) of the <b>timestamp</b>
+are used.
+<p>Once a database environment has been upgraded to a new version of Berkeley DB
+involving a log format change (see <a href="../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>), it is no longer possible to recover
+to a specific time before that upgrade.
+<p>The DbEnv.set_tx_timestamp interface may only be used to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_tx_timestamp method throws an exception that encapsulates a non-zero error value on
+failure.
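+<p>For illustration, the sketch below asks recovery to stop at a point
+one hour in the past; the interval, home directory, and recovery flags
+are assumptions:
+<p><pre>
+import com.sleepycat.db.*;
+import java.util.Date;
+
+public class TxTimestampExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = new DbEnv(0);
+        // Only whole seconds of the Date are used.
+        Date target = new Date(System.currentTimeMillis() - 3600L * 1000L);
+        dbenv.set_tx_timestamp(target);
+        dbenv.open("/var/dbhome",
+            Db.DB_INIT_TXN | Db.DB_INIT_LOG | Db.DB_INIT_LOCK |
+            Db.DB_INIT_MPOOL | Db.DB_RECOVER, 0);
+        dbenv.close(0);
+    }
+}
+</pre>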
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>It is not possible to recover to the specified time using the
+log files currently present in the environment.
+</dl>
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_set_verbose.html b/bdb/docs/api_java/env_set_verbose.html
new file mode 100644
index 00000000000..8fbfb32ebac
--- /dev/null
+++ b/bdb/docs/api_java/env_set_verbose.html
@@ -0,0 +1,77 @@
+<!--$Id: env_set_verbose.so,v 10.23 2000/05/20 16:29:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_verbose</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.set_verbose</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_verbose(int which, boolean onoff)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.set_verbose method turns additional informational and
+debugging messages in the Berkeley DB message output on and off. If
+<b>onoff</b> is set to
+true,
+the additional messages are output.
+<p>The <b>which</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_VERB_CHKPOINT">Db.DB_VERB_CHKPOINT</a><dd>Display checkpoint location information when searching the log for
+checkpoints.
+<p><dt><a name="Db.DB_VERB_DEADLOCK">Db.DB_VERB_DEADLOCK</a><dd>Display additional information when doing deadlock detection.
+<p><dt><a name="Db.DB_VERB_RECOVERY">Db.DB_VERB_RECOVERY</a><dd>Display additional information when performing recovery.
+<p><dt><a name="Db.DB_VERB_WAITSFOR">Db.DB_VERB_WAITSFOR</a><dd>Display the waits-for table when doing deadlock detection.
+</dl>
+<p>The DbEnv.set_verbose interface may be used to configure Berkeley DB at any time
+during the life of the application.
+<p>The DbEnv.set_verbose method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's verbosity may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_verbose", one or more whitespace characters,
+and the interface <b>which</b> argument as a string, for example,
+"set_verbose DB_VERB_CHKPOINT". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
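+<p>For example, deadlock-detection tracing might be toggled on an
+already-created handle; this sketch is illustrative:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class VerboseExample
+{
+    // dbenv is assumed to be a DbEnv handle created elsewhere.
+    static void traceDeadlocks(DbEnv dbenv, boolean on) throws DbException
+    {
+        // Print additional deadlock-detection information while "on".
+        dbenv.set_verbose(Db.DB_VERB_DEADLOCK, on);
+    }
+}
+</pre>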
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_strerror.html b/bdb/docs/api_java/env_strerror.html
new file mode 100644
index 00000000000..e63e82e5f4c
--- /dev/null
+++ b/bdb/docs/api_java/env_strerror.html
@@ -0,0 +1,58 @@
+<!--$Id: env_strerror.so,v 8.4 2000/07/30 17:59:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.strerror</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.strerror</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static String strerror(int errcode);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.strerror method returns an error message string corresponding
+to the error number <b>errcode</b>. This interface is a superset of the
+ANSI C X3.159-1989 (ANSI C) <b>strerror</b>(3) interface. If the error number
+<b>errcode</b> is greater than or equal to 0, then the string returned by
+the system interface <b>strerror</b>(3) is returned. If the error
+number is less than 0, an error string appropriate to the corresponding
+Berkeley DB library error is returned. See
+<a href="../ref/program/errorret.html">Error returns to applications</a>
+for more information.
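+<p>For example (the choice of Db.DB_NOTFOUND is arbitrary):
+<p><pre>
+import com.sleepycat.db.*;
+
+public class StrerrorExample
+{
+    public static void main(String[] args)
+    {
+        // Berkeley DB error numbers are negative; system error numbers
+        // are zero or greater. Both map to readable strings.
+        System.out.println(DbEnv.strerror(Db.DB_NOTFOUND));
+    }
+}
+</pre>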
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/env_version.html b/bdb/docs/api_java/env_version.html
new file mode 100644
index 00000000000..ba0b1a034cf
--- /dev/null
+++ b/bdb/docs/api_java/env_version.html
@@ -0,0 +1,58 @@
+<!--$Id: env_version.so,v 10.7 2000/09/21 19:58:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.get_version_major</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.get_version_major</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static int get_version_major();
+public static int get_version_minor();
+public static int get_version_patch();
+public static String get_version_string();
+</pre></h3>
+<h1>Description</h1>
+<p>These methods return version information about the underlying Berkeley DB
+software. Berkeley DB is released with a major, minor, and patch number, which
+are returned by DbEnv.get_version_major,
+DbEnv.get_version_minor, and DbEnv.get_version_patch.
+A verbose version of this information, suitable for display, is returned
+by DbEnv.get_version_string.
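+<p>For example, an application might log the library version at startup;
+a minimal sketch:
+<p><pre>
+import com.sleepycat.db.*;
+
+public class VersionExample
+{
+    public static void main(String[] args)
+    {
+        System.out.println("Using " + DbEnv.get_version_string());
+        System.out.println("Major release: " + DbEnv.get_version_major());
+    }
+}
+</pre>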
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_strerror.html">DbEnv.strerror</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a>,
+and
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/except_class.html b/bdb/docs/api_java/except_class.html
new file mode 100644
index 00000000000..885d161d556
--- /dev/null
+++ b/bdb/docs/api_java/except_class.html
@@ -0,0 +1,52 @@
+<!--$Id: except_class.so,v 10.15 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbException</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbException extends Exception { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbException class and how it is used
+by the various Berkeley DB classes.
+<p>Most methods in the Berkeley DB classes throw an exception when an error occurs.
+A DbException object contains an informational string and an errno. The
+errno can be obtained using <a href="../api_java/get_errno.html">DbException.get_errno</a>. Since DbException
+inherits from java.lang.Exception, the string portion is available using
+toString().
+<p>Some methods may return non-zero values without issuing an exception.
+This occurs in situations that are not normally considered an error, but
+when some informational status is returned. For example, <a href="../api_java/db_get.html">Db.get</a>
+returns DB_NOTFOUND when a requested key does not appear in the database.
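+<p>A typical calling pattern, sketched for illustration (the Db handle
+and Dbt arguments are assumed to be set up elsewhere):
+<p><pre>
+import com.sleepycat.db.*;
+
+public class ExceptionExample
+{
+    // db is assumed to be an already-open Db handle.
+    static void tryGet(Db db, Dbt key, Dbt data)
+    {
+        try {
+            int ret = db.get(null, key, data, 0);
+            if (ret == Db.DB_NOTFOUND)
+                System.out.println("key not present");
+        } catch (DbException dbe) {
+            System.err.println("error " + dbe.get_errno() + ": " + dbe);
+        }
+    }
+}
+</pre>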
+<h3>Class</h3>
+<a href="../api_java/except_class.html">DbException</a>
+<h1>See Also</h1>
+<a href="../api_java/get_errno.html">DbException.get_errno</a>,
+<a href="../api_java/deadlock_class.html">DbDeadlockException</a>,
+<a href="../api_java/mem_class.html">DbMemoryException</a>
+and
+<a href="../api_java/runrec_class.html">DbRunRecoveryException</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/get_errno.html b/bdb/docs/api_java/get_errno.html
new file mode 100644
index 00000000000..5d3850d1f84
--- /dev/null
+++ b/bdb/docs/api_java/get_errno.html
@@ -0,0 +1,46 @@
+<!--$Id: get_errno.so,v 10.8 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbException.get_errno</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbException.get_errno</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get_errno();
+</pre></h3>
+<h1>Description</h1>
+<p>Most methods in the Db classes throw an exception when an error occurs.
+A DbException object contains an informational string and an errno.
+The errno can be obtained using DbException.get_errno.
+Since DbException inherits from java.lang.Exception, the string
+portion is available using toString().
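+<p>For example (a sketch only; the name <b>db</b> is illustrative and assumed
+to be an already-open Db handle), the error value carried by the exception
+can be reported along with its message text:
+<p><pre>
+import com.sleepycat.db.*;
+
+// Sketch only: db is assumed to have been opened elsewhere.
+try {
+	db.sync(0);
+} catch (DbException dbe) {
+	System.err.println("Db.sync: errno " + dbe.get_errno() +
+	    ", " + dbe.toString());
+}
+</pre>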
+<h3>Class</h3>
+<a href="../api_java/except_class.html">DbException</a>
+<h1>See Also</h1>
+<a href="../api_java/get_errno.html">DbException.get_errno</a>,
+<a href="../api_java/deadlock_class.html">DbDeadlockException</a>,
+<a href="../api_java/mem_class.html">DbMemoryException</a>
+and
+<a href="../api_java/runrec_class.html">DbRunRecoveryException</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/java_index.html b/bdb/docs/api_java/java_index.html
new file mode 100644
index 00000000000..c36227edeba
--- /dev/null
+++ b/bdb/docs/api_java/java_index.html
@@ -0,0 +1,131 @@
+<!--$Id: cxx_index.so,v 10.65 2000/12/21 19:11:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Java Interface by Class</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Java Interface by Class</h1>
+<p><table border=1 align=center>
+<tr><th>Class</th><th>Method</th><th>Description</th></tr>
+<tr><td><a href="../api_java/dbenv_class.html">DbEnv</a></td><td><br></td><td>Berkeley DB Environment Class</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_close.html">DbEnv.close</a></td><td>Close an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_open.html">DbEnv.open</a></td><td>Open an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_remove.html">DbEnv.remove</a></td><td>Remove an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_feedback.html">DbEnv.set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_flags.html">DbEnv.set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_dir.html">DbEnv.set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a></td><td>Set maximum number of locks (<b>Deprecated</b>)</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_mutexlocks.html">DbEnv.set_mutexlocks</a></td><td>Turn off mutual exclusion locking</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_pageyield.html">DbEnv.set_pageyield</a></td><td>Yield the processor on each page access</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_panicstate.html">DbEnv.set_panicstate</a></td><td>Reset panic state</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_rec_init.html">DbEnv.set_recovery_init</a></td><td>Set recovery initialization callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_region_init.html">DbEnv.set_region_init</a></td><td>Fault in shared regions on initial access</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_server.html">DbEnv.set_server</a></td><td>Establish server connection</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_shm_key.html">DbEnv.set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tas_spins.html">DbEnv.set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tmp_dir.html">DbEnv.set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tx_recover.html">DbEnv.set_tx_recover</a></td><td>Set transaction abort recover function</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_strerror.html">DbEnv.strerror</a></td><td>Error strings</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_detect.html">DbEnv.lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_get.html">DbEnv.lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_id.html">DbEnv.lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_stat.html">DbEnv.lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_archive.html">DbEnv.log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_compare.html">DbEnv.log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_file.html">DbEnv.log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_flush.html">DbEnv.log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_get.html">DbEnv.log_get</a></td><td>Get a log record</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_put.html">DbEnv.log_put</a></td><td>Write a log record</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_register.html">DbEnv.log_register</a></td><td>Register a file name with the log manager</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_stat.html">DbEnv.log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_unregister.html">DbEnv.log_unregister</a></td><td>Unregister a file name with the log manager</td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a></td><td>Return buffer pool statistics.</td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_stat.html">DbEnv.memp_stat</a></td><td>Return buffer pool statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a></td><td>Trickle flush pages from a buffer pool</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_begin.html">DbEnv.txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_stat.html">DbEnv.txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_version.html">DbEnv.version</a></td><td>Return version information</td></tr>
+<tr><td><a href="../api_java/db_class.html">Db</a></td><td><br></td><td>Berkeley DB Access Method Class</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_close.html">Db.close</a></td><td>Close a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_cursor.html">Db.cursor</a></td><td>Open a cursor into a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_del.html">Db.del</a></td><td>Delete items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_fd.html">Db.fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get.html">Db.get</a></td><td>Get items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get_type.html">Db.get_type</a></td><td>Return the database type</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_join.html">Db.join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_key_range.html">Db.key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_open.html">Db.open</a></td><td>Open a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_put.html">Db.put</a></td><td>Store items into a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_remove.html">Db.remove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_rename.html">Db.rename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_append_recno.html">Db.set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_compare.html">Db.set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_prefix.html">Db.set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_dup_compare.html">Db.set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_errcall.html">Db.set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_feedback.html">Db.set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_flags.html">Db.set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_h_hash.html">Db.set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_lorder.html">Db.set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_len.html">Db.set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_source.html">Db.set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_stat.html">Db.stat</a></td><td>Return database statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_sync.html">Db.sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_upgrade.html">Db.upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_verify.html">Db.verify</a></td><td>Verify/upgrade a database</td></tr>
+<tr><td><a href="../api_java/dbc_class.html">Dbc</a></td><td><br></td><td>Berkeley DB Cursor Class</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_close.html">Dbc.close</a></td><td>Close a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_count.html">Dbc.count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_del.html">Dbc.del</a></td><td>Delete by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_dup.html">Dbc.dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_get.html">Dbc.get</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_put.html">Dbc.put</a></td><td>Store by cursor</td></tr>
+<tr><td><a href="../api_java/dbt_class.html">Dbt</a></td><td><br></td><td>Key/Data Encoding Class</td></tr>
+<tr><td><a href="../api_java/lock_class.html">DbLock</a></td><td><br></td><td>Lock Class</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_put.html">DbLock.put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_java/lsn_class.html">DbLsn</a></td><td><br></td><td>Log Sequence Number Class</td></tr>
+<tr><td><a href="../api_java/txn_class.html">DbTxn</a></td><td><br></td><td>Transaction Class</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_abort.html">DbTxn.abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_commit.html">DbTxn.commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_id.html">DbTxn.id</a></td><td>Return a transaction ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_prepare.html">DbTxn.prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><a href="../api_java/except_class.html">DbException</a></td><td><br></td><td>Exception Class for Berkeley DB Activity</td></tr>
+<tr><td><br></td><td><a href="../api_java/get_errno.html">DbException.get_errno</a></td><td>Get the error value</td></tr>
+<tr><td><a href="../api_java/deadlock_class.html">DbDeadlockException</a></td><td><br></td><td>Exception Class for deadlocks</td></tr>
+<tr><td><a href="../api_java/mem_class.html">DbMemoryException</a></td><td><br></td><td>Exception Class for insufficient memory</td></tr>
+<tr><td><a href="../api_java/runrec_class.html">DbRunRecoveryException</a></td><td><br></td><td>Exception Class for failures requiring recovery</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/java_pindex.html b/bdb/docs/api_java/java_pindex.html
new file mode 100644
index 00000000000..5ac50e4791b
--- /dev/null
+++ b/bdb/docs/api_java/java_pindex.html
@@ -0,0 +1,478 @@
+<html>
+<head>
+<title>Berkeley DB: Java Interface Index</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Java Interface Index</h1>
+<center>
+<table cellspacing=0 cellpadding=0>
+<tr><td align=right> configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#5">1.85</a> API compatibility</td></tr>
+<tr><td align=right> building a utility to dump Berkeley DB </td><td><a href="../ref/build_unix/conf.html#7">1.85</a> databases</td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.2.0/intro.html#2">2.0</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.0/intro.html#2">3.0</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.1/intro.html#2">3.1</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.2/intro.html#2">3.2</a></td></tr>
+<tr><td align=right> selecting an </td><td><a href="../ref/am_conf/select.html#2">access</a> method</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/intro.html#2">access</a> methods</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/aix.html#2">AIX</a></td></tr>
+<tr><td align=right> programmatic </td><td><a href="../ref/arch/apis.html#2">APIs</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_archive.html#3">archive</a> log files</td></tr>
+<tr><td align=right> </td><td><a href="../utility/berkeley_db_svc.html#2">berkeley_db_svc</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/intro.html#2">building</a> for UNIX</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/notes.html#2">building</a> for UNIX FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/intro.html#2">building</a> for VxWorks</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/faq.html#2">building</a> for VxWorks FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/intro.html#2">building</a> for Win32</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/faq.html#2">building</a> for Windows FAQ</td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/byteorder.html#2">byte</a> order</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/byteorder.html#2">byte</a> ordering</td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#6">C++</a> API</td></tr>
+<tr><td align=right> flushing the database </td><td><a href="../ref/am/sync.html#2">cache</a></td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/cachesize.html#2">cache</a> size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/archival.html#3">catastrophic</a> recovery</td></tr>
+<tr><td align=right>Patches, Updates and </td><td><a href="http://www.sleepycat.com/update/index.html">Change</a> logs</td></tr>
+<tr><td align=right> utility to take </td><td><a href="../utility/db_checkpoint.html#3">checkpoints</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curclose.html#2">closing</a> a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/close.html#2">closing</a> a database</td></tr>
+<tr><td align=right> specifying a Btree </td><td><a href="../ref/am_conf/bt_compare.html#2">comparison</a> function</td></tr>
+<tr><td align=right> changing </td><td><a href="../ref/build_unix/flags.html#2">compile</a> or load options</td></tr>
+<tr><td align=right> </td><td><a href="../ref/cam/intro.html#2">Concurrent</a> Data Store</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/conf.html#2">configuring</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right> recovering </td><td><a href="../ref/am/verify.html#4">corrupted</a> databases</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/count.html#2">counting</a> data items for a key</td></tr>
+<tr><td align=right> closing a </td><td><a href="../ref/am/curclose.html#3">cursor</a></td></tr>
+<tr><td align=right> deleting records with a </td><td><a href="../ref/am/curdel.html#3">cursor</a></td></tr>
+<tr><td align=right> duplicating a </td><td><a href="../ref/am/curdup.html#3">cursor</a></td></tr>
+<tr><td align=right> retrieving records with a </td><td><a href="../ref/am/curget.html#3">cursor</a></td></tr>
+<tr><td align=right> storing records with a </td><td><a href="../ref/am/curput.html#3">cursor</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/stability.html#2">cursor</a> stability</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/cursor.html#2">cursors</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_java/dbt_class.html#data">data</a></td></tr>
+<tr><td align=right> utility to upgrade </td><td><a href="../utility/db_upgrade.html#3">database</a> files</td></tr>
+<tr><td align=right> utility to verify </td><td><a href="../utility/db_verify.html#3">database</a> files</td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_class.html#2">Db</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_archive.html#2">db_archive</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbc_class.html#2">Dbc</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbc_close.html#2">Dbc.close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbc_count.html#2">Dbc.count</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbc_del.html#2">Dbc.del</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbc_dup.html#2">Dbc.dup</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbc_get.html#2">Dbc.get</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_checkpoint.html#2">db_checkpoint</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_close.html#2">Db.close</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#DB_CONFIG">DB_CONFIG</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbc_put.html#2">Dbc.put</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_cursor.html#2">Db.cursor</a></td></tr>
+<tr><td align=right>Dbc.put</td><td><a href="../api_java/dbc_put.html#Db.DB_AFTER">Db.DB_AFTER</a></td></tr>
+<tr><td align=right>Db.verify</td><td><a href="../api_java/db_verify.html#Db.DB_AGGRESSIVE">Db.DB_AGGRESSIVE</a></td></tr>
+<tr><td align=right>Db.put</td><td><a href="../api_java/db_put.html#Db.DB_APPEND">Db.DB_APPEND</a></td></tr>
+<tr><td align=right>DbEnv.log_archive</td><td><a href="../api_java/log_archive.html#Db.DB_ARCH_ABS">Db.DB_ARCH_ABS</a></td></tr>
+<tr><td align=right>DbEnv.log_archive</td><td><a href="../api_java/log_archive.html#Db.DB_ARCH_DATA">Db.DB_ARCH_DATA</a></td></tr>
+<tr><td align=right>DbEnv.log_archive</td><td><a href="../api_java/log_archive.html#Db.DB_ARCH_LOG">Db.DB_ARCH_LOG</a></td></tr>
+<tr><td align=right>Dbc.put</td><td><a href="../api_java/dbc_put.html#Db.DB_BEFORE">Db.DB_BEFORE</a></td></tr>
+<tr><td align=right>Db.stat</td><td><a href="../api_java/db_stat.html#Db.DB_CACHED_COUNTS">Db.DB_CACHED_COUNTS</a></td></tr>
+<tr><td align=right>DbEnv.set_flags</td><td><a href="../api_java/env_set_flags.html#Db.DB_CDB_ALLDB">Db.DB_CDB_ALLDB</a></td></tr>
+<tr><td align=right>DbEnv.log_get</td><td><a href="../api_java/log_get.html#Db.DB_CHECKPOINT">Db.DB_CHECKPOINT</a></td></tr>
+<tr><td align=right>DbEnv.log_put</td><td><a href="../api_java/log_put.html#Db.DB_CHECKPOINT">Db.DB_CHECKPOINT</a></td></tr>
+<tr><td align=right>DbEnv</td><td><a href="../api_java/dbenv_class.html#Db.DB_CLIENT">Db.DB_CLIENT</a></td></tr>
+<tr><td align=right>Db.get</td><td><a href="../api_java/db_get.html#Db.DB_CONSUME">Db.DB_CONSUME</a></td></tr>
+<tr><td align=right>Db.get</td><td><a href="../api_java/db_get.html#Db.DB_CONSUME_WAIT">Db.DB_CONSUME_WAIT</a></td></tr>
+<tr><td align=right>Db.open</td><td><a href="../api_java/db_open.html#Db.DB_CREATE">Db.DB_CREATE</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_CREATE">Db.DB_CREATE</a></td></tr>
+<tr><td align=right>DbEnv.log_put</td><td><a href="../api_java/log_put.html#Db.DB_CURLSN">Db.DB_CURLSN</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_CURRENT">Db.DB_CURRENT</a></td></tr>
+<tr><td align=right>Dbc.put</td><td><a href="../api_java/dbc_put.html#Db.DB_CURRENT">Db.DB_CURRENT</a></td></tr>
+<tr><td align=right>DbEnv.log_get</td><td><a href="../api_java/log_get.html#Db.DB_CURRENT">Db.DB_CURRENT</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_java/dbt_class.html#Db.DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_java/dbt_class.html#Db.DB_DBT_PARTIAL">Db.DB_DBT_PARTIAL</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_java/dbt_class.html#Db.DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a></td></tr>
+<tr><td align=right>Dbt</td><td><a href="../api_java/dbt_class.html#Db.DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a></td></tr>
+<tr><td align=right>Db.set_flags</td><td><a href="../api_java/db_set_flags.html#Db.DB_DUP">Db.DB_DUP</a></td></tr>
+<tr><td align=right>Db.set_flags</td><td><a href="../api_java/db_set_flags.html#Db.DB_DUPSORT">Db.DB_DUPSORT</a></td></tr>
+<tr><td align=right>Db.upgrade</td><td><a href="../api_java/db_upgrade.html#Db.DB_DUPSORT">Db.DB_DUPSORT</a></td></tr>
+<tr><td align=right>Db.open</td><td><a href="../api_java/db_open.html#Db.DB_EXCL">Db.DB_EXCL</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_FIRST">Db.DB_FIRST</a></td></tr>
+<tr><td align=right>DbEnv.log_get</td><td><a href="../api_java/log_get.html#Db.DB_FIRST">Db.DB_FIRST</a></td></tr>
+<tr><td align=right>DbEnv.log_put</td><td><a href="../api_java/log_put.html#Db.DB_FLUSH">Db.DB_FLUSH</a></td></tr>
+<tr><td align=right>DbEnv.remove</td><td><a href="../api_java/env_remove.html#Db.DB_FORCE">Db.DB_FORCE</a></td></tr>
+<tr><td align=right>DbEnv.txn_checkpoint</td><td><a href="../api_java/txn_checkpoint.html#Db.DB_FORCE">Db.DB_FORCE</a></td></tr>
+<tr><td align=right>Db.get</td><td><a href="../api_java/db_get.html#Db.DB_GET_BOTH">Db.DB_GET_BOTH</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_GET_BOTH">Db.DB_GET_BOTH</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_GET_RECNO">Db.DB_GET_RECNO</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_INIT_CDB">Db.DB_INIT_CDB</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_INIT_LOCK">Db.DB_INIT_LOCK</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_INIT_LOG">Db.DB_INIT_LOG</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_INIT_MPOOL">Db.DB_INIT_MPOOL</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_INIT_TXN">Db.DB_INIT_TXN</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_JOINENV">Db.DB_JOINENV</a></td></tr>
+<tr><td align=right>Db.join</td><td><a href="../api_java/db_join.html#Db.DB_JOIN_ITEM">Db.DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_JOIN_ITEM">Db.DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>Db.join</td><td><a href="../api_java/db_join.html#Db.DB_JOIN_NOSORT">Db.DB_JOIN_NOSORT</a></td></tr>
+<tr><td align=right>Dbc.put</td><td><a href="../api_java/dbc_put.html#Db.DB_KEYFIRST">Db.DB_KEYFIRST</a></td></tr>
+<tr><td align=right>Dbc.put</td><td><a href="../api_java/dbc_put.html#Db.DB_KEYLAST">Db.DB_KEYLAST</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_LAST">Db.DB_LAST</a></td></tr>
+<tr><td align=right>DbEnv.log_get</td><td><a href="../api_java/log_get.html#Db.DB_LAST">Db.DB_LAST</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect</td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_CONFLICT">Db.DB_LOCK_CONFLICT</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_LOCKDOWN">Db.DB_LOCKDOWN</a></td></tr>
+<tr><td align=right>DbEnv.lock_get</td><td><a href="../api_java/lock_get.html#Db.DB_LOCK_NOTGRANTED">Db.DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>DbEnv.lock_get</td><td><a href="../api_java/lock_get.html#Db.DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_NEXT">Db.DB_NEXT</a></td></tr>
+<tr><td align=right>DbEnv.log_get</td><td><a href="../api_java/log_get.html#Db.DB_NEXT">Db.DB_NEXT</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_NEXT_DUP">Db.DB_NEXT_DUP</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_NEXT_NODUP">Db.DB_NEXT_NODUP</a></td></tr>
+<tr><td align=right>Db.put</td><td><a href="../api_java/db_put.html#Db.DB_NODUPDATA">Db.DB_NODUPDATA</a></td></tr>
+<tr><td align=right>Dbc.put</td><td><a href="../api_java/dbc_put.html#Db.DB_NODUPDATA">Db.DB_NODUPDATA</a></td></tr>
+<tr><td align=right>Db.open</td><td><a href="../api_java/db_open.html#Db.DB_NOMMAP">Db.DB_NOMMAP</a></td></tr>
+<tr><td align=right>DbEnv.set_flags</td><td><a href="../api_java/env_set_flags.html#Db.DB_NOMMAP">Db.DB_NOMMAP</a></td></tr>
+<tr><td align=right>Db.verify</td><td><a href="../api_java/db_verify.html#Db.DB_NOORDERCHK">Db.DB_NOORDERCHK</a></td></tr>
+<tr><td align=right>Db.put</td><td><a href="../api_java/db_put.html#Db.DB_NOOVERWRITE">Db.DB_NOOVERWRITE</a></td></tr>
+<tr><td align=right>Db.close</td><td><a href="../api_java/db_close.html#Db.DB_NOSYNC">Db.DB_NOSYNC</a></td></tr>
+<tr><td align=right>Db.open</td><td><a href="../api_java/db_open.html#Db.DB_OLD_VERSION">Db.DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>Db.upgrade</td><td><a href="../api_java/db_upgrade.html#Db.DB_OLD_VERSION">Db.DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>Db.verify</td><td><a href="../api_java/db_verify.html#Db.DB_ORDERCHKONLY">Db.DB_ORDERCHKONLY</a></td></tr>
+<tr><td align=right>Dbc.dup</td><td><a href="../api_java/dbc_dup.html#Db.DB_POSITION">Db.DB_POSITION</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_PREV">Db.DB_PREV</a></td></tr>
+<tr><td align=right>DbEnv.log_get</td><td><a href="../api_java/log_get.html#Db.DB_PREV">Db.DB_PREV</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_PREV_NODUP">Db.DB_PREV_NODUP</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_PRIVATE">Db.DB_PRIVATE</a></td></tr>
+<tr><td align=right>Db.open</td><td><a href="../api_java/db_open.html#Db.DB_RDONLY">Db.DB_RDONLY</a></td></tr>
+<tr><td align=right>Db.set_flags</td><td><a href="../api_java/db_set_flags.html#Db.DB_RECNUM">Db.DB_RECNUM</a></td></tr>
+<tr><td align=right>Db.stat</td><td><a href="../api_java/db_stat.html#Db.DB_RECORDCOUNT">Db.DB_RECORDCOUNT</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_RECOVER">Db.DB_RECOVER</a></td></tr>
+<tr><td align=right>DbEnv.set_feedback</td><td><a href="../api_java/env_set_feedback.html#Db.DB_RECOVER">Db.DB_RECOVER</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_RECOVER_FATAL">Db.DB_RECOVER_FATAL</a></td></tr>
+<tr><td align=right>Db.set_flags</td><td><a href="../api_java/db_set_flags.html#Db.DB_RENUMBER">Db.DB_RENUMBER</a></td></tr>
+<tr><td align=right>Db.set_flags</td><td><a href="../api_java/db_set_flags.html#Db.DB_REVSPLITOFF">Db.DB_REVSPLITOFF</a></td></tr>
+<tr><td align=right>Db.get</td><td><a href="../api_java/db_get.html#Db.DB_RMW">Db.DB_RMW</a></td></tr>
+<tr><td align=right>Db.join</td><td><a href="../api_java/db_join.html#Db.DB_RMW">Db.DB_RMW</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_RMW">Db.DB_RMW</a></td></tr>
+<tr><td align=right>Db.verify</td><td><a href="../api_java/db_verify.html#Db.DB_SALVAGE">Db.DB_SALVAGE</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_SET">Db.DB_SET</a></td></tr>
+<tr><td align=right>DbEnv.log_get</td><td><a href="../api_java/log_get.html#Db.DB_SET">Db.DB_SET</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_SET_RANGE">Db.DB_SET_RANGE</a></td></tr>
+<tr><td align=right>Db.get</td><td><a href="../api_java/db_get.html#Db.DB_SET_RECNO">Db.DB_SET_RECNO</a></td></tr>
+<tr><td align=right>Dbc.get</td><td><a href="../api_java/dbc_get.html#Db.DB_SET_RECNO">Db.DB_SET_RECNO</a></td></tr>
+<tr><td align=right>Db.set_flags</td><td><a href="../api_java/db_set_flags.html#Db.DB_SNAPSHOT">Db.DB_SNAPSHOT</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_SYSTEM_MEM">Db.DB_SYSTEM_MEM</a></td></tr>
+<tr><td align=right>Db.open</td><td><a href="../api_java/db_open.html#Db.DB_THREAD">Db.DB_THREAD</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_THREAD">Db.DB_THREAD</a></td></tr>
+<tr><td align=right>Db.open</td><td><a href="../api_java/db_open.html#Db.DB_TRUNCATE">Db.DB_TRUNCATE</a></td></tr>
+<tr><td align=right>DbEnv.set_tx_recover</td><td><a href="../api_java/env_set_tx_recover.html#Db.DB_TXN_ABORT">Db.DB_TXN_ABORT</a></td></tr>
+<tr><td align=right>DbEnv.set_tx_recover</td><td><a href="../api_java/env_set_tx_recover.html#Db.DB_TXN_BACKWARD_ROLL">Db.DB_TXN_BACKWARD_ROLL</a></td></tr>
+<tr><td align=right>DbEnv.set_tx_recover</td><td><a href="../api_java/env_set_tx_recover.html#Db.DB_TXN_FORWARD_ROLL">Db.DB_TXN_FORWARD_ROLL</a></td></tr>
+<tr><td align=right>DbEnv.set_flags</td><td><a href="../api_java/env_set_flags.html#Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbEnv.txn_begin</td><td><a href="../api_java/txn_begin.html#Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbTxn.commit</td><td><a href="../api_java/txn_commit.html#Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbEnv.txn_begin</td><td><a href="../api_java/txn_begin.html#Db.DB_TXN_NOWAIT">Db.DB_TXN_NOWAIT</a></td></tr>
+<tr><td align=right>DbEnv.txn_begin</td><td><a href="../api_java/txn_begin.html#Db.DB_TXN_SYNC">Db.DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DbTxn.commit</td><td><a href="../api_java/txn_commit.html#Db.DB_TXN_SYNC">Db.DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>Db.set_feedback</td><td><a href="../api_java/db_set_feedback.html#Db.DB_UPGRADE">Db.DB_UPGRADE</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DbEnv.remove</td><td><a href="../api_java/env_remove.html#Db.DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DbEnv.open</td><td><a href="../api_java/env_open.html#Db.DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DbEnv.remove</td><td><a href="../api_java/env_remove.html#Db.DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose</td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_CHKPOINT">Db.DB_VERB_CHKPOINT</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose</td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_DEADLOCK">Db.DB_VERB_DEADLOCK</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose</td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_RECOVERY">Db.DB_VERB_RECOVERY</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose</td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_WAITSFOR">Db.DB_VERB_WAITSFOR</a></td></tr>
+<tr><td align=right>Db.set_feedback</td><td><a href="../api_java/db_set_feedback.html#Db.DB_VERIFY">Db.DB_VERIFY</a></td></tr>
+<tr><td align=right>Db.cursor</td><td><a href="../api_java/db_cursor.html#Db.DB_WRITECURSOR">Db.DB_WRITECURSOR</a></td></tr>
+<tr><td align=right>Db</td><td><a href="../api_java/db_class.html#Db.DB_XA_CREATE">Db.DB_XA_CREATE</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_deadlock.html#2">db_deadlock</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/deadlock_class.html#2">DbDeadlockException</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_del.html#2">Db.del</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_dump.html#2">db_dump</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbenv_class.html#2">DbEnv</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_close.html#2">DbEnv.close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_version.html#2">DbEnv.get_version_major</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/lock_detect.html#2">DbEnv.lock_detect</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/lock_get.html#2">DbEnv.lock_get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/lock_id.html#2">DbEnv.lock_id</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/lock_stat.html#2">DbEnv.lock_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/lock_vec.html#2">DbEnv.lock_vec</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_archive.html#2">DbEnv.log_archive</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_compare.html#2">DbEnv.log_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_file.html#2">DbEnv.log_file</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_flush.html#2">DbEnv.log_flush</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_get.html#2">DbEnv.log_get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_put.html#2">DbEnv.log_put</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_register.html#2">DbEnv.log_register</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_stat.html#2">DbEnv.log_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/log_unregister.html#2">DbEnv.log_unregister</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_register.html#2">DbEnv.memp_register</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_stat.html#2">DbEnv.memp_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_sync.html#2">DbEnv.memp_sync</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_trickle.html#2">DbEnv.memp_trickle</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_open.html#2">DbEnv.open</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_remove.html#2">DbEnv.remove</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_cachesize.html#2">DbEnv.set_cachesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_data_dir.html#2">DbEnv.set_data_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_errcall.html#2">DbEnv.set_errcall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_error_stream.html#2">DbEnv.set_error_stream</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_errpfx.html#2">DbEnv.set_errpfx</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_feedback.html#2">DbEnv.set_feedback</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_flags.html#2">DbEnv.set_flags</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lg_bsize.html#2">DbEnv.set_lg_bsize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lg_dir.html#2">DbEnv.set_lg_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lg_max.html#2">DbEnv.set_lg_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lk_conflicts.html#2">DbEnv.set_lk_conflicts</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lk_detect.html#2">DbEnv.set_lk_detect</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lk_max.html#2">DbEnv.set_lk_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lk_max_lockers.html#2">DbEnv.set_lk_max_lockers</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lk_max_locks.html#2">DbEnv.set_lk_max_locks</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_lk_max_objects.html#2">DbEnv.set_lk_max_objects</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_mp_mmapsize.html#2">DbEnv.set_mp_mmapsize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_mutexlocks.html#2">DbEnv.set_mutexlocks</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_pageyield.html#2">DbEnv.set_pageyield</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_panicstate.html#2">DbEnv.set_panicstate</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_rec_init.html#2">DbEnv.set_recovery_init</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_region_init.html#2">DbEnv.set_region_init</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_server.html#2">DbEnv.set_server</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_shm_key.html#2">DbEnv.set_shm_key</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_tas_spins.html#2">DbEnv.set_tas_spins</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_tmp_dir.html#2">DbEnv.set_tmp_dir</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_tx_max.html#2">DbEnv.set_tx_max</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_tx_recover.html#2">DbEnv.set_tx_recover</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_tx_timestamp.html#2">DbEnv.set_tx_timestamp</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_set_verbose.html#2">DbEnv.set_verbose</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/env_strerror.html#2">DbEnv.strerror</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/txn_begin.html#2">DbEnv.txn_begin</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/txn_checkpoint.html#2">DbEnv.txn_checkpoint</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/txn_stat.html#2">DbEnv.txn_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/except_class.html#2">DbException</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/get_errno.html#2">DbException.get_errno</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_fd.html#2">Db.fd</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_get.html#2">Db.get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_get_byteswapped.html#2">Db.get_byteswapped</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_get_type.html#2">Db.get_type</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#DB_HOME">DB_HOME</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#db_home">db_home</a></td></tr>
+<tr><td align=right> Db.close </td><td><a href="../api_java/db_close.html#3">DB_INCOMPLETE</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_join.html#2">Db.join</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_key_range.html#2">Db.key_range</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_load.html#2">db_load</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/lock_class.html#2">DbLock</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect</td><td><a href="../api_java/env_set_lk_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect</td><td><a href="../api_java/env_set_lk_detect.html#DB_LOCK_OLDEST">DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/lock_put.html#2">DbLock.put</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect</td><td><a href="../api_java/env_set_lk_detect.html#DB_LOCK_RANDOM">DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect</td><td><a href="../api_java/env_set_lk_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/lsn_class.html#2">DbLsn</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/mem_class.html#2">DbMemoryException</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_fclose.html#2">DbMpoolFile.close</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_fget.html#2">DbMpoolFile.get</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_fopen.html#2">DbMpoolFile.open</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_fput.html#2">DbMpoolFile.put</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_fset.html#2">DbMpoolFile.set</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/memp_fsync.html#2">DbMpoolFile.sync</a></td></tr>
+<tr><td align=right>DbEnv.set_server</td><td><a href="../api_java/env_set_server.html#DB_NOSERVER">DB_NOSERVER</a></td></tr>
+<tr><td align=right>DbEnv.set_server</td><td><a href="../api_java/env_set_server.html#DB_NOSERVER_ID">DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_open.html#2">Db.open</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_printlog.html#2">db_printlog</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_put.html#2">Db.put</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_recover.html#2">db_recover</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_remove.html#2">Db.remove</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_rename.html#2">Db.rename</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/runrec_class.html#2">DbRunRecoveryException</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_append_recno.html#2">Db.set_append_recno</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_bt_compare.html#2">Db.set_bt_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_bt_minkey.html#2">Db.set_bt_minkey</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_bt_prefix.html#2">Db.set_bt_prefix</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_cachesize.html#2">Db.set_cachesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_dup_compare.html#2">Db.set_dup_compare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_errcall.html#2">Db.set_errcall</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_errpfx.html#2">Db.set_errpfx</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_feedback.html#2">Db.set_feedback</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_flags.html#2">Db.set_flags</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_h_ffactor.html#2">Db.set_h_ffactor</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_h_hash.html#2">Db.set_h_hash</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_h_nelem.html#2">Db.set_h_nelem</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_lorder.html#2">Db.set_lorder</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_pagesize.html#2">Db.set_pagesize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_q_extentsize.html#2">Db.set_q_extentsize</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_re_delim.html#2">Db.set_re_delim</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_re_len.html#2">Db.set_re_len</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_re_pad.html#2">Db.set_re_pad</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_set_re_source.html#2">Db.set_re_source</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_stat.html#2">Db.stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_stat.html#2">db_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_sync.html#2">Db.sync</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbt_class.html#2">Dbt</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/txn_class.html#2">DbTxn</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/txn_abort.html#2">DbTxn.abort</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/txn_commit.html#2">DbTxn.commit</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/txn_id.html#2">DbTxn.id</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/txn_prepare.html#2">DbTxn.prepare</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_upgrade.html#2">Db.upgrade</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_upgrade.html#2">db_upgrade</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/db_verify.html#2">Db.verify</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_verify.html#2">db_verify</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/dead.html#2">deadlocks</a></td></tr>
+<tr><td align=right> utility to detect </td><td><a href="../utility/db_deadlock.html#3">deadlocks</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/debug/common.html#2">debugging</a> applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/delete.html#2">deleting</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curdel.html#2">deleting</a> records with a cursor</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--disable-bigfile">--disable-bigfile</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/diskspace.html#2">disk</a> space requirements</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_dump.html#3">dump</a> databases as text files</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/dup.html#2">duplicate</a> data items</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curdup.html#2">duplicating</a> a cursor</td></tr>
+<tr><td align=right> configuring </td><td><a href="../ref/build_unix/conf.html#9">dynamic</a> shared libraries</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-compat185">--enable-compat185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-cxx">--enable-cxx</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug">--enable-debug</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug_rop">--enable-debug_rop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug_wop">--enable-debug_wop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-dump185">--enable-dump185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-dynamic">--enable-dynamic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-java">--enable-java</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-rpc">--enable-rpc</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-shared">--enable-shared</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-tcl">--enable-tcl</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-test">--enable-test</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a></td></tr>
+<tr><td align=right> byte </td><td><a href="../ref/program/byteorder.html#3">endian</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/env/create.html#2">environment</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/environ.html#2">environment</a> variables</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/error.html#2">error</a> handling</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/errorret.html#3">error</a> name space</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/errorret.html#2">error</a> returns</td></tr>
+<tr><td align=right> </td><td><a href="../ref/install/file.html#2">/etc/magic</a></td></tr>
+<tr><td align=right> selecting a Queue </td><td><a href="../ref/am_conf/extentsize.html#2">extent</a> size</td></tr>
+<tr><td align=right> Java </td><td><a href="../ref/java/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right> Tcl </td><td><a href="../ref/tcl/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right> configuring without large </td><td><a href="../ref/build_unix/conf.html#4">file</a> support</td></tr>
+<tr><td align=right> </td><td><a href="../ref/install/file.html#3">file</a> utility</td></tr>
+<tr><td align=right> recovery and </td><td><a href="../ref/transapp/filesys.html#2">filesystem</a> operations</td></tr>
+<tr><td align=right> remote </td><td><a href="../ref/env/remote.html#2">filesystems</a></td></tr>
+<tr><td align=right> page </td><td><a href="../ref/am_conf/h_ffactor.html#2">fill</a> factor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/freebsd.html#2">FreeBSD</a></td></tr>
+<tr><td align=right> Berkeley DB </td><td><a href="../ref/program/scope.html#3">free-threaded</a> handles</td></tr>
+<tr><td align=right> specifying a database </td><td><a href="../ref/am_conf/h_hash.html#2">hash</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/h_nelem.html#2">hash</a> table size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/hpux.html#2">HP-UX</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/install.html#2">installing</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/compatible.html#2">interface</a> compatibility</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/irix.html#2">IRIX</a></td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#10">Java</a> API</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/compat.html#2">Java</a> compatibility</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/conf.html#2">Java</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/faq.html#3">Java</a> FAQ</td></tr>
+<tr><td align=right> logical </td><td><a href="../ref/am/join.html#2">join</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_java/dbt_class.html#3">key/data</a> pairs</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/program/dbsizes.html#2">limits</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/linux.html#2">Linux</a></td></tr>
+<tr><td align=right> changing compile or </td><td><a href="../ref/build_unix/flags.html#3">load</a> options</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_load.html#3">load</a> text files into databases</td></tr>
+<tr><td align=right> standard </td><td><a href="../ref/lock/stdmode.html#2">lock</a> modes</td></tr>
+<tr><td align=right> page-level </td><td><a href="../ref/lock/page.html#2">locking</a></td></tr>
+<tr><td align=right> two-phase </td><td><a href="../ref/lock/twopl.html#2">locking</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/nondb.html#2">locking</a> and non-Berkeley DB applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/config.html#2">locking</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/am_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right> Berkeley DB Concurrent Data Store </td><td><a href="../ref/lock/cam_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/intro.html#2">locking</a> introduction</td></tr>
+<tr><td align=right> sizing the </td><td><a href="../ref/lock/max.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/notxn.html#2">locking</a> without transactions</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/limits.html#2">log</a> file limits</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/logfile.html#2">log</a> file removal</td></tr>
+<tr><td align=right> utility to display </td><td><a href="../utility/db_printlog.html#3">log</a> files as text</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/config.html#2">logging</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/intro.html#2">logging</a> introduction</td></tr>
+<tr><td align=right> </td><td><a href="../ref/mp/config.html#2">memory</a> pool configuration</td></tr>
+<tr><td align=right> Berkeley DB library </td><td><a href="../ref/program/namespace.html#2">name</a> spaces</td></tr>
+<tr><td align=right> file </td><td><a href="../ref/env/naming.html#2">naming</a></td></tr>
+<tr><td align=right> retrieving Btree records by </td><td><a href="../ref/am_conf/bt_recnum.html#2">number</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/open.html#2">opening</a> a database</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/osf1.html#2">OSF/1</a></td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/pagesize.html#2">page</a> size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/partial.html#2">partial</a> record storage and retrieval</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/update/index.html">Patches,</a> Updates and Change logs</td></tr>
+<tr><td align=right> </td><td><a href="../ref/perl/intro.html#2">Perl</a></td></tr>
+<tr><td align=right> Sleepycat Software's Berkeley DB </td><td><a href="../ref/intro/products.html#2">products</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/qnx.html#2">QNX</a></td></tr>
+<tr><td align=right> logical </td><td><a href="../api_java/dbt_class.html#4">record</a> number format</td></tr>
+<tr><td align=right> logical </td><td><a href="../ref/am_conf/logrec.html#2">record</a> numbers</td></tr>
+<tr><td align=right> managing </td><td><a href="../ref/am_conf/recno.html#2">record-based</a> databases</td></tr>
+<tr><td align=right> logically renumbering </td><td><a href="../ref/am_conf/renumber.html#2">records</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_recover.html#3">recover</a> database environments</td></tr>
+<tr><td align=right> Berkeley DB </td><td><a href="../ref/transapp/reclimit.html#2">recoverability</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/get.html#2">retrieving</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curget.html#2">retrieving</a> records with a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/rpc/client.html#2">RPC</a> client</td></tr>
+<tr><td align=right> configuring an </td><td><a href="../ref/build_unix/conf.html#11">RPC</a> client/server</td></tr>
+<tr><td align=right> utility to support </td><td><a href="../utility/berkeley_db_svc.html#3">RPC</a> client/server</td></tr>
+<tr><td align=right> </td><td><a href="../ref/rpc/server.html#2">RPC</a> server</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/verify.html#3">salvage</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/sco.html#2">SCO</a></td></tr>
+<tr><td align=right> Berkeley DB handle </td><td><a href="../ref/program/scope.html#2">scope</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/env/security.html#2">security</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/sendmail/intro.html#2">Sendmail</a></td></tr>
+<tr><td align=right> configuring </td><td><a href="../ref/build_unix/conf.html#8">shared</a> libraries</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/shlib.html#2">shared</a> libraries</td></tr>
+<tr><td align=right> application </td><td><a href="../ref/program/appsignals.html#2">signal</a> handling</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/">Sleepycat</a> Software</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/solaris.html#2">Solaris</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/distrib/layout.html#2">source</a> code layout</td></tr>
+<tr><td align=right> cursor </td><td><a href="../ref/am/stability.html#3">stability</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/stat.html#2">statistics</a></td></tr>
+<tr><td align=right> utility to display database and environment </td><td><a href="../utility/db_stat.html#3">statistics</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/put.html#2">storing</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curput.html#2">storing</a> records with a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/sunos.html#2">SunOS</a></td></tr>
+<tr><td align=right> loading Berkeley DB with </td><td><a href="../ref/tcl/intro.html#2">Tcl</a></td></tr>
+<tr><td align=right> using Berkeley DB with </td><td><a href="../ref/tcl/using.html#2">Tcl</a></td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#12">Tcl</a> API</td></tr>
+<tr><td align=right> </td><td><a href="../ref/tcl/program.html#2">Tcl</a> API programming notes</td></tr>
+<tr><td align=right> </td><td><a href="../ref/tcl/faq.html#3">Tcl</a> FAQ</td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#13">test</a> suite</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/test/run.html#2">test</a> suite</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/build_unix/test.html#2">test</a> suite under UNIX</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/build_win/test.html#2">test</a> suite under Windows</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/re_source.html#2">text</a> backing files</td></tr>
+<tr><td align=right> loading </td><td><a href="../ref/dumpload/text.html#2">text</a> into databases</td></tr>
+<tr><td align=right> dumping/loading </td><td><a href="../ref/dumpload/utility.html#2">text</a> to/from databases</td></tr>
+<tr><td align=right> building </td><td><a href="../ref/program/mt.html#2">threaded</a> applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/txn/config.html#2">transaction</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/txn/limits.html#2">transaction</a> limits</td></tr>
+<tr><td align=right> administering </td><td><a href="../ref/transapp/admin.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> archival in </td><td><a href="../ref/transapp/archival.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> checkpoints in </td><td><a href="../ref/transapp/checkpoint.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> deadlock detection in </td><td><a href="../ref/transapp/deadlock.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> recovery in </td><td><a href="../ref/transapp/recovery.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/throughput.html#2">transaction</a> throughput</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/intro.html#2">Transactional</a> Data Store</td></tr>
+<tr><td align=right> Berkeley DB and </td><td><a href="../ref/txn/intro.html#2">transactions</a></td></tr>
+<tr><td align=right> nested </td><td><a href="../ref/txn/nested.html#2">transactions</a></td></tr>
+<tr><td align=right> configuring Berkeley DB with the </td><td><a href="../ref/xa/config.html#2">Tuxedo</a> System</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/ultrix.html#2">Ultrix</a></td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_unix/notes.html#3">UNIX</a> FAQ</td></tr>
+<tr><td align=right> configuring Berkeley DB for </td><td><a href="../ref/build_unix/conf.html#3">UNIX</a> systems</td></tr>
+<tr><td align=right>Patches, </td><td><a href="http://www.sleepycat.com/update/index.html">Updates</a> and Change logs</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_upgrade.html#4">upgrade</a> database files</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/upgrade.html#2">upgrading</a> databases</td></tr>
+<tr><td align=right> </td><td><a href="../ref/arch/utilities.html#2">utilities</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/verify.html#2">verification</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_verify.html#4">verify</a> database files</td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_vxworks/faq.html#3">VxWorks</a> FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/notes.html#2">VxWorks</a> notes</td></tr>
+<tr><td align=right> running the test suite under </td><td><a href="../ref/build_win/test.html#3">Windows</a></td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_win/faq.html#3">Windows</a> FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/notes.html#2">Windows</a> notes</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--with-tcl=DIR">--with-tcl=DIR</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/xa/intro.html#2">XA</a> Resource Manager</td></tr>
+</table>
+</center>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/lock_class.html b/bdb/docs/api_java/lock_class.html
new file mode 100644
index 00000000000..6705e95bdfd
--- /dev/null
+++ b/bdb/docs/api_java/lock_class.html
@@ -0,0 +1,54 @@
+<!--$Id: lock_class.so,v 10.13 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLock</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbLock</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbLock extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>The <a href="../api_java/dbenv_class.html">DbEnv</a> lock methods and the DbLock class are used
+to provide general-purpose locking. While designed to work with the
+other Db classes, they are also useful for more general locking
+purposes. Locks can be shared between processes.
+<p>In most cases, when multiple threads or processes are using locking, the
+deadlock detector, <a href="../utility/db_deadlock.html">db_deadlock</a>, should be run.
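+<p>As an illustration only (not part of the original interface description),
+the following minimal sketch shows the basic flow of acquiring and releasing
+a lock, assuming an already-open DbEnv handle named <b>dbenv</b> whose
+environment was created with the locking subsystem initialized, and assuming
+the standard Db.DB_LOCK_WRITE lock-mode constant and the Dbt byte-array
+constructor:
+<p><pre>
+// Obtain a locker ID, write-lock an application-defined object name,
+// do the protected work, then release the lock.
+int locker = dbenv.lock_id();
+Dbt obj = new Dbt("my-object-name".getBytes());
+DbLock lock = dbenv.lock_get(locker, 0, obj, Db.DB_LOCK_WRITE);
+// ... operate on the object named by obj ...
+lock.put(dbenv);
+</pre>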
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/lock_detect.html b/bdb/docs/api_java/lock_detect.html
new file mode 100644
index 00000000000..6f453ddb8e6
--- /dev/null
+++ b/bdb/docs/api_java/lock_detect.html
@@ -0,0 +1,69 @@
+<!--$Id: lock_detect.so,v 10.26 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.lock_detect</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int lock_detect(int flags, int atype)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_detect method runs one iteration of the deadlock detector.
+The deadlock detector traverses the lock table, and for each deadlock
+it finds, marks one of the participating transactions for abort.
+<p>The <b>flags</b> value must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_LOCK_CONFLICT">Db.DB_LOCK_CONFLICT</a><dd>Only run the deadlock detector if a lock conflict has occurred since
+the last time that the deadlock detector was run.
+</dl>
+<p>The <b>atype</b> parameter specifies which transaction to abort in the
+case of deadlock. It must be set to one of the possible arguments listed for
+the <a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a> interface.
+<p>The DbEnv.lock_detect method returns the number of transactions aborted.
+<p>The DbEnv.lock_detect method throws an exception that encapsulates a non-zero error value on
+failure.
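+<p>As an illustrative sketch only, a conflict-driven detector pass might look
+like the following, assuming an already-open DbEnv handle named <b>dbenv</b>
+and the Db.DB_LOCK_DEFAULT abort policy described for DbEnv.set_lk_detect:
+<p><pre>
+// Run one pass of the deadlock detector, but only if a lock conflict
+// has occurred since the last pass; report how many transactions were
+// marked for abort.
+int aborted = dbenv.lock_detect(Db.DB_LOCK_CONFLICT, Db.DB_LOCK_DEFAULT);
+if (aborted > 0)
+    System.err.println("deadlock detector aborted " + aborted + " transactions");
+</pre>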
+<h1>Errors</h1>
+<p>The DbEnv.lock_detect method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_detect method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/lock_get.html b/bdb/docs/api_java/lock_get.html
new file mode 100644
index 00000000000..42604bc46d9
--- /dev/null
+++ b/bdb/docs/api_java/lock_get.html
@@ -0,0 +1,92 @@
+<!--$Id: lock_get.so,v 10.28 2000/04/24 16:33:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.lock_get</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbLock lock_get(int locker,
+ int flags, Dbt obj, int lock_mode)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_get method acquires a lock from the lock table, returning
+information about it in
+a DbLock object.
+<p>The <b>locker</b> argument specified to DbEnv.lock_get is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the lock.
+<p>The <b>flags</b> value must be set to 0 or the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with an
+existing lock, return immediately instead of waiting for the lock to
+become available.
+</dl>
+<p>The <b>obj</b> argument is an untyped byte string that specifies the
+object to be locked or released.
+<p>The <b>lock_mode</b> argument is an index into the environment's lock conflict
+array. See <a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a> and
+<a href="../ref/lock/stdmode.html">Standard Lock Modes</a>
+for a description of that array.
+<p>The DbEnv.lock_get method may
+throw an exception encapsulating
+one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_LOCK_NOTGRANTED">Db.DB_LOCK_NOTGRANTED</a><dd>A lock was requested that could not be immediately granted and the
+<b>flags</b> parameter was set to DB_LOCK_NOWAIT.
+</dl>
+<p>Otherwise, the DbEnv.lock_get method throws an exception that encapsulates a non-zero error value on
+failure.
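+<p>As an illustrative sketch only, the following makes a non-blocking lock
+request, assuming an already-open DbEnv handle named <b>dbenv</b>, a locker
+ID previously returned by DbEnv.lock_id, the Db.DB_LOCK_READ lock-mode
+constant, and the DbException.get_errno method:
+<p><pre>
+Dbt obj = new Dbt("account-17".getBytes());   // hypothetical object name
+DbLock lock = null;
+try {
+    // Request a read lock, returning immediately if it cannot be granted.
+    lock = dbenv.lock_get(locker, Db.DB_LOCK_NOWAIT, obj, Db.DB_LOCK_READ);
+} catch (DbException e) {
+    if (e.get_errno() != Db.DB_LOCK_NOTGRANTED)
+        throw e;                 // unexpected error
+    // The object is locked by another locker; back off and retry later.
+}
+if (lock != null) {
+    // ... read the protected object ...
+    lock.put(dbenv);
+}
+</pre>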
+<h1>Errors</h1>
+<p>The DbEnv.lock_get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+DbEnv.lock_get method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The DbEnv.lock_get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_get method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/lock_id.html b/bdb/docs/api_java/lock_id.html
new file mode 100644
index 00000000000..2fa59317fe9
--- /dev/null
+++ b/bdb/docs/api_java/lock_id.html
@@ -0,0 +1,59 @@
+<!--$Id: lock_id.so,v 10.19 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.lock_id</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int lock_id()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_id method
+returns a locker ID, which is guaranteed to be unique in the specified lock
+table.
+<p>The DbEnv.lock_id method throws an exception that encapsulates a non-zero error value on
+failure.
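+<p>For example (an illustrative sketch only, assuming an already-open DbEnv
+handle named <b>dbenv</b>):
+<p><pre>
+// Allocate a locker ID that identifies this thread of control in
+// subsequent DbEnv.lock_get and DbLock.put calls.
+int locker = dbenv.lock_id();
+</pre>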
+<h1>Errors</h1>
+<p>The DbEnv.lock_id method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_id method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/lock_put.html b/bdb/docs/api_java/lock_put.html
new file mode 100644
index 00000000000..fe4eacc28af
--- /dev/null
+++ b/bdb/docs/api_java/lock_put.html
@@ -0,0 +1,61 @@
+<!--$Id: lock_put.so,v 10.21 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLock.put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbLock.put</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public native void put(DbEnv env)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLock.put method releases the lock from the lock table.
+<p>The DbLock.put method throws an exception that encapsulates a non-zero error value on
+failure.
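+<p>As an illustrative sketch only, the following releases a lock on every
+code path, assuming an already-open DbEnv handle named <b>dbenv</b> and the
+<b>locker</b>, <b>obj</b> and lock mode values described for DbEnv.lock_get:
+<p><pre>
+DbLock lock = dbenv.lock_get(locker, 0, obj, lock_mode);
+try {
+    // ... work performed while the lock is held ...
+} finally {
+    lock.put(dbenv);    // always release the lock, even on error paths
+}
+</pre>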
+<h1>Errors</h1>
+<p>The DbLock.put method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbLock.put method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLock.put method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/lock_stat.html b/bdb/docs/api_java/lock_stat.html
new file mode 100644
index 00000000000..00c3703f528
--- /dev/null
+++ b/bdb/docs/api_java/lock_stat.html
@@ -0,0 +1,94 @@
+<!--$Id: lock_stat.so,v 10.30 2000/12/08 20:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.lock_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbLockStat lock_stat()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_stat method
+creates a DbLockStat object encapsulating a statistical structure.
+The lock region statistics are stored in the returned DbLockStat object;
+the following data fields of that object will be filled in:
+<p><dl compact>
+<dt>public int st_lastid;<dd>The last allocated lock ID.
+<dt>public int st_nmodes;<dd>The number of lock modes.
+<dt>public int st_maxlocks;<dd>The maximum number of locks possible.
+<dt>public int st_maxlockers;<dd>The maximum number of lockers possible.
+<dt>public int st_maxobjects;<dd>The maximum number of objects possible.
+<dt>public int st_nlocks;<dd>The number of current locks.
+<dt>public int st_maxnlocks;<dd>The maximum number of locks at any one time.
+<dt>public int st_nlockers;<dd>The number of current lockers.
+<dt>public int st_maxnlockers;<dd>The maximum number of lockers at any one time.
+<dt>public int st_nobjects;<dd>The number of current objects.
+<dt>public int st_maxnobjects;<dd>The maximum number of objects at any one time.
+<dt>public int st_nrequests;<dd>The total number of locks requested.
+<dt>public int st_nreleases;<dd>The total number of locks released.
+<dt>public int st_nnowaits;<dd>The total number of lock requests that failed because
+<a href="../api_java/lock_vec.html#DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a> was set.
+<dt>public int st_nconflicts;<dd>The total number of locks not immediately available due to conflicts.
+<dt>public int st_ndeadlocks;<dd>The number of deadlocks detected.
+<dt>public int st_regsize;<dd>The size of the region.
+<dt>public int st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>public int st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DbEnv.lock_stat method throws an exception that encapsulates a non-zero error value on
+failure.
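+<p>As an illustrative sketch only, printing a few of the fields listed above,
+assuming an already-open DbEnv handle named <b>dbenv</b>:
+<p><pre>
+DbLockStat stat = dbenv.lock_stat();
+System.out.println("current locks:      " + stat.st_nlocks);
+System.out.println("maximum locks:      " + stat.st_maxnlocks);
+System.out.println("lock requests:      " + stat.st_nrequests);
+System.out.println("deadlocks detected: " + stat.st_ndeadlocks);
+</pre>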
+<h1>Errors</h1>
+<p>The DbEnv.lock_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_stat method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/env_set_lk_max.html">DbEnv.set_lk_max</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_put.html">DbLock.put</a>
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/lock_vec.html b/bdb/docs/api_java/lock_vec.html
new file mode 100644
index 00000000000..233e036a370
--- /dev/null
+++ b/bdb/docs/api_java/lock_vec.html
@@ -0,0 +1,33 @@
+<!--$Id: lock_vec.so,v 10.31 2000/12/04 18:05:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_vec</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.lock_vec</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbEnv.lock_vec method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_archive.html b/bdb/docs/api_java/log_archive.html
new file mode 100644
index 00000000000..a842286585d
--- /dev/null
+++ b/bdb/docs/api_java/log_archive.html
@@ -0,0 +1,92 @@
+<!--$Id: log_archive.so,v 10.26 2000/05/25 13:47:07 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_archive</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_archive</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public String[] log_archive(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_archive method
+returns an array of log or database file names.
+<p>By default, DbEnv.log_archive returns the names of all of the log files
+that are no longer in use (e.g., no longer involved in active transactions),
+and that may safely be archived for catastrophic recovery and then removed
+from the system. If there were no file names to return, the returned list
+will be null.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or more
+of the following values.
+<p><dl compact>
+<p><dt><a name="Db.DB_ARCH_ABS">Db.DB_ARCH_ABS</a><dd>All pathnames are returned as absolute pathnames,
+instead of relative to the database home directory.
+<p><dt><a name="Db.DB_ARCH_DATA">Db.DB_ARCH_DATA</a><dd>Return the database files that need to be archived in order to recover
+the database from catastrophic failure. If any of the database files
+have not been accessed during the lifetime of the current log files,
+DbEnv.log_archive will not include them in this list. It is also
+possible that some of the files referenced in the log have since been
+deleted from the system.
+<p><dt><a name="Db.DB_ARCH_LOG">Db.DB_ARCH_LOG</a><dd>Return all the log file names regardless of whether or not they are in
+use.
+</dl>
+<p>The Db.DB_ARCH_DATA and Db.DB_ARCH_LOG flags are mutually
+exclusive.
+<p>See the <a href="../utility/db_archive.html">db_archive</a> manual page for more information on database
+archival procedures.
+<p>The DbEnv.log_archive method throws an exception that encapsulates a non-zero error value on
+failure.
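+<p>As an illustrative sketch only, listing the log files that may be archived
+and removed, assuming an already-open DbEnv handle named <b>dbenv</b> and that
+null is returned when no file names are available:
+<p><pre>
+// Db.DB_ARCH_ABS requests absolute pathnames; with no other flags set,
+// only log files that are no longer in use are returned.
+String[] names = dbenv.log_archive(Db.DB_ARCH_ABS);
+if (names != null)
+    for (int i = 0; i < names.length; i++)
+        System.out.println("removable log file: " + names[i]);
+</pre>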
+<h1>Bugs</h1>
+<p>In a threaded application (i.e., one where the environment was created
+with the DB_THREAD flag specified), calling DbEnv.log_archive with the
+DB_ARCH_DATA flag will fail, returning EINVAL. To work around this
+problem, re-open the log explicitly without specifying DB_THREAD. This
+restriction is expected to be removed in a future version of Berkeley DB.
+<h1>Errors</h1>
+<p>The DbEnv.log_archive method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The log was corrupted.
+</dl>
+<p>The DbEnv.log_archive method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_archive method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_compare.html b/bdb/docs/api_java/log_compare.html
new file mode 100644
index 00000000000..aba583203e1
--- /dev/null
+++ b/bdb/docs/api_java/log_compare.html
@@ -0,0 +1,52 @@
+<!--$Id: log_compare.so,v 10.12 1999/12/20 08:52:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_compare</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static int log_compare(DbLsn lsn0, DbLsn lsn1);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_compare method allows the caller to compare two
+<a href="../api_java/lsn_class.html">DbLsn</a> objects,
+returning 0 if they are equal, 1 if <b>lsn0</b> is greater than
+<b>lsn1</b>, and -1 if <b>lsn0</b> is less than <b>lsn1</b>.
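+<p>As an illustrative sketch only, assuming two DbLsn objects <b>first</b> and
+<b>current</b> previously filled in by DbEnv.log_put or DbEnv.log_get:
+<p><pre>
+// Order two log sequence numbers.
+if (DbEnv.log_compare(first, current) < 0) {
+    // first refers to an earlier position in the log than current.
+}
+</pre>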
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_file.html b/bdb/docs/api_java/log_file.html
new file mode 100644
index 00000000000..e85fa46df14
--- /dev/null
+++ b/bdb/docs/api_java/log_file.html
@@ -0,0 +1,77 @@
+<!--$Id: log_file.so,v 10.18 2000/03/01 21:41:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_file</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public String log_file(DbLsn lsn)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_file method maps
+<a href="../api_java/lsn_class.html">DbLsn</a> objects
+to file names,
+returning the name of the file containing the record named by <b>lsn</b>.
+<p>(Log file names are normally quite short, on the order of 10 characters.)
+<p>This mapping of
+<a href="../api_java/lsn_class.html">DbLsn</a> objects
+to files is needed for database administration. For example, a
+transaction manager typically records the earliest
+<a href="../api_java/lsn_class.html">DbLsn</a>
+needed for restart, and the database administrator may want to archive
+log files to tape when they contain only
+<a href="../api_java/lsn_class.html">DbLsn</a>
+entries before the earliest one needed for restart.
+<p>The DbEnv.log_file method throws an exception that encapsulates a non-zero error value on
+failure.
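+<p>As an illustrative sketch only, assuming an already-open DbEnv handle named
+<b>dbenv</b> and a DbLsn named <b>lsn</b> recorded earlier by the application:
+<p><pre>
+// Map the LSN to the name of the log file containing that record.
+String name = dbenv.log_file(lsn);
+System.out.println("the record is in log file " + name);
+</pre>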
+<h1>Errors</h1>
+<p>The DbEnv.log_file method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_file method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_flush.html b/bdb/docs/api_java/log_flush.html
new file mode 100644
index 00000000000..4396d21f7cb
--- /dev/null
+++ b/bdb/docs/api_java/log_flush.html
@@ -0,0 +1,65 @@
+<!--$Id: log_flush.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_flush</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_flush</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void log_flush(DbLsn lsn)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_flush method guarantees that all log records whose
+<a href="../api_java/lsn_class.html">DbLsn</a> values
+are less than or equal to the <b>lsn</b> argument have been
+written to disk. If <b>lsn</b> is null, all records in the
+log are flushed.
+<p>The DbEnv.log_flush method throws an exception that encapsulates a non-zero error value on
+failure.
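+<p>As an illustrative sketch only, assuming an already-open DbEnv handle named
+<b>dbenv</b>:
+<p><pre>
+// Passing null forces all buffered log records to disk.
+dbenv.log_flush(null);
+</pre>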
+<h1>Errors</h1>
+<p>The DbEnv.log_flush method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.log_flush method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_flush method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_get.html b/bdb/docs/api_java/log_get.html
new file mode 100644
index 00000000000..94eed013408
--- /dev/null
+++ b/bdb/docs/api_java/log_get.html
@@ -0,0 +1,117 @@
+<!--$Id: log_get.so,v 10.22 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_get</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void log_get(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_get method implements a cursor inside of the log,
+retrieving records from the log according to the <b>lsn</b> and
+<b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved and the size field indicates the number of bytes in the record.
+See <a href="../api_java/dbt_class.html">Dbt</a> for a description of other fields in the <b>data</b>
+structure. When multiple threads are using the returned log handle
+concurrently, one of the <a href="../api_java/dbt_class.html#DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a>, <a href="../api_java/dbt_class.html#DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a> or
+<a href="../api_java/dbt_class.html#DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a> flags must be specified for any <a href="../api_java/dbt_class.html">Dbt</a> used
+for data retrieval.
+<p>The <b>flags</b> argument must be set to exactly one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_CHECKPOINT">Db.DB_CHECKPOINT</a><dd>The last record written with the DB_CHECKPOINT flag specified to the
+<a href="../api_java/log_put.html">DbEnv.log_put</a> method is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the record
+returned. If no record has been previously written with the DB_CHECKPOINT
+flag specified, the first record in the log is returned.
+<p>If the log is empty, the DbEnv.log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_FIRST">Db.DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the log is empty, the DbEnv.log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_LAST">Db.DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the log is empty, the DbEnv.log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_NEXT">Db.DB_NEXT</a><dd>The current log position is advanced to the next record in the log and that
+record is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, DbEnv.log_get will return the first record in the log.
+If the last log record has already been returned or the log is empty, the
+DbEnv.log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+DbEnv.log_get with the DB_NEXT flag set will return EINVAL.
+<p><dt><a name="Db.DB_PREV">Db.DB_PREV</a><dd>The current log position is moved to the previous record in the log and that
+record is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV,
+DbEnv.log_get will return the last record in the log.
+If the first log record has already been returned or the log is empty, the
+DbEnv.log_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+DbEnv.log_get with the DB_PREV flag set will return EINVAL.
+<p><dt><a name="Db.DB_CURRENT">Db.DB_CURRENT</a><dd>Return the log record currently referenced by the log.
+<p>If the log pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD flag set,
+DbEnv.log_get will return EINVAL.
+<p><dt><a name="Db.DB_SET">Db.DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument. If the
+specified <a href="../api_java/lsn_class.html">DbLsn</a> is invalid (e.g., does not appear in the log)
+DbEnv.log_get will return EINVAL.
+</dl>
+<p>Otherwise, the DbEnv.log_get method throws an exception that encapsulates a non-zero error value on
+failure.
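+<p>As an illustration only, the following minimal sketch walks the log from
+the first record forward. It assumes an already-opened <b>dbenv</b> handle
+and, as the description above implies, that DbEnv.log_get returns
+<a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a> (rather than throwing) when the end of the log is reached:
+<p><pre>
+DbLsn lsn = new DbLsn();
+Dbt data = new Dbt();
+// Have Berkeley DB allocate the record buffer on each call.
+data.set_flags(Db.DB_DBT_MALLOC);
+
+int flag = Db.DB_FIRST;
+while (dbenv.log_get(lsn, data, flag) != Db.DB_NOTFOUND) {
+    // data.get_data() now holds data.get_size() bytes of the log record,
+    // and lsn has been overwritten with the DbLsn of that record.
+    flag = Db.DB_NEXT;
+}
+</pre>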
+<h1>Errors</h1>
+<p>The DbEnv.log_get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_FIRST flag was specified and no log files were found.
+</dl>
+<p>The DbEnv.log_get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_get method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_put.html b/bdb/docs/api_java/log_put.html
new file mode 100644
index 00000000000..03749f20d66
--- /dev/null
+++ b/bdb/docs/api_java/log_put.html
@@ -0,0 +1,83 @@
+<!--$Id: log_put.so,v 10.21 2000/03/17 01:53:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_put</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void log_put(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_put method appends records to the log. The <a href="../api_java/lsn_class.html">DbLsn</a> of
+the put record is returned in the <b>lsn</b> argument. The <b>flags</b>
+argument may be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_CHECKPOINT">Db.DB_CHECKPOINT</a><dd>The log should write a checkpoint record, recording any information
+necessary to make the log structures recoverable after a crash.
+<p><dt><a name="Db.DB_CURLSN">Db.DB_CURLSN</a><dd>The <a href="../api_java/lsn_class.html">DbLsn</a> of the next record to be put is returned in the
+<b>lsn</b> argument.
+<p><dt><a name="Db.DB_FLUSH">Db.DB_FLUSH</a><dd>The log is forced to disk after this record is written, guaranteeing
+that all records with <a href="../api_java/lsn_class.html">DbLsn</a> values less than or equal to the
+one being put are on disk before this function returns (this flag
+is most often used for a transaction commit; see <a href="../api_java/txn_commit.html">DbTxn.commit</a> for
+more information).
+<p>The caller is responsible for providing any necessary structure to
+<b>data</b>. (For example, in a write-ahead logging protocol, the
+application must understand what part of <b>data</b> is an operation
+code, what part is redo information, and what part is undo information.
+In addition, most transaction managers will store in <b>data</b> the
+<a href="../api_java/lsn_class.html">DbLsn</a> of the previous log record for the same transaction, to
+support chaining back through the transaction's log records during
+undo.)
+</dl>
+<p>The DbEnv.log_put method throws an exception that encapsulates a non-zero error value on
+failure.
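+<p>As an illustration only, a minimal sketch of appending a (purely
+hypothetical) application record and forcing the log to disk, assuming an
+already-opened <b>dbenv</b> handle:
+<p><pre>
+byte[] rec = "my-application-record".getBytes();
+Dbt data = new Dbt(rec);
+data.set_size(rec.length);
+
+DbLsn lsn = new DbLsn();
+// Append the record and flush the log through this record.
+dbenv.log_put(lsn, data, Db.DB_FLUSH);
+// lsn now holds the DbLsn of the record just written.
+</pre>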
+<h1>Errors</h1>
+<p>The DbEnv.log_put method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The record to be logged is larger than the maximum log record.
+</dl>
+<p>The DbEnv.log_put method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_put method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_register.html b/bdb/docs/api_java/log_register.html
new file mode 100644
index 00000000000..7d4833c3c1a
--- /dev/null
+++ b/bdb/docs/api_java/log_register.html
@@ -0,0 +1,67 @@
+<!--$Id: log_register.so,v 10.27 2000/05/09 14:46:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_register</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int log_register(Db dbp, String name)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_register method registers a file name with the specified Berkeley DB
+environment's log manager. The log manager records all file name mappings
+at each checkpoint so that a recovery process can identify the file to
+which a record in the log refers.
+<p>The <b>dbp</b> argument should be a reference to the <a href="../api_java/db_class.html">Db</a> object being
+registered. The <b>name</b> argument should be a file name appropriate
+for opening the file in the environment during recovery.
+<p>The DbEnv.log_register method throws an exception that encapsulates a non-zero error value on
+failure.
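+<p>As an illustration only, a minimal sketch, assuming an already-opened
+<b>dbenv</b> handle and an already-opened <a href="../api_java/db_class.html">Db</a> handle <b>dbp</b> for a
+(hypothetical) file named mydb.db:
+<p><pre>
+// Register the file so recovery can map log records back to it.
+dbenv.log_register(dbp, "mydb.db");
+
+// ... perform logged operations against dbp ...
+
+// Unregister the file before closing the handle.
+dbenv.log_unregister(dbp);
+</pre>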
+<h1>Errors</h1>
+<p>The DbEnv.log_register method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.log_register method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_register method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_stat.html b/bdb/docs/api_java/log_stat.html
new file mode 100644
index 00000000000..4c51f1095a3
--- /dev/null
+++ b/bdb/docs/api_java/log_stat.html
@@ -0,0 +1,93 @@
+<!--$Id: log_stat.so,v 10.23 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbLogStat log_stat()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_stat method
+creates a DbLogStat object encapsulating the log region statistics.
+The DbLogStat object is allocated by the method; it is an ordinary Java
+object reclaimed by garbage collection, so the caller does not need to
+deallocate it.
+<p>The following DbLogStat fields will be filled in:
+<p><dl compact>
+<dt>public int st_magic;<dd>The magic number that identifies a file as a log file.
+<dt>public int st_version;<dd>The version of the log file type.
+<dt>public int st_regsize;<dd>The size of the region.
+<dt>public int st_mode;<dd>The mode of any created log files.
+<dt>public int st_lg_bsize;<dd>The in-memory log record cache size.
+<dt>public int st_lg_max;<dd>The maximum size of any individual file comprising the log.
+<dt>public int st_w_mbytes;<dd>The number of megabytes written to this log.
+<dt>public int st_w_bytes;<dd>The number of bytes over and above <b>st_w_mbytes</b> written to this log.
+<dt>public int st_wc_mbytes;<dd>The number of megabytes written to this log since the last checkpoint.
+<dt>public int st_wc_bytes;<dd>The number of bytes over and above <b>st_wc_mbytes</b> written to this log
+since the last checkpoint.
+<dt>public int st_wcount;<dd>The number of times the log has been written to disk.
+<dt>public int st_wcount_fill;<dd>The number of times the log has been written to disk because the
+in-memory log record cache filled up.
+<dt>public int st_scount;<dd>The number of times the log has been flushed to disk.
+<dt>public int st_cur_file;<dd>The current log file number.
+<dt>public int st_cur_offset;<dd>The byte offset in the current log file.
+<dt>public int st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>public int st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DbEnv.log_stat method throws an exception that encapsulates a non-zero error value on
+failure.
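+<p>As an illustration only, a minimal sketch that retrieves and displays a
+few of the fields listed above, assuming an already-opened <b>dbenv</b>
+handle:
+<p><pre>
+DbLogStat stat = dbenv.log_stat();
+System.out.println("log file version: " + stat.st_version);
+System.out.println("current log file: " + stat.st_cur_file);
+System.out.println("writes to disk:   " + stat.st_wcount);
+System.out.println("flushes to disk:  " + stat.st_scount);
+</pre>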
+<h1>Errors</h1>
+<p>The DbEnv.log_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_stat method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/log_unregister.html b/bdb/docs/api_java/log_unregister.html
new file mode 100644
index 00000000000..c79c324ee83
--- /dev/null
+++ b/bdb/docs/api_java/log_unregister.html
@@ -0,0 +1,62 @@
+<!--$Id: log_unregister.so,v 10.21 2000/05/03 22:39:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_unregister</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.log_unregister</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void log_unregister(Db dbp)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_unregister method unregisters the file represented by
+the <b>dbp</b> parameter from the Berkeley DB environment's log manager.
+<p>The DbEnv.log_unregister method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The DbEnv.log_unregister method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.log_unregister method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_unregister method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_get.html">DbEnv.log_get</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>
+and
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/lsn_class.html b/bdb/docs/api_java/lsn_class.html
new file mode 100644
index 00000000000..891ba8d0cea
--- /dev/null
+++ b/bdb/docs/api_java/lsn_class.html
@@ -0,0 +1,37 @@
+<!--$Id: lsn_class.so,v 10.11 1999/12/20 08:52:33 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLsn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbLsn</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbLsn extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>A DbLsn is a <b>log sequence number</b> that is fully
+encapsulated. The class itself has no methods, other than a default
+constructor, so there is no way for the user to manipulate its data
+directly.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/mem_class.html b/bdb/docs/api_java/mem_class.html
new file mode 100644
index 00000000000..be9239defad
--- /dev/null
+++ b/bdb/docs/api_java/mem_class.html
@@ -0,0 +1,48 @@
+<!--$Id: mem_class.so,v 10.4 2000/09/21 19:58:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMemoryException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMemoryException</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbMemoryException extends DbException { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbMemoryException class and
+how it is used by the various Db* classes.
+<p>A DbMemoryException is thrown when there is insufficient memory
+to complete an operation.
+<p>This may or may not be recoverable. An example of where it would be
+recoverable is during a <a href="../api_java/db_get.html">Db.get</a> or <a href="../api_java/dbc_get.html">Dbc.get</a> operation
+with the <a href="../api_java/dbt_class.html">Dbt</a> flags set to <a href="../api_java/dbt_class.html#DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a>.
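+<p>As an illustration only, a hypothetical sketch of that recoverable case:
+a <a href="../api_java/db_get.html">Db.get</a> using <a href="../api_java/dbt_class.html#DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a> with a buffer that turns out to be too
+small. It assumes an already-opened <a href="../api_java/db_class.html">Db</a> handle <b>db</b> and that, as in the
+C API, the <a href="../api_java/dbt_class.html">Dbt</a> size field is set to the length the record requires:
+<p><pre>
+byte[] keyBytes = "some-key".getBytes();
+Dbt key = new Dbt(keyBytes);
+key.set_size(keyBytes.length);
+
+byte[] buf = new byte[1024];
+Dbt data = new Dbt(buf);
+data.set_ulen(buf.length);
+data.set_flags(Db.DB_DBT_USERMEM);
+
+try {
+    db.get(null, key, data, 0);
+} catch (DbMemoryException dme) {
+    // The record did not fit; retry with a buffer of the required size.
+    buf = new byte[data.get_size()];
+    data.set_data(buf);
+    data.set_ulen(buf.length);
+    db.get(null, key, data, 0);
+}
+</pre>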
+<h3>Class</h3>
+<a href="../api_java/except_class.html">DbException</a>
+<h1>See Also</h1>
+<a href="../api_java/get_errno.html">DbException.get_errno</a>,
+<a href="../api_java/deadlock_class.html">DbDeadlockException</a>,
+<a href="../api_java/mem_class.html">DbMemoryException</a>
+and
+<a href="../api_java/runrec_class.html">DbRunRecoveryException</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_fclose.html b/bdb/docs/api_java/memp_fclose.html
new file mode 100644
index 00000000000..5cf196d40dc
--- /dev/null
+++ b/bdb/docs/api_java/memp_fclose.html
@@ -0,0 +1,33 @@
+<!--$Id: memp_fclose.so,v 10.20 2000/06/13 13:55:49 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile.close</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbMpoolFile.close method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_fget.html b/bdb/docs/api_java/memp_fget.html
new file mode 100644
index 00000000000..a59b4bbe84d
--- /dev/null
+++ b/bdb/docs/api_java/memp_fget.html
@@ -0,0 +1,33 @@
+<!--$Id: memp_fget.so,v 10.23 2000/12/04 18:05:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile.get</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbMpoolFile.get method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_fopen.html b/bdb/docs/api_java/memp_fopen.html
new file mode 100644
index 00000000000..e1186d50530
--- /dev/null
+++ b/bdb/docs/api_java/memp_fopen.html
@@ -0,0 +1,33 @@
+<!--$Id: memp_fopen.so,v 10.28 2000/12/18 21:05:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile.open</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbMpoolFile.open method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_fput.html b/bdb/docs/api_java/memp_fput.html
new file mode 100644
index 00000000000..64cb4732acc
--- /dev/null
+++ b/bdb/docs/api_java/memp_fput.html
@@ -0,0 +1,33 @@
+<!--$Id: memp_fput.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile.put</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbMpoolFile.put method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_fset.html b/bdb/docs/api_java/memp_fset.html
new file mode 100644
index 00000000000..3ab3ed0eec4
--- /dev/null
+++ b/bdb/docs/api_java/memp_fset.html
@@ -0,0 +1,33 @@
+<!--$Id: memp_fset.so,v 10.18 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.set</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile.set</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbMpoolFile.set method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_fsync.html b/bdb/docs/api_java/memp_fsync.html
new file mode 100644
index 00000000000..b610635211f
--- /dev/null
+++ b/bdb/docs/api_java/memp_fsync.html
@@ -0,0 +1,33 @@
+<!--$Id: memp_fsync.so,v 10.22 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbMpoolFile.sync</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbMpoolFile.sync method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_register.html b/bdb/docs/api_java/memp_register.html
new file mode 100644
index 00000000000..147b45a2001
--- /dev/null
+++ b/bdb/docs/api_java/memp_register.html
@@ -0,0 +1,33 @@
+<!--$Id: memp_register.so,v 10.23 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.memp_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.memp_register</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbEnv.memp_register method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_stat.html b/bdb/docs/api_java/memp_stat.html
new file mode 100644
index 00000000000..1314201a5c0
--- /dev/null
+++ b/bdb/docs/api_java/memp_stat.html
@@ -0,0 +1,102 @@
+<!--$Id: memp_stat.so,v 10.28 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.memp_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.memp_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbMpoolStat memp_stat()
+ throws DbException;
+<p>
+public DbMpoolFStat[] memp_fstat()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.memp_stat and <a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a> methods create statistical
+structures and return them to the caller. The statistics include the number
+of files participating in the pool, the active pages in the pool, and
+information as to how effective the cache has been.
+<p>The DbEnv.memp_stat method creates a DbMpoolStat object containing global
+statistics. The following data fields are available:
+<p><dl compact>
+<dt>public long st_gbytes;<dd>Gigabytes of cache (total cache size is st_gbytes + st_bytes).
+<dt>public long st_bytes;<dd>Bytes of cache (total cache size is st_gbytes + st_bytes).
+<dt>public int st_ncache;<dd>Number of caches.
+<dt>public int st_regsize;<dd>Individual cache size.
+<dt>public int st_cache_hit;<dd>Requested pages found in the cache.
+<dt>public int st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>public int st_map;<dd>Requested pages mapped into the process' address space (there is no
+available information as to whether or not this request caused disk I/O,
+although examining the application page fault rate may be helpful).
+<dt>public int st_page_create;<dd>Pages created in the cache.
+<dt>public int st_page_in;<dd>Pages read into the cache.
+<dt>public int st_page_out;<dd>Pages written from the cache to the backing file.
+<dt>public int st_ro_evict;<dd>Clean pages forced from the cache.
+<dt>public int st_rw_evict;<dd>Dirty pages forced from the cache.
+<dt>public int st_hash_buckets;<dd>Number of hash buckets in buffer hash table.
+<dt>public int st_hash_searches;<dd>Total number of buffer hash table lookups.
+<dt>public int st_hash_longest;<dd>The longest chain ever encountered in buffer hash table lookups.
+<dt>public int st_hash_examined;<dd>Total number of hash elements traversed during hash table lookups.
+<dt>public int st_page_clean;<dd>Clean pages currently in the cache.
+<dt>public int st_page_dirty;<dd>Dirty pages currently in the cache.
+<dt>public int st_page_trickle;<dd>Dirty pages written using the <a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a> interface.
+<dt>public int st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>public int st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The <a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a> method creates an array of DbMpoolFStat objects
+containing statistics for individual files in the pool. Each
+DbMpoolFStat object contains statistics for an individual DbMpoolFile.
+The following data fields are available for each DbMpoolFStat object:
+<p><dl compact>
+<dt>public String file_name;<dd>The name of the file.
+<dt>public long st_pagesize;<dd>Page size in bytes.
+<dt>public int st_cache_hit;<dd>Requested pages found in the cache.
+<dt>public int st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>public int st_map;<dd>Requested pages mapped into the process' address space.
+<dt>public int st_page_create;<dd>Pages created in the cache.
+<dt>public int st_page_in;<dd>Pages read into the cache.
+<dt>public int st_page_out;<dd>Pages written from the cache to the backing file.
+</dl>
+<p>The DbEnv.memp_stat method throws an exception that encapsulates a non-zero error value on
+failure.
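+<p>As an illustration only, a minimal sketch that computes the overall cache
+hit rate and lists per-file activity, assuming an already-opened
+<b>dbenv</b> handle:
+<p><pre>
+DbMpoolStat st = dbenv.memp_stat();
+long lookups = (long)st.st_cache_hit + (long)st.st_cache_miss;
+if (lookups > 0)
+    System.out.println("cache hit rate: " +
+        (100.0 * st.st_cache_hit / lookups) + "%");
+
+DbMpoolFStat[] fst = dbenv.memp_fstat();
+for (int i = 0; i < fst.length; i++)
+    System.out.println(fst[i].file_name + ": " +
+        fst[i].st_cache_hit + " hits, " + fst[i].st_cache_miss + " misses");
+</pre>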
+<h1>Errors</h1>
+<p>The DbEnv.memp_stat method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.memp_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.memp_stat method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a>,
+<a href="../api_java/memp_stat.html">DbEnv.memp_stat</a>
+and
+<a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_sync.html b/bdb/docs/api_java/memp_sync.html
new file mode 100644
index 00000000000..6c729b8f570
--- /dev/null
+++ b/bdb/docs/api_java/memp_sync.html
@@ -0,0 +1,33 @@
+<!--$Id: memp_sync.so,v 10.25 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.memp_sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.memp_sync</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>A DbEnv.memp_sync method is not available in the Berkeley DB
+Java API.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/memp_trickle.html b/bdb/docs/api_java/memp_trickle.html
new file mode 100644
index 00000000000..0eeae97d5c2
--- /dev/null
+++ b/bdb/docs/api_java/memp_trickle.html
@@ -0,0 +1,60 @@
+<!--$Id: memp_trickle.so,v 10.21 2000/03/01 21:41:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.memp_trickle</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.memp_trickle</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int memp_trickle(int pct)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.memp_trickle method ensures that at least <b>pct</b> percent of
+the pages in the shared memory pool are clean by writing dirty pages to
+their backing files.
+The number of pages that were written to reach the correct percentage is
+returned.
+<p>The purpose of the DbEnv.memp_trickle method is to enable a memory
+pool manager to ensure that a page is always available for reading in new
+information without having to wait for a write.
+<p>The DbEnv.memp_trickle method throws an exception that encapsulates a non-zero error value on
+failure.
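+<p>As an illustration only, a minimal sketch of a periodic maintenance call
+that asks the pool to keep at least 20 percent of its pages clean (the
+percentage is arbitrary), assuming an already-opened <b>dbenv</b> handle:
+<p><pre>
+int written = dbenv.memp_trickle(20);
+System.out.println("DbEnv.memp_trickle wrote " + written + " pages");
+</pre>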
+<h1>Errors</h1>
+<p>The DbEnv.memp_trickle method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.memp_trickle method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.memp_trickle method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Class</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a>,
+<a href="../api_java/memp_stat.html">DbEnv.memp_stat</a>
+and
+<a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/pindex.src b/bdb/docs/api_java/pindex.src
new file mode 100644
index 00000000000..2dd85859dcf
--- /dev/null
+++ b/bdb/docs/api_java/pindex.src
@@ -0,0 +1,249 @@
+__APIREL__/api_java/db_class.html#2 @Db
+__APIREL__/api_java/db_class.html#Db.DB_XA_CREATE Db@Db.DB_XA_CREATE
+__APIREL__/api_java/dbc_class.html#2 @Dbc
+__APIREL__/api_java/dbenv_class.html#2 @DbEnv
+__APIREL__/api_java/dbenv_class.html#Db.DB_CLIENT DbEnv@Db.DB_CLIENT
+__APIREL__/api_java/dbt_class.html#2 @Dbt
+__APIREL__/api_java/dbt_class.html#3 @key/data pairs
+__APIREL__/api_java/dbt_class.html#data Dbt@data
+__APIREL__/api_java/dbt_class.html#Db.DB_DBT_MALLOC Dbt@Db.DB_DBT_MALLOC
+__APIREL__/api_java/dbt_class.html#Db.DB_DBT_REALLOC Dbt@Db.DB_DBT_REALLOC
+__APIREL__/api_java/dbt_class.html#Db.DB_DBT_USERMEM Dbt@Db.DB_DBT_USERMEM
+__APIREL__/api_java/dbt_class.html#Db.DB_DBT_PARTIAL Dbt@Db.DB_DBT_PARTIAL
+__APIREL__/api_java/dbt_class.html#4 logical @record number format
+__APIREL__/api_java/deadlock_class.html#2 @DbDeadlockException
+__APIREL__/api_java/env_set_error_stream.html#2 @DbEnv.set_error_stream
+__APIREL__/api_java/except_class.html#2 @DbException
+__APIREL__/api_java/get_errno.html#2 @DbException.get_errno
+__APIREL__/api_java/lock_class.html#2 @DbLock
+__APIREL__/api_java/lsn_class.html#2 @DbLsn
+__APIREL__/api_java/mem_class.html#2 @DbMemoryException
+__APIREL__/api_java/runrec_class.html#2 @DbRunRecoveryException
+__APIREL__/api_java/txn_class.html#2 @DbTxn
+__APIREL__/api_java/db_close.html#2 @Db.close
+__APIREL__/api_java/db_close.html#Db.DB_NOSYNC Db.close@Db.DB_NOSYNC
+__APIREL__/api_java/db_close.html#3 Db.close @DB_INCOMPLETE
+__APIREL__/api_java/db_cursor.html#2 @Db.cursor
+__APIREL__/api_java/db_cursor.html#Db.DB_WRITECURSOR Db.cursor@Db.DB_WRITECURSOR
+__APIREL__/api_java/db_del.html#2 @Db.del
+__APIREL__/api_java/db_fd.html#2 @Db.fd
+__APIREL__/api_java/db_get.html#2 @Db.get
+__APIREL__/api_java/db_get.html#Db.DB_CONSUME Db.get@Db.DB_CONSUME
+__APIREL__/api_java/db_get.html#Db.DB_CONSUME_WAIT Db.get@Db.DB_CONSUME_WAIT
+__APIREL__/api_java/db_get.html#Db.DB_GET_BOTH Db.get@Db.DB_GET_BOTH
+__APIREL__/api_java/db_get.html#Db.DB_SET_RECNO Db.get@Db.DB_SET_RECNO
+__APIREL__/api_java/db_get.html#Db.DB_RMW Db.get@Db.DB_RMW
+__APIREL__/api_java/db_get_byteswapped.html#2 @Db.get_byteswapped
+__APIREL__/api_java/db_get_type.html#2 @Db.get_type
+__APIREL__/api_java/db_join.html#2 @Db.join
+__APIREL__/api_java/db_join.html#Db.DB_JOIN_NOSORT Db.join@Db.DB_JOIN_NOSORT
+__APIREL__/api_java/db_join.html#Db.DB_JOIN_ITEM Db.join@Db.DB_JOIN_ITEM
+__APIREL__/api_java/db_join.html#Db.DB_RMW Db.join@Db.DB_RMW
+__APIREL__/api_java/db_key_range.html#2 @Db.key_range
+__APIREL__/api_java/db_open.html#2 @Db.open
+__APIREL__/api_java/db_open.html#Db.DB_CREATE Db.open@Db.DB_CREATE
+__APIREL__/api_java/db_open.html#Db.DB_EXCL Db.open@Db.DB_EXCL
+__APIREL__/api_java/db_open.html#Db.DB_NOMMAP Db.open@Db.DB_NOMMAP
+__APIREL__/api_java/db_open.html#Db.DB_RDONLY Db.open@Db.DB_RDONLY
+__APIREL__/api_java/db_open.html#Db.DB_THREAD Db.open@Db.DB_THREAD
+__APIREL__/api_java/db_open.html#Db.DB_TRUNCATE Db.open@Db.DB_TRUNCATE
+__APIREL__/api_java/db_open.html#Db.DB_OLD_VERSION Db.open@Db.DB_OLD_VERSION
+__APIREL__/api_java/db_put.html#2 @Db.put
+__APIREL__/api_java/db_put.html#Db.DB_APPEND Db.put@Db.DB_APPEND
+__APIREL__/api_java/db_put.html#Db.DB_NODUPDATA Db.put@Db.DB_NODUPDATA
+__APIREL__/api_java/db_put.html#Db.DB_NOOVERWRITE Db.put@Db.DB_NOOVERWRITE
+__APIREL__/api_java/db_remove.html#2 @Db.remove
+__APIREL__/api_java/db_rename.html#2 @Db.rename
+__APIREL__/api_java/db_set_append_recno.html#2 @Db.set_append_recno
+__APIREL__/api_java/db_set_bt_compare.html#2 @Db.set_bt_compare
+__APIREL__/api_java/db_set_bt_minkey.html#2 @Db.set_bt_minkey
+__APIREL__/api_java/db_set_bt_prefix.html#2 @Db.set_bt_prefix
+__APIREL__/api_java/db_set_cachesize.html#2 @Db.set_cachesize
+__APIREL__/api_java/db_set_dup_compare.html#2 @Db.set_dup_compare
+__APIREL__/api_java/db_set_errcall.html#2 @Db.set_errcall
+__APIREL__/api_java/db_set_errpfx.html#2 @Db.set_errpfx
+__APIREL__/api_java/db_set_feedback.html#2 @Db.set_feedback
+__APIREL__/api_java/db_set_feedback.html#Db.DB_UPGRADE Db.set_feedback@Db.DB_UPGRADE
+__APIREL__/api_java/db_set_feedback.html#Db.DB_VERIFY Db.set_feedback@Db.DB_VERIFY
+__APIREL__/api_java/db_set_flags.html#2 @Db.set_flags
+__APIREL__/api_java/db_set_flags.html#Db.DB_DUP Db.set_flags@Db.DB_DUP
+__APIREL__/api_java/db_set_flags.html#Db.DB_DUPSORT Db.set_flags@Db.DB_DUPSORT
+__APIREL__/api_java/db_set_flags.html#Db.DB_RECNUM Db.set_flags@Db.DB_RECNUM
+__APIREL__/api_java/db_set_flags.html#Db.DB_REVSPLITOFF Db.set_flags@Db.DB_REVSPLITOFF
+__APIREL__/api_java/db_set_flags.html#Db.DB_DUP Db.set_flags@Db.DB_DUP
+__APIREL__/api_java/db_set_flags.html#Db.DB_DUPSORT Db.set_flags@Db.DB_DUPSORT
+__APIREL__/api_java/db_set_flags.html#Db.DB_RENUMBER Db.set_flags@Db.DB_RENUMBER
+__APIREL__/api_java/db_set_flags.html#Db.DB_SNAPSHOT Db.set_flags@Db.DB_SNAPSHOT
+__APIREL__/api_java/db_set_h_ffactor.html#2 @Db.set_h_ffactor
+__APIREL__/api_java/db_set_h_hash.html#2 @Db.set_h_hash
+__APIREL__/api_java/db_set_h_nelem.html#2 @Db.set_h_nelem
+__APIREL__/api_java/db_set_lorder.html#2 @Db.set_lorder
+__APIREL__/api_java/db_set_pagesize.html#2 @Db.set_pagesize
+__APIREL__/api_java/db_set_q_extentsize.html#2 @Db.set_q_extentsize
+__APIREL__/api_java/db_set_re_delim.html#2 @Db.set_re_delim
+__APIREL__/api_java/db_set_re_len.html#2 @Db.set_re_len
+__APIREL__/api_java/db_set_re_pad.html#2 @Db.set_re_pad
+__APIREL__/api_java/db_set_re_source.html#2 @Db.set_re_source
+__APIREL__/api_java/db_stat.html#2 @Db.stat
+__APIREL__/api_java/db_stat.html#Db.DB_CACHED_COUNTS Db.stat@Db.DB_CACHED_COUNTS
+__APIREL__/api_java/db_stat.html#Db.DB_RECORDCOUNT Db.stat@Db.DB_RECORDCOUNT
+__APIREL__/api_java/db_sync.html#2 @Db.sync
+__APIREL__/api_java/db_upgrade.html#2 @Db.upgrade
+__APIREL__/api_java/db_upgrade.html#Db.DB_DUPSORT Db.upgrade@Db.DB_DUPSORT
+__APIREL__/api_java/db_upgrade.html#Db.DB_OLD_VERSION Db.upgrade@Db.DB_OLD_VERSION
+__APIREL__/api_java/db_verify.html#2 @Db.verify
+__APIREL__/api_java/db_verify.html#Db.DB_SALVAGE Db.verify@Db.DB_SALVAGE
+__APIREL__/api_java/db_verify.html#Db.DB_AGGRESSIVE Db.verify@Db.DB_AGGRESSIVE
+__APIREL__/api_java/db_verify.html#Db.DB_NOORDERCHK Db.verify@Db.DB_NOORDERCHK
+__APIREL__/api_java/db_verify.html#Db.DB_ORDERCHKONLY Db.verify@Db.DB_ORDERCHKONLY
+__APIREL__/api_java/dbc_close.html#2 @Dbc.close
+__APIREL__/api_java/dbc_count.html#2 @Dbc.count
+__APIREL__/api_java/dbc_del.html#2 @Dbc.del
+__APIREL__/api_java/dbc_dup.html#2 @Dbc.dup
+__APIREL__/api_java/dbc_dup.html#Db.DB_POSITION Dbc.dup@Db.DB_POSITION
+__APIREL__/api_java/dbc_get.html#2 @Dbc.get
+__APIREL__/api_java/dbc_get.html#Db.DB_CURRENT Dbc.get@Db.DB_CURRENT
+__APIREL__/api_java/dbc_get.html#Db.DB_FIRST Dbc.get@Db.DB_FIRST
+__APIREL__/api_java/dbc_get.html#Db.DB_LAST Dbc.get@Db.DB_LAST
+__APIREL__/api_java/dbc_get.html#Db.DB_GET_BOTH Dbc.get@Db.DB_GET_BOTH
+__APIREL__/api_java/dbc_get.html#Db.DB_GET_RECNO Dbc.get@Db.DB_GET_RECNO
+__APIREL__/api_java/dbc_get.html#Db.DB_JOIN_ITEM Dbc.get@Db.DB_JOIN_ITEM
+__APIREL__/api_java/dbc_get.html#Db.DB_NEXT Dbc.get@Db.DB_NEXT
+__APIREL__/api_java/dbc_get.html#Db.DB_PREV Dbc.get@Db.DB_PREV
+__APIREL__/api_java/dbc_get.html#Db.DB_NEXT_DUP Dbc.get@Db.DB_NEXT_DUP
+__APIREL__/api_java/dbc_get.html#Db.DB_NEXT_NODUP Dbc.get@Db.DB_NEXT_NODUP
+__APIREL__/api_java/dbc_get.html#Db.DB_PREV_NODUP Dbc.get@Db.DB_PREV_NODUP
+__APIREL__/api_java/dbc_get.html#Db.DB_SET Dbc.get@Db.DB_SET
+__APIREL__/api_java/dbc_get.html#Db.DB_SET_RANGE Dbc.get@Db.DB_SET_RANGE
+__APIREL__/api_java/dbc_get.html#Db.DB_SET_RECNO Dbc.get@Db.DB_SET_RECNO
+__APIREL__/api_java/dbc_get.html#Db.DB_RMW Dbc.get@Db.DB_RMW
+__APIREL__/api_java/dbc_put.html#2 @Dbc.put
+__APIREL__/api_java/dbc_put.html#Db.DB_AFTER Dbc.put@Db.DB_AFTER
+__APIREL__/api_java/dbc_put.html#Db.DB_BEFORE Dbc.put@Db.DB_BEFORE
+__APIREL__/api_java/dbc_put.html#Db.DB_CURRENT Dbc.put@Db.DB_CURRENT
+__APIREL__/api_java/dbc_put.html#Db.DB_KEYFIRST Dbc.put@Db.DB_KEYFIRST
+__APIREL__/api_java/dbc_put.html#Db.DB_KEYLAST Dbc.put@Db.DB_KEYLAST
+__APIREL__/api_java/dbc_put.html#Db.DB_NODUPDATA Dbc.put@Db.DB_NODUPDATA
+__APIREL__/api_java/env_close.html#2 @DbEnv.close
+__APIREL__/api_java/env_open.html#2 @DbEnv.open
+__APIREL__/api_java/env_open.html#Db.DB_JOINENV DbEnv.open@Db.DB_JOINENV
+__APIREL__/api_java/env_open.html#Db.DB_INIT_CDB DbEnv.open@Db.DB_INIT_CDB
+__APIREL__/api_java/env_open.html#Db.DB_INIT_LOCK DbEnv.open@Db.DB_INIT_LOCK
+__APIREL__/api_java/env_open.html#Db.DB_INIT_LOG DbEnv.open@Db.DB_INIT_LOG
+__APIREL__/api_java/env_open.html#Db.DB_INIT_MPOOL DbEnv.open@Db.DB_INIT_MPOOL
+__APIREL__/api_java/env_open.html#Db.DB_INIT_TXN DbEnv.open@Db.DB_INIT_TXN
+__APIREL__/api_java/env_open.html#Db.DB_RECOVER DbEnv.open@Db.DB_RECOVER
+__APIREL__/api_java/env_open.html#Db.DB_RECOVER_FATAL DbEnv.open@Db.DB_RECOVER_FATAL
+__APIREL__/api_java/env_open.html#Db.DB_USE_ENVIRON DbEnv.open@Db.DB_USE_ENVIRON
+__APIREL__/api_java/env_open.html#Db.DB_USE_ENVIRON_ROOT DbEnv.open@Db.DB_USE_ENVIRON_ROOT
+__APIREL__/api_java/env_open.html#Db.DB_CREATE DbEnv.open@Db.DB_CREATE
+__APIREL__/api_java/env_open.html#Db.DB_LOCKDOWN DbEnv.open@Db.DB_LOCKDOWN
+__APIREL__/api_java/env_open.html#Db.DB_PRIVATE DbEnv.open@Db.DB_PRIVATE
+__APIREL__/api_java/env_open.html#Db.DB_SYSTEM_MEM DbEnv.open@Db.DB_SYSTEM_MEM
+__APIREL__/api_java/env_open.html#Db.DB_THREAD DbEnv.open@Db.DB_THREAD
+__APIREL__/api_java/env_remove.html#2 @DbEnv.remove
+__APIREL__/api_java/env_remove.html#Db.DB_FORCE DbEnv.remove@Db.DB_FORCE
+__APIREL__/api_java/env_remove.html#Db.DB_USE_ENVIRON DbEnv.remove@Db.DB_USE_ENVIRON
+__APIREL__/api_java/env_remove.html#Db.DB_USE_ENVIRON_ROOT DbEnv.remove@Db.DB_USE_ENVIRON_ROOT
+__APIREL__/api_java/env_set_cachesize.html#2 @DbEnv.set_cachesize
+__APIREL__/api_java/env_set_data_dir.html#2 @DbEnv.set_data_dir
+__APIREL__/api_java/env_set_errcall.html#2 @DbEnv.set_errcall
+__APIREL__/api_java/env_set_errpfx.html#2 @DbEnv.set_errpfx
+__APIREL__/api_java/env_set_feedback.html#2 @DbEnv.set_feedback
+__APIREL__/api_java/env_set_feedback.html#Db.DB_RECOVER DbEnv.set_feedback@Db.DB_RECOVER
+__APIREL__/api_java/env_set_flags.html#2 @DbEnv.set_flags
+__APIREL__/api_java/env_set_flags.html#Db.DB_CDB_ALLDB DbEnv.set_flags@Db.DB_CDB_ALLDB
+__APIREL__/api_java/env_set_flags.html#Db.DB_NOMMAP DbEnv.set_flags@Db.DB_NOMMAP
+__APIREL__/api_java/env_set_flags.html#Db.DB_TXN_NOSYNC DbEnv.set_flags@Db.DB_TXN_NOSYNC
+__APIREL__/api_java/env_set_lg_bsize.html#2 @DbEnv.set_lg_bsize
+__APIREL__/api_java/env_set_lg_dir.html#2 @DbEnv.set_lg_dir
+__APIREL__/api_java/env_set_lg_max.html#2 @DbEnv.set_lg_max
+__APIREL__/api_java/env_set_lk_conflicts.html#2 @DbEnv.set_lk_conflicts
+__APIREL__/api_java/env_set_lk_detect.html#2 @DbEnv.set_lk_detect
+__APIREL__/api_java/env_set_lk_detect.html#DB_LOCK_DEFAULT DbEnv.set_lk_detect@DB_LOCK_DEFAULT
+__APIREL__/api_java/env_set_lk_detect.html#DB_LOCK_OLDEST DbEnv.set_lk_detect@DB_LOCK_OLDEST
+__APIREL__/api_java/env_set_lk_detect.html#DB_LOCK_RANDOM DbEnv.set_lk_detect@DB_LOCK_RANDOM
+__APIREL__/api_java/env_set_lk_detect.html#DB_LOCK_YOUNGEST DbEnv.set_lk_detect@DB_LOCK_YOUNGEST
+__APIREL__/api_java/env_set_lk_max.html#2 @DbEnv.set_lk_max
+__APIREL__/api_java/env_set_lk_max_locks.html#2 @DbEnv.set_lk_max_locks
+__APIREL__/api_java/env_set_lk_max_lockers.html#2 @DbEnv.set_lk_max_lockers
+__APIREL__/api_java/env_set_lk_max_objects.html#2 @DbEnv.set_lk_max_objects
+__APIREL__/api_java/env_set_mp_mmapsize.html#2 @DbEnv.set_mp_mmapsize
+__APIREL__/api_java/env_set_mutexlocks.html#2 @DbEnv.set_mutexlocks
+__APIREL__/api_java/env_set_pageyield.html#2 @DbEnv.set_pageyield
+__APIREL__/api_java/env_set_panicstate.html#2 @DbEnv.set_panicstate
+__APIREL__/api_java/env_set_rec_init.html#2 @DbEnv.set_recovery_init
+__APIREL__/api_java/env_set_region_init.html#2 @DbEnv.set_region_init
+__APIREL__/api_java/env_set_server.html#2 @DbEnv.set_server
+__APIREL__/api_java/env_set_server.html#DB_NOSERVER DbEnv.set_server@DB_NOSERVER
+__APIREL__/api_java/env_set_server.html#DB_NOSERVER_ID DbEnv.set_server@DB_NOSERVER_ID
+__APIREL__/api_java/env_set_shm_key.html#2 @DbEnv.set_shm_key
+__APIREL__/api_java/env_set_tas_spins.html#2 @DbEnv.set_tas_spins
+__APIREL__/api_java/env_set_tmp_dir.html#2 @DbEnv.set_tmp_dir
+__APIREL__/api_java/env_set_tx_max.html#2 @DbEnv.set_tx_max
+__APIREL__/api_java/env_set_tx_recover.html#2 @DbEnv.set_tx_recover
+__APIREL__/api_java/env_set_tx_recover.html#Db.DB_TXN_BACKWARD_ROLL DbEnv.set_tx_recover@Db.DB_TXN_BACKWARD_ROLL
+__APIREL__/api_java/env_set_tx_recover.html#Db.DB_TXN_FORWARD_ROLL DbEnv.set_tx_recover@Db.DB_TXN_FORWARD_ROLL
+__APIREL__/api_java/env_set_tx_recover.html#Db.DB_TXN_ABORT DbEnv.set_tx_recover@Db.DB_TXN_ABORT
+__APIREL__/api_java/env_set_tx_timestamp.html#2 @DbEnv.set_tx_timestamp
+__APIREL__/api_java/env_set_verbose.html#2 @DbEnv.set_verbose
+__APIREL__/api_java/env_set_verbose.html#Db.DB_VERB_CHKPOINT DbEnv.set_verbose@Db.DB_VERB_CHKPOINT
+__APIREL__/api_java/env_set_verbose.html#Db.DB_VERB_DEADLOCK DbEnv.set_verbose@Db.DB_VERB_DEADLOCK
+__APIREL__/api_java/env_set_verbose.html#Db.DB_VERB_RECOVERY DbEnv.set_verbose@Db.DB_VERB_RECOVERY
+__APIREL__/api_java/env_set_verbose.html#Db.DB_VERB_WAITSFOR DbEnv.set_verbose@Db.DB_VERB_WAITSFOR
+__APIREL__/api_java/env_strerror.html#2 @DbEnv.strerror
+__APIREL__/api_java/env_version.html#2 @DbEnv.get_version_major
+__APIREL__/api_java/lock_detect.html#2 @DbEnv.lock_detect
+__APIREL__/api_java/lock_detect.html#Db.DB_LOCK_CONFLICT DbEnv.lock_detect@Db.DB_LOCK_CONFLICT
+__APIREL__/api_java/lock_get.html#2 @DbEnv.lock_get
+__APIREL__/api_java/lock_get.html#Db.DB_LOCK_NOWAIT DbEnv.lock_get@Db.DB_LOCK_NOWAIT
+__APIREL__/api_java/lock_get.html#Db.DB_LOCK_NOTGRANTED DbEnv.lock_get@Db.DB_LOCK_NOTGRANTED
+__APIREL__/api_java/lock_id.html#2 @DbEnv.lock_id
+__APIREL__/api_java/lock_put.html#2 @DbLock.put
+__APIREL__/api_java/lock_stat.html#2 @DbEnv.lock_stat
+__APIREL__/api_java/lock_vec.html#2 @DbEnv.lock_vec
+__APIREL__/api_java/log_archive.html#2 @DbEnv.log_archive
+__APIREL__/api_java/log_archive.html#Db.DB_ARCH_ABS DbEnv.log_archive@Db.DB_ARCH_ABS
+__APIREL__/api_java/log_archive.html#Db.DB_ARCH_DATA DbEnv.log_archive@Db.DB_ARCH_DATA
+__APIREL__/api_java/log_archive.html#Db.DB_ARCH_LOG DbEnv.log_archive@Db.DB_ARCH_LOG
+__APIREL__/api_java/log_compare.html#2 @DbEnv.log_compare
+__APIREL__/api_java/log_file.html#2 @DbEnv.log_file
+__APIREL__/api_java/log_flush.html#2 @DbEnv.log_flush
+__APIREL__/api_java/log_get.html#2 @DbEnv.log_get
+__APIREL__/api_java/log_get.html#Db.DB_CHECKPOINT DbEnv.log_get@Db.DB_CHECKPOINT
+__APIREL__/api_java/log_get.html#Db.DB_FIRST DbEnv.log_get@Db.DB_FIRST
+__APIREL__/api_java/log_get.html#Db.DB_LAST DbEnv.log_get@Db.DB_LAST
+__APIREL__/api_java/log_get.html#Db.DB_NEXT DbEnv.log_get@Db.DB_NEXT
+__APIREL__/api_java/log_get.html#Db.DB_PREV DbEnv.log_get@Db.DB_PREV
+__APIREL__/api_java/log_get.html#Db.DB_CURRENT DbEnv.log_get@Db.DB_CURRENT
+__APIREL__/api_java/log_get.html#Db.DB_SET DbEnv.log_get@Db.DB_SET
+__APIREL__/api_java/log_put.html#2 @DbEnv.log_put
+__APIREL__/api_java/log_put.html#Db.DB_CHECKPOINT DbEnv.log_put@Db.DB_CHECKPOINT
+__APIREL__/api_java/log_put.html#Db.DB_CURLSN DbEnv.log_put@Db.DB_CURLSN
+__APIREL__/api_java/log_put.html#Db.DB_FLUSH DbEnv.log_put@Db.DB_FLUSH
+__APIREL__/api_java/log_register.html#2 @DbEnv.log_register
+__APIREL__/api_java/log_stat.html#2 @DbEnv.log_stat
+__APIREL__/api_java/log_unregister.html#2 @DbEnv.log_unregister
+__APIREL__/api_java/memp_fclose.html#2 @DbMpoolFile.close
+__APIREL__/api_java/memp_fget.html#2 @DbMpoolFile.get
+__APIREL__/api_java/memp_fopen.html#2 @DbMpoolFile.open
+__APIREL__/api_java/memp_fput.html#2 @DbMpoolFile.put
+__APIREL__/api_java/memp_fset.html#2 @DbMpoolFile.set
+__APIREL__/api_java/memp_fsync.html#2 @DbMpoolFile.sync
+__APIREL__/api_java/memp_register.html#2 @DbEnv.memp_register
+__APIREL__/api_java/memp_stat.html#2 @DbEnv.memp_stat
+__APIREL__/api_java/memp_sync.html#2 @DbEnv.memp_sync
+__APIREL__/api_java/memp_trickle.html#2 @DbEnv.memp_trickle
+__APIREL__/api_java/txn_abort.html#2 @DbTxn.abort
+__APIREL__/api_java/txn_begin.html#2 @DbEnv.txn_begin
+__APIREL__/api_java/txn_begin.html#Db.DB_TXN_NOSYNC DbEnv.txn_begin@Db.DB_TXN_NOSYNC
+__APIREL__/api_java/txn_begin.html#Db.DB_TXN_NOWAIT DbEnv.txn_begin@Db.DB_TXN_NOWAIT
+__APIREL__/api_java/txn_begin.html#Db.DB_TXN_SYNC DbEnv.txn_begin@Db.DB_TXN_SYNC
+__APIREL__/api_java/txn_checkpoint.html#2 @DbEnv.txn_checkpoint
+__APIREL__/api_java/txn_checkpoint.html#Db.DB_FORCE DbEnv.txn_checkpoint@Db.DB_FORCE
+__APIREL__/api_java/txn_commit.html#2 @DbTxn.commit
+__APIREL__/api_java/txn_commit.html#Db.DB_TXN_NOSYNC DbTxn.commit@Db.DB_TXN_NOSYNC
+__APIREL__/api_java/txn_commit.html#Db.DB_TXN_SYNC DbTxn.commit@Db.DB_TXN_SYNC
+__APIREL__/api_java/txn_id.html#2 @DbTxn.id
+__APIREL__/api_java/txn_prepare.html#2 @DbTxn.prepare
+__APIREL__/api_java/txn_stat.html#2 @DbEnv.txn_stat
diff --git a/bdb/docs/api_java/runrec_class.html b/bdb/docs/api_java/runrec_class.html
new file mode 100644
index 00000000000..31fd07b9493
--- /dev/null
+++ b/bdb/docs/api_java/runrec_class.html
@@ -0,0 +1,50 @@
+<!--$Id: runrec_class.so,v 10.7 2000/09/21 19:58:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbRunRecoveryException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbRunRecoveryException</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbRunRecoveryException extends DbException { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbRunRecoveryException class and
+how it is used by the various Db* classes.
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery. (For example, if Berkeley DB is unable
+to write log records to disk because there is insufficient disk space.)
+When a fatal error occurs in Berkeley DB, methods will throw a
+DbRunRecoveryException, at which point all subsequent database
+calls will also fail in the same way. When this occurs, recovery should
+be performed.
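+<p>For example, an application might respond to this exception by discarding
+all of its Berkeley DB handles, closing the environment and re-opening it with
+the Db.DB_RECOVER flag. The environment home directory and flag choices in the
+following sketch are illustrative only:
+<pre>
+import com.sleepycat.db.*;
+
+public class RunRecoveryExample
+{
+    // Open a transactional environment, optionally running recovery first.
+    static DbEnv openEnv(String home, boolean runRecovery)
+        throws DbException, java.io.FileNotFoundException
+    {
+        int flags = Db.DB_CREATE | Db.DB_INIT_TXN |
+            Db.DB_INIT_LOCK | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL;
+        if (runRecovery)
+            flags |= Db.DB_RECOVER;
+        DbEnv dbenv = new DbEnv(0);
+        dbenv.open(home, flags, 0);
+        return dbenv;
+    }
+
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = openEnv("/var/dbhome", false);
+        try {
+            // ... database and transaction operations, for example:
+            dbenv.txn_checkpoint(0, 0, 0);
+        } catch (DbRunRecoveryException rre) {
+            // A fatal error occurred: discard all handles, close the
+            // environment and run recovery before continuing.
+            dbenv.close(0);
+            dbenv = openEnv("/var/dbhome", true);
+        }
+        dbenv.close(0);
+    }
+}
+</pre>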
+<h3>Class</h3>
+<a href="../api_java/except_class.html">DbException</a>
+<h1>See Also</h1>
+<a href="../api_java/get_errno.html">DbException.get_errno</a>,
+<a href="../api_java/deadlock_class.html">DbDeadlockException</a>,
+<a href="../api_java/mem_class.html">DbMemoryException</a>
+and
+<a href="../api_java/runrec_class.html">DbRunRecoveryException</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/txn_abort.html b/bdb/docs/api_java/txn_abort.html
new file mode 100644
index 00000000000..48f4ddf0784
--- /dev/null
+++ b/bdb/docs/api_java/txn_abort.html
@@ -0,0 +1,65 @@
+<!--$Id: txn_abort.so,v 10.25 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.abort</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn.abort</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void abort()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.abort method causes an abnormal termination of the
+transaction. The log is played backwards and any necessary recovery
+operations are initiated through the <b>recover</b> function specified
+to <a href="../api_java/env_open.html">DbEnv.open</a>. After the log processing is completed, all locks
+held by the transaction are released. As is the case for
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>, applications that require strict two-phase locking
+should not explicitly release any locks.
+<p>In the case of nested transactions, aborting a parent transaction causes
+all children (unresolved or not) of the parent transaction to be aborted.
+<p>Once the DbTxn.abort method returns, the <a href="../api_java/txn_class.html">DbTxn</a> handle may not
+be accessed again.
+<p>The DbTxn.abort method throws an exception that encapsulates a non-zero error value on
+failure.
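+<p>For example, a common pattern is to abort the transaction whenever any
+operation performed in its context fails. The <b>dbenv</b> and <b>db</b>
+handles in the following sketch are assumed to have been opened elsewhere
+with transaction support:
+<pre>
+import com.sleepycat.db.*;
+
+public class AbortExample
+{
+    // Attempt a transactional update; abort on any failure so that the
+    // transaction's changes are undone and its locks are released.
+    static void update(DbEnv dbenv, Db db, Dbt key, Dbt data)
+        throws DbException
+    {
+        DbTxn txn = dbenv.txn_begin(null, 0);
+        try {
+            db.put(txn, key, data, 0);
+            txn.commit(0);
+        } catch (DbException e) {
+            // Roll the transaction back; the handle may not be used again.
+            txn.abort();
+            throw e;
+        }
+    }
+}
+</pre>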
+<h1>Errors</h1>
+<p>The DbTxn.abort method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.abort method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/txn_begin.html b/bdb/docs/api_java/txn_begin.html
new file mode 100644
index 00000000000..f81e86aeaae
--- /dev/null
+++ b/bdb/docs/api_java/txn_begin.html
@@ -0,0 +1,93 @@
+<!--$Id: txn_begin.so,v 10.37 2001/01/11 17:47:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.txn_begin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.txn_begin</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbTxn txn_begin(DbTxn parent, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.txn_begin method creates a new transaction in the environment
+and returns a <a href="../api_java/txn_class.html">DbTxn</a> that uniquely identifies it.
+<p>If the <b>parent</b> argument is non-null, the new transaction will
+be a nested transaction, with the transaction indicated by
+<b>parent</b> as its parent. Transactions may be
+nested to any level.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log when this transaction commits or
+prepares. This means the transaction will exhibit the ACI (atomicity,
+consistency and isolation) properties, but not D (durability), i.e.,
+database integrity will be maintained but it is possible that this
+transaction may be undone during recovery instead of being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of the
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a> interface.
+<p><dt><a name="Db.DB_TXN_NOWAIT">Db.DB_TXN_NOWAIT</a><dd>If a lock is unavailable for any Berkeley DB operation performed in the context
+of this transaction, return immediately instead of blocking on the lock.
+The error return in this case will be <a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">Db.DB_LOCK_NOTGRANTED</a>.
+<p><dt><a name="Db.DB_TXN_SYNC">Db.DB_TXN_SYNC</a><dd>Synchronously flush the log when this transaction commits or prepares.
+This means the transaction will exhibit all of the ACID (atomicity,
+consistency, isolation and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_java/env_open.html#DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>
+interface.
+</dl>
+<p><b>Note: A transaction may not span threads,
+i.e., each transaction must begin and end in the same thread, and each
+transaction may only be used by a single thread.</b>
+<p><b>Note: cursors may not span transactions, i.e., each cursor must be opened
+and closed within a single transaction.</b>
+<p><b>Note: a parent transaction may not issue any Berkeley DB operations, except for
+DbEnv.txn_begin, <a href="../api_java/txn_abort.html">DbTxn.abort</a> and <a href="../api_java/txn_commit.html">DbTxn.commit</a>, while it has
+active child transactions (child transactions that have not yet been
+committed or aborted).</b>
+<p>The DbEnv.txn_begin method throws an exception that encapsulates a non-zero error value on
+failure.
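+<p>For example, the following sketch begins a parent transaction and then a
+nested child transaction that does not block waiting for locks. The
+<b>dbenv</b> handle is assumed to have been opened elsewhere with the
+transaction subsystem initialized:
+<pre>
+import com.sleepycat.db.*;
+
+public class BeginExample
+{
+    static void nested(DbEnv dbenv) throws DbException
+    {
+        DbTxn parent = dbenv.txn_begin(null, 0);
+        // The child returns Db.DB_LOCK_NOTGRANTED errors instead of
+        // blocking when a lock is unavailable.
+        DbTxn child = dbenv.txn_begin(parent, Db.DB_TXN_NOWAIT);
+        // ... operations performed in the context of child ...
+        child.commit(0);    // final resolution is deferred to the parent
+        parent.commit(0);
+    }
+}
+</pre>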
+<h1>Errors</h1>
+<p>The DbEnv.txn_begin method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of concurrent transactions has been reached.
+</dl>
+<p>The DbEnv.txn_begin method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.txn_begin method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/txn_checkpoint.html b/bdb/docs/api_java/txn_checkpoint.html
new file mode 100644
index 00000000000..f2bc7528aba
--- /dev/null
+++ b/bdb/docs/api_java/txn_checkpoint.html
@@ -0,0 +1,74 @@
+<!--$Id: txn_checkpoint.so,v 10.25 2000/09/08 15:20:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.txn_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.txn_checkpoint</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int txn_checkpoint(int kbyte, int min, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.txn_checkpoint method flushes the underlying memory pool,
+writes a checkpoint record to the log and then flushes the log.
+<p>If either <b>kbyte</b> or <b>min</b> is non-zero, the checkpoint is only
+done if there has been activity since the last checkpoint and either
+more than <b>min</b> minutes have passed since the last checkpoint,
+or more than <b>kbyte</b> kilobytes of log data have been written since
+the last checkpoint.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="Db.DB_FORCE">Db.DB_FORCE</a><dd>Force a checkpoint record even if there has been no activity since the
+last checkpoint.
+</dl>
+<p>The DbEnv.txn_checkpoint method throws an exception that encapsulates a non-zero error value on
+failure, and returns <a href="../api_c/memp_fsync.html#DB_INCOMPLETE">Db.DB_INCOMPLETE</a> if there were pages that needed to be
+written to complete the checkpoint but that <a href="../api_java/memp_sync.html">DbEnv.memp_sync</a> was unable
+to write immediately.
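+<p>For example, an application checkpoint thread might call the method with
+non-zero <b>kbyte</b> and <b>min</b> values and retry while pages remain to
+be written. The thresholds and retry interval below are illustrative only:
+<pre>
+import com.sleepycat.db.*;
+
+public class CheckpointExample
+{
+    // Checkpoint if 60 minutes have passed or 512KB of log has been
+    // written since the last checkpoint, retrying while DbEnv.memp_sync
+    // was unable to write all of the pages immediately.
+    static void checkpoint(DbEnv dbenv) throws DbException
+    {
+        int ret = dbenv.txn_checkpoint(512, 60, 0);
+        while (ret == Db.DB_INCOMPLETE) {
+            try {
+                Thread.sleep(1000);
+            } catch (InterruptedException ie) {
+                // ignored; retry the checkpoint
+            }
+            ret = dbenv.txn_checkpoint(512, 60, 0);
+        }
+    }
+}
+</pre>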
+<h1>Errors</h1>
+<p>The DbEnv.txn_checkpoint method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.txn_checkpoint method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.txn_checkpoint method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/txn_class.html b/bdb/docs/api_java/txn_class.html
new file mode 100644
index 00000000000..ab386172bff
--- /dev/null
+++ b/bdb/docs/api_java/txn_class.html
@@ -0,0 +1,58 @@
+<!--$Id: txn_class.so,v 10.13 2000/12/04 18:05:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbTxn extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the DbTxn class.
+<p>The <a href="../api_java/dbenv_class.html">DbEnv</a> transaction methods and the DbTxn class provide
+transaction semantics. Full transaction support is provided by a
+collection of modules that provide interfaces to the services required
+for transaction processing. These services are recovery, concurrency
+control and the management of shared data.
+<p>Transaction semantics can be applied to the access methods described in
+Db through method call parameters.
+<p>The model intended for transactional use (and the one that is used by
+the access methods) is write-ahead logging to record both before- and
+after-images. Locking follows a two-phase protocol, with all locks being
+released at transaction commit.
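+<p>For example, transaction protection is requested simply by passing a
+DbTxn handle to the access method calls. The handles in the following
+sketch are assumed to have been opened elsewhere:
+<pre>
+import com.sleepycat.db.*;
+
+public class TxnReadExample
+{
+    // Perform a single read within a transaction and commit it.
+    static int readOne(DbEnv dbenv, Db db, Dbt key, Dbt data)
+        throws DbException
+    {
+        DbTxn txn = dbenv.txn_begin(null, 0);
+        int ret = db.get(txn, key, data, 0);
+        txn.commit(0);
+        return ret;
+    }
+}
+</pre>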
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/txn_commit.html b/bdb/docs/api_java/txn_commit.html
new file mode 100644
index 00000000000..53aa0df4622
--- /dev/null
+++ b/bdb/docs/api_java/txn_commit.html
@@ -0,0 +1,85 @@
+<!--$Id: txn_commit.so,v 10.27 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn.commit</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void commit(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.commit method ends the transaction. In the case of nested
+transactions, if the transaction is a parent transaction, committing
+the parent transaction causes all unresolved children of the parent to
+be committed.
+<p>In the case of nested transactions, if the transaction is a child
+transaction, its locks are not released, but are acquired by its parent.
+While the commit of the child transaction will succeed, the actual
+resolution of the child transaction is postponed until the parent
+transaction is committed or aborted, i.e., if its parent transaction
+commits, it will be committed, and if its parent transaction aborts, it
+will be aborted.
+<p>The <b>flags</b> parameter must be set to 0 or one of the following
+values:
+<p><dl compact>
+<p><dt><a name="Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log. This means the transaction will
+exhibit the ACI (atomicity, consistency and isolation) properties, but
+not D (durability), i.e., database integrity will be maintained but it is
+possible that this transaction may be undone during recovery instead of
+being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of the
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a> interface.
+<p><dt><a name="Db.DB_TXN_SYNC">Db.DB_TXN_SYNC</a><dd>Synchronously flush the log. This means the transaction will exhibit
+all of the ACID (atomicity, consistency, isolation and durability)
+properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_java/env_open.html#DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>
+or <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a> interfaces.
+</dl>
+<p>Once the DbTxn.commit method returns, the <a href="../api_java/txn_class.html">DbTxn</a> handle may not
+be accessed again. If DbTxn.commit encounters an error, the
+transaction and all child transactions of the transaction are aborted.
+<p>The DbTxn.commit method throws an exception that encapsulates a non-zero error value on
+failure.
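+<p>For example, the following sketch commits with the Db.DB_TXN_NOSYNC flag,
+trading durability for commit speed; passing 0 instead gives the default,
+fully durable behavior. The handles are assumed to have been opened elsewhere
+with transaction support:
+<pre>
+import com.sleepycat.db.*;
+
+public class CommitExample
+{
+    // Commit without synchronously flushing the log: the transaction
+    // keeps atomicity, consistency and isolation, but may be undone by
+    // recovery after a crash.
+    static void commitNoSync(DbEnv dbenv, Db db, Dbt key, Dbt data)
+        throws DbException
+    {
+        DbTxn txn = dbenv.txn_begin(null, 0);
+        db.put(txn, key, data, 0);
+        txn.commit(Db.DB_TXN_NOSYNC);
+    }
+}
+</pre>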
+<h1>Errors</h1>
+<p>The DbTxn.commit method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.commit method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/txn_id.html b/bdb/docs/api_java/txn_id.html
new file mode 100644
index 00000000000..89fc62a37b6
--- /dev/null
+++ b/bdb/docs/api_java/txn_id.html
@@ -0,0 +1,51 @@
+<!--$Id: txn_id.so,v 10.12 1999/12/20 08:52:32 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn.id</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int id()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.id method returns the unique transaction id associated with the
+specified transaction. Locking calls made on behalf of this transaction
+should use the value returned from DbTxn.id as the locker parameter
+to the <a href="../api_java/lock_get.html">DbEnv.lock_get</a> or <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> calls.
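+<p>For example, an application issuing its own locking calls on behalf of a
+transaction might record the transaction id for use as the locker value; the
+following sketch only obtains the id:
+<pre>
+import com.sleepycat.db.*;
+
+public class TxnIdExample
+{
+    // Obtain the transaction's id so that explicit locking calls made on
+    // its behalf use the same locker as the access methods do. The value
+    // would be passed as the locker parameter to DbEnv.lock_get or
+    // DbEnv.lock_vec.
+    static int lockerFor(DbTxn txn) throws DbException
+    {
+        return txn.id();
+    }
+}
+</pre>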
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/txn_prepare.html b/bdb/docs/api_java/txn_prepare.html
new file mode 100644
index 00000000000..09feae726d6
--- /dev/null
+++ b/bdb/docs/api_java/txn_prepare.html
@@ -0,0 +1,65 @@
+<!--$Id: txn_prepare.so,v 10.17 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.prepare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbTxn.prepare</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void prepare()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.prepare method initiates the first phase of a two-phase commit.
+<p>In a distributed transaction environment, Berkeley DB can be used as a local
+transaction manager. In this case, the distributed transaction manager
+must send <i>prepare</i> messages to each local manager. The local
+manager must then issue a DbTxn.prepare and await its successful
+return before responding to the distributed transaction manager. Only
+after the distributed transaction manager receives successful responses
+from all of its <i>prepare</i> messages should it issue any
+<i>commit</i> messages.
+<p>In the case of nested transactions, preparing a parent transaction
+causes all unresolved children of the parent transaction to be prepared.
+<p>The DbTxn.prepare method throws an exception that encapsulates a non-zero error value on
+failure.
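+<p>For example, a local transaction manager might respond to a distributed
+transaction manager's <i>prepare</i> message as follows; the surrounding
+messaging is outside the scope of the sketch:
+<pre>
+import com.sleepycat.db.*;
+
+public class PrepareExample
+{
+    // Prepare the local transaction and report whether the prepare
+    // succeeded, leaving the final commit or abort decision to the
+    // distributed transaction manager.
+    static boolean onPrepareMessage(DbTxn txn)
+    {
+        try {
+            txn.prepare();
+            return true;        // vote "yes"
+        } catch (DbException e) {
+            return false;       // vote "no"; the coordinator should abort
+        }
+    }
+}
+</pre>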
+<h1>Errors</h1>
+<p>The DbTxn.prepare method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.prepare method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_java/txn_stat.html b/bdb/docs/api_java/txn_stat.html
new file mode 100644
index 00000000000..cb033ccde82
--- /dev/null
+++ b/bdb/docs/api_java/txn_stat.html
@@ -0,0 +1,95 @@
+<!--$Id: txn_stat.so,v 10.27 2000/05/25 13:47:08 dda Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.txn_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>DbEnv.txn_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbTxnStat txn_stat()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.txn_stat method
+creates a DbTxnStat object encapsulating the transaction region
+statistics. The following data fields are available from the
+DbTxnStat object:
+<p><dl compact>
+<dt>public <a href="../api_java/lsn_class.html">DbLsn</a> st_last_ckp;<dd>The LSN of the last checkpoint.
+<dt>public <a href="../api_java/lsn_class.html">DbLsn</a> st_pending_ckp;<dd>The LSN of any checkpoint that is currently in progress. If
+<b>st_pending_ckp</b> is the same as <b>st_last_ckp</b> there
+is no checkpoint in progress.
+<dt>public long st_time_ckp;<dd>The time the last completed checkpoint finished (as the number of seconds
+since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) <b>time</b> interface).
+<dt>public int st_last_txnid;<dd>The last transaction ID allocated.
+<dt>public int st_maxtxns;<dd>The maximum number of active transactions possible.
+<dt>public int st_nactive;<dd>The number of transactions that are currently active.
+<dt>public int st_maxnactive;<dd>The maximum number of active transactions at any one time.
+<dt>public int st_nbegins;<dd>The number of transactions that have begun.
+<dt>public int st_naborts;<dd>The number of transactions that have aborted.
+<dt>public int st_ncommits;<dd>The number of transactions that have committed.
+<dt>public int st_regsize;<dd>The size of the region.
+<dt>public int st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>public int st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+<dt>public Active st_txnarray[];<dd>The array of active transactions. Each element of the array is an object
+of type DbTxnStat.Active, a top level inner class, that has the following
+fields:
+<p><dl compact>
+<p><dt>public int txnid;<dd>The Transaction ID.
+<dt>public DbLsn lsn;<dd>The Lsn of the begin record.
+</dl>
+</dl>
+<p>The DbEnv.txn_stat method throws an exception that encapsulates a non-zero error value on
+failure.
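+<p>For example, the following sketch prints a few of the fields listed
+above; the output formatting is arbitrary:
+<pre>
+import com.sleepycat.db.*;
+
+public class TxnStatExample
+{
+    static void printStats(DbEnv dbenv) throws DbException
+    {
+        DbTxnStat stat = dbenv.txn_stat();
+        System.out.println("active transactions: " + stat.st_nactive);
+        System.out.println("maximum active:      " + stat.st_maxnactive);
+        System.out.println("begun/committed/aborted: " + stat.st_nbegins +
+            "/" + stat.st_ncommits + "/" + stat.st_naborts);
+        System.out.println("last checkpoint LSN: " + stat.st_last_ckp);
+    }
+}
+</pre>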
+<h1>Errors</h1>
+<p>The DbEnv.txn_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.txn_stat method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h3>Classes</h3>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_java/java_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_close.html b/bdb/docs/api_tcl/db_close.html
new file mode 100644
index 00000000000..eaae3165588
--- /dev/null
+++ b/bdb/docs/api_tcl/db_close.html
@@ -0,0 +1,59 @@
+<!--$Id: db_close.so,v 11.10 1999/12/20 08:52:33 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>close</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db close
+ [-nosync]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>close</b> command flushes any cached database information to
+disk, closes any open cursors, frees any allocated resources, and closes
+any underlying files. Since key/data pairs are cached in memory, failing
+to sync the file with the <i>db</i> <b>close</b> or <i>db</i> <b>sync</b> command may
+result in inconsistent or lost information.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-nosync<dd>Do not flush cached information to disk.
+<p>The -nosync flag is a dangerous option. It should only be set if the
+application is doing logging (with transactions) so that the database is
+recoverable after a system or application crash, or if the database is
+always generated from scratch after any system or application crash.
+<p>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data. While
+unlikely, it is possible for database corruption to happen if a system or
+application crash occurs while writing data to the database. To ensure
+that database corruption never occurs, applications must either: use
+transactions and logging with automatic recovery, use logging and
+application-specific recovery, or edit a copy of the database, and, once
+all applications using the database have successfully called
+<i>db</i> <b>close</b>, atomically replace the original database with the updated
+copy.
+</dl>
+<p>Once <i>db</i> <b>close</b> has been called, regardless of its return, the DB
+handle may not be accessed again.
+<p>The <i>db</i> <b>close</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_count.html b/bdb/docs/api_tcl/db_count.html
new file mode 100644
index 00000000000..123c030ccff
--- /dev/null
+++ b/bdb/docs/api_tcl/db_count.html
@@ -0,0 +1,38 @@
+<!--$Id: db_count.so,v 11.1 2000/02/02 18:23:19 sue Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>count</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db count key
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>count</b> command returns a count of the number
+of duplicate data items for the key given.
+If the key does not exist, a value of 0 is returned.
+If the key exists but has no duplicates, or the database does not
+support duplicates, a value of 1 is returned.
+If an error occurs, a Berkeley DB error message is returned or a Tcl error is
+thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_cursor.html b/bdb/docs/api_tcl/db_cursor.html
new file mode 100644
index 00000000000..79187650dda
--- /dev/null
+++ b/bdb/docs/api_tcl/db_cursor.html
@@ -0,0 +1,42 @@
+<!--$Id: db_cursor.so,v 11.10 1999/12/20 08:52:33 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>cursor</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db cursor
+ [-txn txnid]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>cursor</b> command creates a database cursor. The returned
+cursor handle is bound to a Tcl command of the form <b>dbN.cX</b>, where
+X is an integer starting at 0 (e.g., db0.c0 and db0.c1). It is through
+this Tcl command that the script accesses the cursor methods.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-txn txnid<dd>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction handle returned from <i>env</i> <b>txn</b>.
+</dl>
+<p>In the case of error, a Tcl error is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_del.html b/bdb/docs/api_tcl/db_del.html
new file mode 100644
index 00000000000..b3340312ab2
--- /dev/null
+++ b/bdb/docs/api_tcl/db_del.html
@@ -0,0 +1,47 @@
+<!--$Id: db_del.so,v 11.10 2000/03/15 16:43:04 sue Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>del</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db del
+ [-glob]
+ [-txn txnid]
+ key
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>del</b> command removes key/data pairs from the database.
+<p>In the presence of duplicate key values, all records associated with the
+designated key will be discarded.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-glob<dd>The specified key is a wildcard pattern, and all keys matching that
+pattern are discarded from the database. The pattern is a simple
+wildcard; any characters after the wildcard character are ignored.
+<p><dt>-txn txnid<dd>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction handle returned from <i>env</i> <b>txn</b>.
+</dl>
+<p>The <i>db</i> <b>del</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_get.html b/bdb/docs/api_tcl/db_get.html
new file mode 100644
index 00000000000..391f156529a
--- /dev/null
+++ b/bdb/docs/api_tcl/db_get.html
@@ -0,0 +1,98 @@
+<!--$Id: db_get.so,v 11.13 2000/11/28 20:12:30 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>get</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db get
+ [-consume]
+ [-consume_wait]
+ [-glob]
+ [-partial {doff dlen}]
+ [-recno]
+ [-rmw]
+ [-txn txnid]
+ key
+db get
+ -get_both
+ [-partial {doff dlen}]
+ [-rmw]
+ [-txn txnid]
+ key data
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>get</b> command returns key/data pairs from the database.
+<p>In the presence of duplicate key values, <i>db</i> <b>get</b> will return all
+duplicate items. Duplicates are sorted by insert order except where this
+order has been overridden by cursor operations.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-consume<dd>Return the record number and data from the available record closest to
+the head of the queue and delete the record. The cursor will be
+positioned on the deleted record. A record is available if it is not
+deleted and is not currently locked. The underlying database must be
+of type Queue for <b>-consume</b> to be specified.
+<p><dt>-consume_wait<dd>The same as the <b>-consume</b> flag except that if the Queue database
+is empty, the thread of control will wait until there is data in the
+queue before returning. The underlying database must be of type Queue
+for <b>-consume_wait</b> to be specified.
+<p><dt>-get_both key data<dd>Retrieve the key/data pair only if both the key and data match the
+arguments.
+<p><dt>-glob<dd>Return all keys matching the given key, where the key is a simple wildcard
+pattern. When this option is used, the key argument is treated as a pattern
+matching a set of keys rather than a single key. Any characters after the wildcard character
+are ignored. For example, in a database of last names, the
+command "db0 get Jones" will return all of the "Jones" in the database,
+and the command "db0 get -glob Jo*" will return both "Jones" and "Johnson"
+from the database. The command "db0 get -glob *" will return all of the
+key/data pairs in the database.
+<p><dt>-partial {doff dlen}<dd>The <b>dlen</b> bytes starting <b>doff</b> bytes from the beginning of
+the retrieved data record are returned as if they comprised the entire
+record. If any or all of the specified bytes do not exist in the record,
+the command is successful and the existing bytes or 0 bytes are
+returned.
+<p><dt>-recno<dd>Retrieve the specified numbered key/data pair from a database. For
+<b>-recno</b> to be specified, the specified key must be a record number
+and the underlying database must be of type Recno or Queue, or of type
+Btree that was created with the <b>-recnum</b> option.
+<p><dt>-rmw<dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring a
+read lock for the same item, in its own read-modify-write cycle, will not
+result in deadlock.
+<p>As the <i>db</i> <b>get</b> command will not hold locks across Berkeley DB interface
+calls in non-transactional environments, the <b>-rmw</b> argument to the
+<i>db</i> <b>get</b> call is only meaningful in the presence of transactions.
+<p><dt>-txn txnid<dd>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction handle returned from <i>env</i> <b>txn</b>.
+</dl>
+<p>If the underlying database is a Queue or Recno database, then the given
+key will be interpreted by Tcl as an integer. For all other database
+types, the key is interpreted by Tcl as a byte array unless indicated
+by a given option.
+<p>A list of key/data pairs is returned on success. If no matching key
+exists, an empty list is returned. In the case of error, a Tcl error
+is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_get_join.html b/bdb/docs/api_tcl/db_get_join.html
new file mode 100644
index 00000000000..e2ede7e47d4
--- /dev/null
+++ b/bdb/docs/api_tcl/db_get_join.html
@@ -0,0 +1,45 @@
+<!--$Id: db_get_join.so,v 11.15 2000/12/04 18:05:39 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db get_join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>get_join</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db get_join
+ [-txn txnid]
+ {<i>db</i> key}
+ {<i>db</i> key}
+ ...
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>get_join</b> command performs the cursor operations required to
+join the specified keys and returns a list of joined {key data} pairs.
+See <a href="../ref/am/join.html">Logical join</a> for more information on
+the underlying requirements for joining.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-txn txnid<dd>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction handle returned from <i>env</i> <b>txn</b>.
+</dl>
+<p>In the case of error, a Tcl error is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_get_type.html b/bdb/docs/api_tcl/db_get_type.html
new file mode 100644
index 00000000000..75fac1e78ae
--- /dev/null
+++ b/bdb/docs/api_tcl/db_get_type.html
@@ -0,0 +1,34 @@
+<!--$Id: db_get_type.so,v 11.9 1999/12/20 08:52:34 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>get_type</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db get_type
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>get_type</b> command returns the underlying database type,
+returning one of "btree", "hash", "queue" or "recno".
+<p>In the case of error, a Tcl error is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_is_byteswapped.html b/bdb/docs/api_tcl/db_is_byteswapped.html
new file mode 100644
index 00000000000..6a196eddf73
--- /dev/null
+++ b/bdb/docs/api_tcl/db_is_byteswapped.html
@@ -0,0 +1,37 @@
+<!--$Id: db_is_byteswapped.so,v 11.10 1999/12/20 08:52:34 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db is_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>is_byteswapped</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db is_byteswapped
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>is_byteswapped</b> command returns 0 if the underlying database
+files were created on an architecture of the same byte order as the
+current one, and 1 if they were not (i.e., big-endian on a little-endian
+machine or vice-versa). This value may be used to determine if application
+data needs to be adjusted for this architecture or not.
+<p>In the case of error, a Tcl error is thrown.
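+<p>For example, an illustrative Tcl sketch (the <b>$db</b> handle is assumed
+to be an already-open database):
+<p><pre>
+# Warn when the file was created on a machine of the opposite byte order.
+if {[$db is_byteswapped]} {
+    puts "database byte order differs from this architecture"
+}
+</pre>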
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_join.html b/bdb/docs/api_tcl/db_join.html
new file mode 100644
index 00000000000..ba3f0a2e5cb
--- /dev/null
+++ b/bdb/docs/api_tcl/db_join.html
@@ -0,0 +1,48 @@
+<!--$Id: db_join.so,v 11.16 2000/12/04 18:05:39 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>join</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db join
+ <i>db.cX</i>
+ <i>db.cY</i>
+ <i>db.cZ</i>
+ ...
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>join</b> command joins the specified cursors and returns a
+cursor handle that can be used to iterate through the joined {key data}
+pairs. The returned cursor handle is bound to a Tcl command of the form
+<b>dbN.cX</b>, where X is an integer starting at 0 (e.g., db0.c0 and
+db0.c1). It is through this Tcl command that the script accesses the
+cursor methods.
+<p>The returned join cursor has limited cursor functionality and only the
+<i>dbc</i> <b>get</b> and <i>dbc</i> <b>close</b> commands will succeed.
+<p>See <a href="../ref/am/join.html">Logical join</a> for more information on
+the underlying requirements for joining.
+<p>In a transaction protected environment, all of the cursors listed must
+have been created within the same transaction.
+<p>In the case of error, a Tcl error is thrown.
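+<p>For example, the following is a rough Tcl sketch (the <b>$primary</b>,
+<b>$color</b> and <b>$weight</b> handles and the secondary key values are
+hypothetical):
+<p><pre>
+# Open a cursor on each secondary database and position it on the
+# desired secondary key, then join the cursors and walk the result.
+set cc [$color cursor]
+set wc [$weight cursor]
+$cc get -set blue
+$wc get -set heavy
+set jc [$primary join $cc $wc]
+while {[llength [set pair [$jc get]]] > 0} {
+    puts $pair
+}
+$jc close
+$cc close
+$wc close
+</pre>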
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_open.html b/bdb/docs/api_tcl/db_open.html
new file mode 100644
index 00000000000..4f7b651e552
--- /dev/null
+++ b/bdb/docs/api_tcl/db_open.html
@@ -0,0 +1,300 @@
+<!--$Id: db_open.so,v 11.18 2000/11/27 18:14:47 sue Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><b>berkdb open</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb open
+ [-btree | -hash | -recno | -queue | -unknown]
+ [-cachesize {gbytes bytes ncache}]
+ [-create]
+ [-delim delim]
+ [-dup]
+ [-dupsort]
+ [-env env]
+ [-errfile filename]
+ [-excl]
+ [-extent size]
+ [-ffactor density]
+ [-len len]
+ [-mode mode]
+ [-nelem size]
+ [-pad pad]
+ [-pagesize pagesize]
+ [-rdonly]
+ [-recnum]
+ [-renumber]
+ [-snapshot]
+ [-source file]
+ [-truncate]
+ [-upgrade]
+ [--]
+ [file [database]]
+</pre></h3>
+<h1>Description</h1>
+<p>The <b>berkdb open</b> command opens, and optionally creates, a database.
+The returned database handle is bound to a Tcl command of the form
+<b>dbN</b>, where N is an integer starting at 0 (e.g., db0 and db1).
+It is through this Tcl command that the script accesses the database
+methods.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-btree<dd>Open/create a database of type Btree. The Btree format
+is a representation of a sorted, balanced tree structure.
+<p><dt>-hash<dd>Open/create a database of type Hash. The Hash format is
+an extensible, dynamic hashing scheme.
+<p><dt>-queue<dd>Open/create a database of type Queue. The Queue format
+supports fast access to fixed-length records accessed sequentially or by
+logical record number.
+<p><dt>-recno<dd>Open/create a database of type Recno. The Recno format
+supports fixed- or variable-length records, accessed sequentially or by
+logical record number, and optionally retrieved from a flat text file.
+<p><dt>-unknown<dd>The database is of an unknown type, and must already exist.
+<p><dt>-cachesize {gbytes bytes ncache}<dd>Set the size of the database's shared memory buffer pool, i.e., the cache,
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be the
+size of the normal working data set of the application, with some small
+amount of additional memory for unusual situations. (Note, the working
+set is not the same as the number of simultaneously referenced pages, and
+should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures, e.g., some
+releases of Solaris limit the amount of memory that may be allocated
+contiguously by a process. If <b>ncache</b> is 0 or 1, the cache will
+be allocated contiguously in memory. If it is greater than 1, the cache
+will be broken up into <b>ncache</b> equally sized separate pieces of
+memory.
+<p>For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>As databases opened within Berkeley DB environments use the cache specified to
+the environment, it is an error to attempt to set a cache in a database
+created within an environment.
+<p><dt>-create<dd>Create any underlying files, as necessary. If the files do not already
+exist and the <b>-create</b> argument is not specified, the call will
+fail.
+<p><dt>-delim delim<dd>Set the delimiting byte used to mark the end of a record in the backing
+source file for the Recno access method.
+<p>This byte is used for variable-length records if the <b>-source</b>
+argument file is specified. If no delimiting byte was specified,
+&lt;newline&gt; characters (i.e., ASCII 0x0a) are interpreted as
+end-of-record markers.
+<p><dt>-dup<dd>Permit duplicate data items in the tree, i.e., insertion of a key/data
+pair whose key already exists in the tree will be successful.
+The ordering of duplicates in the tree is determined by the order of
+insertion, unless the ordering is otherwise specified by use of a cursor or
+a duplicate comparison function.
+<p>It is an error to specify both <b>-dup</b> and <b>-recnum</b>.
+<p><dt>-dupsort<dd>Sort duplicates within a set of data items. A default, lexical comparison
+will be used. Specifying that duplicates are to be sorted changes the
+behavior of the <i>db</i> <b>put</b> operation as well as the <i>dbc</i> <b>put</b>
+operation when the <b>-keyfirst</b>, <b>-keylast</b> and
+<b>-current</b> options are specified.
+<p><dt>-env env<dd>If no <b>-env</b> argument is given, the database is standalone, i.e.,
+it is not part of any Berkeley DB environment.
+<p>If a <b>-env</b> argument is given, the database is created within the
+specified Berkeley DB environment. The database access methods automatically
+make calls to the other subsystems in Berkeley DB based on the enclosing
+environment. For example, if the environment has been configured to use
+locking, then the access methods will automatically acquire the correct
+locks when reading and writing pages of the database.
+<p><dt>-errfile filename<dd>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however, the
+errno value may be insufficient to completely describe the cause of the
+error, especially during initial application debugging.
+<p>The <b>-errfile</b> argument is used to enhance the mechanism for
+reporting error messages to the application by specifying a file to be
+used for displaying additional Berkeley DB error messages. In some cases, when
+an error occurs, Berkeley DB will output an additional error message to the
+specified file reference.
+<p>The error message will consist of a Tcl command name and a colon (":"),
+an error string, and a trailing &lt;newline&gt; character. If the
+database was opened in an environment the Tcl command name will be the
+environment name (e.g., env0), otherwise it will be the database command
+name (e.g., db0).
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For database handles opened inside of Berkeley DB environments, specifying the
+<b>-errfile</b> argument affects the entire environment and is equivalent
+to specifying the same argument to the <b>berkdb env</b> command.
+<p><dt>-excl<dd>Return an error if the file already exists. Underlying filesystem
+primitives are used to implement this flag. For this reason it is only
+applicable to the physical database file and cannot be used to test if a
+database in a file already exists.
+<p><dt>-extent size<dd>Set the size of the extents of the Queue database. The size is specified
+as the number of pages in an extent.
+Each extent is created as a separate physical file. If no extent size
+is set, the default behavior is to create only
+a single underlying database file.
+<p>For information on tuning the extent size, see
+<a href="../ref/am_conf/extentsize.html">Selecting an extent size</a>.
+<p><dt>-ffactor density<dd>Set the desired density within the hash table.
+<p>The density is an approximation of the number of keys allowed to
+accumulate in any one bucket.
+<p><dt>-len len<dd>For the Queue access method, specify that the records are of length
+<b>len</b>.
+<p>For the Recno access method, specify that the records are fixed-length,
+not byte delimited, and are of length <b>len</b>.
+<p>Any records added to the database that are less than <b>len</b> bytes
+long are automatically padded (see the <b>-pad</b> argument for more
+information).
+<p>Any attempt to insert records into the database that are greater than
+<b>len</b> bytes long will cause the call to fail immediately and return
+an error.
+<p><dt>-mode mode<dd>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the access methods
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p><dt>-nelem size<dd>Set an estimate of the final size of the hash table.
+<p>If not set or set too low, hash tables will still expand gracefully as
+keys are entered, although a slight performance degradation may be
+noticed.
+<p><dt>-pad pad<dd>Set the padding character for short, fixed-length records for the Queue
+and Recno access methods.
+<p>If no pad character is specified, &lt;space&gt; characters (i.e.,
+ASCII 0x20) are used for padding.
+<p><dt>-pagesize pagesize<dd>Set the size of the pages used to hold items in the database, in bytes.
+The minimum page size is 512 bytes and the maximum page size is 64K bytes.
+If the page size is not explicitly set, one is selected based on the
+underlying filesystem I/O block size. The automatically selected size
+has a lower limit of 512 bytes and an upper limit of 16K bytes.
+<p>For information on tuning the Berkeley DB page size, see
+<a href="../ref/am_conf/pagesize.html">Selecting a page size</a>.
+<p><dt>-rdonly<dd>Open the database for reading only. Any attempt to modify items in the
+database will fail regardless of the actual permissions of any underlying
+files.
+<p><dt>-recnum<dd>Support retrieval from the Btree using record numbers.
+<p>Logical record numbers in Btree databases are mutable in the face of
+record insertion or deletion. See the <b>-renumber</b> argument for
+further discussion.
+<p>Maintaining record counts within a Btree introduces a serious point of
+contention, namely the page locations where the record counts are stored. In
+addition, the entire tree must be locked during both insertions and
+deletions, effectively single-threading the tree for those operations.
+Specifying <b>-recnum</b> can result in serious performance degradation
+for some applications and data sets.
+<p>It is an error to specify both <b>-dup</b> and <b>-recnum</b>.
+<p><dt>-renumber<dd>Specifying the <b>-renumber</b> argument causes the logical record
+numbers to be mutable, and change as records are added to and deleted from
+the database. For example, the deletion of record number 4 causes records
+numbered 5 and greater to be renumbered downward by 1. If a cursor was
+positioned to record number 4 before the deletion, it will reference the
+new record number 4, if any such record exists, after the deletion. If a
+cursor was positioned after record number 4 before the deletion, it will
+be shifted downward 1 logical record, continuing to reference the same
+record as it did before.
+<p>Using the <i>db</i> <b>put</b> or <i>dbc</i> <b>put</b> interfaces to create new records will
+cause the creation of multiple records if the record number is more than one
+greater than the largest record currently in the database. For example,
+creating record 28, when record 25 was previously the last record in the
+database, will create records 26 and 27 as well as 28.
+<p>If a created record is not at the end of the database, all records following
+the new record will be automatically renumbered upward by 1. For example,
+the creation of a new record numbered 8 causes records numbered 8 and
+greater to be renumbered upward by 1. If a cursor was positioned to record
+number 8 or greater before the insertion, it will be shifted upward 1
+logical record, continuing to reference the same record as it did before.
+<p>For these reasons, concurrent access to a Recno database with the
+<b>-renumber</b> flag specified may be largely meaningless, although it
+is supported.
+<p><dt>-snapshot<dd>This argument specifies that any specified <b>-source</b> file be read
+in its entirety when the database is opened. If this argument is not
+specified, the <b>-source</b> file may be read lazily.
+<p><dt>-source file<dd>Set the underlying source file for the Recno access method. The purpose
+of the <b>-source</b> file is to provide fast access and modification
+to databases that are normally stored as flat text files.
+<p>If the <b>-source</b> argument is given, it specifies an underlying flat
+text database file that is read to initialize a transient record number
+index. In the case of variable length records, the records are separated
+as specified by <b>-delim</b>. For example, standard UNIX byte stream
+files can be interpreted as a sequence of variable length records
+separated by &lt;newline&gt; characters.
+<p>In addition, when cached data would normally be written back to the
+underlying database file (e.g., the <i>db</i> <b>close</b> or <i>db</i> <b>sync</b>
+commands are called), the in-memory copy of the database will be written
+back to the <b>-source</b> file.
+<p>By default, the backing source file is read lazily, i.e., records are not
+read from the file until they are requested by the application.
+<b>If multiple processes (not threads) are accessing a Recno database
+concurrently and either inserting or deleting records, the backing source
+file must be read in its entirety before more than a single process
+accesses the database, and only that process should specify the backing
+source argument as part of the <b>berkdb open</b> call. See the <b>-snapshot</b>
+argument for more information.</b>
+<p><b>Reading and writing the backing source file specified by <b>-source</b>
+cannot be transactionally protected because it involves filesystem
+operations that are not part of the Berkeley DB transaction methodology.</b>
+For this reason, if a temporary database is used to hold the records,
+i.e., no <b>file</b> argument was specified to the <b>berkdb open</b> call,
+it is possible to lose the contents of the <b>-source</b> file, e.g., if
+the system crashes at the right instant. If a file is used to hold the
+database, i.e., a file name was specified as the <b>file</b> argument
+to <b>berkdb open</b>, normal database recovery on that file can be used to
+prevent information loss, although it is still possible that the contents
+of <b>-source</b> will be lost if the system crashes.
+<p>The <b>-source</b> file must already exist (but may be zero-length) when
+<b>berkdb open</b> is called.
+<p>It is not an error to specify a read-only <b>-source</b> file when
+creating a database, nor is it an error to modify the resulting database.
+However, any attempt to write the changes to the backing source file using
+either the <i>db</i> <b>close</b> or <i>db</i> <b>sync</b> commands will fail, of course.
+Specifying the <b>-nosync</b> argument to the <i>db</i> <b>close</b> command will
+stop it from attempting to write the changes to the backing file; instead,
+they will be silently discarded.
+<p>For all of the above reasons, the <b>-source</b> file is generally used
+to specify databases that are read-only for Berkeley DB applications, and that
+are either generated on the fly by software tools, or modified using a
+different mechanism, e.g., a text editor.
+<p><dt>-truncate<dd>Physically truncate the underlying file, discarding all previous databases
+it might have held. Underlying filesystem primitives are used to
+implement this flag. For this reason it is only applicable to the
+physical file and cannot be used to discard databases within a file.
+<p>The <b>-truncate</b> argument cannot be transaction protected, and it is
+an error to specify it in a transaction protected environment.
+<p><dt>-upgrade<dd>Upgrade the database represented by <b>file</b>, if necessary.
+<p><b>Note: Database upgrades are done in place and are
+destructive, e.g., if pages need to be allocated and no disk space is
+available, the database may be left corrupted. Backups should be made
+before databases are upgraded. See <a href="../ref/am/upgrade.html">Upgrading databases</a> for more information.</b>
+<p><dt>--<dd>Mark the end of the command arguments.
+<p><dt>file<dd>The name of a single physical file on disk that will be used to back the
+database.
+<p><dt>database<dd>The <b>database</b> argument allows applications to have multiple
+databases inside of a single physical file. This is useful when the
+databases are both numerous and reasonably small, in order to avoid
+creating a large number of underlying files. It is an error to attempt
+to open a second database file that was not initially created using a
+<b>database</b> name.
+</dl>
+<p>The <b>berkdb open</b> command returns a database handle on success.
+<p>In the case of error, a Tcl error is thrown.
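+<p>For example, the following is an illustrative Tcl sketch (the file name
+and the key/data values are hypothetical):
+<p><pre>
+# Create a standalone Btree database with sorted duplicates and an
+# 8KB page size, store a couple of duplicate data items, and close it.
+set db [berkdb open -create -btree -dup -dupsort \
+    -pagesize 8192 -mode 0644 fruit.db]
+$db put fruit apple
+$db put fruit banana
+$db close
+</pre>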
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_put.html b/bdb/docs/api_tcl/db_put.html
new file mode 100644
index 00000000000..2311a3c97ac
--- /dev/null
+++ b/bdb/docs/api_tcl/db_put.html
@@ -0,0 +1,74 @@
+<!--$Id: db_put.so,v 11.10 2000/06/12 17:50:01 sue Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>put</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db put
+ -append
+ [-partial {doff dlen}]
+ [-txn txnid]
+ data
+db put
+ [-nooverwrite]
+ [-partial {doff dlen}]
+ [-txn txnid]
+ key data
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>put</b> command stores the specified key/data pair into the
+database.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-append<dd>Append the data item to the end of the database. For the <b>-append</b>
+option to be specified, the underlying database must be a Queue or Recno
+database. The record number allocated to the record is returned on
+success.
+<p><dt>-nooverwrite<dd>Enter the new key/data pair only if the key does not already appear in
+the database.
+<p><dt>-partial {doff dlen}<dd>
+<p>The <b>dlen</b> bytes starting <b>doff</b> bytes from the beginning of
+the specified key's data record are replaced by the data specified by the
+data and size structure elements. If <b>dlen</b> is smaller than the
+length of the supplied data, the record will grow, and if <b>dlen</b> is
+larger than the length of the supplied data, the record will shrink. If
+the specified bytes do not exist, the record will be extended using nul
+bytes as necessary, and the <i>db</i> <b>put</b> call will succeed.
+<p>It is an error to attempt a partial put using the <i>db</i> <b>put</b> command in a database
+that supports duplicate records. Partial puts in databases supporting
+duplicate records must be done using a <i>dbc</i> <b>put</b> command.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+supplied data length values in Queue or Recno databases with fixed-length
+records.
+<p><dt>-txn txnid<dd>If the file is being accessed under transaction protection, the
+<b>txnid</b> parameter is a transaction handle returned from <i>env</i> <b>txn</b>.
+</dl>
+<p>The <i>db</i> <b>put</b> command returns either 0 or a record number for success
+(the record number is returned if the <b>-append</b> option was specified).
+If an error occurs, a Berkeley DB error message is returned or a Tcl error is
+thrown.
+<p>If the underlying database is a Queue or Recno database, then the given
+key will be interpreted by Tcl as an integer. For all other database
+types, the key is interpreted by Tcl as a byte array.
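+<p>For example, an illustrative Tcl sketch (the <b>$db</b> handle and the
+key/data values are hypothetical; the database is assumed not to support
+duplicate records):
+<p><pre>
+# Unconditional store, conditional store, and a partial overwrite of
+# the first 5 bytes of an existing record.
+$db put fruit apple
+$db put -nooverwrite vegetable carrot
+$db put -partial {0 5} fruit melon
+</pre>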
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_remove.html b/bdb/docs/api_tcl/db_remove.html
new file mode 100644
index 00000000000..e45f3bc4970
--- /dev/null
+++ b/bdb/docs/api_tcl/db_remove.html
@@ -0,0 +1,49 @@
+<!--$Id: db_remove.so,v 11.6 2000/03/22 21:56:10 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb dbremove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><b>berkdb dbremove</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb dbremove
+ [-env env]
+ [--]
+ file
+ [database]
+</pre></h3>
+<h1>Description</h1>
+<p>Remove the Berkeley DB database specified by the <b>file</b> and optional
+<b>database</b> name arguments. If no <b>database</b> is specified,
+the physical file represented by <b>file</b> is removed, incidentally
+removing all databases that it contained.
+<p>No reference count of database use is maintained by Berkeley DB. Applications
+should not remove databases that are currently in use.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-env env<dd>If a <b>-env</b> argument is given, the database in the specified Berkeley DB
+environment is removed.
+<p><dt>--<dd>Mark the end of the command arguments.
+</dl>
+<p>The <b>berkdb dbremove</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
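+<p>For example, an illustrative Tcl sketch (the file and database names are
+hypothetical):
+<p><pre>
+# Remove one database from a multi-database file, then remove the
+# physical file and any databases it still contains.
+berkdb dbremove data.db olddb
+berkdb dbremove data.db
+</pre>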
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_rename.html b/bdb/docs/api_tcl/db_rename.html
new file mode 100644
index 00000000000..75707d92a25
--- /dev/null
+++ b/bdb/docs/api_tcl/db_rename.html
@@ -0,0 +1,50 @@
+<!--$Id: db_rename.so,v 11.1 2000/04/24 17:31:12 sue Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb dbrename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><b>berkdb dbrename</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb dbrename
+ [-env env]
+ [--]
+ file
+ [database
+ newname]
+</pre></h3>
+<h1>Description</h1>
+<p>Rename the Berkeley DB database specified by the <b>file</b> and optional
+<b>database</b> name arguments to the new name given.
+If no <b>database</b> is specified,
+the physical file represented by <b>file</b> is renamed.
+<p>No reference count of database use is maintained by Berkeley DB. Applications
+should not rename databases that are currently in use.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-env env<dd>If a <b>-env</b> argument is given, the database in the specified Berkeley DB
+environment is renamed.
+<p><dt>--<dd>Mark the end of the command arguments.
+</dl>
+<p>The <b>berkdb dbrename</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
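+<p>For example, an illustrative Tcl sketch (the file and database names are
+hypothetical):
+<p><pre>
+# Rename the "accounts" database inside the file data.db.
+berkdb dbrename data.db accounts accounts.old
+</pre>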
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_stat.html b/bdb/docs/api_tcl/db_stat.html
new file mode 100644
index 00000000000..494226dfd31
--- /dev/null
+++ b/bdb/docs/api_tcl/db_stat.html
@@ -0,0 +1,41 @@
+<!--$Id: db_stat.so,v 11.9 1999/12/20 08:52:35 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>stat</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db stat
+ [-recordcount]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>stat</b> command returns a list of name/value pairs comprising
+the statistics of the database.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-recordcount<dd>Return the number of records in the database. The <b>-recordcount</b>
+option may only be specified for Recno databases, or Btree databases where
+the underlying database was created with the <b>-recnum</b> option.
+</dl>
+<p>In the case of error, a Tcl error is thrown.
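+<p>For example, an illustrative Tcl sketch (the <b>$db</b> handle is assumed
+to be an already-open database):
+<p><pre>
+# Print every statistic name/value pair for the database.
+foreach pair [$db stat] {
+    puts "[lindex $pair 0]: [lindex $pair 1]"
+}
+</pre>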
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/db_sync.html b/bdb/docs/api_tcl/db_sync.html
new file mode 100644
index 00000000000..f0e61b45a4d
--- /dev/null
+++ b/bdb/docs/api_tcl/db_sync.html
@@ -0,0 +1,36 @@
+<!--$Id: db_sync.so,v 11.9 1999/12/20 08:52:35 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>db</i> <b>sync</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db sync
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>sync</b> command flushes any cached database
+information to disk.
+<p>See <i>db</i> <b>close</b> for a discussion of Berkeley DB and cached data.
+<p>The <i>db</i> <b>sync</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/dbc_close.html b/bdb/docs/api_tcl/dbc_close.html
new file mode 100644
index 00000000000..f3a63b4c559
--- /dev/null
+++ b/bdb/docs/api_tcl/dbc_close.html
@@ -0,0 +1,36 @@
+<!--$Id: dbc_close.so,v 11.10 1999/12/20 08:52:35 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: dbc close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>close</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc close
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>close</b> command discards the cursor.
+<p>Once <i>dbc</i> <b>close</b> has been called, regardless of its return, the
+cursor handle may not be used again.
+<p>The <i>dbc</i> <b>close</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/dbc_del.html b/bdb/docs/api_tcl/dbc_del.html
new file mode 100644
index 00000000000..11264eeea09
--- /dev/null
+++ b/bdb/docs/api_tcl/dbc_del.html
@@ -0,0 +1,38 @@
+<!--$Id: dbc_del.so,v 11.10 1999/12/20 08:52:35 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: dbc del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>del</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc del
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>del</b> command deletes the key/data pair currently referenced
+by the cursor.
+<p>The cursor position is unchanged after a delete, and subsequent calls to
+cursor commands expecting the cursor to reference an existing key will
+fail.
+<p>The <i>dbc</i> <b>del</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/dbc_dup.html b/bdb/docs/api_tcl/dbc_dup.html
new file mode 100644
index 00000000000..85b2bfb086e
--- /dev/null
+++ b/bdb/docs/api_tcl/dbc_dup.html
@@ -0,0 +1,46 @@
+<!--$Id: dbc_dup.so,v 11.4 1999/12/20 08:52:35 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: dbc dup</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>dup</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc dup
+ [-position]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>dup</b> command duplicates the cursor, creating a new cursor
+that uses the same transaction and locker ID as the original cursor. This
+is useful when an application is using locking and requires two or more
+cursors in the same thread of control.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-position<dd>The newly created cursor is initialized to reference the same position in
+the database as the original cursor and hold the same locks. If the
+<b>-position</b> flag is not specified, then the created cursor is
+uninitialized and will behave like a cursor newly created using the
+<i>db</i> <b>cursor</b> command.
+</dl>
+<p>The <i>dbc</i> <b>dup</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
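+<p>For example, an illustrative Tcl sketch (the <b>$db</b> handle is assumed
+to be an already-open database):
+<p><pre>
+# Clone a positioned cursor so a second cursor can read ahead without
+# disturbing the original position.
+set dbc [$db cursor]
+$dbc get -first
+set dbc2 [$dbc dup -position]
+$dbc2 get -next
+$dbc2 close
+$dbc close
+</pre>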
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/dbc_get.html b/bdb/docs/api_tcl/dbc_get.html
new file mode 100644
index 00000000000..d16e51bdf9f
--- /dev/null
+++ b/bdb/docs/api_tcl/dbc_get.html
@@ -0,0 +1,168 @@
+<!--$Id: dbc_get.so,v 11.16 2000/11/28 20:12:30 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: dbc get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>get</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc get
+ [-current]
+ [-first]
+ [-get_recno]
+ [-join_item]
+ [-last]
+ [-next]
+ [-nextdup]
+ [-nextnodup]
+ [-partial {offset length}]
+ [-prev]
+ [-prevnodup]
+ [-rmw]
+dbc get
+ [-partial {offset length}]
+ [-rmw]
+ [-set]
+ [-set_range]
+ [-set_recno]
+ key
+dbc get
+ -get_both
+ [-partial {offset length}]
+ [-rmw]
+ key data
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>get</b> command returns a list of {key value} pairs, except in
+the case of the <b>-get_recno</b> and <b>-join_item</b> options. In
+the case of the <b>-get_recno</b> option, <i>dbc</i> <b>get</b> returns a list
+of the record number. In the case of the <b>-join_item</b> option,
+<i>dbc</i> <b>get</b> returns a list containing the joined key.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-current<dd>Return the key/data pair currently referenced by the cursor.
+<p>If the cursor key/data pair was deleted, <i>dbc</i> <b>get</b> will return an
+empty list.
+<p><dt>-first<dd>The cursor is set to reference the first key/data pair of the database, and
+that pair is returned. In the presence of duplicate key values, the first
+data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, <i>dbc</i> <b>get</b> using the
+<b>-first</b> option will skip any keys that exist but were never
+explicitly created by the application or were created and later deleted.
+<p>If the database is empty, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-last<dd>The cursor is set to reference the last key/data pair of the database, and
+that pair is returned. In the presence of duplicate key values, the last
+data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, <i>dbc</i> <b>get</b> using the
+<b>-last</b> option will skip any keys that exist but were never
+explicitly created by the application or were created and later deleted.
+<p>If the database is empty, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-next<dd>If the cursor is not yet initialized, the <b>-next</b> option is
+identical to <b>-first</b>.
+<p>Otherwise, the cursor is moved to the next key/data pair of the database,
+and that pair is returned. In the presence of duplicate key values, the
+value of the key may not change.
+<p>If the database is a Queue or Recno database, <i>dbc</i> <b>get</b> using the
+<b>-next</b> option will skip any keys that exist but were never
+explicitly created by the application or were created and later deleted.
+<p>If the cursor is already on the last record in the database, <i>dbc</i> <b>get</b>
+will return an empty list.
+<p><dt>-nextdup<dd>If the next key/data pair of the database is a duplicate record for the
+current key/data pair, the cursor is moved to the next key/data pair of the
+database, and that pair is returned. Otherwise, <i>dbc</i> <b>get</b> will return
+an empty list.
+<p><dt>-nextnodup<dd>If the cursor is not yet initialized, the <b>-nextnodup</b> option is
+identical to <b>-first</b>.
+<p>Otherwise, the cursor is moved to the next non-duplicate
+key/data pair of the database, and that pair is returned.
+<p>If no non-duplicate key/data pairs occur after the cursor
+position in the database, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-prev<dd>If the cursor is not yet initialized, <b>-prev</b> is identical to
+<b>-last</b>.
+<p>Otherwise, the cursor is moved to the previous key/data pair of the
+database, and that pair is returned. In the presence of duplicate key
+values, the value of the key may not change.
+<p>If the database is a Queue or Recno database, <i>dbc</i> <b>get</b> using the
+<b>-prev</b> flag will skip any keys that exist but were never explicitly
+created by the application or were created and later deleted.
+<p>If the cursor is already on the first record in the database,
+<i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-prevnodup<dd>If the cursor is not yet initialized, the <b>-prevnodup</b> option is
+identical to <b>-last</b>.
+<p>Otherwise, the cursor is moved to the previous non-duplicate
+key/data pair of the database, and that pair is returned.
+<p>If no non-duplicate key/data pairs occur before the cursor
+position in the database, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-set<dd>Move the cursor to the specified key/data pair of the database, and return
+the datum associated with the given key.
+<p>In the presence of duplicate key values, <i>dbc</i> <b>get</b> will return the
+first data item for the given key.
+<p>If the database is a Queue or Recno database and the requested key exists,
+but was never explicitly created by the application or was later deleted,
+<i>dbc</i> <b>get</b> will return an empty list.
+<p>If no matching keys are found, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-set_range<dd>The <b>-set_range</b> option is identical to the <b>-set</b> option,
+except that the key is returned as well as the data item, and, in the case
+of the Btree access method, the returned key/data pair is the smallest
+key greater than or equal to the specified key (as determined by the
+comparison function), permitting partial key matches and range searches.
+<p><dt>-get_both<dd>The <b>-get_both</b> option is identical to the <b>-set</b> option,
+except that both the key and the data arguments must be matched by the
+key and data item in the database.
+<p>For <b>-get_both</b> to be specified, the underlying database must be of
+type Btree or Hash.
+<p><dt>-set_recno<dd>Move the cursor to the specific numbered record of the database, and
+return the associated key/data pair. The key
+must be a record number.
+<p>For the <b>-set_recno</b> option to be specified, the underlying database
+must be of type Btree and it must have been created with the <b>-recnum</b>
+option.
+<p><dt>-get_recno<dd>Return a list of the record number associated with the current cursor
+position. No key argument should be specified.
+<p>For <b>-get_recno</b> to be specified, the underlying database must be
+of type Btree and it must have been created with the <b>-recnum</b>
+option.
+<p><dt>-join_item<dd>Do not use the data value found in all of the cursors as a lookup key for
+the primary database, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+<p>For <b>-join_item</b> to be specified, the cursor must have been created
+by the <i>db</i> <b>join</b> command.
+<p><dt>-partial {offset length}<dd>The <b>length</b> bytes starting <b>offset</b> bytes from the beginning of
+the retrieved data record are returned as if they comprised the entire
+record. If any or all of the specified bytes do not exist in the record,
+the command is successful and the existing bytes or 0 bytes are
+returned.
+<p><dt>-rmw<dd>Acquire write locks instead of read locks when doing the retrieval. Setting
+this flag may decrease the likelihood of deadlock during a read-modify-write
+cycle by immediately acquiring the write lock during the read part of the
+cycle so that another thread of control acquiring a read lock for the same
+item, in its own read-modify-write cycle, will not result in deadlock.
+</dl>
+<p>If a key is specified, and
+if the underlying database is a Queue or Recno database, then the given
+key will be interpreted by Tcl as an integer. For all other database
+types, the key is interpreted by Tcl as a byte array unless indicated
+by a given option.
+<p>In the normal error case of attempting to retrieve a key that does not
+exist, an empty list is returned.
+<p>In the case of error, a Tcl error is thrown.
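+<p>For example, an illustrative Tcl sketch (the <b>$db</b> handle is assumed
+to be an already-open database):
+<p><pre>
+# Walk every key/data pair in the database with a cursor.
+set dbc [$db cursor]
+for {set pair [$dbc get -first]} {[llength $pair] > 0} \
+    {set pair [$dbc get -next]} {
+    set kd [lindex $pair 0]
+    puts "key: [lindex $kd 0] data: [lindex $kd 1]"
+}
+$dbc close
+</pre>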
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/dbc_put.html b/bdb/docs/api_tcl/dbc_put.html
new file mode 100644
index 00000000000..bd791ab94d2
--- /dev/null
+++ b/bdb/docs/api_tcl/dbc_put.html
@@ -0,0 +1,133 @@
+<!--$Id: dbc_put.so,v 11.12 2000/06/12 17:50:01 sue Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: dbc put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>put</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc put
+ [-after]
+ [-before]
+ [-current]
+ [-partial {doff dlen}]
+ data
+dbc put
+ [-keyfirst]
+ [-keylast]
+ [-partial {doff dlen}]
+ key data
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>put</b> command stores the specified key/data pair into the
+database.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-after<dd>In the case of the Btree and Hash access methods, insert the data element
+as a duplicate element of the key referenced by the cursor. The new
+element appears immediately after the current cursor position. It is an
+error to specify <b>-after</b> if the underlying Btree or Hash database
+was not created with the <b>-dup</b> option. No key argument should be
+specified.
+<p>In the case of the Recno access method, it is an error to specify
+the <b>-after</b> option if the underlying Recno database was not created
+with the <b>-renumber</b> option. If the <b>-renumber</b> option was
+specified, a new key is created, all records after the inserted item are
+automatically renumbered, and the key of the new record is returned in
+the structure referenced by the parameter key. The initial value of the
+key parameter is ignored. See <b>berkdb open</b> for more information.
+<p>In the case of the Queue access method, it is always an error to specify
+<b>-after</b>.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, <i>dbc</i> <b>put</b> will throw a Tcl error. If the
+underlying access method is Btree or Recno, the operation will succeed.
+<p><dt>-before<dd>In the case of the Btree and Hash access methods, insert the data element
+as a duplicate element of the key referenced by the cursor. The new
+element appears immediately before the current cursor position. It is an
+error to specify <b>-before</b> if the underlying Btree or Hash database
+was not created with the <b>-dup</b> option. No key argument should be
+specified.
+<p>In the case of the Recno access method, it is an error to specify
+<b>-before</b> if the underlying Recno database was not created with the
+<b>-renumber</b> option. If the <b>-renumber</b> option was specified, a
+new key is created, the current record and all records after it are
+automatically renumbered, and the key of the new record is returned in
+the structure referenced by the parameter key. The initial value of the
+key parameter is ignored. See <b>berkdb open</b> for more information.
+<p>In the case of the Queue access method, it is always an error to specify
+<b>-before</b>.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, <i>dbc</i> <b>put</b> will throw a Tcl error. If the
+underlying access method is Btree or Recno, the operation will succeed.
+<p><dt>-current<dd>Overwrite the data of the key/data pair referenced by the cursor with the
+specified data item. No key argument should be specified.
+<p>If the <b>-dupsort</b> option was specified to <b>berkdb open</b> and the
+data item of the current referenced key/data pair does not compare
+equally to the data parameter, <i>dbc</i> <b>put</b> will throw a Tcl error.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, <i>dbc</i> <b>put</b> will throw a Tcl error. If the
+underlying access method is Btree, Queue or Recno, the operation will
+succeed.
+<p><dt>-keyfirst<dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the key already exists in the database, and the <b>-dupsort</b> option
+was specified to <b>berkdb open</b>, the inserted data item is added in its
+sorted location. If the key already exists in the database, and the
+<b>-dupsort</b> option was not specified, the inserted data item is added
+as the first of the data items for that key.
+<p>The <b>-keyfirst</b> option may not be specified to the Queue or Recno
+access methods.
+<p><dt>-keylast<dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the key already exists in the database, and the <b>-dupsort</b> option
+was specified to <b>berkdb open</b>, the inserted data item is added in its
+sorted location. If the key already exists in the database, and the
+<b>-dupsort</b> option was not specified, the inserted data item is added
+as the last of the data items for that key.
+<p>The <b>-keylast</b> option may not be specified to the Queue or Recno
+access methods.
+<p><dt>-partial {doff dlen}<dd>
+<p>The <b>dlen</b> bytes starting <b>doff</b> bytes from the beginning of
+the specified key's data record are replaced by the data specified by the
+data and size structure elements. If <b>dlen</b> is smaller than the
+length of the supplied data, the record will grow, and if <b>dlen</b> is
+larger than the length of the supplied data, the record will shrink. If
+the specified bytes do not exist, the record will be extended using nul
+bytes as necessary, and the <i>dbc</i> <b>put</b> call will succeed.
+<p>It is an error to attempt a partial put using the <i>db</i> <b>put</b> command in a database
+that supports duplicate records; partial puts in databases supporting
+duplicate records must be done using the <i>dbc</i> <b>put</b> command.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+supplied data length values in Queue or Recno databases with fixed-length
+records.
+</dl>
+<p>If a key is specified, and
+if the underlying database is a Queue or Recno database, then the given
+key will be interpreted by Tcl as an integer. For all other database
+types, the key is interpreted by Tcl as a byte array.
+<p>If <i>dbc</i> <b>put</b> fails for any reason, the state of the cursor will be
+unchanged. If <i>dbc</i> <b>put</b> succeeds and an item is inserted into the
+database, the cursor is always positioned to reference the newly inserted
+item.
+<p>The <i>dbc</i> <b>put</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
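+<p>For example, an illustrative Tcl sketch (the <b>$db</b> handle is assumed
+to be a Btree database opened with the <b>-dup</b> option; the key and data
+values are hypothetical):
+<p><pre>
+# Insert a data item for the key "color", then add unsorted duplicates
+# immediately after and before the cursor position.
+set dbc [$db cursor]
+$dbc put -keyfirst color blue
+$dbc put -after green
+$dbc put -before aqua
+$dbc close
+</pre>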
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/env_close.html b/bdb/docs/api_tcl/env_close.html
new file mode 100644
index 00000000000..719ad2160ad
--- /dev/null
+++ b/bdb/docs/api_tcl/env_close.html
@@ -0,0 +1,42 @@
+<!--$Id: env_close.so,v 11.10 1999/12/20 08:52:35 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: env close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>env</i> <b>close</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>env close
+</pre></h3>
+<h1>Description</h1>
+<p>Close the Berkeley DB environment, freeing any allocated resources and closing
+any underlying subsystems.
+<p>This does not imply closing any databases that were opened in the
+environment.
+<p>Where the environment was initialized with the <b>-lock</b> option,
+calling <i>env</i> <b>close</b> does not release any locks still held by the
+closing process, providing functionality for long-lived locks.
+<p>Once <i>env</i> <b>close</b> has been called, the <b>env</b> handle may not be
+accessed again.
+<p>The <i>env</i> <b>close</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
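+<p>For example (a minimal sketch; the environment home directory shown is
+a placeholder):
+<p><blockquote><pre>set env [berkdb env -create -home /var/dbenv]
+# ... open and use databases in the environment ...
+
+# Close any databases opened in this environment first; env close
+# does not close them for you.
+$env close
+# The handle in $env may not be used again after this point.</pre></blockquote>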
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/env_open.html b/bdb/docs/api_tcl/env_open.html
new file mode 100644
index 00000000000..1c5bafee4b9
--- /dev/null
+++ b/bdb/docs/api_tcl/env_open.html
@@ -0,0 +1,168 @@
+<!--$Id: env_open.so,v 11.21 2000/06/05 15:17:24 sue Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb env</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><b>berkdb env</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb env
+ [-cachesize {gbytes bytes ncache}]
+ [-create]
+ [-data_dir dirname]
+ [-errfile filename]
+ [-home directory]
+ [-log_dir dirname]
+ [-mode mode]
+ [-private]
+ [-recover]
+ [-recover_fatal]
+ [-shm_key shmid]
+ [-system_mem]
+ [-tmp_dir dirname]
+ [-txn [nosync]]
+ [-txn_max max]
+ [-use_environ]
+ [-use_environ_root]
+</pre></h3>
+<h1>Description</h1>
+<p>The <b>berkdb env</b> command opens, and optionally creates, a database
+environment. The returned environment handle is bound to a Tcl command
+of the form <b>envN</b>, where N is an integer starting at 0 (e.g., env0
+and env1). It is through this Tcl command that the script accesses the
+environment methods.
+The command automatically initializes the shared memory buffer pool subsystem.
+This subsystem is used whenever the application is
+using any Berkeley DB access method.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-cachesize {gbytes bytes ncache}<dd>Set the size of the database's shared memory buffer pool, i.e., the cache,
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be the
+size of the normal working data set of the application, with some small
+amount of additional memory for unusual situations. (Note, the working
+set is not the same as the number of simultaneously referenced pages, and
+should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures, e.g., some
+releases of Solaris limit the amount of memory that may be allocated
+contiguously by a process. If <b>ncache</b> is 0 or 1, the cache will
+be allocated contiguously in memory. If it is greater than 1, the cache
+will be broken up into <b>ncache</b> equally sized separate pieces of
+memory.
+<p>For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p><dt>-create<dd>Cause Berkeley DB subsystems to create any underlying files, as necessary.
+<p><dt>-data_dir dirname<dd>Specify the environment's data directory as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-errfile filename<dd>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however, the
+errno value may be insufficient to completely describe the cause of the
+error, especially during initial application debugging.
+<p>The <b>-errfile</b> argument is used to enhance the mechanism for
+reporting error messages to the application by specifying a file to be
+used for displaying additional Berkeley DB error messages. In some cases, when
+an error occurs, Berkeley DB will output an additional error message to the
+specified file reference.
+<p>The error message will consist of the environment command name (e.g., env0)
+and a colon (":"), an error string, and a trailing &lt;newline&gt;
+character.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p><dt>-home directory<dd>The <b>-home</b> argument is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-log_dir dirname<dd>Specify the environment's logging file directory as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-mode mode<dd>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by Berkeley DB
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writeable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p><dt>-private<dd>Specify that the environment will only be accessed by a single process
+(although that process may be multi-threaded). This flag has two effects
+on the Berkeley DB environment. First, all underlying data structures are
+allocated from per-process memory instead of from shared memory that is
+potentially accessible to more than a single process. Second, mutexes
+are only configured to work between threads.
+<p>This flag should not be specified if more than a single process is
+accessing the environment, as it is likely to cause database corruption
+and unpredictable behavior, e.g., if both a server application and the
+Berkeley DB utility <a href="../utility/db_stat.html">db_stat</a> will access the environment, the
+<b>-private</b> option should not be specified.
+<p><dt>-recover<dd>Run normal recovery on this environment before opening it for normal use.
+If this flag is set, the <b>-create</b> option must also be set since
+the regions will be removed and recreated.
+<p><dt>-recover_fatal<dd>Run catastrophic recovery on this environment before opening it for
+normal use. If this flag is set, the <b>-create</b> option must also be
+set since the regions will be removed and recreated.
+<p><dt>-shm_key shmid<dd>Specify a base segment ID for Berkeley DB environment shared memory regions
+created in system memory on systems supporting X/Open-style shared memory
+interfaces, e.g., UNIX systems supporting shmget(2) and related System V
+IPC interfaces. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p><dt>-system_mem<dd>Allocate memory from system shared memory instead of memory backed by the
+filesystem. See <a href="../ref/env/region.html">Shared Memory Regions</a>
+for more information.
+<p><dt>-tmp_dir dirname<dd>Specify the environment's tmp directory as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-txn [nosync]<dd>Initialize the transaction subsystem. This subsystem is used when
+recovery and atomicity of multiple operations are important.
+The <b>-txn</b> option implies the initialization of the logging
+and locking subsystems as well.
+<p>If the optional <b>nosync</b> argument is specified, the log will not be
+synchronously flushed on transaction commit or prepare. This means that
+transactions exhibit the ACI (atomicity, consistency and isolation)
+properties, but not D (durability), i.e., database integrity will be
+maintained but it is possible that some number of the most recently
+committed transactions may be undone during recovery instead of being
+redone.
+<p>The number of transactions that are potentially at risk is governed by
+how often the log is checkpointed (see <a href="../utility/db_checkpoint.html">db_checkpoint</a> for more
+information) and how many log updates can fit on a single log page.
+<p><dt>-txn_max max<dd>Set the maximum number of simultaneous transactions that are supported
+by the environment. This bounds the size of backing files. When there
+are more than the specified number of concurrent transactions, calls to
+<i>env</i> <b>txn</b> will fail (until some active transactions complete).
+<p><dt>-use_environ<dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, environment information will be used in file
+naming for all users only if the <b>-use_environ</b> flag is set.
+<p><dt>-use_environ_root<dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, if the <b>-use_environ_root</b> flag is set,
+environment information will be used for file naming only for users with
+appropriate permissions (e.g., on IEEE/ANSI Std 1003.1 (POSIX) systems, users with a
+user-ID of 0).
+</dl>
+<p>The <b>berkdb env</b> command returns an environment handle on success.
+<p>In the case of error, a Tcl error is thrown.
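+<p>For example, the following sketch opens a transactional environment
+(the home directory is a placeholder, and the <b>-env</b> flag to
+<b>berkdb open</b> shown in the comment is an assumption based on that
+command's own manual page):
+<p><blockquote><pre># Create (if necessary) and open an environment with a 4MB, single-piece
+# cache, transactions (which imply logging and locking), and mode 0644.
+set env [berkdb env -create -home /var/dbenv \
+    -cachesize {0 4194304 1} -txn -mode 0644]
+puts "environment handle: $env"        ;# e.g. env0
+
+# Databases would then be opened within the environment, e.g.
+#   set db [berkdb open -env $env -create -btree mydb.db]   ;# flags assumed
+
+$env close</pre></blockquote>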
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/env_remove.html b/bdb/docs/api_tcl/env_remove.html
new file mode 100644
index 00000000000..ca90595f83a
--- /dev/null
+++ b/bdb/docs/api_tcl/env_remove.html
@@ -0,0 +1,70 @@
+<!--$Id: env_remove.so,v 11.13 2000/02/19 20:57:57 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb envremove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><b>berkdb envremove</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb envremove
+ [-data_dir directory]
+ [-force]
+ [-home directory]
+ [-log_dir directory]
+ [-tmp_dir directory]
+ [-use_environ]
+ [-use_environ_root]
+</pre></h3>
+<h1>Description</h1>
+<p>Remove a Berkeley DB environment.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-data_dir dirname<dd>Specify the environment's data directory as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-force<dd>If there are processes that have called <b>berkdb env</b> without calling
+<i>env</i> <b>close</b> (i.e., there are processes currently using the
+environment), <b>berkdb envremove</b> will fail without further action, unless
+the <b>-force</b> flag is set, in which case <b>berkdb envremove</b> will
+attempt to remove the environment regardless of any processes still using
+it.
+<p><dt>-home directory<dd>The <b>-home</b> argument is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-log_dir dirname<dd>Specify the environment's log directory as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-tmp_dir dirname<dd>Specify the environment's tmp directory as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-use_environ<dd>The Berkeley DB process' environment may be permitted to specify information to
+be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, environment information will be used in file
+naming for all users only if the <b>-use_environ</b> flag is set.
+<p><dt>-use_environ_root<dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB
+File Naming</a>. As permitting users to specify which files are used can
+create security problems, if the <b>-use_environ_root</b> flag is set,
+environment information will be used for file naming only for users with
+appropriate permissions (e.g., on IEEE/ANSI Std 1003.1 (POSIX) systems, users with a
+user-ID of 0).
+</dl>
+<p>The <b>berkdb envremove</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
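+<p>For example (a minimal sketch; the home directory shown is a placeholder):
+<p><blockquote><pre># Remove the environment after all processes using it have called
+# "env close".
+berkdb envremove -home /var/dbenv
+
+# Remove the environment even if it appears to be in use.  This can
+# corrupt data if another process really is still using it.
+berkdb envremove -home /var/dbenv -force</pre></blockquote>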
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/pindex.src b/bdb/docs/api_tcl/pindex.src
new file mode 100644
index 00000000000..668f53d25f8
--- /dev/null
+++ b/bdb/docs/api_tcl/pindex.src
@@ -0,0 +1,27 @@
+__APIREL__/api_tcl/db_close.html#2 @db close
+__APIREL__/api_tcl/db_count.html#2 @db count
+__APIREL__/api_tcl/db_cursor.html#2 @db cursor
+__APIREL__/api_tcl/db_del.html#2 @db del
+__APIREL__/api_tcl/db_get.html#2 @db get
+__APIREL__/api_tcl/db_get_join.html#2 @db get_join
+__APIREL__/api_tcl/db_get_type.html#2 @db get_type
+__APIREL__/api_tcl/db_is_byteswapped.html#2 @db is_byteswapped
+__APIREL__/api_tcl/db_join.html#2 @db join
+__APIREL__/api_tcl/db_open.html#2 @berkdb open
+__APIREL__/api_tcl/db_put.html#2 @db put
+__APIREL__/api_tcl/db_rename.html#2 @berkdb dbrename
+__APIREL__/api_tcl/db_remove.html#2 @berkdb dbremove
+__APIREL__/api_tcl/db_stat.html#2 @db stat
+__APIREL__/api_tcl/db_sync.html#2 @db sync
+__APIREL__/api_tcl/dbc_close.html#2 @dbc close
+__APIREL__/api_tcl/dbc_del.html#2 @dbc del
+__APIREL__/api_tcl/dbc_dup.html#2 @dbc dup
+__APIREL__/api_tcl/dbc_get.html#2 @dbc get
+__APIREL__/api_tcl/dbc_put.html#2 @dbc put
+__APIREL__/api_tcl/env_close.html#2 @env close
+__APIREL__/api_tcl/env_open.html#2 @berkdb env
+__APIREL__/api_tcl/env_remove.html#2 @berkdb envremove
+__APIREL__/api_tcl/txn.html#2 @env txn
+__APIREL__/api_tcl/txn_abort.html#2 @txn abort
+__APIREL__/api_tcl/txn_commit.html#2 @txn commit
+__APIREL__/api_tcl/version.html#2 @berkdb version
diff --git a/bdb/docs/api_tcl/tcl_index.html b/bdb/docs/api_tcl/tcl_index.html
new file mode 100644
index 00000000000..a31c6fc82a1
--- /dev/null
+++ b/bdb/docs/api_tcl/tcl_index.html
@@ -0,0 +1,49 @@
+<!--$Id: tcl_index.so,v 11.13 2000/04/24 17:31:12 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Tcl Interface</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Tcl Interface</h1>
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<p><table border=1 align=center>
+<tr><th>Tcl Command</th><th>Description</th></tr>
+<tr><td><a href="../api_tcl/db_remove.html"><b>berkdb dbremove</b></a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_tcl/db_rename.html"><b>berkdb dbrename</b></a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_tcl/env_open.html"><b>berkdb env</b></a></td><td>Create an environment handle</td></tr>
+<tr><td><a href="../api_tcl/env_remove.html"><b>berkdb envremove</b></a></td><td>Remove an environment</td></tr>
+<tr><td><a href="../api_tcl/db_open.html"><b>berkdb open</b></a></td><td>Create a database handle</td></tr>
+<tr><td><a href="../api_tcl/version.html"><b>berkdb version</b></a></td><td>Return version information</td></tr>
+<tr><td><br></td><td><br></td></tr>
+<tr><td><a href="../api_tcl/env_close.html"><i>env</i> <b>close</b></a></td><td>Close an environment</td></tr>
+<tr><td><a href="../api_tcl/txn.html"><i>env</i> <b>txn</b></a></td><td>Begin a transaction</td></tr>
+<tr><td><br></td><td><br></td></tr>
+<tr><td><a href="../api_tcl/db_close.html"><i>db</i> <b>close</b></a></td><td>Close a database</td></tr>
+<tr><td><a href="../api_tcl/db_count.html"><i>db</i> <b>count</b></a></td><td>Return a count of a key's data items</td></tr>
+<tr><td><a href="../api_tcl/db_cursor.html"><i>db</i> <b>cursor</b></a></td><td>Open a cursor into a database</td></tr>
+<tr><td><a href="../api_tcl/db_del.html"><i>db</i> <b>del</b></a></td><td>Delete items from a database</td></tr>
+<tr><td><a href="../api_tcl/db_get.html"><i>db</i> <b>get</b></a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_tcl/db_get_join.html"><i>db</i> <b>get_join</b></a></td><td>Get items from a database join</td></tr>
+<tr><td><a href="../api_tcl/db_get_type.html"><i>db</i> <b>get_type</b></a></td><td>Return the database type</td></tr>
+<tr><td><a href="../api_tcl/db_is_byteswapped.html"><i>db</i> <b>is_byteswapped</b></a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><a href="../api_tcl/db_join.html"><i>db</i> <b>join</b></a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><a href="../api_tcl/db_put.html"><i>db</i> <b>put</b></a></td><td>Store items into a database</td></tr>
+<tr><td><a href="../api_tcl/db_stat.html"><i>db</i> <b>stat</b></a></td><td>Return database statistics</td></tr>
+<tr><td><a href="../api_tcl/db_sync.html"><i>db</i> <b>sync</b></a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><br></td><td><br></td></tr>
+<tr><td><a href="../api_tcl/dbc_close.html"><i>dbc</i> <b>close</b></a></td><td>Close a cursor</td></tr>
+<tr><td><a href="../api_tcl/dbc_del.html"><i>dbc</i> <b>del</b></a></td><td>Delete by cursor</td></tr>
+<tr><td><a href="../api_tcl/dbc_dup.html"><i>dbc</i> <b>dup</b></a></td><td>Duplicate a cursor</td></tr>
+<tr><td><a href="../api_tcl/dbc_get.html"><i>dbc</i> <b>get</b></a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_tcl/dbc_put.html"><i>dbc</i> <b>put</b></a></td><td>Store by cursor</td></tr>
+<tr><td><br></td><td><br></td></tr>
+<tr><td><a href="../api_tcl/txn_abort.html"><i>txn</i> <b>abort</b></a></td><td>Abort a transaction</td></tr>
+<tr><td><a href="../api_tcl/txn_commit.html"><i>txn</i> <b>commit</b></a></td><td>Commit a transaction</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/tcl_pindex.html b/bdb/docs/api_tcl/tcl_pindex.html
new file mode 100644
index 00000000000..c82153bad43
--- /dev/null
+++ b/bdb/docs/api_tcl/tcl_pindex.html
@@ -0,0 +1,258 @@
+<html>
+<head>
+<title>Berkeley DB: Tcl Interface Index</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Tcl Interface Index</h1>
+<center>
+<table cellspacing=0 cellpadding=0>
+<tr><td align=right> configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#5">1.85</a> API compatibility</td></tr>
+<tr><td align=right> building a utility to dump Berkeley DB </td><td><a href="../ref/build_unix/conf.html#7">1.85</a> databases</td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.2.0/intro.html#2">2.0</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.0/intro.html#2">3.0</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.1/intro.html#2">3.1</a></td></tr>
+<tr><td align=right> Upgrading to release </td><td><a href="../ref/upgrade.3.2/intro.html#2">3.2</a></td></tr>
+<tr><td align=right> selecting an </td><td><a href="../ref/am_conf/select.html#2">access</a> method</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/intro.html#2">access</a> methods</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/aix.html#2">AIX</a></td></tr>
+<tr><td align=right> programmatic </td><td><a href="../ref/arch/apis.html#2">APIs</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_archive.html#3">archive</a> log files</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_remove.html#2">berkdb</a> dbremove</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_rename.html#2">berkdb</a> dbrename</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/env_open.html#2">berkdb</a> env</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/env_remove.html#2">berkdb</a> envremove</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_open.html#2">berkdb</a> open</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/version.html#2">berkdb</a> version</td></tr>
+<tr><td align=right> </td><td><a href="../utility/berkeley_db_svc.html#2">berkeley_db_svc</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/intro.html#2">building</a> for UNIX</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/notes.html#2">building</a> for UNIX FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/intro.html#2">building</a> for VxWorks</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/faq.html#2">building</a> for VxWorks FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/intro.html#2">building</a> for Win32</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/faq.html#2">building</a> for Windows FAQ</td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/byteorder.html#2">byte</a> order</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/byteorder.html#2">byte</a> ordering</td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#6">C++</a> API</td></tr>
+<tr><td align=right> flushing the database </td><td><a href="../ref/am/sync.html#2">cache</a></td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/cachesize.html#2">cache</a> size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/archival.html#3">catastrophic</a> recovery</td></tr>
+<tr><td align=right>Patches, Updates and </td><td><a href="http://www.sleepycat.com/update/index.html">Change</a> logs</td></tr>
+<tr><td align=right> utility to take </td><td><a href="../utility/db_checkpoint.html#3">checkpoints</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curclose.html#2">closing</a> a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/close.html#2">closing</a> a database</td></tr>
+<tr><td align=right> specifying a Btree </td><td><a href="../ref/am_conf/bt_compare.html#2">comparison</a> function</td></tr>
+<tr><td align=right> changing </td><td><a href="../ref/build_unix/flags.html#2">compile</a> or load options</td></tr>
+<tr><td align=right> </td><td><a href="../ref/cam/intro.html#2">Concurrent</a> Data Store</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/conf.html#2">configuring</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right> recovering </td><td><a href="../ref/am/verify.html#4">corrupted</a> databases</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/count.html#2">counting</a> data items for a key</td></tr>
+<tr><td align=right> closing a </td><td><a href="../ref/am/curclose.html#3">cursor</a></td></tr>
+<tr><td align=right> deleting records with a </td><td><a href="../ref/am/curdel.html#3">cursor</a></td></tr>
+<tr><td align=right> duplicating a </td><td><a href="../ref/am/curdup.html#3">cursor</a></td></tr>
+<tr><td align=right> retrieving records with a </td><td><a href="../ref/am/curget.html#3">cursor</a></td></tr>
+<tr><td align=right> storing records with a </td><td><a href="../ref/am/curput.html#3">cursor</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/stability.html#2">cursor</a> stability</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/cursor.html#2">cursors</a></td></tr>
+<tr><td align=right> utility to upgrade </td><td><a href="../utility/db_upgrade.html#3">database</a> files</td></tr>
+<tr><td align=right> utility to verify </td><td><a href="../utility/db_verify.html#3">database</a> files</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_close.html#2">db</a> close</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/dbc_close.html#2">dbc</a> close</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_count.html#2">db</a> count</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_cursor.html#2">db</a> cursor</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_del.html#2">db</a> del</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/dbc_del.html#2">dbc</a> del</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/dbc_dup.html#2">dbc</a> dup</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_get.html#2">db</a> get</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/dbc_get.html#2">dbc</a> get</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_get_join.html#2">db</a> get_join</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_get_type.html#2">db</a> get_type</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_is_byteswapped.html#2">db</a> is_byteswapped</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_join.html#2">db</a> join</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_put.html#2">db</a> put</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_stat.html#2">db</a> stat</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/db_sync.html#2">db</a> sync</td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_archive.html#2">db_archive</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/dbc_put.html#2">dbc</a> put</td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_checkpoint.html#2">db_checkpoint</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#DB_CONFIG">DB_CONFIG</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_deadlock.html#2">db_deadlock</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_dump.html#2">db_dump</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#DB_HOME">DB_HOME</a></td></tr>
+<tr><td align=right>File naming</td><td><a href="../ref/env/naming.html#db_home">db_home</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_load.html#2">db_load</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_printlog.html#2">db_printlog</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_recover.html#2">db_recover</a></td></tr>
+<tr><td align=right>Error returns to applications</td><td><a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_stat.html#2">db_stat</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_upgrade.html#2">db_upgrade</a></td></tr>
+<tr><td align=right> </td><td><a href="../utility/db_verify.html#2">db_verify</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/dead.html#2">deadlocks</a></td></tr>
+<tr><td align=right> utility to detect </td><td><a href="../utility/db_deadlock.html#3">deadlocks</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/debug/common.html#2">debugging</a> applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/delete.html#2">deleting</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curdel.html#2">deleting</a> records with a cursor</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--disable-bigfile">--disable-bigfile</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/diskspace.html#2">disk</a> space requirements</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_dump.html#3">dump</a> databases as text files</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/dup.html#2">duplicate</a> data items</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curdup.html#2">duplicating</a> a cursor</td></tr>
+<tr><td align=right> configuring </td><td><a href="../ref/build_unix/conf.html#9">dynamic</a> shared libraries</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-compat185">--enable-compat185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-cxx">--enable-cxx</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug">--enable-debug</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug_rop">--enable-debug_rop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-debug_wop">--enable-debug_wop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-dump185">--enable-dump185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-dynamic">--enable-dynamic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-java">--enable-java</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-rpc">--enable-rpc</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-shared">--enable-shared</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-tcl">--enable-tcl</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-test">--enable-test</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a></td></tr>
+<tr><td align=right> byte </td><td><a href="../ref/program/byteorder.html#3">endian</a></td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/env_close.html#2">env</a> close</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/txn.html#2">env</a> txn</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/env/create.html#2">environment</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/environ.html#2">environment</a> variables</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/error.html#2">error</a> handling</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/errorret.html#3">error</a> name space</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/errorret.html#2">error</a> returns</td></tr>
+<tr><td align=right> </td><td><a href="../ref/install/file.html#2">/etc/magic</a></td></tr>
+<tr><td align=right> selecting a Queue </td><td><a href="../ref/am_conf/extentsize.html#2">extent</a> size</td></tr>
+<tr><td align=right> Java </td><td><a href="../ref/java/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right> Tcl </td><td><a href="../ref/tcl/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right> configuring without large </td><td><a href="../ref/build_unix/conf.html#4">file</a> support</td></tr>
+<tr><td align=right> </td><td><a href="../ref/install/file.html#3">file</a> utility</td></tr>
+<tr><td align=right> recovery and </td><td><a href="../ref/transapp/filesys.html#2">filesystem</a> operations</td></tr>
+<tr><td align=right> remote </td><td><a href="../ref/env/remote.html#2">filesystems</a></td></tr>
+<tr><td align=right> page </td><td><a href="../ref/am_conf/h_ffactor.html#2">fill</a> factor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/freebsd.html#2">FreeBSD</a></td></tr>
+<tr><td align=right> Berkeley DB </td><td><a href="../ref/program/scope.html#3">free-threaded</a> handles</td></tr>
+<tr><td align=right> specifying a database </td><td><a href="../ref/am_conf/h_hash.html#2">hash</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/h_nelem.html#2">hash</a> table size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/hpux.html#2">HP-UX</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/install.html#2">installing</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right> </td><td><a href="../ref/program/compatible.html#2">interface</a> compatibility</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/irix.html#2">IRIX</a></td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#10">Java</a> API</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/compat.html#2">Java</a> compatibility</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/conf.html#2">Java</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/java/faq.html#3">Java</a> FAQ</td></tr>
+<tr><td align=right> logical </td><td><a href="../ref/am/join.html#2">join</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/program/dbsizes.html#2">limits</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/linux.html#2">Linux</a></td></tr>
+<tr><td align=right> changing compile or </td><td><a href="../ref/build_unix/flags.html#3">load</a> options</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_load.html#3">load</a> text files into databases</td></tr>
+<tr><td align=right> standard </td><td><a href="../ref/lock/stdmode.html#2">lock</a> modes</td></tr>
+<tr><td align=right> page-level </td><td><a href="../ref/lock/page.html#2">locking</a></td></tr>
+<tr><td align=right> two-phase </td><td><a href="../ref/lock/twopl.html#2">locking</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/nondb.html#2">locking</a> and non-Berkeley DB applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/config.html#2">locking</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/am_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right> Berkeley DB Concurrent Data Store </td><td><a href="../ref/lock/cam_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/intro.html#2">locking</a> introduction</td></tr>
+<tr><td align=right> sizing the </td><td><a href="../ref/lock/max.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right> </td><td><a href="../ref/lock/notxn.html#2">locking</a> without transactions</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/limits.html#2">log</a> file limits</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/logfile.html#2">log</a> file removal</td></tr>
+<tr><td align=right> utility to display </td><td><a href="../utility/db_printlog.html#3">log</a> files as text</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/config.html#2">logging</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/log/intro.html#2">logging</a> introduction</td></tr>
+<tr><td align=right> </td><td><a href="../ref/mp/config.html#2">memory</a> pool configuration</td></tr>
+<tr><td align=right> Berkeley DB library </td><td><a href="../ref/program/namespace.html#2">name</a> spaces</td></tr>
+<tr><td align=right> file </td><td><a href="../ref/env/naming.html#2">naming</a></td></tr>
+<tr><td align=right> retrieving Btree records by </td><td><a href="../ref/am_conf/bt_recnum.html#2">number</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/open.html#2">opening</a> a database</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/osf1.html#2">OSF/1</a></td></tr>
+<tr><td align=right> selecting a </td><td><a href="../ref/am_conf/pagesize.html#2">page</a> size</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/partial.html#2">partial</a> record storage and retrieval</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/update/index.html">Patches,</a> Updates and Change logs</td></tr>
+<tr><td align=right> </td><td><a href="../ref/perl/intro.html#2">Perl</a></td></tr>
+<tr><td align=right> Sleepycat Software's Berkeley DB </td><td><a href="../ref/intro/products.html#2">products</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/qnx.html#2">QNX</a></td></tr>
+<tr><td align=right> logical </td><td><a href="../ref/am_conf/logrec.html#2">record</a> numbers</td></tr>
+<tr><td align=right> managing </td><td><a href="../ref/am_conf/recno.html#2">record-based</a> databases</td></tr>
+<tr><td align=right> logically renumbering </td><td><a href="../ref/am_conf/renumber.html#2">records</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_recover.html#3">recover</a> database environments</td></tr>
+<tr><td align=right> Berkeley DB </td><td><a href="../ref/transapp/reclimit.html#2">recoverability</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/get.html#2">retrieving</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curget.html#2">retrieving</a> records with a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/rpc/client.html#2">RPC</a> client</td></tr>
+<tr><td align=right> configuring a </td><td><a href="../ref/build_unix/conf.html#11">RPC</a> client/server</td></tr>
+<tr><td align=right> utility to support </td><td><a href="../utility/berkeley_db_svc.html#3">RPC</a> client/server</td></tr>
+<tr><td align=right> </td><td><a href="../ref/rpc/server.html#2">RPC</a> server</td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/verify.html#3">salvage</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/sco.html#2">SCO</a></td></tr>
+<tr><td align=right> Berkeley DB handle </td><td><a href="../ref/program/scope.html#2">scope</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/env/security.html#2">security</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/sendmail/intro.html#2">Sendmail</a></td></tr>
+<tr><td align=right> configuring </td><td><a href="../ref/build_unix/conf.html#8">shared</a> libraries</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/shlib.html#2">shared</a> libraries</td></tr>
+<tr><td align=right> application </td><td><a href="../ref/program/appsignals.html#2">signal</a> handling</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/">Sleepycat</a> Software</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/solaris.html#2">Solaris</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/distrib/layout.html#2">source</a> code layout</td></tr>
+<tr><td align=right> cursor </td><td><a href="../ref/am/stability.html#3">stability</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/stat.html#2">statistics</a></td></tr>
+<tr><td align=right> utility to display database and environment </td><td><a href="../utility/db_stat.html#3">statistics</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/put.html#2">storing</a> records</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/curput.html#2">storing</a> records with a cursor</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/sunos.html#2">SunOS</a></td></tr>
+<tr><td align=right> loading Berkeley DB with </td><td><a href="../ref/tcl/intro.html#2">Tcl</a></td></tr>
+<tr><td align=right> using Berkeley DB with </td><td><a href="../ref/tcl/using.html#2">Tcl</a></td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#12">Tcl</a> API</td></tr>
+<tr><td align=right> </td><td><a href="../ref/tcl/program.html#2">Tcl</a> API programming notes</td></tr>
+<tr><td align=right> </td><td><a href="../ref/tcl/faq.html#3">Tcl</a> FAQ</td></tr>
+<tr><td align=right> configuring the </td><td><a href="../ref/build_unix/conf.html#13">test</a> suite</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/test/run.html#2">test</a> suite</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/build_unix/test.html#2">test</a> suite under UNIX</td></tr>
+<tr><td align=right> running the </td><td><a href="../ref/build_win/test.html#2">test</a> suite under Windows</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am_conf/re_source.html#2">text</a> backing files</td></tr>
+<tr><td align=right> loading </td><td><a href="../ref/dumpload/text.html#2">text</a> into databases</td></tr>
+<tr><td align=right> dumping/loading </td><td><a href="../ref/dumpload/utility.html#2">text</a> to/from databases</td></tr>
+<tr><td align=right> building </td><td><a href="../ref/program/mt.html#2">threaded</a> applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/txn/config.html#2">transaction</a> configuration</td></tr>
+<tr><td align=right> </td><td><a href="../ref/txn/limits.html#2">transaction</a> limits</td></tr>
+<tr><td align=right> administering </td><td><a href="../ref/transapp/admin.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> archival in </td><td><a href="../ref/transapp/archival.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> checkpoints in </td><td><a href="../ref/transapp/checkpoint.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> deadlock detection in </td><td><a href="../ref/transapp/deadlock.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> recovery in </td><td><a href="../ref/transapp/recovery.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/throughput.html#2">transaction</a> throughput</td></tr>
+<tr><td align=right> </td><td><a href="../ref/transapp/intro.html#2">Transactional</a> Data Store</td></tr>
+<tr><td align=right> Berkeley DB and </td><td><a href="../ref/txn/intro.html#2">transactions</a></td></tr>
+<tr><td align=right> nested </td><td><a href="../ref/txn/nested.html#2">transactions</a></td></tr>
+<tr><td align=right> configuring Berkeley DB with the </td><td><a href="../ref/xa/config.html#2">Tuxedo</a> System</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/txn_abort.html#2">txn</a> abort</td></tr>
+<tr><td align=right> </td><td><a href="../api_tcl/txn_commit.html#2">txn</a> commit</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_unix/ultrix.html#2">Ultrix</a></td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_unix/notes.html#3">UNIX</a> FAQ</td></tr>
+<tr><td align=right> configuring Berkeley DB for </td><td><a href="../ref/build_unix/conf.html#3">UNIX</a> systems</td></tr>
+<tr><td align=right>Patches, </td><td><a href="http://www.sleepycat.com/update/index.html">Updates</a> and Change logs</td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_upgrade.html#4">upgrade</a> database files</td></tr>
+<tr><td align=right> </td><td><a href="../ref/am/upgrade.html#2">upgrading</a> databases</td></tr>
+<tr><td align=right> </td><td><a href="../ref/arch/utilities.html#2">utilities</a></td></tr>
+<tr><td align=right> database </td><td><a href="../ref/am/verify.html#2">verification</a></td></tr>
+<tr><td align=right> utility to </td><td><a href="../utility/db_verify.html#4">verify</a> database files</td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_vxworks/faq.html#3">VxWorks</a> FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_vxworks/notes.html#2">VxWorks</a> notes</td></tr>
+<tr><td align=right> running the test suite under </td><td><a href="../ref/build_win/test.html#3">Windows</a></td></tr>
+<tr><td align=right> building for </td><td><a href="../ref/build_win/faq.html#3">Windows</a> FAQ</td></tr>
+<tr><td align=right> </td><td><a href="../ref/build_win/notes.html#2">Windows</a> notes</td></tr>
+<tr><td align=right>Configuring Berkeley DB</td><td><a href="../ref/build_unix/conf.html#--with-tcl=DIR">--with-tcl=DIR</a></td></tr>
+<tr><td align=right> </td><td><a href="../ref/xa/intro.html#2">XA</a> Resource Manager</td></tr>
+</table>
+</center>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/txn.html b/bdb/docs/api_tcl/txn.html
new file mode 100644
index 00000000000..4e66a96a6ae
--- /dev/null
+++ b/bdb/docs/api_tcl/txn.html
@@ -0,0 +1,62 @@
+<!--$Id: txn.so,v 11.12 2000/02/19 20:57:57 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: env txn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>env</i> <b>txn</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>env txn
+ [-nosync]
+ [-nowait]
+ [-parent txnid]
+ [-sync]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>env</i> <b>txn</b> command begins a transaction. The returned transaction
+handle is bound to a Tcl command of the form <b>env.txnX</b>, where X
+is an integer starting at 0 (e.g., env0.txn0 and env0.txn1). It is
+through this Tcl command that the script accesses the transaction methods.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-nosync<dd>Do not synchronously flush the log when this transaction commits or
+prepares. This means the transaction will exhibit the ACI (atomicity,
+consistency and isolation) properties, but not D (durability), i.e.,
+database integrity will be maintained but it is possible that this
+transaction may be undone during recovery instead of being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of
+the <b>berkdb env</b> interface.
+<p><dt>-nowait<dd>If a lock is unavailable for any Berkeley DB operation performed in the context
+of this transaction, throw a Tcl error immediately instead of blocking on
+the lock.
+<p><dt>-parent txnid<dd>Create the new transaction as a nested transaction, with the specified
+transaction indicated as its parent. Transactions may be nested to any
+level.
+<p><dt>-sync<dd>Synchronously flush the log when this transaction commits or prepares. This
+means the transaction will exhibit all of the ACID (atomicity, consistency,
+isolation, and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<b>-nosync</b> option was specified to the <b>berkdb env</b> interface.
+</dl>
+<p>The <i>env</i> <b>txn</b> command returns a transaction handle on success.
+<p>In the case of error, a Tcl error is thrown.
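+<p>For example (an illustrative sketch; <b>$env</b> is assumed to be a
+handle from <b>berkdb env -create -txn ...</b>, <b>$db</b> a database
+opened in that environment, and the <b>-txn</b> flag to <i>db</i> <b>put</b>
+is an assumption based on the C API):
+<p><blockquote><pre># Begin a transaction and use it for a database operation.
+set t [$env txn]                       ;# e.g. env0.txn0
+$db put -txn $t mykey myvalue          ;# -txn flag assumed
+$t commit
+
+# Begin a nested transaction; its resolution is tied to its parent.
+set parent [$env txn]
+set child  [$env txn -parent $parent]</pre></blockquote>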
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/txn_abort.html b/bdb/docs/api_tcl/txn_abort.html
new file mode 100644
index 00000000000..8b147883f6d
--- /dev/null
+++ b/bdb/docs/api_tcl/txn_abort.html
@@ -0,0 +1,45 @@
+<!--$Id: txn_abort.so,v 11.12 2000/06/28 14:30:15 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn abort</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>txn</i> <b>abort</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>txn abort
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>txn</i> <b>abort</b> command causes an abnormal termination of the
+transaction.
+<p>The log is played backwards and any necessary recovery operations are
+performed. After recovery is completed, all locks held by the
+transaction are acquired by the parent transaction in the case of a
+nested transaction or released in the case of a non-nested transaction.
+As is the case for <i>txn</i> <b>commit</b>, applications that require strict
+two-phase locking should not explicitly release any locks.
+<p>In the case of nested transactions, aborting the parent transaction
+causes all children of that transaction to be aborted.
+<p>Once <i>txn</i> <b>abort</b> has been called, regardless of its return, the
+<b>txn</b> handle may not be accessed again.
+<p>The <i>txn</i> <b>abort</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
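+<p>For example (a minimal sketch; <b>$env</b>, <b>$db</b> and the
+<b>-txn</b> flag to <i>db</i> <b>put</b> are assumptions based on other
+pages of this manual and the C API):
+<p><blockquote><pre>set t [$env txn]
+if {[catch {$db put -txn $t mykey myvalue} err]} {
+    # Undo any work done in the transaction; the $t handle may not
+    # be used again after the abort.
+    $t abort
+} else {
+    $t commit
+}</pre></blockquote>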
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/txn_commit.html b/bdb/docs/api_tcl/txn_commit.html
new file mode 100644
index 00000000000..acd1f2d1b29
--- /dev/null
+++ b/bdb/docs/api_tcl/txn_commit.html
@@ -0,0 +1,68 @@
+<!--$Id: txn_commit.so,v 11.14 2000/06/28 14:30:15 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: txn commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><i>txn</i> <b>commit</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>txn commit
+ [-nosync]
+ [-sync]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>txn</i> <b>commit</b> command ends the transaction.
+<p>In the case of nested transactions, if the transaction is a parent
+transaction with unresolved (neither committed nor aborted) child
+transactions, the child transactions are aborted and the commit of the
+parent will succeed.
+<p>In the case of nested transactions, if the transaction is a child
+transaction, its locks are not released, but are acquired by its parent.
+While the commit of the child transaction will succeed, the actual
+resolution of the child transaction is postponed until the parent
+transaction is committed or aborted, i.e., if its parent transaction
+commits, it will be committed, and if its parent transaction aborts, it
+will be aborted.
+<p>If the <b>-nosync</b> option is not specified, a commit log record is
+written and flushed to disk, as are all previously written log records.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-nosync<dd>Do not synchronously flush the log. This means the transaction will
+exhibit the ACI (atomicity, consistency and isolation) properties, but
+not D (durability), i.e., database integrity will be maintained but it is
+possible that this transaction may be undone during recovery instead of
+being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of
+the <b>berkdb env</b> interface.
+<p><dt>-sync<dd>Synchronously flush the log. This means the transaction will exhibit all of
+the ACID (atomicity, consistency, isolation, and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<b>-nosync</b> option was specified to the <b>berkdb env</b> or
+<i>env</i> <b>txn</b> interfaces.
+</dl>
+<p>Once <i>txn</i> <b>commit</b> has been called, regardless of its return, the
+<b>txn</b> handle may not be accessed again. If <i>txn</i> <b>commit</b>
+encounters an error, then this transaction and all child transactions
+of this transaction are aborted.
+<p>The <i>txn</i> <b>commit</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
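+<p>For example (a minimal sketch; <b>$env</b> is assumed to be a
+transactional environment handle):
+<p><blockquote><pre>set t [$env txn]
+# ... perform transactional operations ...
+
+# Default behavior: the commit record and all prior log records are
+# flushed to disk before commit returns.
+$t commit
+
+# Alternatively, trade durability of this transaction for speed:
+#   $t commit -nosync
+# The handle in $t may not be used again after commit returns.</pre></blockquote>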
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/api_tcl/version.html b/bdb/docs/api_tcl/version.html
new file mode 100644
index 00000000000..ab4b901f1e9
--- /dev/null
+++ b/bdb/docs/api_tcl/version.html
@@ -0,0 +1,39 @@
+<!--$Id: version.so,v 11.10 1999/12/20 08:52:35 bostic Exp $-->
+<!--$Id: m4.tcl,v 11.17 2000/04/24 17:31:11 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb version</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1><b>berkdb version</b></h1>
+</td>
+<td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb version
+ [-string]
+</pre></h3>
+<h1>Description</h1>
+<p>Return a list of the form {major minor patch} for the major, minor and
+patch levels of the underlying Berkeley DB release.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-string<dd>Return a string with formatted Berkeley DB version information.
+</dl>
+<p>In the case of error, a Tcl error is thrown.
+</tt>
+<table><tr><td><br></td><td width="1%">
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/images/api.gif b/bdb/docs/images/api.gif
new file mode 100644
index 00000000000..dafd5772a14
--- /dev/null
+++ b/bdb/docs/images/api.gif
Binary files differ
diff --git a/bdb/docs/images/next.gif b/bdb/docs/images/next.gif
new file mode 100644
index 00000000000..667ee061a9f
--- /dev/null
+++ b/bdb/docs/images/next.gif
Binary files differ
diff --git a/bdb/docs/images/prev.gif b/bdb/docs/images/prev.gif
new file mode 100644
index 00000000000..11dfc5256ee
--- /dev/null
+++ b/bdb/docs/images/prev.gif
Binary files differ
diff --git a/bdb/docs/images/ps.gif b/bdb/docs/images/ps.gif
new file mode 100644
index 00000000000..0f565bc1db7
--- /dev/null
+++ b/bdb/docs/images/ps.gif
Binary files differ
diff --git a/bdb/docs/images/ref.gif b/bdb/docs/images/ref.gif
new file mode 100644
index 00000000000..75be9c1f348
--- /dev/null
+++ b/bdb/docs/images/ref.gif
Binary files differ
diff --git a/bdb/docs/images/sleepycat.gif b/bdb/docs/images/sleepycat.gif
new file mode 100644
index 00000000000..5e768f74f2b
--- /dev/null
+++ b/bdb/docs/images/sleepycat.gif
Binary files differ
diff --git a/bdb/docs/index.html b/bdb/docs/index.html
new file mode 100644
index 00000000000..ad638e776c9
--- /dev/null
+++ b/bdb/docs/index.html
@@ -0,0 +1,75 @@
+<!--$Id: index.html,v 1.11 2000/11/22 21:40:34 bostic Exp $-->
+<html>
+<head>
+<title>Berkeley DB (Version: 3.2.9)</title>
+</head>
+<body bgcolor=white>
+
+<p align=center>
+<img src="images/sleepycat.gif" alt="Sleepycat Software Inc.">
+<p align=right>
+<font size="+1">... <i>the embedded database company</i></font><sup>tm</sup>
+
+<p><br>
+<p><br>
+<center><h1><b>Berkeley DB</b></h1></center>
+
+<p>
+<table align=center cellpadding=4 border=2>
+<tr>
+ <th align=center width="50%">Interface Documentation</th>
+ <th align=center width="50%">Building Berkeley DB</th>
+</tr><tr valign=top>
+ <td>
+ <a href="api_c/c_index.html">C API</a><br>
+ <a href="api_c/c_pindex.html">C API Index</a><br>
+ <p>
+ <a href="api_cxx/cxx_index.html">C++ API</a><br>
+ <a href="api_cxx/cxx_pindex.html">C++ API Index</a><br>
+ <p>
+ <a href="api_java/java_index.html">Java API</a><br>
+ <a href="api_java/java_pindex.html">Java API Index</a><br>
+ <p>
+ <a href="api_tcl/tcl_index.html">Tcl API</a><br>
+ </td><td>
+ <a href="ref/build_unix/intro.html">Building for UNIX and QNX systems</a><br>
+ <p>
+ <a href="ref/build_win/intro.html">Building for Win32 platforms</a><br>
+ <p>
+ <a href="ref/build_vxworks/intro.html">Building for VxWorks platforms</a><br>
+ <p>
+ <a href="ref/upgrade.3.2/intro.html">Upgrading Applications to the 3.2 release</a><br>
+ </td>
+
+</tr><tr valign=top>
+ <th align=center>Additional Documentation</th>
+ <th align=center>Company and Product Information</th>
+</tr><tr valign=top>
+ <td>
+ <a href="utility/index.html">Supporting Utilities</a><br>
+ <p>
+ <a href="ref/toc.html">Programmer's Tutorial and Reference Guide</a><br>
+ </td><td>
+ <a href="sleepycat/contact.html">Contacting Sleepycat Software</a><br>
+ <a href="ref/intro/products.html">Commercial Product List</a><br>
+ <a href="sleepycat/license.html">License</a><br>
+ <a href="http://www.sleepycat.com/update/index.html">Release Patches and Change Logs</a><br>
+ <a href="http://www.sleepycat.com">Sleepycat Software Home Page</a><br>
+ </td>
+</tr>
+
+</table>
+
+<p>
+<center><b>
+Version 3.2.9, January 24, 2001<br>
+Copyright 1997-2000 Sleepycat Software, Inc. All Rights Reserved
+</b></center>
+
+<p><br>
+<p><br>
+<p><br>
+<p><h5><a href="sleepycat/legal.html">Legal Notices</a></h5>
+
+</body>
+</html>
diff --git a/bdb/docs/ref/am/close.html b/bdb/docs/ref/am/close.html
new file mode 100644
index 00000000000..04b8beacb6a
--- /dev/null
+++ b/bdb/docs/ref/am/close.html
@@ -0,0 +1,43 @@
+<!--$Id: close.so,v 10.15 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Closing a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/cursor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Closing a database</h1>
+<p>The <a href="../../api_c/db_close.html">DB-&gt;close</a> function is the standard interface for closing the database.
+By default, <a href="../../api_c/db_close.html">DB-&gt;close</a> also flushes all modified records from the
+database cache to disk.
+<p>There is one flag that you can set to customize <a href="../../api_c/db_close.html">DB-&gt;close</a>:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a><dd>Do not flush cached information to disk.
+</dl>
+<b>It is important to understand that flushing cached information
+to disk only minimizes the window of opportunity for corrupted data; it
+does not eliminate the possibility.</b>
+<p>While unlikely, it is possible for database corruption to happen if a
+system or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must do one of the following:
+<ul type=disc>
+<li>Use transactions and logging with automatic recovery.
+<li>Use logging and application-specific recovery.
+<li>Edit a copy of the database, and, once all applications
+using the database have successfully called <a href="../../api_c/db_close.html">DB-&gt;close</a>, use
+system operations (e.g., the POSIX rename system call) to atomically
+replace the original database with the updated copy.
+</ul>
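+<p>For example, assuming an already-open database handle <b>dbp</b> (a
+hypothetical name), a minimal sketch of a close with error checking might
+look as follows:
+<p><blockquote><pre>int ret;
+/* dbp is assumed to be an open DB handle. */
+if ((ret = dbp-&gt;close(dbp, 0)) != 0) {
+    fprintf(stderr, "DB-&gt;close: %s\n", db_strerror(ret));
+    return (1);
+}
+dbp = NULL;    /* The handle may not be used again. */
+</pre></blockquote>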
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/cursor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/count.html b/bdb/docs/ref/am/count.html
new file mode 100644
index 00000000000..92282641b6b
--- /dev/null
+++ b/bdb/docs/ref/am/count.html
@@ -0,0 +1,28 @@
+<!--$Id: count.so,v 1.3 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Data item count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/join.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curclose.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Data item count</h1>
+<p>Once a cursor has been initialized to reference a particular key in the
+database, it can be used to determine the number of data items that are
+stored for that key. The <a href="../../api_c/dbc_count.html">DBcursor-&gt;c_count</a> method returns
+this number of data items. The returned value is always one, unless
+the database supports duplicate data items, in which case it may be any
+number of items.
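+<p>For example, assuming a cursor handle <b>dbc</b> (a hypothetical name)
+that has already been positioned on a key, a minimal sketch might look as
+follows:
+<p><blockquote><pre>db_recno_t count;
+int ret;
+/* dbc is assumed to be a cursor positioned on a key. */
+if ((ret = dbc-&gt;c_count(dbc, &count, 0)) != 0) {
+    fprintf(stderr, "DBcursor-&gt;c_count: %s\n", db_strerror(ret));
+    return (1);
+}
+printf("%lu data items for this key\n", (unsigned long)count);
+</pre></blockquote>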
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/join.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curclose.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/curclose.html b/bdb/docs/ref/am/curclose.html
new file mode 100644
index 00000000000..52ccfeb8cd5
--- /dev/null
+++ b/bdb/docs/ref/am/curclose.html
@@ -0,0 +1,28 @@
+<!--$Id: curclose.so,v 10.12 2000/12/13 16:48:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Closing a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/count.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/stability.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Closing a cursor</h1>
+<p>The <a href="../../api_c/dbc_close.html">DBcursor-&gt;c_close</a> function is the standard interface for closing a cursor,
+after which the cursor may no longer be used. Although cursors are
+implicitly closed when the database they point to is closed, it is good
+programming practice to explicitly close cursors. In addition, in
+transactional systems, cursors may not exist outside of a transaction and
+so must be explicitly closed.
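+<p>For example, assuming an open cursor handle <b>dbc</b> (a hypothetical
+name), a minimal sketch might look as follows:
+<p><blockquote><pre>int ret;
+/* Discard the cursor; it may not be used again after this call. */
+if ((ret = dbc-&gt;c_close(dbc)) != 0) {
+    fprintf(stderr, "DBcursor-&gt;c_close: %s\n", db_strerror(ret));
+    return (1);
+}
+</pre></blockquote>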
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/count.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/stability.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/curdel.html b/bdb/docs/ref/am/curdel.html
new file mode 100644
index 00000000000..b0fe8f9573f
--- /dev/null
+++ b/bdb/docs/ref/am/curdel.html
@@ -0,0 +1,26 @@
+<!--$Id: curdel.so,v 10.11 2000/03/18 21:43:07 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deleting records with a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/curput.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curdup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deleting records with a cursor</h1>
+<p>The <a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a> function is the standard interface for deleting records from
+the database using a cursor. The <a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a> function deletes the record
+currently referenced by the cursor. In all cases, the cursor position is
+unchanged after a delete.
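+<p>For example, assuming an open cursor <b>dbc</b> and a hypothetical key
+value, a minimal sketch that positions the cursor and then deletes the
+record it references might look as follows:
+<p><blockquote><pre>DBT key, data;
+int ret;
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "apple";            /* Hypothetical key. */
+key.size = sizeof("apple");
+if ((ret = dbc-&gt;c_get(dbc, &key, &data, DB_SET)) == 0)
+    ret = dbc-&gt;c_del(dbc, 0);
+if (ret != 0)
+    fprintf(stderr, "delete failed: %s\n", db_strerror(ret));
+</pre></blockquote>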
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/curput.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curdup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/curdup.html b/bdb/docs/ref/am/curdup.html
new file mode 100644
index 00000000000..6c609b2e545
--- /dev/null
+++ b/bdb/docs/ref/am/curdup.html
@@ -0,0 +1,34 @@
+<!--$Id: curdup.so,v 11.5 2000/12/19 14:45:39 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Duplicating a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/curdel.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/join.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Duplicating a cursor</h1>
+<p>Once a cursor has been initialized (e.g., by a call to <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>),
+it can be thought of as identifying a particular location in a database.
+The <a href="../../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a> function permits an application to create a new cursor that
+has the same locking and transactional information as the cursor from
+which it is copied, and which optionally refers to the same position in
+the database.
+<p>In order to maintain a cursor position when an application is using
+locking, locks are maintained on behalf of the cursor until the cursor is
+closed. In cases when an application is using locking without
+transactions, cursor duplication is often required to avoid
+self-deadlocks. For further details, refer to
+<a href="../../ref/lock/am_conv.html">Access method locking conventions</a>.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/curdel.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/join.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/curget.html b/bdb/docs/ref/am/curget.html
new file mode 100644
index 00000000000..129fa272bbd
--- /dev/null
+++ b/bdb/docs/ref/am/curget.html
@@ -0,0 +1,74 @@
+<!--$Id: curget.so,v 10.14 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving records with a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/cursor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curput.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving records with a cursor</h1>
+<p>The <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> function is the standard interface for retrieving records from
+the database with a cursor. The <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> function takes a flag which
+controls how the cursor is positioned within the database and returns the
+key/data item associated with that positioning. Similar to
+<a href="../../api_c/db_get.html">DB-&gt;get</a>, <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> may also take a supplied key and retrieve
+the data associated with that key from the database. There are several
+flags that you can set to customize retrieval.
+<h3>Cursor position flags</h3>
+<p><dl compact>
+<p><dt><a href="../../api_c/dbc_get.html#DB_FIRST">DB_FIRST</a>, <a href="../../api_c/dbc_get.html#DB_LAST">DB_LAST</a><dd>Return the first (last) record in the database.
+<p><dt><a href="../../api_c/dbc_get.html#DB_NEXT">DB_NEXT</a>, <a href="../../api_c/dbc_get.html#DB_PREV">DB_PREV</a><dd>Return the next (previous) record in the database.
+<p><dt><a href="../../api_c/dbc_get.html#DB_NEXT_DUP">DB_NEXT_DUP</a><dd>Return the next record in the database, if it is a duplicate data item
+for the current key.
+<p><dt><a href="../../api_c/dbc_get.html#DB_NEXT_NODUP">DB_NEXT_NODUP</a>, <a href="../../api_c/dbc_get.html#DB_PREV_NODUP">DB_PREV_NODUP</a><dd>Return the next (previous) record in the database that is not a
+duplicate data item for the current key.
+<p><dt><a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a><dd>Return the record from the database currently referenced by the
+cursor.
+</dl>
+<h3>Retrieving specific key/data pairs</h3>
+<p><dl compact>
+<p><dt><a href="../../api_c/dbc_get.html#DB_SET">DB_SET</a><dd>Return the record from the database that matches the supplied key. In
+the case of duplicates, the first duplicate is returned and the cursor
+is positioned at the beginning of the duplicate list. The user can then
+traverse the duplicate entries for the key.
+<p><dt><a href="../../api_c/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a><dd>Return the smallest record in the database greater than or equal to the
+supplied key. This functionality permits partial key matches and range
+searches in the Btree access method.
+<p><dt><a href="../../api_c/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a><dd>Return the record from the database that matches both the supplied key
+and data items. This is particularly useful when there are large
+numbers of duplicate records for a key, as it allows the cursor to
+easily be positioned at the correct place for traversal of some part of
+a large set of duplicate records.
+</dl>
+<h3>Retrieving based on record numbers</h3>
+<p><dl compact>
+<p><dt><a href="../../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a><dd>If the underlying database is a Btree, and was configured so that it is
+possible to search it by logical record number, retrieve a specific
+record based on a record number argument.
+<p><dt><a href="../../api_c/dbc_get.html#DB_GET_RECNO">DB_GET_RECNO</a><dd>If the underlying database is a Btree, and was configured so that it is
+possible to search it by logical record number, return the record number
+for the record referenced by the cursor.
+</dl>
+<h3>Special-purpose flags</h3>
+<p><dl compact>
+<p><dt><a href="../../api_c/db_get.html#DB_CONSUME">DB_CONSUME</a><dd>Read-and-delete: the first record (the head) of the queue is returned and
+deleted. The underlying database must be a Queue.
+<p><dt><a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a><dd>Read-modify-write: acquire write locks instead of read locks during
+retrieval. This can enhance performance in threaded applications by
+reducing the chance of deadlock.
+</dl>
+<p>In all cases, the cursor is repositioned by a <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> operation
+to point to the newly returned key/data pair in the database.
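+<p>For example, assuming an open cursor <b>dbc</b> (a hypothetical name)
+and textual keys, a minimal sketch that walks every record in the database
+might look as follows:
+<p><blockquote><pre>DBT key, data;
+int ret;
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+/* DB_NEXT on an uninitialized cursor returns the first record. */
+while ((ret = dbc-&gt;c_get(dbc, &key, &data, DB_NEXT)) == 0)
+    printf("key: %.*s\n", (int)key.size, (char *)key.data);
+if (ret != DB_NOTFOUND)
+    fprintf(stderr, "DBcursor-&gt;c_get: %s\n", db_strerror(ret));
+</pre></blockquote>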
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/cursor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curput.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/curput.html b/bdb/docs/ref/am/curput.html
new file mode 100644
index 00000000000..0d5ef2725af
--- /dev/null
+++ b/bdb/docs/ref/am/curput.html
@@ -0,0 +1,40 @@
+<!--$Id: curput.so,v 10.12 2000/12/04 18:05:41 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Storing records with a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/curget.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curdel.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Storing records with a cursor</h1>
+<p>The <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> function is the standard interface for storing records into
+the database with a cursor. In general, <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> takes a key and
+inserts the associated data into the database, at a location controlled
+by a specified flag.
+<p>There are several flags that you can set to customize storage:
+<p><dl compact>
+<p><dt><a href="../../api_c/dbc_put.html#DB_AFTER">DB_AFTER</a><dd>Create a new record, immediately after the record currently referenced by
+the cursor.
+<p><dt><a href="../../api_c/dbc_put.html#DB_BEFORE">DB_BEFORE</a><dd>Create a new record, immediately before the record currently referenced by
+the cursor.
+<p><dt><a href="../../api_c/dbc_put.html#DB_CURRENT_PUT">DB_CURRENT</a><dd>Replace the data part of the record currently referenced by the cursor.
+<p><dt><a href="../../api_c/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a><dd>Create a new record as the first of the duplicate records for the
+supplied key.
+<p><dt><a href="../../api_c/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a><dd>Create a new record, as the last of the duplicate records for the supplied
+key.
+</dl>
+<p>In all cases, the cursor is repositioned by a <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> operation
+to point to the newly inserted key/data pair in the database.
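+<p>For example, assuming an open cursor <b>dbc</b> and hypothetical key and
+data values, a minimal sketch that adds a new duplicate at the end of the
+key's duplicate list might look as follows:
+<p><blockquote><pre>DBT key, data;
+int ret;
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "red";              /* Hypothetical key. */
+key.size = sizeof("red");
+data.data = "raspberry";       /* Hypothetical data item. */
+data.size = sizeof("raspberry");
+if ((ret = dbc-&gt;c_put(dbc, &key, &data, DB_KEYLAST)) != 0)
+    fprintf(stderr, "DBcursor-&gt;c_put: %s\n", db_strerror(ret));
+</pre></blockquote>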
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/curget.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curdel.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/cursor.html b/bdb/docs/ref/am/cursor.html
new file mode 100644
index 00000000000..529285b4a78
--- /dev/null
+++ b/bdb/docs/ref/am/cursor.html
@@ -0,0 +1,41 @@
+<!--$Id: cursor.so,v 10.15 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database cursors</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curget.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database cursors</h1>
+<p>A database cursor is a reference to a single key/data pair in the
+database. It supports traversal of the database and is the only way to
+access individual duplicate data items. Cursors are used for operating
+on collections of records, for iterating over a database, and for saving
+handles to individual records, so that they can be modified after they
+have been read.
+<p>The <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> function is the standard interface for opening a cursor
+into a database. Upon return the cursor is uninitialized; positioning
+occurs as part of the first cursor operation.
+<p>Once a database cursor has been opened, a set of access method
+operations can be performed. Each of these operations is performed
+using a method referenced from the returned cursor handle.
+<p><dl compact>
+<dt><a href="../../api_c/dbc_close.html">DBcursor-&gt;c_close</a><dd>Close the cursor
+<dt><a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a><dd>Delete a record
+<dt><a href="../../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a><dd>Duplicate a cursor
+<dt><a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a><dd>Retrieve a record
+<dt><a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a><dd>Store a record
+</dl>
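+<p>For example, assuming an open database handle <b>dbp</b> (a hypothetical
+name) and no transaction, a minimal sketch of opening a cursor might look
+as follows:
+<p><blockquote><pre>DBC *dbc;
+int ret;
+/* A NULL transaction handle means the cursor is not transaction protected. */
+if ((ret = dbp-&gt;cursor(dbp, NULL, &dbc, 0)) != 0) {
+    fprintf(stderr, "DB-&gt;cursor: %s\n", db_strerror(ret));
+    return (1);
+}
+</pre></blockquote>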
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curget.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/delete.html b/bdb/docs/ref/am/delete.html
new file mode 100644
index 00000000000..8ab612fa428
--- /dev/null
+++ b/bdb/docs/ref/am/delete.html
@@ -0,0 +1,28 @@
+<!--$Id: delete.so,v 10.14 2000/03/18 21:43:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deleting records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/sync.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deleting records</h1>
+<p>The <a href="../../api_c/db_del.html">DB-&gt;del</a> function is the standard interface for deleting records from
+the database. In general, <a href="../../api_c/db_del.html">DB-&gt;del</a> takes a key and deletes the
+data item associated with it from the database.
+<p>If the database has been configured to support duplicate records, the
+<a href="../../api_c/db_del.html">DB-&gt;del</a> function will remove all of the duplicate records. To remove
+individual duplicate records, you must use a Berkeley DB cursor interface.
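+<p>For example, assuming an open database handle <b>dbp</b>, no transaction,
+and a hypothetical key value, a minimal sketch might look as follows:
+<p><blockquote><pre>DBT key;
+int ret;
+memset(&key, 0, sizeof(key));
+key.data = "peach";            /* Hypothetical key. */
+key.size = sizeof("peach");
+/* Delete the key and all data items stored with it. */
+if ((ret = dbp-&gt;del(dbp, NULL, &key, 0)) != 0)
+    fprintf(stderr, "DB-&gt;del: %s\n", db_strerror(ret));
+</pre></blockquote>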
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/sync.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/error.html b/bdb/docs/ref/am/error.html
new file mode 100644
index 00000000000..737e6d66217
--- /dev/null
+++ b/bdb/docs/ref/am/error.html
@@ -0,0 +1,61 @@
+<!--$Id: error.so,v 10.14 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Error support</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/verify.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/bigpic.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Error support</h1>
+<p>Berkeley DB offers programmatic support for displaying error return values.
+<p>The <a href="../../api_c/env_strerror.html">db_strerror</a> interface returns a pointer to the error
+message corresponding to any Berkeley DB error return, similar to the ANSI C
+strerror interface, but is able to handle both system error returns and
+Berkeley DB specific return values.
+<p>For example:
+<p><blockquote><pre>int ret;
+if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) != 0) {
+ fprintf(stderr, "put failed: %s\n", db_strerror(ret));
+ return (1);
+}
+</pre></blockquote>
+<p>There are also two additional error interfaces, <a href="../../api_c/db_err.html">DB-&gt;err</a> and
+<a href="../../api_c/db_err.html">DB-&gt;errx</a>. These interfaces work like the ANSI C X3.159-1989 (ANSI C) printf
+interface, taking a printf-style format string and argument list, and
+writing a message constructed from the format string and arguments.
+<p>The <a href="../../api_c/db_err.html">DB-&gt;err</a> function appends the standard error string to the constructed
+message, the <a href="../../api_c/db_err.html">DB-&gt;errx</a> function does not. These interfaces provide simpler
+ways of displaying Berkeley DB error messages. For example, if your application
+tracks session IDs in a variable called session_id, it can include that
+information in its error messages.
+<p>Error messages can additionally be configured to always include a prefix
+(e.g., the program name) using the <a href="../../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> interface, as the
+following example illustrates:
+<p><blockquote><pre>#define DATABASE "access.db"
+int ret;
+dbp-&gt;set_errpfx(dbp, argv0);
+if ((ret =
+ dbp-&gt;open(dbp, DATABASE, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ dbp-&gt;errx(dbp,
+ "contact your system administrator: session ID was %d",
+ session_id);
+ return (1);
+}
+</pre></blockquote>
+<p>For example, if the program were called my_app, and the open call returned
+an EACCES system error, the error messages shown would appear as follows:
+<p><blockquote><pre>my_app: access.db: Permission denied.
+my_app: contact your system administrator: session ID was 14</pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/verify.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/bigpic.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/get.html b/bdb/docs/ref/am/get.html
new file mode 100644
index 00000000000..fda7a8eb2e6
--- /dev/null
+++ b/bdb/docs/ref/am/get.html
@@ -0,0 +1,39 @@
+<!--$Id: get.so,v 10.15 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/upgrade.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving records</h1>
+<p>The <a href="../../api_c/db_get.html">DB-&gt;get</a> function is the standard interface for retrieving records from
+the database. In general, <a href="../../api_c/db_get.html">DB-&gt;get</a> takes a key and returns the
+associated data from the database.
+<p>There are a few flags that you can set to customize retrieval:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a><dd>Search for a matching key and data item, i.e., only return success if both
+the key and the data item match those stored in the database.
+<p><dt><a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a><dd>Read-modify-write: acquire write locks instead of read locks during
+retrieval. This can enhance performance in threaded applications by
+reducing the chance of deadlock.
+<p><dt><a href="../../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a><dd>If the underlying database is a Btree, and was configured so that it
+is possible to search it by logical record number, retrieve a specific
+record.
+</dl>
+<p>If the database has been configured to support duplicate records,
+<a href="../../api_c/db_get.html">DB-&gt;get</a> will always return the first data item in the duplicate
+set.
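+<p>For example, assuming an open database handle <b>dbp</b>, no transaction,
+and a hypothetical key with textual data, a minimal sketch might look as
+follows:
+<p><blockquote><pre>DBT key, data;
+int ret;
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "apple";            /* Hypothetical key. */
+key.size = sizeof("apple");
+if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) != 0)
+    fprintf(stderr, "DB-&gt;get: %s\n", db_strerror(ret));
+else
+    printf("data: %.*s\n", (int)data.size, (char *)data.data);
+</pre></blockquote>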
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/upgrade.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/join.html b/bdb/docs/ref/am/join.html
new file mode 100644
index 00000000000..9d4dcdd0949
--- /dev/null
+++ b/bdb/docs/ref/am/join.html
@@ -0,0 +1,184 @@
+<!--$Id: join.so,v 10.21 2000/12/18 21:05:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Logical join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/curdup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/count.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Logical join</h1>
+<p>A logical join is a method of retrieving data from a primary database
+using criteria stored in a set of secondary indexes. A logical join
+requires that your data be organized as a primary database which
+contains the primary key and primary data field, and a set of secondary
+indexes. Each of the secondary indexes is indexed by a different
+secondary key, and, for each key in a secondary index, there is a set
+of duplicate data items that match the primary keys in the primary
+database.
+<p>For example, let's assume the need for an application that will return
+the names of stores in which one can buy fruit of a given color. We
+would first construct a primary database that lists types of fruit as
+the key item, and the store where you can buy them as the data item:
+<p><blockquote><pre><b>Primary key:</b> <b>Primary data:</b>
+apple Convenience Store
+blueberry Farmer's Market
+peach Shopway
+pear Farmer's Market
+raspberry Shopway
+strawberry Farmer's Market</pre></blockquote>
+<p>We would then create a secondary index with the key <b>color</b>, and,
+as the data items, the names of fruits of different colors.
+<p><blockquote><pre><b>Secondary key:</b> <b>Secondary data:</b>
+blue blueberry
+red apple
+red raspberry
+red strawberry
+yellow peach
+yellow pear</pre></blockquote>
+<p>This secondary index would allow an application to look up a color, and
+then use the data items to look up the stores where the colored fruit
+could be purchased. For example, by first looking up <b>blue</b>,
+the data item <b>blueberry</b> could be used as the lookup key in the
+primary database, returning <b>Farmer's Market</b>.
+<p>Your data must be organized in the following manner in order to use the
+<a href="../../api_c/db_join.html">DB-&gt;join</a> function:
+<p><ol>
+<p><li>The actual data should be stored in the database represented by the
+DB object used to invoke this function. Generally, this
+DB object is called the <i>primary</i>.
+<p><li>Secondary indexes should be stored in separate databases, whose keys
+are the values of the secondary indexes and whose data items are the
+primary keys corresponding to the records having the designated
+secondary key value. It is acceptable (and expected) that there may be
+duplicate entries in the secondary indexes.
+<p>These duplicate entries should be sorted for performance reasons, although
+it is not required. For more information see the <a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag
+to the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> function.
+</ol>
+<p>The <a href="../../api_c/db_join.html">DB-&gt;join</a> function reviews a list of secondary keys, and,
+when it finds a data item that appears as a data item for all of the
+secondary keys, it uses that data item as a lookup key into the primary
+database and returns the associated data item.
+<p>If there were another secondary index that had as its key the
+<b>cost</b> of the fruit, a similar lookup could be done on stores
+where inexpensive fruit could be purchased:
+<p><blockquote><pre><b>Secondary key:</b> <b>Secondary data:</b>
+expensive blueberry
+expensive peach
+expensive pear
+expensive strawberry
+inexpensive apple
+inexpensive pear
+inexpensive raspberry</pre></blockquote>
+<p>The <a href="../../api_c/db_join.html">DB-&gt;join</a> function provides logical join functionality. While not
+strictly cursor functionality, in that it is not a method off a cursor
+handle, it is more closely related to the cursor operations than to the
+standard DB operations.
+<p>It is also possible to do lookups based on multiple criteria in a single
+operation, e.g., it is possible to look up fruits that are both red and
+expensive in a single operation. If the same fruit appeared as a data
+item in both the color and expense indexes, then that fruit name would
+be used as the key for retrieval from the primary index, and would then
+return the store where expensive, red fruit could be purchased.
+<h3>Example</h3>
+<p>Consider the following three databases:
+<p><dl compact>
+<p><dt>personnel<dd><ul type=disc>
+<li>key = SSN
+<li>data = record containing name, address, phone number, job title
+</ul>
+<p><dt>lastname<dd><ul type=disc>
+<li>key = lastname
+<li>data = SSN
+</ul>
+<p><dt>jobs<dd><ul type=disc>
+<li>key = job title
+<li>data = SSN
+</ul>
+</dl>
+<p>Consider the following query:
+<p><blockquote><pre>Return the personnel records of all people named smith with the job
+title manager.</pre></blockquote>
+<p>This query finds all the records in the primary database (personnel)
+for which the criteria <b>lastname=smith and job title=manager</b> are
+true.
+<p>Assume that all databases have been properly opened and have the handles:
+pers_db, name_db, job_db. We also assume that we have an active
+transaction referenced by the handle txn.
+<p><blockquote><pre>DBC *name_curs, *job_curs, *join_curs;
+DBC *carray[3];
+DBT key, data;
+int ret, tret;
+<p>
+name_curs = NULL;
+job_curs = NULL;
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+<p>
+if ((ret =
+    name_db-&gt;cursor(name_db, txn, &name_curs, 0)) != 0)
+ goto err;
+key.data = "smith";
+key.size = sizeof("smith");
+if ((ret =
+ name_curs-&gt;c_get(name_curs, &key, &data, DB_SET)) != 0)
+ goto err;
+<p>
+if ((ret = job_db-&gt;cursor(job_db, txn, &job_curs, 0)) != 0)
+ goto err;
+key.data = "manager";
+key.size = sizeof("manager");
+if ((ret =
+ job_curs-&gt;c_get(job_curs, &key, &data, DB_SET)) != 0)
+ goto err;
+<p>
+carray[0] = name_curs;
+carray[1] = job_curs;
+carray[2] = NULL;
+<p>
+if ((ret =
+ pers_db-&gt;join(pers_db, carray, &join_curs, 0)) != 0)
+ goto err;
+while ((ret =
+ join_curs-&gt;c_get(join_curs, &key, &data, 0)) == 0) {
+ /* Process record returned in key/data. */
+}
+<p>
+/*
+ * If we exited the loop because we ran out of records,
+ * then it has completed successfully.
+ */
+if (ret == DB_NOTFOUND)
+ ret = 0;
+<p>
+err:
+if (join_curs != NULL &&
+ (tret = join_curs-&gt;c_close(join_curs)) != 0 && ret == 0)
+ ret = tret;
+if (name_curs != NULL &&
+ (tret = name_curs-&gt;c_close(name_curs)) != 0 && ret == 0)
+ ret = tret;
+if (job_curs != NULL &&
+ (tret = job_curs-&gt;c_close(job_curs)) != 0 && ret == 0)
+ ret = tret;
+<p>
+return (ret);
+</pre></blockquote>
+<p>The name cursor is positioned at the beginning of the duplicate list
+for <b>smith</b> and the job cursor is placed at the beginning of
+the duplicate list for <b>manager</b>. The join cursor is returned
+from the logical join call. This code then loops over the join cursor,
+retrieving the personnel record for each match until no more remain.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/curdup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/count.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/open.html b/bdb/docs/ref/am/open.html
new file mode 100644
index 00000000000..01c45339ed8
--- /dev/null
+++ b/bdb/docs/ref/am/open.html
@@ -0,0 +1,47 @@
+<!--$Id: open.so,v 10.15 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/ops.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/opensub.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening a database</h1>
+<p>The <a href="../../api_c/db_open.html">DB-&gt;open</a> function is the standard interface for opening a database,
+and takes five arguments:
+<p><dl compact>
+<p><dt>file<dd>The name of the file to be opened.
+<p><dt>database<dd>An optional database name.
+<p><dt>type<dd>The type of database to open. This value will be one of the four access
+methods Berkeley DB supports: DB_BTREE, DB_HASH, DB_QUEUE or DB_RECNO, or the
+special value DB_UNKNOWN, which allows you to open an existing file
+without knowing its type.
+<p><dt>flags<dd>Flags that modify the behavior of the open call, described below.
+<p><dt>mode<dd>The permissions to give to any created file.
+</dl>
+<p>There are a few flags that you can set to customize open:
+<p><dl compact>
+<p><dt><a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a><dd>Create the underlying database and any necessary physical files.
+<p><dt><a href="../../api_c/env_open.html#DB_NOMMAP">DB_NOMMAP</a><dd>Do not map this database into process memory.
+<p><dt><a href="../../api_c/db_open.html#DB_RDONLY">DB_RDONLY</a><dd>Treat the data base as readonly.
+<p><dt><a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a><dd>The returned handle is free-threaded, that is, it can be used
+simultaneously by multiple threads within the process.
+<p><dt><a href="../../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a><dd>Physically truncate the underlying database file, discarding all
+databases it contained. Underlying filesystem primitives are used to
+implement this flag. For this reason it is only applicable to the
+physical file and cannot be used to discard individual databases from
+within physical files.
+<p><dt><a href="../../api_c/db_set_feedback.html#DB_UPGRADE">DB_UPGRADE</a><dd>Upgrade the database format as necessary.
+</dl>
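+<p>For example, a minimal sketch that creates a handle and opens (creating
+it if necessary) a hypothetical Btree database file named <b>access.db</b>
+might look as follows:
+<p><blockquote><pre>DB *dbp;
+int ret;
+if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+    fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+    return (1);
+}
+/* access.db is a hypothetical file name. */
+if ((ret = dbp-&gt;open(dbp,
+    "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+    dbp-&gt;err(dbp, ret, "access.db");
+    return (1);
+}
+</pre></blockquote>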
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/ops.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/opensub.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/opensub.html b/bdb/docs/ref/am/opensub.html
new file mode 100644
index 00000000000..066ca4b7933
--- /dev/null
+++ b/bdb/docs/ref/am/opensub.html
@@ -0,0 +1,64 @@
+<!--$Id: opensub.so,v 10.6 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening multiple databases in a single file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/upgrade.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening multiple databases in a single file</h1>
+<p>Applications may create multiple databases within a single physical
+file. This is useful when the databases are both numerous and
+reasonably small, in order to avoid creating a large number of
+underlying files, or when it is desirable to include secondary index
+databases in the same file as the primary index database. Multiple
+databases are an administrative convenience and using them is unlikely
+to affect database performance. To open or create a file that will
+include more than a single database, specify a database name when
+calling the <a href="../../api_c/db_open.html">DB-&gt;open</a> method.
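+<p>For example, assuming a database handle <b>dbp</b> created with
+<a href="../../api_c/db_create.html">db_create</a>, a minimal sketch that creates a Btree database named
+<b>inventory</b> (a hypothetical name) within the physical file
+<b>access.db</b> might look as follows:
+<p><blockquote><pre>int ret;
+/* The second name argument selects a database within the file. */
+if ((ret = dbp-&gt;open(dbp,
+    "access.db", "inventory", DB_BTREE, DB_CREATE, 0664)) != 0) {
+    dbp-&gt;err(dbp, ret, "access.db: inventory");
+    return (1);
+}
+</pre></blockquote>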
+<p>Physical files do not need to be comprised of a single type of database,
+and databases in a file may be of any type (e.g., Btree, Hash or Recno),
+except for Queue databases. Queue databases must be created one per file
+and cannot share a file with any other database type. There is no limit
+on the number of databases that may be created in a single file other than
+the standard Berkeley DB file size and disk space limitations.
+<p>It is an error to attempt to open a second database in a file that was
+not initially created using a database name; that is, the file must
+initially be specified as capable of containing multiple databases for a
+second database to be created in it.
+<p>It is not an error to open a file that contains multiple databases without
+specifying a database name; however, the database type should be specified
+as DB_UNKNOWN and the database must be opened read-only. The handle that
+is returned from such a call is a handle on a database whose key values
+are the names of the databases stored in the database file and whose data
+values are opaque objects. No keys or data values may be modified or
+stored using this database handle.
+<p>Storing multiple databases in a single file is almost identical to
+storing each database in its own separate file. The one crucial
+difference is how locking and the underlying memory pool services must
+be configured. As an example, consider two databases instantiated
+in two different physical files. If access to each separate database
+is single-threaded, there is no reason to perform any locking of any
+kind, and the two databases may be read and written simultaneously.
+Further, there would be no requirement to create a shared database
+environment in which to open the databases. Because multiple databases
+in a file exist in a single physical file, opening two databases in the
+same file requires that locking be enabled, unless access to the
+databases is known to be single-threaded, that is, only one of the
+databases is ever accessed at a time. (As the locks for the two
+databases can only conflict during page allocation, this additional
+locking is unlikely to affect performance.) Further, the databases must
+share an underlying memory pool so that per-physical-file information
+is updated correctly.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/upgrade.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/ops.html b/bdb/docs/ref/am/ops.html
new file mode 100644
index 00000000000..5daaddd7496
--- /dev/null
+++ b/bdb/docs/ref/am/ops.html
@@ -0,0 +1,36 @@
+<!--$Id: ops.so,v 10.16 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Access method operations</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/renumber.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Access method operations</h1>
+<p>Once a database handle has been created using <a href="../../api_c/db_create.html">db_create</a>, there
+are several standard access method operations. Each of these operations
+is performed using a method that is referenced from the returned handle.
+The operations are as follows:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_close.html">DB-&gt;close</a><dd>Close the database
+<dt><a href="../../api_c/db_cursor.html">DB-&gt;cursor</a><dd>Open a cursor into the database
+<dt><a href="../../api_c/db_del.html">DB-&gt;del</a><dd>Delete a record
+<dt><a href="../../api_c/db_get.html">DB-&gt;get</a><dd>Retrieve a record
+<dt><a href="../../api_c/db_open.html">DB-&gt;open</a><dd>Open a database
+<dt><a href="../../api_c/db_put.html">DB-&gt;put</a><dd>Store a record
+<dt><a href="../../api_c/db_stat.html">DB-&gt;stat</a><dd>Return statistics about the database
+<dt><a href="../../api_c/db_sync.html">DB-&gt;sync</a><dd>Flush the underlying cache
+<dt><a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a><dd>Upgrade a database
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/renumber.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/partial.html b/bdb/docs/ref/am/partial.html
new file mode 100644
index 00000000000..7f3af8f68df
--- /dev/null
+++ b/bdb/docs/ref/am/partial.html
@@ -0,0 +1,134 @@
+<!--$Id: partial.so,v 10.18 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Partial record storage and retrieval</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/stability.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/verify.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Partial record storage and retrieval</h1>
+<p>It is possible to both store and retrieve parts of data items in all
+Berkeley DB access methods. This is done by setting the
+<a href="../../api_c/dbt.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a> flag in the <a href="../../api_c/dbt.html">DBT</a> structure passed to the
+Berkeley DB interface.
+<p>The <a href="../../api_c/dbt.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a> flag is based on the values of two fields
+of the <a href="../../api_c/dbt.html">DBT</a> structure, <b>dlen</b> and <b>doff</b>. The value
+of <b>dlen</b> is the number of bytes of the record in which the
+application is interested. The value of <b>doff</b> is the offset from
+the beginning of the data item where those bytes start.
+<p>For example, if the data item were <b>ABCDEFGHIJKL</b>, a <b>doff</b>
+value of 3 would indicate that the bytes of interest started at
+<b>D</b>, and a <b>dlen</b> value of 4 would indicate that the bytes
+of interest were <b>DEFG</b>.
+<p>When retrieving a data item from a database, the <b>dlen</b> bytes
+starting <b>doff</b> bytes from the beginning of the record are
+returned, as if they comprised the entire record. If any or all of the
+specified bytes do not exist in the record, the retrieval is still
+successful and any existing bytes (and nul bytes for any non-existent
+bytes) are returned.
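+<p>For example, a retrieval of the 4 bytes starting at offset 3 of a data
+item might be sketched as follows (the handle <b>dbp</b> is assumed to be
+open, the key value is illustrative only, and error handling is omitted):
+<p><blockquote><pre>DBT key, data;
+<p>
+memset(&key, 0, sizeof(key));
+key.data = "recordkey";
+key.size = sizeof("recordkey");
+<p>
+/* Ask for at most 4 bytes, starting 3 bytes into the data item. */
+memset(&data, 0, sizeof(data));
+data.flags = DB_DBT_PARTIAL;
+data.doff = 3;
+data.dlen = 4;
+<p>
+/* If the stored data item is ABCDEFGHIJKL, DEFG is returned. */
+(void)dbp-&gt;get(dbp, NULL, &key, &data, 0);
+</pre></blockquote>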
+<p>When storing a data item into the database, the <b>dlen</b> bytes
+starting <b>doff</b> bytes from the beginning of the specified key's
+data record are replaced by the data specified by the <b>data</b> and
+<b>size</b> fields. If <b>dlen</b> is smaller than <b>size</b>, the
+record will grow, and if <b>dlen</b> is larger than <b>size</b>, the
+record will shrink. If the specified bytes do not exist, the record will
+be extended using nul bytes as necessary, and the store call will still
+succeed.
+<p>The following are various examples of the put case for the
+<a href="../../api_c/dbt.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a> flag. In all examples, the initial data item is 20
+bytes in length:
+<p><b>ABCDEFGHIJ0123456789</b>
+<p><ol>
+<p><li><p><blockquote><pre>size = 20
+doff = 0
+dlen = 20
+data = abcdefghijabcdefghij
+<p>
+Result: The 20 bytes at offset 0 are replaced by the 20 bytes of data,
+i.e., the entire record is replaced.
+<p>
+ABCDEFGHIJ0123456789 -&gt; abcdefghijabcdefghij
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 20
+dlen = 0
+data = abcdefghij
+<p>
+Result: The 0 bytes at offset 20 are replaced by the 10 bytes of data,
+i.e., the record is extended by 10 bytes.
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABCDEFGHIJ0123456789abcdefghij
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 10
+dlen = 5
+data = abcdefghij
+<p>
+Result: The 5 bytes at offset 10 are replaced by the 10 bytes of data.
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABCDEFGHIJabcdefghij56789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 10
+dlen = 0
+data = abcdefghij
+<p>
+Result: The 0 bytes at offset 10 are replaced by the 10 bytes of data,
+i.e., 10 bytes are inserted into the record.
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABCDEFGHIJabcdefghij0123456789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 2
+dlen = 15
+data = abcdefghij
+<p>
+Result: The 15 bytes at offset 2 are replaced by the 10 bytes of data.
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABabcdefghij789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 0
+dlen = 0
+data = abcdefghij
+<p>
+Result: The 0 bytes at offset 0 are replaced by the 10 bytes of data,
+i.e., the 10 bytes are inserted at the beginning of the record.
+<p>
+ABCDEFGHIJ0123456789 -&gt; abcdefghijABCDEFGHIJ0123456789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 0
+doff = 0
+dlen = 10
+data = ""
+<p>
+Result: The 10 bytes at offset 0 are replaced by the 0 bytes of data,
+i.e., the first 10 bytes of the record are discarded.
+<p>
+ABCDEFGHIJ0123456789 -&gt; 0123456789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 25
+dlen = 0
+data = abcdefghij
+<p>
+Result: The 0 bytes at offset 25 are replaced by the 10 bytes of data,
+i.e., 10 bytes are inserted into the record past the end of the current
+data (\0 represents a nul byte).
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABCDEFGHIJ0123456789\0\0\0\0\0abcdefghij
+</pre></blockquote>
+</ol>
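+<p>Expressed in terms of the C interface, case 3 above might be sketched
+as follows (again, the handle <b>dbp</b> is assumed to be open, the key
+value is illustrative only, and error handling is omitted):
+<p><blockquote><pre>DBT key, data;
+<p>
+memset(&key, 0, sizeof(key));
+key.data = "recordkey";
+key.size = sizeof("recordkey");
+<p>
+/* Replace the 5 bytes at offset 10 with the 10 bytes "abcdefghij". */
+memset(&data, 0, sizeof(data));
+data.flags = DB_DBT_PARTIAL;
+data.doff = 10;
+data.dlen = 5;
+data.data = "abcdefghij";
+data.size = 10;
+<p>
+/* ABCDEFGHIJ0123456789 becomes ABCDEFGHIJabcdefghij56789. */
+(void)dbp-&gt;put(dbp, NULL, &key, &data, 0);
+</pre></blockquote>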
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/stability.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/verify.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/put.html b/bdb/docs/ref/am/put.html
new file mode 100644
index 00000000000..993dcbeb068
--- /dev/null
+++ b/bdb/docs/ref/am/put.html
@@ -0,0 +1,36 @@
+<!--$Id: put.so,v 10.14 2000/03/18 21:43:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Storing records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/get.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/delete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Storing records</h1>
+<p>The <a href="../../api_c/db_put.html">DB-&gt;put</a> function is the standard interface for storing records into
+the database. In general, <a href="../../api_c/db_put.html">DB-&gt;put</a> takes a key and stores the
+associated data into the database.
+<p>There are a few flags that you can set to customize storage:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_put.html#DB_APPEND">DB_APPEND</a><dd>Simply append the data to the end of the database, treating the database
+much like a simple log. This flag is only valid for the Queue and Recno
+access methods.
+<p><dt><a href="../../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a><dd>Only store the data item if the key does not already appear in the database.
+</dl>
+<p>If the database has been configured to support duplicate records, the
+<a href="../../api_c/db_put.html">DB-&gt;put</a> function will add the new data value at the end of the duplicate
+set. If the database supports sorted duplicates, the new data value is
+inserted at the correct sorted location.
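+<p>For example, a store that refuses to overwrite an existing key might be
+sketched as follows (the handle <b>dbp</b> is assumed to be open, and the
+key and data values are illustrative only):
+<p><blockquote><pre>DBT key, data;
+int ret;
+<p>
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+data.data = "apple";
+data.size = sizeof("apple");
+<p>
+/* Store the pair only if the key is not already present. */
+switch (ret = dbp-&gt;put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+case 0:			/* The record was stored. */
+	break;
+case DB_KEYEXIST:	/* The key already appears; nothing was stored. */
+	break;
+default:
+	dbp-&gt;err(dbp, ret, "DB-&gt;put");
+	break;
+}</pre></blockquote>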
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/get.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/delete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/stability.html b/bdb/docs/ref/am/stability.html
new file mode 100644
index 00000000000..b5f6d23864d
--- /dev/null
+++ b/bdb/docs/ref/am/stability.html
@@ -0,0 +1,49 @@
+<!--$Id: stability.so,v 10.20 2000/12/13 16:48:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Cursor Stability</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/am/curclose.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/partial.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Cursor Stability</h1>
+<p>In the absence of locking, no guarantees are made about the stability of
+cursors in different processes or threads. However, the Btree, Queue
+and Recno access methods guarantee that cursor operations, interspersed
+with other cursor or non-cursor operations in the same thread of control,
+will always return keys in order and will return each non-deleted key/data
+pair exactly once. Because the Hash access method uses a dynamic hashing
+algorithm, it cannot guarantee any form of stability in the presence of
+inserts and deletes unless locking is performed.
+<p>If locking was specified when the Berkeley DB file was opened, but transactions
+are not in effect, the access methods provide repeatable reads with
+respect to the cursor. That is, a <a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a> call on the cursor
+is guaranteed to return the same record as was returned on the last call
+to the cursor.
+<p>With the exception of the Queue access method, in the presence of
+transactions, all access method calls between a call to <a href="../../api_c/txn_begin.html">txn_begin</a>
+and a call to <a href="../../api_c/txn_abort.html">txn_abort</a> or <a href="../../api_c/txn_commit.html">txn_commit</a> provide degree 3
+consistency (serializable transactions).
+<p>The Queue access method permits phantom records to appear between calls.
+That is, deleted records are not locked; therefore, another transaction may
+replace a deleted record between two calls to retrieve it. The record would
+not appear in the first call but would be seen by the second call.
+<p>For all access methods, a cursor scan of the database performed within
+the context of a transaction is guaranteed to return each key/data pair
+once and only once, except in the following case. If, while performing
+a cursor scan using the Hash access method, the transaction performing
+the scan inserts a new pair into the database, it is possible that duplicate
+key/data pairs will be returned.
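+<p>For example, a serializable scan of a database might be sketched as
+follows (a minimal outline only; the environment and database handles are
+assumed to have been opened with transaction support, and the transaction
+calls shown are the C interfaces referenced above):
+<p><blockquote><pre>int
+scan(dbenv, dbp)
+	DB_ENV *dbenv;
+	DB *dbp;
+{
+	DBC *dbcp;
+	DBT key, data;
+	DB_TXN *tid;
+	int ret;
+<p>
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+<p>
+	/* Begin the transaction and open a cursor within it. */
+	if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
+		return (ret);
+	if ((ret = dbp-&gt;cursor(dbp, tid, &dbcp, 0)) != 0) {
+		(void)txn_abort(tid);
+		return (ret);
+	}
+<p>
+	/* Each non-deleted key/data pair is returned exactly once. */
+	while ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+		;
+<p>
+	(void)dbcp-&gt;c_close(dbcp);
+	if (ret == DB_NOTFOUND)
+		return (txn_commit(tid, 0));
+	(void)txn_abort(tid);
+	return (ret);
+}</pre></blockquote>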
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/curclose.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/partial.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/stat.html b/bdb/docs/ref/am/stat.html
new file mode 100644
index 00000000000..3042ccfee00
--- /dev/null
+++ b/bdb/docs/ref/am/stat.html
@@ -0,0 +1,36 @@
+<!--$Id: stat.so,v 10.17 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database statistics</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/sync.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database statistics</h1>
+<p>The <a href="../../api_c/db_stat.html">DB-&gt;stat</a> function is the standard interface for obtaining database
+statistics. Generally, <a href="../../api_c/db_stat.html">DB-&gt;stat</a> returns a set of statistics
+about the underlying database, e.g., the number of key/data pairs in
+the database, how the database was originally configured, and so on.
+<p>There are two flags that you can set to customize the returned statistics:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_stat.html#DB_CACHED_COUNTS">DB_CACHED_COUNTS</a><dd>Request an approximate key and key/data pair count. As obtaining an
+exact count can be very performance intensive for large databases,
+it is possible to request a previously cached count. Obviously, the
+cached count is only an approximate count, and may be out-of-date.
+<p><dt><a href="../../api_c/db_stat.html#DB_RECORDCOUNT">DB_RECORDCOUNT</a><dd>If the database is a Queue or Recno database, or a Btree database that
+was configured so that it is possible to search it by logical record
+number, return only a count of the records in the database.
+</dl>
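+<p>For example, a request for the cached counts of a Btree database might
+be sketched as follows (the handle <b>dbp</b> is assumed to be open; the
+structure and field names shown are those used for Btree databases, and
+the returned structure must be freed by the caller):
+<p><blockquote><pre>DB_BTREE_STAT *sp;
+<p>
+/* Request the (possibly cached) key and key/data pair counts. */
+if (dbp-&gt;stat(dbp, &sp, NULL, DB_CACHED_COUNTS) == 0) {
+	printf("keys: %lu, key/data pairs: %lu\n",
+	    (unsigned long)sp-&gt;bt_nkeys, (unsigned long)sp-&gt;bt_ndata);
+	free(sp);
+}</pre></blockquote>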
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/sync.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/sync.html b/bdb/docs/ref/am/sync.html
new file mode 100644
index 00000000000..3d1d61e6231
--- /dev/null
+++ b/bdb/docs/ref/am/sync.html
@@ -0,0 +1,38 @@
+<!--$Id: sync.so,v 10.15 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Flushing the database cache</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/delete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Flushing the database cache</h1>
+<p>The <a href="../../api_c/db_sync.html">DB-&gt;sync</a> function is the standard interface for flushing all modified
+records from the database cache to disk.
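+<p>A flush is a single call against an open handle, for example (a sketch
+only; error reporting is simplified):
+<p><blockquote><pre>int ret;
+<p>
+/* Flush all modified records for this handle from the cache to disk. */
+if ((ret = dbp-&gt;sync(dbp, 0)) != 0)
+	dbp-&gt;err(dbp, ret, "DB-&gt;sync");
+</pre></blockquote>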
+<p><b>It is important to understand that flushing cached information
+to disk only minimizes the window of opportunity for corrupted data; it
+does not eliminate the possibility.</b>
+<p>While unlikely, it is possible for database corruption to happen if a
+system or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must either:
+<ul type=disc>
+<li>Use transactions and logging with automatic recovery.
+<li>Use logging and application-specific recovery.
+<li>Edit a copy of the database, and, once all applications
+using the database have successfully called <a href="../../api_c/db_close.html">DB-&gt;close</a>, use
+system operations (e.g., the POSIX rename system call) to atomically
+replace the original database with the updated copy.
+</ul>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/delete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/upgrade.html b/bdb/docs/ref/am/upgrade.html
new file mode 100644
index 00000000000..21fa87a1eab
--- /dev/null
+++ b/bdb/docs/ref/am/upgrade.html
@@ -0,0 +1,50 @@
+<!--$Id: upgrade.so,v 10.14 2000/12/21 18:37:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Method Operations</dl></h3></td>
+<td width="1%"><a href="../../ref/am/opensub.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/get.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading databases</h1>
+<p>When upgrading to a new release of Berkeley DB, it may be necessary to upgrade
+the on-disk format of already-created database files. <b>Berkeley DB
+database upgrades are done in place, and so are potentially
+destructive.</b> This means that if the system crashes during the upgrade
+procedure, or if the upgrade procedure runs out of disk space, the
+databases may be left in an inconsistent and unrecoverable state. To
+guard against failure, the procedures outlined in
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>
+should be carefully followed. If you are not performing catastrophic
+archival as part of your application upgrade process, you should at
+least copy your database to archival media, verify that your archival
+media is error-free and readable, and ensure that copies of your backups
+are stored off-site!
+<p>The actual database upgrade is done using the <a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+method, or by dumping the database using the old version of the Berkeley DB
+software and reloading it using the current version.
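+<p>An in-place upgrade might be sketched as follows (the file name
+<b>example.db</b> is illustrative only; the handle is created solely for
+the upgrade and is not otherwise opened):
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+/* Create a handle and upgrade the databases in the file in place. */
+if ((ret = db_create(&dbp, NULL, 0)) == 0) {
+	if ((ret = dbp-&gt;upgrade(dbp, "example.db", 0)) != 0)
+		dbp-&gt;err(dbp, ret, "DB-&gt;upgrade: example.db");
+	(void)dbp-&gt;close(dbp, 0);
+}</pre></blockquote>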
+<p>After an upgrade, Berkeley DB applications must be recompiled to use the new
+Berkeley DB library before they can access an upgraded database.
+<b>There is no guarantee that applications compiled against
+previous releases of Berkeley DB will work correctly with an upgraded database
+format. Nor is there any guarantee that applications compiled against
+newer releases of Berkeley DB will work correctly with the previous database
+format.</b> We do guarantee that any archived database may be upgraded
+using a current Berkeley DB software release and the <a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+method, and there is no need to step-wise upgrade the database using
+intermediate releases of Berkeley DB. Sites should consider archiving
+appropriate copies of their application or application sources if they
+may need to access archived databases without first upgrading them.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/opensub.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/get.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am/verify.html b/bdb/docs/ref/am/verify.html
new file mode 100644
index 00000000000..5c975dd8c58
--- /dev/null
+++ b/bdb/docs/ref/am/verify.html
@@ -0,0 +1,50 @@
+<!--$Id: verify.so,v 10.3 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database verification and salvage</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a> <a name="4"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am/partial.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database verification and salvage</h1>
+<p>The <a href="../../api_c/db_verify.html">DB-&gt;verify</a> method is the standard interface for verifying
+that a file, and any databases it may contain, are uncorrupted. In
+addition, the method may optionally be called with a file stream
+argument to which all key/data pairs found in the database are output.
+There are two modes for finding key/data pairs to be output:
+<p><ol>
+<p><li>If the <a href="../../api_c/db_verify.html#DB_SALVAGE">DB_SALVAGE</a> flag is specified, the key/data pairs in the
+database are output. When run in this mode, the database is assumed to
+be largely uncorrupted. For example, the <a href="../../api_c/db_verify.html">DB-&gt;verify</a> method will
+search for pages that are no longer linked into the database, and will
+output key/data pairs from such pages. However, key/data items that
+have been marked as deleted in the database will not be output, as the
+page structures are generally trusted in this mode.
+<p><li>If both the <a href="../../api_c/db_verify.html#DB_SALVAGE">DB_SALVAGE</a> and <a href="../../api_c/db_verify.html#DB_AGGRESSIVE">DB_AGGRESSIVE</a> flags are
+specified, all possible key/data pairs are output. When run in this mode,
+the database is assumed to be seriously corrupted. For example, key/data
+pairs that have been deleted will re-appear in the output. In addition,
+because pages may have been subsequently re-used and modified during
+normal database operations after the key/data pairs were deleted, it is
+not uncommon for apparently corrupted key/data pairs to be output in this
+mode, even when there is no corruption in the underlying database. The
+output will almost always have to be edited by hand or other means before
+the data is ready for re-load into another database. We recommend that
+<a href="../../api_c/db_verify.html#DB_SALVAGE">DB_SALVAGE</a> be tried first, and <a href="../../api_c/db_verify.html#DB_AGGRESSIVE">DB_AGGRESSIVE</a> only tried
+if the output from that first attempt is obviously missing data items or
+the data is sufficiently valuable that human review of the output is
+preferable to any kind of data loss.
+</ol>
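+<p>For example, a salvage pass that writes any key/data pairs it finds to
+a flat-text file might be sketched as follows (the file names are
+illustrative only, and error handling is omitted; see the <a href="../../api_c/db_verify.html">DB-&gt;verify</a>
+documentation for the state of the handle after the call):
+<p><blockquote><pre>DB *dbp;
+FILE *fp;
+<p>
+/* Create a handle and salvage key/data pairs into a flat-text file. */
+if (db_create(&dbp, NULL, 0) == 0 &&
+    (fp = fopen("salvage.out", "w")) != NULL) {
+	(void)dbp-&gt;verify(dbp, "example.db", NULL, fp, DB_SALVAGE);
+	(void)fclose(fp);
+}</pre></blockquote>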
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/partial.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/bt_compare.html b/bdb/docs/ref/am_conf/bt_compare.html
new file mode 100644
index 00000000000..bf824ca3597
--- /dev/null
+++ b/bdb/docs/ref/am_conf/bt_compare.html
@@ -0,0 +1,85 @@
+<!--$Id: bt_compare.so,v 10.20 2000/09/10 13:42:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Btree comparison</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/malloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_prefix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Btree comparison</h1>
+<p>The Btree data structure is a sorted, balanced tree structure storing
+associated key/data pairs. By default, the sort order is lexicographical,
+with shorter keys collating before longer keys. The user can specify the
+sort order for the Btree by using the <a href="../../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a> function.
+<p>Sort routines are passed pointers to keys as arguments. The keys are
+represented as <a href="../../api_c/dbt.html">DBT</a> structures. The routine must return an integer
+less than, equal to, or greater than zero if the first argument is
+considered to be respectively less than, equal to, or greater than the
+second argument. The only fields that the routines may examine in the
+<a href="../../api_c/dbt.html">DBT</a> structures are the <b>data</b> and <b>size</b> fields.
+<p>An example routine that might be used to sort integer keys in the database
+is as follows:
+<p><blockquote><pre>int
+compare_int(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ int ai, bi;
+<p>
+ /*
+ * Returns:
+ * &lt; 0 if a &lt; b
+ * = 0 if a = b
+ * &gt; 0 if a &gt; b
+ */
+ memcpy(&ai, a-&gt;data, sizeof(int));
+ memcpy(&bi, b-&gt;data, sizeof(int));
+ return (ai - bi);
+}
+</pre></blockquote>
+<p>Note that the data must first be copied into memory that is appropriately
+aligned, as Berkeley DB does not guarantee any kind of alignment of the
+underlying data, including for comparison routines. When writing
+comparison routines, remember that databases created on machines of
+different architectures may have different integer byte orders, for which
+your code may need to compensate.
+<p>An example routine that might be used to sort keys based on the first
+five bytes of the key (ignoring any subsequent bytes) is as follows:
+<p><blockquote><pre>int
+compare_dbt(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+	int len;
+	u_char *p1, *p2;
+<p>
+ /*
+ * Returns:
+ * &lt; 0 if a &lt; b
+ * = 0 if a = b
+ * &gt; 0 if a &gt; b
+ */
+ for (p1 = a-&gt;data, p2 = b-&gt;data, len = 5; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return (0);
+}</pre></blockquote>
+<p>All comparison functions must cause the keys in the database to be
+well-ordered. The most important implication of being well-ordered is
+that the key relations must be transitive, that is, if key A is less
+than key B, and key B is less than key C, then the comparison routine
+must also return that key A is less than key C. In addition, comparisons
+will only be able to return 0 when comparing full length keys; partial
+key comparisons must always return a result less than or greater than 0.
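+<p>To have a Btree use such a routine, it must be configured before the
+database is opened, for example (a sketch using the compare_int routine
+above; the file name is illustrative only and error handling is omitted):
+<p><blockquote><pre>DB *dbp;
+<p>
+(void)db_create(&dbp, NULL, 0);
+<p>
+/* The comparison function must be set before the database is opened. */
+(void)dbp-&gt;set_bt_compare(dbp, compare_int);
+(void)dbp-&gt;open(dbp, "example.db", NULL, DB_BTREE, DB_CREATE, 0664);
+</pre></blockquote>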
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/malloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_prefix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/bt_minkey.html b/bdb/docs/ref/am_conf/bt_minkey.html
new file mode 100644
index 00000000000..f80ecf1dfb8
--- /dev/null
+++ b/bdb/docs/ref/am_conf/bt_minkey.html
@@ -0,0 +1,53 @@
+<!--$Id: bt_minkey.so,v 10.14 2000/03/18 21:43:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Minimum keys per page</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/bt_prefix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_recnum.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Minimum keys per page</h1>
+<p>The number of keys stored on each page affects the size of a Btree and
+how it is maintained. Therefore, it also affects the retrieval and search
+performance of the tree. For each Btree, Berkeley DB computes a maximum key
+and data size. This size is a function of the page size and the fact that
+at least two key/data pairs must fit on any Btree page. Whenever key or
+data items exceed the calculated size, they are stored on overflow pages
+instead of in the standard Btree leaf pages.
+<p>Applications may use the <a href="../../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a> function to change the minimum
+number of keys that must fit on a Btree page from two to another value.
+Altering this value in turn alters the on-page maximum size, and can be
+used to force key and data items which would normally be stored in the
+Btree leaf pages onto overflow pages.
+<p>Some data sets can benefit from this tuning. For example, consider an
+application using large page sizes, with a data set almost entirely
+consisting of small key and data items, but with a few large items. By
+setting the minimum number of keys that must fit on a page, the
+application can force the outsized items to be stored on overflow pages.
+That in turn can potentially keep the tree more compact, that is, with
+fewer internal levels to traverse during searches.
+<p>The following calculation is similar to the one performed by the Btree
+implementation. (The <b>minimum_keys</b> value is multiplied by 2
+because each key/data pair requires 2 slots on a Btree page.)
+<p><blockquote><pre>maximum_size = page_size / (minimum_keys * 2)</pre></blockquote>
+<p>Using this calculation, if the page size is 8KB and the default
+<b>minimum_keys</b> value of 2 is used, then any key or data items
+larger than 2KB will be forced to an overflow page. If an application
+were to specify a <b>minimum_keys</b> value of 100, then any key or data
+items larger than roughly 40 bytes would be forced to overflow pages.
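+<p>For example, an application using 8KB pages that wanted items larger
+than roughly 40 bytes forced to overflow pages might configure the
+database as follows (a sketch only; the file name is illustrative and
+error handling is omitted):
+<p><blockquote><pre>DB *dbp;
+<p>
+(void)db_create(&dbp, NULL, 0);
+<p>
+/* 8KB pages, at least 100 keys per page: 8192 / (100 * 2) is roughly 40 bytes. */
+(void)dbp-&gt;set_pagesize(dbp, 8192);
+(void)dbp-&gt;set_bt_minkey(dbp, 100);
+(void)dbp-&gt;open(dbp, "example.db", NULL, DB_BTREE, DB_CREATE, 0664);
+</pre></blockquote>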
+<p>It is important to remember that accesses to overflow pages do not perform
+as well as accesses to the standard Btree leaf pages, and so setting the
+value incorrectly can result in overusing overflow pages and decreasing
+the application's overall performance.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/bt_prefix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_recnum.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/bt_prefix.html b/bdb/docs/ref/am_conf/bt_prefix.html
new file mode 100644
index 00000000000..621de75fa6b
--- /dev/null
+++ b/bdb/docs/ref/am_conf/bt_prefix.html
@@ -0,0 +1,66 @@
+<!--$Id: bt_prefix.so,v 10.17 2000/07/04 18:28:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Btree prefix comparison</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/bt_compare.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_minkey.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Btree prefix comparison</h1>
+<p>The Berkeley DB Btree implementation maximizes the number of keys that can be
+stored on an internal page by storing only as many bytes of each key as
+are necessary to distinguish it from adjacent keys. The prefix comparison
+routine is what determines this minimum number of bytes (i.e., the length
+of the unique prefix), that must be stored. A prefix comparison function
+for the Btree can be specified by calling <a href="../../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>.
+<p>The prefix comparison routine must be compatible with the overall
+comparison function of the Btree, since what distinguishes any two keys
+depends entirely on the function used to compare them. This means that
+if a prefix comparison routine is specified by the application, a
+compatible overall comparison routine must also have been specified.
+<p>Prefix comparison routines are passed pointers to keys as arguments. The
+keys are represented as <a href="../../api_c/dbt.html">DBT</a> structures. The prefix comparison
+function must return the number of bytes of the second key argument that
+are necessary to determine if it is greater than the first key argument.
+If the keys are equal, the length of the second key should be returned.
+The only fields that the routines may examine in the <a href="../../api_c/dbt.html">DBT</a>
+structures are the <b>data</b> and <b>size</b> fields.
+<p>An example prefix comparison routine follows:
+<p><blockquote><pre>u_int32_t
+compare_prefix(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ size_t cnt, len;
+ u_int8_t *p1, *p2;
+<p>
+ cnt = 1;
+ len = a-&gt;size &gt; b-&gt;size ? b-&gt;size : a-&gt;size;
+ for (p1 =
+ a-&gt;data, p2 = b-&gt;data; len--; ++p1, ++p2, ++cnt)
+ if (*p1 != *p2)
+ return (cnt);
+ /*
+ * They match up to the smaller of the two sizes.
+ * Collate the longer after the shorter.
+ */
+ if (a-&gt;size &lt; b-&gt;size)
+ return (a-&gt;size + 1);
+ if (b-&gt;size &lt; a-&gt;size)
+ return (b-&gt;size + 1);
+ return (b-&gt;size);
+}</pre></blockquote>
+<p>The usefulness of this functionality is data dependent, but in some data
+sets can produce significantly reduced tree sizes and faster search times.
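+<p>As with the overall comparison routine, the prefix routine must be
+configured before the database is opened, for example (a sketch using the
+compare_prefix routine above, which matches the default lexicographical
+comparison; the file name is illustrative only):
+<p><blockquote><pre>DB *dbp;
+<p>
+(void)db_create(&dbp, NULL, 0);
+<p>
+/* The prefix routine must be compatible with the comparison routine. */
+(void)dbp-&gt;set_bt_prefix(dbp, compare_prefix);
+(void)dbp-&gt;open(dbp, "example.db", NULL, DB_BTREE, DB_CREATE, 0664);
+</pre></blockquote>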
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/bt_compare.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_minkey.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/bt_recnum.html b/bdb/docs/ref/am_conf/bt_recnum.html
new file mode 100644
index 00000000000..cdf8970e553
--- /dev/null
+++ b/bdb/docs/ref/am_conf/bt_recnum.html
@@ -0,0 +1,34 @@
+<!--$Id: bt_recnum.so,v 10.18 2000/12/04 18:05:41 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving Btree records by number</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/bt_minkey.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_ffactor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving Btree records by number</h1>
+<p>The Btree access method optionally supports retrieval by logical record
+number. To configure a Btree to support record numbers, call the
+<a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> function with the <a href="../../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> flag.
+<p>Configuring a Btree for record numbers should not be done lightly.
+While often useful, it requires that storing items into the database
+be single-threaded, which can severely impact application throughput.
+Generally it should be avoided in trees with a need for high write
+concurrency.
+<p>To determine a key's record number, use the <a href="../../api_c/dbc_get.html#DB_GET_RECNO">DB_GET_RECNO</a> flag
+to the <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> function.
+<p>To retrieve by record number, use the <a href="../../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a> flag to the
+<a href="../../api_c/db_get.html">DB-&gt;get</a> and <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> functions.
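+<p>For example, configuring record numbers and then retrieving the pair
+stored at logical record 42 might be sketched as follows (the file name
+and record number are illustrative only, and error handling is omitted):
+<p><blockquote><pre>DB *dbp;
+DBT key, data;
+db_recno_t recno;
+<p>
+(void)db_create(&dbp, NULL, 0);
+<p>
+/* Record number support must be configured before the database is opened. */
+(void)dbp-&gt;set_flags(dbp, DB_RECNUM);
+(void)dbp-&gt;open(dbp, "example.db", NULL, DB_BTREE, DB_CREATE, 0664);
+<p>
+/* Retrieve the key/data pair stored at logical record number 42. */
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+recno = 42;
+key.data = &recno;
+key.size = sizeof(recno);
+(void)dbp-&gt;get(dbp, NULL, &key, &data, DB_SET_RECNO);
+</pre></blockquote>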
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/bt_minkey.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_ffactor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/byteorder.html b/bdb/docs/ref/am_conf/byteorder.html
new file mode 100644
index 00000000000..e0eef8a45f0
--- /dev/null
+++ b/bdb/docs/ref/am_conf/byteorder.html
@@ -0,0 +1,38 @@
+<!--$Id: byteorder.so,v 10.16 2000/03/18 21:43:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting a byte order</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/cachesize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/dup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting a byte order</h1>
+<p>Database files created by Berkeley DB can be in either little- or
+big-endian format.
+<p>The byte order used for the underlying database can be specified by
+calling the <a href="../../api_c/db_set_lorder.html">DB-&gt;set_lorder</a> function. If no order is selected, the
+native format of the machine on which the database is created will be
+used.
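+<p>For example, to force a database to be created in big-endian format
+regardless of the creating machine (a sketch only; the file name is
+illustrative and error handling is omitted):
+<p><blockquote><pre>DB *dbp;
+<p>
+(void)db_create(&dbp, NULL, 0);
+<p>
+/* 4,321 selects big-endian order; 1,234 selects little-endian order. */
+(void)dbp-&gt;set_lorder(dbp, 4321);
+(void)dbp-&gt;open(dbp, "example.db", NULL, DB_BTREE, DB_CREATE, 0664);
+</pre></blockquote>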
+<p>Berkeley DB databases are architecture independent, and any format database can
+be used on a machine with a different native format. In this case, because
+each page that is read into or written from the cache must be converted
+to or from the host format, databases with non-native formats will
+incur a performance penalty for the run-time conversion.
+<p><b>It is important to note that the Berkeley DB access methods do no data
+conversion for application specified data. Key/data pairs written on a
+little-endian format architecture will be returned to the application
+exactly as they were written when retrieved on a big-endian format
+architecture.</b>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/cachesize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/dup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/cachesize.html b/bdb/docs/ref/am_conf/cachesize.html
new file mode 100644
index 00000000000..d0534767fb0
--- /dev/null
+++ b/bdb/docs/ref/am_conf/cachesize.html
@@ -0,0 +1,86 @@
+<!--$Id: cachesize.so,v 10.18 2000/03/18 21:43:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting a cache size</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/pagesize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/byteorder.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting a cache size</h1>
+<p>The size of the cache used for the underlying database can be specified
+by calling the <a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> function.
+Choosing a cache size is, unfortunately, an art. Your cache must be at
+least large enough for your working set plus some overlap for unexpected
+situations.
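+<p>For example, a 64MB cache allocated as a single contiguous region might
+be specified as follows (a sketch only; the cache must be configured
+before the database is opened, the sizes are illustrative, and error
+handling is omitted):
+<p><blockquote><pre>DB *dbp;
+<p>
+(void)db_create(&dbp, NULL, 0);
+<p>
+/* 0 gigabytes plus 64 megabytes, in a single cache region. */
+(void)dbp-&gt;set_cachesize(dbp, 0, 64 * 1024 * 1024, 1);
+(void)dbp-&gt;open(dbp, "example.db", NULL, DB_BTREE, DB_CREATE, 0664);
+</pre></blockquote>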
+<p>When using the Btree access method, you must have a cache big enough for
+the minimum working set for a single access. This will include a root
+page, one or more internal pages (depending on the depth of your tree),
+and a leaf page. If your cache is any smaller than that, each new page
+will force out the least-recently-used page, and Berkeley DB will re-read the
+root page of the tree anew on each database request.
+<p>If your keys are of moderate size (a few tens of bytes) and your pages
+are on the order of 4K to 8K, the tree for most Btree applications
+will be only three levels deep. For
+example, using 20-byte keys with 20 bytes of data associated with each
+key, an 8KB page can hold roughly 400 keys and 200 key/data pairs, so a
+fully populated three-level Btree will hold 32 million key/data pairs,
+and a tree with only a 50% page-fill factor will still hold 16 million
+key/data pairs. We rarely expect trees to exceed five levels, although
+Berkeley DB will support trees up to 255 levels.
+<p>The rule-of-thumb is that cache is good, and more cache is better.
+Generally, applications benefit from increasing the cache size up to a
+point, at which the performance will stop improving as the cache size
+increases. When this point is reached, one of two things has happened:
+either the cache is large enough that the application almost never
+has to retrieve information from disk, or your application is doing
+truly random accesses, so increasing the size of the cache doesn't
+significantly increase the odds of finding the next requested information
+in the cache. The latter is fairly rare -- almost all applications show
+some form of locality of reference.
+<p>That said, it is important not to increase your cache size beyond the
+capabilities of your system, as that will result in reduced performance.
+Under many operating systems, tying down enough virtual memory will cause
+your memory and potentially your program to be swapped. This is
+especially likely on systems without unified OS buffer caches and virtual
+memory spaces, as the buffer cache was allocated at boot time and so
+cannot be adjusted based on application requests for large amounts of
+virtual memory.
+<p>For example, even if accesses are truly random within a Btree, your
+access pattern will favor internal pages over leaf pages, so your cache
+should be large enough to hold all internal pages. In the steady state,
+this requires at most one I/O per operation to retrieve the appropriate
+leaf page.
+<p>You can use the <a href="../../utility/db_stat.html">db_stat</a> utility to monitor the effectiveness of
+your cache. The following output is excerpted from the output of that
+utility's <b>-m</b> option:
+<p><blockquote><pre>prompt: db_stat -m
+131072 Cache size (128K).
+4273 Requested pages found in the cache (97%).
+134 Requested pages not found in the cache.
+18 Pages created in the cache.
+116 Pages read into the cache.
+93 Pages written from the cache to the backing file.
+5 Clean pages forced from the cache.
+13 Dirty pages forced from the cache.
+0 Dirty buffers written by trickle-sync thread.
+130 Current clean buffer count.
+4 Current dirty buffer count.
+</pre></blockquote>
+<p>The statistics for this cache say that there have been 4,273 requests of
+the cache, and only 116 of those requests required an I/O from disk. This
+means that the cache is working well, yielding a 97% cache hit rate. The
+<a href="../../utility/db_stat.html">db_stat</a> utility will present these statistics both for the cache
+as a whole and for each file within the cache separately.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/pagesize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/byteorder.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/dup.html b/bdb/docs/ref/am_conf/dup.html
new file mode 100644
index 00000000000..eec5302cb2f
--- /dev/null
+++ b/bdb/docs/ref/am_conf/dup.html
@@ -0,0 +1,71 @@
+<!--$Id: dup.so,v 10.21 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Duplicate data items</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/byteorder.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/malloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Duplicate data items</h1>
+<p>The Btree and Hash access methods support the creation of multiple data
+items for a single key item. By default, multiple data items are not
+permitted, and each database store operation will overwrite any previous
+data item for that key. To configure Berkeley DB for duplicate data items, call
+the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> function with the <a href="../../api_c/db_set_flags.html#DB_DUP">DB_DUP</a> flag.
+<p>By default, Berkeley DB stores duplicates in the order in which they were added,
+that is, each new duplicate data item will be stored after any already
+existing data items. This default behavior can be overridden by using
+the <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> function and one of the <a href="../../api_c/dbc_put.html#DB_AFTER">DB_AFTER</a>, <a href="../../api_c/dbc_put.html#DB_BEFORE">DB_BEFORE</a>,
+<a href="../../api_c/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a>, or <a href="../../api_c/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a> flags. Alternatively, Berkeley DB
+may be configured to sort duplicate data items as described below.
+<p>When stepping through the database sequentially, duplicate data items will
+be returned individually, as a key/data pair, where the key item only
+changes after the last duplicate data item has been returned. For this
+reason, duplicate data items cannot be accessed using the
+<a href="../../api_c/db_get.html">DB-&gt;get</a> function, as it always returns the first of the duplicate data
+items. Duplicate data items should be retrieved using the Berkeley DB cursor
+interface, <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>.
+<p>There is an interface flag that permits applications to request the
+following data item only if it <b>is</b> a duplicate data item of the
+current entry; see <a href="../../api_c/dbc_get.html#DB_NEXT_DUP">DB_NEXT_DUP</a> for more information. There is an
+interface flag that permits applications to request the following data
+item only if it <b>is not</b> a duplicate data item of the current
+entry; see <a href="../../api_c/dbc_get.html#DB_NEXT_NODUP">DB_NEXT_NODUP</a> and <a href="../../api_c/dbc_get.html#DB_PREV_NODUP">DB_PREV_NODUP</a> for more
+information.
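+<p>For example, the following sketch steps through the duplicate data
+items for a single key using a cursor (the handle <b>dbp</b> is assumed to
+be open with <a href="../../api_c/db_set_flags.html#DB_DUP">DB_DUP</a> configured, the key value is illustrative only,
+and error handling is omitted):
+<p><blockquote><pre>DBC *dbcp;
+DBT key, data;
+int ret;
+<p>
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+<p>
+(void)dbp-&gt;cursor(dbp, NULL, &dbcp, 0);
+<p>
+/* Position on the first data item for the key... */
+if ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_SET)) == 0)
+	/* ...then step through the remaining duplicates, if any. */
+	while ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_NEXT_DUP)) == 0)
+		;
+(void)dbcp-&gt;c_close(dbcp);
+</pre></blockquote>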
+<p>It is also possible to maintain duplicate records in sorted order. Sorting
+duplicates will significantly increase performance when searching them
+and performing logical joins, common operations when creating secondary
+indexes. To configure Berkeley DB to sort duplicate data items, the application
+must call the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> function with the <a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag (in
+addition to the <a href="../../api_c/db_set_flags.html#DB_DUP">DB_DUP</a> flag). In addition, a custom sorting
+function may be specified using the <a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> function. If the
+<a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag is given, but no comparison routine is specified,
+then Berkeley DB defaults to the same lexicographical sorting used for Btree
+keys, with shorter items collating before longer items.
+<p>If the duplicate data items are unsorted, applications may store identical
+duplicate data items, or, for those that just like the way it sounds,
+<i>duplicate duplicates</i>.
+<p><b>In this release it is an error to attempt to store identical
+duplicate data items when duplicates are being stored in a sorted order.</b>
+This restriction is expected to be lifted in a future release. There is
+an interface flag that permits applications to disallow storing duplicate
+data items when the database has been configured for sorted duplicates;
+see <a href="../../api_c/db_put.html#DB_NODUPDATA">DB_NODUPDATA</a> for more information. Applications not wanting
+to permit duplicate duplicates in databases configured for sorted
+duplicates should begin using the <a href="../../api_c/db_put.html#DB_NODUPDATA">DB_NODUPDATA</a> flag immediately.
+<p>For further information on how searching and insertion behaves in the
+presence of duplicates (sorted or not), see the <a href="../../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../../api_c/db_put.html">DB-&gt;put</a>, <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> and <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> documentation.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/byteorder.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/malloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/extentsize.html b/bdb/docs/ref/am_conf/extentsize.html
new file mode 100644
index 00000000000..15d940c152d
--- /dev/null
+++ b/bdb/docs/ref/am_conf/extentsize.html
@@ -0,0 +1,38 @@
+<!--$Id: extentsize.so,v 1.2 2000/11/20 21:45:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting a Queue extent size</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/recno.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/re_source.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting a Queue extent size</h1>
+<p>In Queue databases, records are allocated sequentially and directly
+mapped to an offset within the file storage for the database. As
+records are deleted from the Queue, pages will become empty and will
+not be reused in normal queue operations. To facilitate the reclamation
+of disk space, a Queue may be partitioned into extents. Each extent is
+kept in a separate physical file. Extent files are automatically
+created as needed and destroyed when they are emptied of records.
+<p>The extent size specifies the number of pages that make up each extent.
+By default, if no extent size is specified, the Queue resides in a
+single file and disk space is not reclaimed. In choosing an extent size,
+there is a tradeoff between the amount of disk space used and the
+overhead of creating and deleting files. If the extent size is too
+small, the system will pay a performance penalty, creating and deleting
+files frequently. In addition, if the active part of the queue spans
+many files, all those files will need to be open at the same time,
+consuming system and process file resources.
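+<p>Assuming the <b>DB-&gt;set_q_extentsize</b> method available in this
+release, an extent size might be configured as follows (a sketch only;
+the record length, extent size, and file name are illustrative, and error
+handling is omitted):
+<p><blockquote><pre>DB *dbp;
+<p>
+(void)db_create(&dbp, NULL, 0);
+<p>
+/* Fixed-length 64-byte records; each extent file holds 1,000 pages. */
+(void)dbp-&gt;set_re_len(dbp, 64);
+(void)dbp-&gt;set_q_extentsize(dbp, 1000);
+(void)dbp-&gt;open(dbp, "example.db", NULL, DB_QUEUE, DB_CREATE, 0664);
+</pre></blockquote>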
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/recno.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/re_source.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/h_ffactor.html b/bdb/docs/ref/am_conf/h_ffactor.html
new file mode 100644
index 00000000000..6c30f0fc39e
--- /dev/null
+++ b/bdb/docs/ref/am_conf/h_ffactor.html
@@ -0,0 +1,31 @@
+<!--$Id: h_ffactor.so,v 10.11 2000/03/18 21:43:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Page fill factor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/bt_recnum.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_hash.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Page fill factor</h1>
+<p>The density, or page fill factor, is an approximation of the number of
+keys allowed to accumulate in any one bucket, determining when the hash
+table grows or shrinks. If you know the average sizes of the keys and
+data in your data set, setting the fill factor can enhance performance.
+A reasonable rule for computing the fill factor is:
+<p><blockquote><pre>(pagesize - 32) / (average_key_size + average_data_size + 8)</pre></blockquote>
+<p>The desired density within the hash table can be specified by calling
+the <a href="../../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a> function. If no density is specified, one will
+be selected dynamically as pages are filled.
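+<p>For example, with 8KB pages, 20-byte keys and 128-byte data items, the
+rule above yields (8192 - 32) / (20 + 128 + 8), or roughly 52 keys per
+bucket. A minimal sketch follows; the page, key and data sizes are
+assumptions chosen for the example, and error handling is abbreviated.
+<p><blockquote><pre>DB *dbp;
+u_int32_t ffactor;
+int ret;
+
+/* 8KB pages, ~20-byte keys, ~128-byte data items (assumed values). */
+ffactor = (8192 - 32) / (20 + 128 + 8);         /* Roughly 52. */
+
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+if ((ret = dbp-&gt;set_pagesize(dbp, 8192)) != 0 ||
+    (ret = dbp-&gt;set_h_ffactor(dbp, ffactor)) != 0) {
+        dbp-&gt;err(dbp, ret, "configuring fill factor");
+        return (ret);
+}</pre></blockquote>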
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/bt_recnum.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_hash.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/h_hash.html b/bdb/docs/ref/am_conf/h_hash.html
new file mode 100644
index 00000000000..d42edee1c07
--- /dev/null
+++ b/bdb/docs/ref/am_conf/h_hash.html
@@ -0,0 +1,39 @@
+<!--$Id: h_hash.so,v 10.12 2000/07/04 18:28:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Specifying a database hash</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/h_ffactor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_nelem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Specifying a database hash</h1>
+<p>The database hash determines in which bucket a particular key will reside.
+The goal of hashing keys is to distribute keys equally across the database
+pages; it is therefore important that the hash function work well with
+the specified keys so that the resulting bucket usage is relatively
+uniform. A hash function that does not distribute keys well can
+effectively reduce the database to a sequential list.
+<p>No hash function performs equally well on all possible data sets. Some
+applications may find that the default hash function performs poorly
+with a particular set of keys. The distribution resulting from the hash
+function can be checked using the <a href="../../utility/db_stat.html">db_stat</a> utility. By comparing the
+number of hash buckets and the number of keys, one can decide if the entries
+are hashing in a well-distributed manner.
+<p>The hash function for the hash table can be specified by calling the
+<a href="../../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a> function. If no hash function is specified, a default
+function will be used. Any application-specified hash function must
+take as arguments a reference to a DB object, a pointer to a byte string,
+and the length of that string, and return an unsigned 32-bit hash value.
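+<p>The following sketch shows the shape of an application-specified hash
+function and how it is installed. The function shown, a simple
+byte-at-a-time multiplicative hash, is an illustration only, not a
+recommendation, and the trailing fragment assumes a DB handle dbp
+created by db_create and an int ret.
+<p><blockquote><pre>u_int32_t
+my_hash(dbp, bytes, length)
+        DB *dbp;
+        const void *bytes;
+        u_int32_t length;
+{
+        const u_int8_t *p;
+        u_int32_t hash;
+
+        /* Simple multiplicative hash, for illustration only. */
+        for (p = bytes, hash = 0; length-- &gt; 0; ++p)
+                hash = hash * 31 + *p;
+        return (hash);
+}
+
+/* Before calling DB-&gt;open: */
+if ((ret = dbp-&gt;set_h_hash(dbp, my_hash)) != 0)
+        dbp-&gt;err(dbp, ret, "set_h_hash");</pre></blockquote>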
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/h_ffactor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_nelem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/h_nelem.html b/bdb/docs/ref/am_conf/h_nelem.html
new file mode 100644
index 00000000000..8c510d6db04
--- /dev/null
+++ b/bdb/docs/ref/am_conf/h_nelem.html
@@ -0,0 +1,32 @@
+<!--$Id: h_nelem.so,v 10.12 2000/03/18 21:43:08 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Hash table size</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/h_hash.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/recno.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Hash table size</h1>
+<p>When setting up a Hash database, it is useful to know the expected number
+of elements that will be stored in the hash table. The Hash access method
+implementation can use this value to construct, in advance, the number of
+buckets that the database will eventually require.
+<p>The anticipated number of elements in the hash table can be specified by
+calling the <a href="../../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a> function. If not specified, or set too low,
+hash tables will expand gracefully as keys are entered, although a slight
+performance degradation may be noticed. In order for the estimated number
+of elements to be a useful value to Berkeley DB, the <a href="../../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a> function
+must also be called to set the page fill factor.
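+<p>For example, an application expecting to store roughly one million
+elements might configure the database as follows (an illustrative sketch;
+the element count, fill factor and file name are assumed values, and
+error handling is abbreviated):
+<p><blockquote><pre>DB *dbp;
+int ret;
+
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+/* Expect ~1,000,000 elements at ~50 keys per bucket (assumed values). */
+if ((ret = dbp-&gt;set_h_ffactor(dbp, 50)) != 0 ||
+    (ret = dbp-&gt;set_h_nelem(dbp, 1000000)) != 0) {
+        dbp-&gt;err(dbp, ret, "configuring hash table size");
+        return (ret);
+}
+ret = dbp-&gt;open(dbp, "hash.db", NULL, DB_HASH, DB_CREATE, 0664);</pre></blockquote>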
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/h_hash.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/recno.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/intro.html b/bdb/docs/ref/am_conf/intro.html
new file mode 100644
index 00000000000..15fed60f612
--- /dev/null
+++ b/bdb/docs/ref/am_conf/intro.html
@@ -0,0 +1,45 @@
+<!--$Id: intro.so,v 10.22 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What are the available access methods?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/select.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What are the available access methods?</h1>
+<p>Berkeley DB currently offers four access methods: Btree, Hash, Queue and Recno.
+<h3>Btree</h3>
+<p>The Btree access method is an implementation of a sorted, balanced tree
+structure. Searches, insertions, and deletions in the tree all take
+O(log_b N) time, where b is the average number of keys per page and
+N is the total number of keys stored. Often, inserting ordered data into
+Btree implementations results in pages that are only half-full. Berkeley DB
+makes ordered (or inverse ordered) insertion the best case, resulting in
+nearly full-page space utilization.
+<h3>Hash</h3>
+<p>The Hash access method data structure is an implementation of Extended
+Linear Hashing, as described in "Linear Hashing: A New Tool for File and
+Table Addressing", Witold Litwin, <i>Proceedings of the 6th
+International Conference on Very Large Databases (VLDB)</i>, 1980.
+<h3>Queue</h3>
+<p>The Queue access method stores fixed-length records with logical record
+numbers as keys. It is designed for fast inserts at the tail and has a
+special cursor consume operation that deletes and returns a record from
+the head of the queue. The Queue access method uses record level locking.
+<h3>Recno</h3>
+<p>The Recno access method stores both fixed and variable-length records with
+logical record numbers as keys, optionally backed by a flat text (byte
+stream) file.
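+<p>The access method is chosen when the database is created. The following
+minimal C fragment opens a Btree database; DB_HASH, DB_QUEUE or DB_RECNO
+could be passed instead to select another access method. This is a sketch
+only: the file name and mode are arbitrary, error handling is abbreviated,
+and #include &lt;db.h&gt; plus a surrounding function are assumed.
+<p><blockquote><pre>DB *dbp;
+int ret;
+
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+if ((ret = dbp-&gt;open(dbp,
+    "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+        dbp-&gt;err(dbp, ret, "open: access.db");
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+}</pre></blockquote>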
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/select.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/logrec.html b/bdb/docs/ref/am_conf/logrec.html
new file mode 100644
index 00000000000..fd9fb0141d6
--- /dev/null
+++ b/bdb/docs/ref/am_conf/logrec.html
@@ -0,0 +1,45 @@
+<!--$Id: logrec.so,v 10.23 2000/12/04 18:05:41 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Logical record numbers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/select.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/pagesize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Logical record numbers</h1>
+<p>The Berkeley DB Btree, Queue and Recno access methods can operate on logical
+record numbers. In all cases for the Queue and Recno access methods,
+and in some cases with the Btree access method, a record number is
+specified to reference a specific key/data pair. When a Btree database
+supports duplicate data items, the logical record number refers to a key
+and all of its data items.
+<p>Record numbers are 32-bit unsigned types, which limits the number of
+logical records in a database to 4,294,967,296. The first record in the
+database is record number 1.
+<p>Record numbers in Recno databases can be configured to run in either
+mutable or fixed mode: mutable, where logical record numbers change as
+records are deleted or inserted, and fixed, where record numbers never
+change regardless of the database operation. Record numbers in Btree
+databases are always mutable, and as records are deleted or inserted, the
+logical record number for other records in the database can change. See
+<a href="../../ref/am_conf/renumber.html">Logically renumbering records</a> for
+more information.
+<p>Record numbers in Queue databases are always fixed, and never change
+regardless of the database operation.
+<p>Configuring Btree databases to support record numbers can severely limit
+the throughput of applications with multiple concurrent threads writing
+the database, because locations used to store record counts often become
+hot spots that many different threads all need to update.
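+<p>For example, the following fragment retrieves the data item stored under
+logical record number 42 in a Queue or Recno database (a sketch; the
+record number is arbitrary, and an open DB handle dbp and the usual C
+headers are assumed):
+<p><blockquote><pre>DBT key, data;
+db_recno_t recno;
+int ret;
+
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+
+recno = 42;                             /* Record numbers start at 1. */
+key.data = &recno;
+key.size = sizeof(recno);
+
+if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+        printf("record %lu: %.*s\n",
+            (unsigned long)recno, (int)data.size, (char *)data.data);</pre></blockquote>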
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/select.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/pagesize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/malloc.html b/bdb/docs/ref/am_conf/malloc.html
new file mode 100644
index 00000000000..12e57383c5e
--- /dev/null
+++ b/bdb/docs/ref/am_conf/malloc.html
@@ -0,0 +1,31 @@
+<!--$Id: malloc.so,v 10.19 2000/03/18 21:43:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Non-local memory allocation</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/dup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_compare.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Non-local memory allocation</h1>
+<p>Berkeley DB can allocate memory for returned key/data pairs which then becomes
+the responsibility of the application. See <a href="../../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> or
+<a href="../../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> for further information.
+<p>On systems where there may be multiple library versions of malloc (notably
+Windows NT), the Berkeley DB library could allocate memory from a different heap
+than the one the application will use to free it. To avoid this problem, the
+allocation routine to be used for allocating such key/data items can be
+specified by calling the <a href="../../api_c/db_set_malloc.html">DB-&gt;set_malloc</a> or
+<a href="../../api_c/db_set_realloc.html">DB-&gt;set_realloc</a> functions. If no allocation function is specified, the
+underlying C library functions are used.
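+<p>For example, a thread that needs the library to allocate the returned
+data item can set the <a href="../../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag in the DBT, after which freeing
+the memory is the application's responsibility (a sketch; an open DB
+handle dbp, the usual C headers and an arbitrary key are assumed):
+<p><blockquote><pre>DBT key, data;
+int ret;
+
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+data.flags = DB_DBT_MALLOC;             /* Library allocates data.data. */
+
+if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0) {
+        printf("%.*s\n", (int)data.size, (char *)data.data);
+        free(data.data);                /* Application must free it. */
+}</pre></blockquote>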
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/dup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_compare.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/pagesize.html b/bdb/docs/ref/am_conf/pagesize.html
new file mode 100644
index 00000000000..41cab5ec439
--- /dev/null
+++ b/bdb/docs/ref/am_conf/pagesize.html
@@ -0,0 +1,66 @@
+<!--$Id: pagesize.so,v 10.20 2000/03/18 21:43:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting a page size</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/logrec.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/cachesize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting a page size</h1>
+<p>The size of the pages used in the underlying database can be specified by
+calling the <a href="../../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a> function. The page size must be a power of
+two, at least 512 bytes and at most 64K bytes. If
+no page size is specified by the application, a page size is selected
+based on the underlying filesystem I/O block size. (A page size selected
+in this way has a lower limit of 512 bytes and an upper limit of 16K
+bytes.)
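+<p>For example, an application might select an 8KB page size before opening
+the database (a sketch; the value and file name are arbitrary, subject to
+the power-of-two limits above, and error handling is abbreviated):
+<p><blockquote><pre>DB *dbp;
+int ret;
+
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+if ((ret = dbp-&gt;set_pagesize(dbp, 8 * 1024)) != 0) {    /* 8KB pages. */
+        dbp-&gt;err(dbp, ret, "set_pagesize");
+        return (ret);
+}
+ret = dbp-&gt;open(dbp, "data.db", NULL, DB_BTREE, DB_CREATE, 0664);</pre></blockquote>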
+<p>There are four issues to consider when selecting a pagesize: overflow
+record sizes, locking, I/O efficiency, and recoverability.
+<p>First, the page size implicitly sets the size of an overflow record.
+Overflow records are key or data items that are too large to fit on a
+normal database page because of their size, and are therefore stored in
+overflow pages. Overflow pages are pages that exist outside of the normal
+database structure. For this reason, there is often a significant
+performance penalty associated with retrieving or modifying overflow
+records. Selecting a page size that is too small, and which forces the
+creation of large numbers of overflow pages, can seriously impact the
+performance of an application.
+<p>Second, in the Btree, Hash and Recno Access Methods, the finest-grained
+lock that Berkeley DB acquires is for a page. (The Queue Access Method
+generally acquires record-level locks rather than page-level locks.)
+Selecting a page size that is too large, and which causes threads or
+processes to wait because other threads of control are accessing or
+modifying records on the same page, can impact the performance of your
+application.
+<p>Third, the page size specifies the granularity of I/O from the database
+to the operating system. Berkeley DB will give a page-sized unit of bytes to
+the operating system to be scheduled for writing to the disk. For many
+operating systems, there is an internal <b>block size</b> which is used
+as the granularity of I/O from the operating system to the disk. If the
+page size is smaller than the block size, the operating system may be
+forced to read a block from the disk, copy the page into the buffer it
+read, and then write out the block to disk. Obviously, it will be much
+more efficient for Berkeley DB to write filesystem-sized blocks to the operating
+system and for the operating system to write those same blocks to the
+disk. Selecting a page size that is too small, and which causes the
+operating system to coalesce or otherwise manipulate Berkeley DB pages, can
+impact the performance of your application. Alternatively, selecting a
+page size that is too large may cause Berkeley DB and the operating system to
+write more data than is strictly necessary.
+<p>Fourth, when using the Berkeley DB Transactional Data Store product, the page size may affect the errors
+from which your database can recover. See
+<a href="../../ref/transapp/reclimit.html">Berkeley DB Recoverability</a> for more
+information.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/logrec.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/cachesize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/re_source.html b/bdb/docs/ref/am_conf/re_source.html
new file mode 100644
index 00000000000..2095a96983a
--- /dev/null
+++ b/bdb/docs/ref/am_conf/re_source.html
@@ -0,0 +1,62 @@
+<!--$Id: re_source.so,v 10.14 2000/11/20 21:45:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Flat-text backing files</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/extentsize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/renumber.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Flat-text backing files</h1>
+<p>It is possible to back any Recno database (either fixed or variable
+length) with a flat-text source file. This provides fast read (and
+potentially write) access to databases that are normally created and
+stored as flat-text files. The backing source file may be specified by
+calling the <a href="../../api_c/db_set_re_source.html">DB-&gt;set_re_source</a> function.
+<p>The backing source file will be read to initialize the database. In the
+case of variable length records, the records are assumed to be separated
+as described for the <a href="../../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a> function interface. For example,
+standard UNIX byte stream files can be interpreted as a sequence of
+variable length records separated by ASCII newline characters. This is
+the default.
+<p>When cached data would normally be written back to the underlying database
+file (e.g., when the <a href="../../api_c/db_close.html">DB-&gt;close</a> or <a href="../../api_c/db_sync.html">DB-&gt;sync</a> functions are called), the
+in-memory copy of the database will be written back to the backing source
+file.
+<p>The backing source file must already exist (but may be zero-length) when
+<a href="../../api_c/db_open.html">DB-&gt;open</a> is called. By default, the backing source file is read
+lazily, i.e., records are not read from the backing source file until they
+are requested by the application. If multiple processes (not threads) are
+accessing a Recno database concurrently and either inserting or deleting
+records, the backing source file must be read in its entirety before more
+than a single process accesses the database, and only that process should
+specify the backing source file as part of the <a href="../../api_c/db_open.html">DB-&gt;open</a> call.
+This can be accomplished by calling the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> function with the
+<a href="../../api_c/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a> flag.
+<p>Reading and writing the backing source file cannot be transactionally
+protected because it involves filesystem operations that are not part of
+the Berkeley DB transaction methodology. For this reason, if a temporary
+database is used to hold the records (a NULL was specified as the file
+argument to <a href="../../api_c/db_open.html">DB-&gt;open</a>), <b>it is possible to lose the
+contents of the backing source file if the system crashes at the right
+instant</b>. If a permanent file is used to hold the database (a file name
+was specified as the file argument to <a href="../../api_c/db_open.html">DB-&gt;open</a>), normal database
+recovery on that file can be used to prevent information loss. It is
+still possible that the contents of the backing source file itself will
+be corrupted or lost if the system crashes.
+<p>For all of the above reasons, the backing source file is generally used
+to specify databases that are read-only for Berkeley DB applications, and that
+are either generated on the fly by software tools, or modified using a
+different mechanism such as a text editor.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/extentsize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/renumber.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/recno.html b/bdb/docs/ref/am_conf/recno.html
new file mode 100644
index 00000000000..1a7128e0e75
--- /dev/null
+++ b/bdb/docs/ref/am_conf/recno.html
@@ -0,0 +1,69 @@
+<!--$Id: recno.so,v 11.10 2000/12/04 18:05:41 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Managing record-based databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/h_nelem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/extentsize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Managing record-based databases</h1>
+<p>When using fixed- or variable-length record-based databases, particularly
+with flat-text backing files, there are several items that the user can
+control. The Recno access method can be used to store either variable-
+or fixed-length data items. By default, the Recno access method stores
+variable-length data items. The Queue access method can only store
+fixed-length data items.
+<h3>Record Delimiters</h3>
+<p>When using the Recno access method to store variable-length records,
+records read from any backing source file are separated by a specific
+byte value which marks the end of one record and the beginning of the
+next. This delimiting value is ignored except when reading records from
+a backing source file; that is, records that include the delimiter byte
+may be stored in the database. However, if such records are written
+out to the backing source file and the backing source file is
+subsequently read into a database, the records will be split where
+delimiting bytes were found.
+<p>For example, UNIX text files can usually be interpreted as a sequence of
+variable-length records separated by ASCII newline characters. This byte
+value (ASCII 0x0a) is the default delimiter. Applications may specify a
+different delimiting byte using the <a href="../../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a> interface.
+If no backing source file is being used, there is no reason to set the
+delimiting byte value.
+<h3>Record Length</h3>
+<p>When using the Recno or Queue access methods to store fixed-length
+records, the record length must be specified. Since the Queue access
+method always uses fixed-length records, the user must always set the
+record length prior to creating the database. Setting the record length
+is what causes the Recno access method to store fixed-length, not
+variable-length, records.
+<p>The length of the records is specified by calling the
+<a href="../../api_c/db_set_re_len.html">DB-&gt;set_re_len</a> function. The default length of the records is 0 bytes.
+Any record read from a backing source file or otherwise stored in the
+database that is shorter than the declared length will automatically be
+padded as described for the <a href="../../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a> function. Any record stored
+that is longer than the declared length results in an error. For
+further information on backing source files, see
+<a href="../../ref/am_conf/re_source.html">Flat-text backing files</a>.
+<h3>Record Padding Byte Value</h3>
+<p>When storing fixed-length records in a Queue or Recno database, a pad
+character may be specified by calling the <a href="../../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a> function. Any
+record read from the backing source file or otherwise stored in the
+database that is shorter than the expected length will automatically be
+padded with this byte value. If fixed-length records are specified but
+no pad value is specified, a space character (0x20 in the ASCII
+character set) will be used. For further information on backing source
+files, see <a href="../../ref/am_conf/re_source.html">Flat-text backing
+files</a>.
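+<p>Putting these settings together, a fixed-length Recno database with
+80-byte records, padded with '.' characters and loaded from a
+newline-delimited backing file, might be configured as follows (an
+illustrative sketch; all values and file names are arbitrary, and error
+handling is abbreviated):
+<p><blockquote><pre>DB *dbp;
+int ret;
+
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+if ((ret = dbp-&gt;set_re_delim(dbp, '\n')) != 0 ||        /* Backing file delimiter. */
+    (ret = dbp-&gt;set_re_len(dbp, 80)) != 0 ||            /* Fixed-length, 80-byte records. */
+    (ret = dbp-&gt;set_re_pad(dbp, '.')) != 0 ||           /* Pad byte for short records. */
+    (ret = dbp-&gt;set_re_source(dbp, "records.txt")) != 0) {
+        dbp-&gt;err(dbp, ret, "configuring Recno database");
+        return (ret);
+}
+ret = dbp-&gt;open(dbp, "records.db", NULL, DB_RECNO, DB_CREATE, 0664);</pre></blockquote>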
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/h_nelem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/extentsize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/renumber.html b/bdb/docs/ref/am_conf/renumber.html
new file mode 100644
index 00000000000..7c3594dff66
--- /dev/null
+++ b/bdb/docs/ref/am_conf/renumber.html
@@ -0,0 +1,80 @@
+<!--$Id: renumber.so,v 10.23 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Logically renumbering records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/re_source.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/ops.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Logically renumbering records</h1>
+<p>Records stored in the Queue and Recno access methods are accessed by
+logical record number. In all cases in Btree databases, and optionally
+in Recno databases (see the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> function and the
+<a href="../../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag for more information), record numbers are
+mutable. This means that the record numbers may change as records are
+added to and deleted from the database. The deletion of record number
+4 causes any records numbered 5 and higher to be renumbered downward by
+1; the addition of a new record after record number 4 causes any
+records numbered 5 and higher to be renumbered upward by 1. In all
+cases in Queue databases, and by default in Recno databases, record
+numbers are not mutable, and the addition or deletion of records to the
+database will not cause already-existing record numbers to change. For
+this reason, new records cannot be inserted between already-existing
+records in databases with immutable record numbers.
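+<p>For example, a Recno database with mutable (renumbering) record numbers
+is configured by setting the <a href="../../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag before the database is
+opened (a sketch; the file name is arbitrary and error handling is
+abbreviated):
+<p><blockquote><pre>DB *dbp;
+int ret;
+
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+/* Record numbers change as records are inserted or deleted. */
+if ((ret = dbp-&gt;set_flags(dbp, DB_RENUMBER)) != 0) {
+        dbp-&gt;err(dbp, ret, "set_flags: DB_RENUMBER");
+        return (ret);
+}
+ret = dbp-&gt;open(dbp, "recno.db", NULL, DB_RECNO, DB_CREATE, 0664);</pre></blockquote>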
+<p>Cursors pointing into a Btree database or a Recno database with mutable
+record numbers maintain a reference to a specific record, rather than to
+a record number; that is, the record they reference does not change as
+other records are added or deleted. For example, if a database contains
+three records with the record numbers 1, 2, and 3, and the data items
+"A", "B", and "C", respectively, the deletion of record number 2 ("B")
+will cause the record "C" to be renumbered downward to record number 2.
+A cursor positioned at record number 3 ("C") will be adjusted and
+continue to point to "C" after the deletion. Similarly, a cursor
+previously referencing the now deleted record number 2 will be
+positioned between the new record numbers 1 and 2, and an insertion
+using that cursor will appear between those records. In this manner
+records can be added and deleted to a database without disrupting the
+sequential traversal of the database by a cursor.
+<p>Only cursors created using a single DB handle can adjust each
+other's position in this way, however. If multiple DB handles
+have a renumbering Recno database open simultaneously (as when multiple
+processes share a single database environment), a record referred to by
+one cursor could change underfoot if a cursor created using another
+DB handle inserts or deletes records into the database. For
+this reason, applications using Recno databases with mutable record
+numbers will usually make all accesses to the database using a single
+DB handle and cursors created from that handle, or will
+otherwise single-thread access to the database, e.g., by using the
+Berkeley DB Concurrent Data Store product.
+<p>In any Queue or Recno database, creating new records will cause the
+creation of multiple records if the record number being created is more
+than one greater than the largest record currently in the database. For
+example, creating record number 28, when record 25 was previously the
+last record in the database, will implicitly create records 26 and 27
+as well as 28. All first, last, next and previous cursor operations
+will automatically skip over these implicitly created records. So, if
+record number 5 is the only record the application has created,
+implicitly creating records 1 through 4, the <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> interface
+with the <a href="../../api_c/dbc_get.html#DB_FIRST">DB_FIRST</a> flag will return record number 5, not record
+number 1. Attempts to explicitly retrieve implicitly created records
+by their record number will result in a special error return,
+<a href="../../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>In any Berkeley DB database, attempting to retrieve a deleted record, using
+a cursor positioned on the record, results in a special error return,
+<a href="../../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>. In addition, when using Queue databases or Recno
+databases with immutable record numbers, attempting to retrieve a deleted
+record by its record number will also result in the <a href="../../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>
+return.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/re_source.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/ops.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/am_conf/select.html b/bdb/docs/ref/am_conf/select.html
new file mode 100644
index 00000000000..3838b34673e
--- /dev/null
+++ b/bdb/docs/ref/am_conf/select.html
@@ -0,0 +1,116 @@
+<!--$Id: select.so,v 10.23 2000/03/18 21:43:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting an access method</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td width="1%"><a href="../../ref/am_conf/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/logrec.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting an access method</h1>
+<p>The Berkeley DB access method implementation unavoidably interacts with each
+application's data set, locking requirements and data access patterns.
+For this reason, one access method may result in dramatically better
+performance for an application than another one. Applications whose data
+could be stored in more than one access method may want to benchmark their
+performance using the different candidates.
+<p>One of the strengths of Berkeley DB is that it provides multiple access methods
+with almost identical interfaces. This
+means that it is simple to modify an application to use a different access
+method. Applications can easily benchmark the different Berkeley DB access
+methods against each other for their particular data set and access pattern.
+<p>Most applications choose between using the Btree or Hash access methods
+or between using the Queue and Recno access methods, because each of the
+two pairs offers similar functionality.
+<h3>Hash or Btree?</h3>
+<p>The Hash and Btree access methods should be used when logical record
+numbers are not the primary key used for data access. (If logical record
+numbers are a secondary key used for data access, the Btree access method
+is a possible choice, as it supports simultaneous access by a key and a
+record number.)
+<p>Keys in Btrees are stored in sorted order and the relationship between
+them is defined by that sort order. For this reason, the Btree access
+method should be used when there is any locality of reference among keys.
+Locality of reference means that accessing one particular key in the
+Btree implies that the application is more likely to access keys near to
+the key being accessed, where "near" is defined by the sort order. For
+example, if keys are timestamps, and it is likely that a request for an
+8AM timestamp will be followed by a request for a 9AM timestamp, the
+Btree access method is generally the right choice. Or, for example, if
+the keys are names, and the application will want to review all entries
+with the same last name, the Btree access method is again a good choice.
+<p>There is little difference in performance between the Hash and Btree
+access methods on small data sets, where all, or most of, the data set
+fits into the cache. However, when a data set is large enough that
+significant numbers of data pages no longer fit into the cache, then the
+Btree locality of reference described above becomes important for
+performance reasons. For example, there is no locality of reference for
+the Hash access method, and so key "AAAAA" is as likely to be stored on
+the same data page with key "ZZZZZ" as with key "AAAAB". In the Btree
+access method, because items are sorted, key "AAAAA" is far more likely
+to be near key "AAAAB" than key "ZZZZZ". So, if the application exhibits
+locality of reference in its data requests, then the Btree page read into
+the cache to satisfy a request for key "AAAAA" is much more likely to be
+useful to satisfy subsequent requests from the application than the Hash
+page read into the cache to satisfy the same request. This means that
+for applications with locality of reference, the cache is generally much
+"hotter" for the Btree access method than the Hash access method, and
+the Btree access method will make many fewer I/O calls.
+<p>However, when a data set becomes even larger, the Hash access method can
+outperform the Btree access method. The reason for this is that Btrees
+contain more metadata pages than Hash databases. The data set can grow
+so large that metadata pages begin to dominate the cache for the Btree
+access method. If this happens, the Btree can be forced to do an I/O
+for each data request because the probability that any particular data
+page is already in the cache becomes quite small. Because the Hash access
+method has fewer metadata pages, its cache stays "hotter" longer in the
+presence of large data sets. In addition, once the data set is so large
+that both the Btree and Hash access methods are almost certainly doing
+an I/O for each random data request, the fact that Hash does not have to
+walk several internal pages as part of a key search becomes a performance
+advantage for the Hash access method as well.
+<p>Application data access patterns strongly affect all of these behaviors.
+For example, accessing the data by walking a cursor through the database
+will greatly mitigate the large data set behavior described above, because
+each I/O into the cache will satisfy a fairly large number of subsequent
+data requests.
+<p>In the absence of information on application data and data access
+patterns, for small data sets either the Btree or Hash access methods
+will suffice. For data sets larger than the cache, we normally recommend
+using the Btree access method. If you have truly large data, then the
+Hash access method may be a better choice. The <a href="../../utility/db_stat.html">db_stat</a> utility
+is a useful tool for monitoring how well your cache is performing.
+<h3>Queue or Recno?</h3>
+<p>The Queue or Recno access methods should be used when logical record
+numbers are the primary key used for data access. The advantage of the
+Queue access method is that it performs record level locking and for this
+reason supports significantly higher levels of concurrency than the Recno
+access method. The advantage of the Recno access method is that it
+supports a number of additional features beyond those supported by the
+Queue access method, such as variable-length records and support for
+backing flat-text files.
+<p>Logical record numbers can be mutable or fixed: mutable, where logical
+record numbers can change as records are deleted or inserted, and fixed,
+where record numbers never change regardless of the database operation.
+It is possible to store and retrieve records based on logical record
+numbers in the Btree access method. However, those record numbers are
+always mutable, and as records are deleted or inserted, the logical record
+number for other records in the database will change. The Queue access
+method always runs in fixed mode, and logical record numbers never change
+regardless of the database operation. The Recno access method can be
+configured to run in either mutable or fixed mode.
+<p>In addition, the Recno access method provides support for databases whose
+permanent storage is a flat text file, with the database itself used as a
+fast, temporary storage area while the data is being read or modified.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am_conf/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/logrec.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/arch/apis.html b/bdb/docs/ref/arch/apis.html
new file mode 100644
index 00000000000..d1ae91b5a74
--- /dev/null
+++ b/bdb/docs/ref/arch/apis.html
@@ -0,0 +1,74 @@
+<!--$Id: apis.so,v 10.26 2000/03/18 21:43:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Programmatic APIs</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td width="1%"><a href="../../ref/arch/progmodel.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/script.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Programmatic APIs</h1>
+<p>The Berkeley DB subsystems can be accessed through interfaces from multiple
+languages. The standard library interface is ANSI C. Applications can
+also use Berkeley DB via C++ or Java, as well as from scripting languages.
+Environments can be shared among applications written using any of these
+APIs. For example, you might have a local server written in C or C++, a
+script for an administrator written in Perl or Tcl, and a web-based user
+interface written in Java, all sharing a single database environment.
+<h3>C</h3>
+<p>The Berkeley DB library is written entirely in ANSI C. C applications use a
+single include file:
+<p><blockquote><pre>#include &lt;db.h&gt;</pre></blockquote>
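+<p>A minimal C program using the library might look as follows. This is a
+sketch only: it stores a single key/data pair, uses no environment or
+transactions, and the file name and values are arbitrary.
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+main()
+{
+        DB *dbp;
+        DBT key, data;
+        int ret, t_ret;
+
+        if ((ret = db_create(&dbp, NULL, 0)) != 0)
+                return (1);
+        if ((ret = dbp-&gt;open(dbp,
+            "example.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+                dbp-&gt;err(dbp, ret, "open: example.db");
+                (void)dbp-&gt;close(dbp, 0);
+                return (1);
+        }
+
+        memset(&key, 0, sizeof(key));
+        memset(&data, 0, sizeof(data));
+        key.data = "fruit";
+        key.size = sizeof("fruit");
+        data.data = "apple";
+        data.size = sizeof("apple");
+
+        if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) != 0)
+                dbp-&gt;err(dbp, ret, "DB-&gt;put");
+
+        if ((t_ret = dbp-&gt;close(dbp, 0)) != 0 && ret == 0)
+                ret = t_ret;
+        return (ret == 0 ? 0 : 1);
+}</pre></blockquote>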
+<h3>C++</h3>
+<p>The C++ classes provide a thin wrapper around the C API, with the major
+advantages being improved encapsulation and an optional exception
+mechanism for errors. C++ applications use a single include file:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;</pre></blockquote>
+<p>The classes and methods are named in a fashion that directly corresponds
+to structures and functions in the C interface. Likewise, arguments to
+methods appear in the same order as the C interface, except to remove the
+explicit <b>this</b> pointer. The #defines used for flags are identical
+between the C and C++ interfaces.
+<p>As a rule, each C++ object has exactly one structure from the underlying
+C API associated with it. The C structure is allocated with each
+constructor call and deallocated with each destructor call. Thus, the
+rules the user needs to follow in allocating and deallocating structures
+are the same between the C and C++ interfaces.
+<p>To ensure portability to many platforms, both new and old, Berkeley DB makes as
+few assumptions as possible about the C++ compiler and library. For
+example, it does not expect STL, templates or namespaces to be available.
+The newest C++ feature used is exceptions, which are used liberally to
+transmit error information. Even the use of exceptions can be disabled
+at runtime.
+<h3>Java</h3>
+<p>The Java classes provide a layer around the C API that is almost identical
+to the C++ layer. The classes and methods are, for the most part,
+identical to those of the C++ layer. Db constants and #defines are represented as
+"static final int" values. Error conditions are communicated as Java
+exceptions.
+<p>As in C++, each Java object has exactly one structure from the underlying
+C API associated with it. The Java structure is allocated with each
+constructor or open call, but is deallocated only by the Java garbage
+collector. Because the timing of garbage collection is not predictable,
+applications should take care to do a close when finished with any object
+that has a close method.
+<h3>Dbm/Ndbm, Hsearch</h3>
+<p>Berkeley DB supports the standard UNIX interfaces <a href="../../api_c/dbm.html">dbm</a> (or its
+<a href="../../api_c/dbm.html">ndbm</a> variant) and <a href="../../api_c/hsearch.html">hsearch</a>. After including a new header
+file and recompiling, <a href="../../api_c/dbm.html">dbm</a> programs will run orders of magnitude
+faster and their underlying databases can grow as large as necessary.
+Historic <a href="../../api_c/dbm.html">dbm</a> applications fail once some number of entries has been
+inserted into the database, where that number depends on the effectiveness
+of the hashing function on the particular data set.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/arch/progmodel.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/script.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/arch/bigpic.gif b/bdb/docs/ref/arch/bigpic.gif
new file mode 100644
index 00000000000..48c52aed5a2
--- /dev/null
+++ b/bdb/docs/ref/arch/bigpic.gif
Binary files differ
diff --git a/bdb/docs/ref/arch/bigpic.html b/bdb/docs/ref/arch/bigpic.html
new file mode 100644
index 00000000000..6c945744e83
--- /dev/null
+++ b/bdb/docs/ref/arch/bigpic.html
@@ -0,0 +1,114 @@
+<!--$Id: bigpic.so,v 8.21 2000/12/18 21:05:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: The big picture</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td width="1%"><a href="../../ref/am/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/progmodel.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>The big picture</h1>
+<p>The previous chapters in this Reference Guide have described applications
+that use the Berkeley DB Access Methods for fast data storage and retrieval.
+The applications we describe here and in subsequent chapters are similar
+in nature to the Access Method applications, but they are also fully
+recoverable in the face of application or system failure.
+<p>Application code that only uses the Berkeley DB Access Methods might appear as
+follows:
+<p><blockquote><pre>switch (ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) {
+case 0:
+        printf("db: %s: key stored.\n", (char *)key.data);
+        break;
+default:
+        dbp-&gt;err(dbp, ret, "dbp-&gt;put");
+        exit (1);
+}</pre></blockquote>
+<p>The underlying Berkeley DB architecture that supports this is:
+<p align=center><img src="smallpic.gif" alt="small">
+<p>As you can see from this diagram, the application makes calls into the
+Access Methods, and the Access Methods use the underlying shared memory
+buffer cache to hold recently used file pages in main memory.
+<p>When applications require recoverability, then their calls to the Access
+Methods must be wrapped in calls to the transaction subsystem. The
+application must inform Berkeley DB where to begin and end transactions, and
+must be prepared for the possibility that an operation may fail at any
+particular time, causing the transaction to abort.
+<p>An example of transaction protected code might appear as follows:
+<p><blockquote><pre>retry:  if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+                dbenv-&gt;err(dbenv, ret, "txn_begin");
+                exit (1);
+        }
+<p>
+        switch (ret = dbp-&gt;put(dbp, tid, &key, &data, 0)) {
+        case DB_LOCK_DEADLOCK:
+                (void)txn_abort(tid);
+                goto retry;
+        case 0:
+                printf("db: %s: key stored.\n", (char *)key.data);
+                break;
+        default:
+                dbenv-&gt;err(dbenv, ret, "dbp-&gt;put");
+                exit (1);
+        }
+<p>
+        if ((ret = txn_commit(tid, 0)) != 0) {
+                dbenv-&gt;err(dbenv, ret, "txn_commit");
+                exit (1);
+        }</pre></blockquote>
+<p>In this example, the same operation is being done as before; however, it
+is wrapped in transaction calls. The transaction is started with
+<a href="../../api_c/txn_begin.html">txn_begin</a>, and finished with <a href="../../api_c/txn_commit.html">txn_commit</a>. If the operation
+fails due to a deadlock, then the transaction is aborted using
+<a href="../../api_c/txn_abort.html">txn_abort</a>, after which the operation may be retried.
+<p>There are actually five major subsystems in Berkeley DB, as follows:
+<p><dl compact>
+<p><dt>The Access Methods<dd>The Access Method subsystem provides general-purpose support for creating
+and accessing database files formatted as Btrees, Hashed files, and
+Fixed- and Variable-length records. These modules are useful in the
+absence of transactions for applications that need fast, formatted file
+support. See <a href="../../api_c/db_open.html">DB-&gt;open</a> and <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> for more
+information. These functions were already discussed in detail in the
+previous chapters.
+<p><dt>The Memory Pool<dd>The memory pool subsystem is the general-purpose shared memory buffer pool
+used by Berkeley DB. This is the shared memory cache that allows multiple
+processes and threads within processes to share access to databases. This
+module is useful outside of the Berkeley DB package for processes that require
+portable, page-oriented, cached, shared file access.
+<p><dt>Transactions<dd>The transaction subsystem allows a group of database changes to be
+treated as an atomic unit so that either all of the changes are done, or
+none of the changes are done. The transaction subsystem implements the
+Berkeley DB transaction model. This module is useful outside of the Berkeley DB
+package for processes that want to transaction protect their own data
+modifications.
+<p><dt>Locking<dd>The locking subsystem is the general-purpose lock manager used by Berkeley DB.
+This module is useful outside of the Berkeley DB package for processes that
+require a portable, fast, configurable lock manager.
+<p><dt>Logging<dd>The logging subsystem is the write-ahead logging used to support the Berkeley DB
+transaction model. It is largely specific to the Berkeley DB package, and
+unlikely to be useful elsewhere except as a supporting module for the
+Berkeley DB transaction subsystem.
+</dl>
+<p>Here is a more complete picture of the Berkeley DB library:
+<p align=center><img src="bigpic.gif" alt="large">
+<p>In this example, the application makes calls to the Access Methods and to
+the transaction subsystem. The Access Methods and transaction subsystem
+in turn make calls into the Buffer Pool, Locking and Logging subsystems
+on behalf of the application.
+<p>The underlying subsystems can each be called independently. For
+example, the Buffer Pool subsystem can be used apart from the rest of
+Berkeley DB by applications simply wanting a shared memory buffer pool, or
+the Locking subsystem may be called directly by applications that are
+doing their own locking outside of Berkeley DB. However, this usage is fairly
+rare, and most applications will either use only the Access Methods, or
+the Access Methods wrapped in calls to the transaction interfaces.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/am/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/progmodel.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/arch/progmodel.html b/bdb/docs/ref/arch/progmodel.html
new file mode 100644
index 00000000000..04284f4f37e
--- /dev/null
+++ b/bdb/docs/ref/arch/progmodel.html
@@ -0,0 +1,41 @@
+<!--$Id: progmodel.so,v 10.25 2000/03/18 21:43:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Programming model</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td width="1%"><a href="../../ref/arch/bigpic.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/apis.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Programming model</h1>
+<p>The Berkeley DB distribution is a database library, where the library is linked
+into the address space of the code which uses it. The code using Berkeley DB
+may be an application or it may be a server providing functionality to a
+number of clients via some form of inter-process or remote-process
+communication (IPC/RPC).
+<p>In the application model, one or more applications link the Berkeley DB library
+directly into their address spaces. There may be many threads of control
+in this model, as Berkeley DB supports locking for both multiple processes and
+for multiple threads within a process. This model provides significantly
+faster access to the database functionality, but implies trust among all
+threads of control sharing the database environment as they will have the
+ability to read, write and potentially corrupt each other's data.
+<p>In the client-server model, developers write a database server application
+that accepts requests via some form of IPC and issues calls to the Berkeley DB
+interfaces based on those requests. In this model, the database server
+is the only application linking the Berkeley DB library into its address space.
+The client-server model trades performance for protection, as it does not
+require that the applications share a protection domain with the server,
+but IPC/RPC is slower than a function call. In addition, this model
+greatly simplifies the creation of network client-server applications.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/arch/bigpic.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/apis.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/arch/script.html b/bdb/docs/ref/arch/script.html
new file mode 100644
index 00000000000..411cff4600c
--- /dev/null
+++ b/bdb/docs/ref/arch/script.html
@@ -0,0 +1,29 @@
+<!--$Id: script.so,v 10.12 2000/03/18 21:43:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Scripting languages</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td width="1%"><a href="../../ref/arch/apis.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/utilities.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Scripting languages</h1>
+<h3>Perl</h3>
+<p>Two Perl APIs are distributed with the Berkeley DB release. The Perl interface
+to Berkeley DB version 1.85 is called DB_File. The Perl interface to Berkeley DB
+version 2 is called BerkeleyDB. See <a href="../../ref/perl/intro.html">Using Berkeley DB with Perl</a> for more information.
+<h3>Tcl</h3>
+<p>A Tcl API is distributed with the Berkeley DB release. See
+<a href="../../ref/tcl/intro.html">Using Berkeley DB with Tcl</a> for more
+information.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/arch/apis.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/utilities.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/arch/smallpic.gif b/bdb/docs/ref/arch/smallpic.gif
new file mode 100644
index 00000000000..5eb7ae8da58
--- /dev/null
+++ b/bdb/docs/ref/arch/smallpic.gif
Binary files differ
diff --git a/bdb/docs/ref/arch/utilities.html b/bdb/docs/ref/arch/utilities.html
new file mode 100644
index 00000000000..72bfe52b21c
--- /dev/null
+++ b/bdb/docs/ref/arch/utilities.html
@@ -0,0 +1,62 @@
+<!--$Id: utilities.so,v 10.23 2000/05/23 20:57:50 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Supporting utilities</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td width="1%"><a href="../../ref/arch/script.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Supporting utilities</h1>
+<p>There are several stand-alone utilities that provide supporting
+functionality for the Berkeley DB environment:
+<p><dl compact>
+<p><dt><a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a><dd>The <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> utility is the Berkeley DB RPC server, providing
+standard server functionality for client applications.
+<p><dt><a href="../../utility/db_archive.html">db_archive</a><dd>The <a href="../../utility/db_archive.html">db_archive</a> utility supports database backup, archival and log
+file administration. It facilitates log reclamation and the creation of
+database snapshots. Generally, some form of log archival must be done if
+a database environment has been configured for logging or transactions.
+<p><dt><a href="../../utility/db_checkpoint.html">db_checkpoint</a><dd>The <a href="../../utility/db_checkpoint.html">db_checkpoint</a> utility runs as a daemon process, monitoring
+the database log and periodically issuing checkpoints. It facilitates
+log reclamation and the creation of database snapshots. Generally, some
+form of database checkpointing must be done if a database environment has
+been configured for transactions.
+<p><dt><a href="../../utility/db_deadlock.html">db_deadlock</a><dd>The <a href="../../utility/db_deadlock.html">db_deadlock</a> utility runs as a daemon process, periodically
+traversing the database lock structures and aborting transactions when it
+detects a deadlock. Generally, some form of deadlock detection must be
+done if a database environment has been configured for locking.
+<p><dt><a href="../../utility/db_dump.html">db_dump</a><dd>The <a href="../../utility/db_dump.html">db_dump</a> utility writes a copy of the database to a flat-text
+file in a portable format.
+<p><dt><a href="../../utility/db_load.html">db_load</a><dd>The <a href="../../utility/db_load.html">db_load</a> utility reads the flat-text file produced by
+<a href="../../utility/db_dump.html">db_dump</a> and loads it into a database file.
+<p><dt><a href="../../utility/db_printlog.html">db_printlog</a><dd>The <a href="../../utility/db_printlog.html">db_printlog</a> utility displays the contents of Berkeley DB log files
+in a human-readable and parseable format.
+<p><dt><a href="../../utility/db_recover.html">db_recover</a><dd>The <a href="../../utility/db_recover.html">db_recover</a> utility runs after an unexpected Berkeley DB or system
+failure to restore the database to a consistent state. Generally, some
+form of database recovery must be done if databases are being modified.
+<p><dt><a href="../../utility/db_stat.html">db_stat</a> <dd>The <a href="../../utility/db_stat.html">db_stat</a> utility displays statistics for databases and database
+environments.
+<p><dt><a href="../../utility/db_upgrade.html">db_upgrade</a><dd>The <a href="../../utility/db_upgrade.html">db_upgrade</a> utility provides a command-line interface for
+upgrading underlying database formats.
+<p><dt><a href="../../utility/db_verify.html">db_verify</a><dd>The <a href="../../utility/db_verify.html">db_verify</a> utility provides a command-line interface for
+verifying the database format.
+</dl>
+<p>All of the functionality implemented for these utilities is also available
+as part of the standard Berkeley DB API. This means that threaded applications
+can easily create a thread that calls the same Berkeley DB functions as do the
+utilities. This often simplifies an application environment by removing
+the necessity for multiple processes to negotiate database and database
+environment creation and shutdown.
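+<p>As an illustration of how these utilities fit together, the following
+commands sketch a simple off-line dump, reload, recovery and statistics
+pass; the database home directory and file names are illustrative only:
+<p><blockquote><pre>prompt: db_dump -h /var/dbhome -f mydb.dump mydb.db
+prompt: db_load -h /var/dbhome -f mydb.dump mydb.new
+prompt: db_recover -h /var/dbhome -v
+prompt: db_stat -h /var/dbhome -d mydb.db</pre></blockquote>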
+<table><tr><td><br></td><td width="1%"><a href="../../ref/arch/script.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/aix.html b/bdb/docs/ref/build_unix/aix.html
new file mode 100644
index 00000000000..102e1a01fbe
--- /dev/null
+++ b/bdb/docs/ref/build_unix/aix.html
@@ -0,0 +1,60 @@
+<!--$Id: aix.so,v 11.11 2000/05/04 17:11:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: AIX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/freebsd.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>AIX</h1>
+<p><ol>
+<p><li><b>I can't compile and run multi-threaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on AIX. If you are compiling a threaded application,
+you must compile with the _THREAD_SAFE flag and load with specific
+libraries, e.g., "-lc_r". Specifying the compiler name with a
+trailing "_r" usually performs the right actions for the system.
+<p><blockquote><pre>xlc_r ...
+cc -D_THREAD_SAFE -lc_r ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+<hr size=1 noshade>
+<p><li><b>I can't run using the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> option to
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a>.</b>
+<p>AIX 4.1 only allows applications to map 10 system shared memory segments.
+In AIX 4.3, this limit has been raised to 256K segments, but only if you set
+the EXTSHM environment variable to ON, e.g., "export EXTSHM=ON".
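+<p>For example, in a Bourne-style shell the variable might be set in the
+environment of the process before it starts (the application name below is
+illustrative only):
+<p><blockquote><pre>prompt: export EXTSHM=ON
+prompt: ./my_application</pre></blockquote>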
+<hr size=1 noshade>
+<p><li><b>I can't create database files larger than 1GB on AIX.</b>
+<p>Berkeley DB does not include large-file support for AIX systems by default.
+Sleepycat Software has been told that the following changes will add
+large-file support on the AIX 4.2 and later releases, but we have not
+tested them ourselves.
+<p>Add the following lines to the <b>db_config.h</b> file in your build
+directory:
+<p><blockquote><pre>#ifdef HAVE_FILE_OFFSET_BITS
+#define _LARGE_FILES /* AIX specific. */
+#endif</pre></blockquote>
+<p>Change the source code for <b>os/os_open.c</b> to always specify the
+<b>O_LARGEFILE</b> flag to the <b>open</b>(2) system call.
+<p>Recompile Berkeley DB from scratch.
+<p>Note that the documentation for the IBM Visual Age compiler states that
+it does not support the 64-bit filesystem APIs necessary for creating
+large files, and that the ibmcxx product must be used instead. We have
+not heard whether the GNU gcc compiler supports the 64-bit APIs.
+<p>Finally, to create large files under AIX, the filesystem has to be
+configured to support large files and the system wide user hard-limit for
+file sizes has to be greater than 1GB.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/freebsd.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/conf.html b/bdb/docs/ref/build_unix/conf.html
new file mode 100644
index 00000000000..289e9559e3a
--- /dev/null
+++ b/bdb/docs/ref/build_unix/conf.html
@@ -0,0 +1,143 @@
+<!--$Id: conf.so,v 10.33 2000/12/04 18:05:41 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring Berkeley DB</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/flags.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring Berkeley DB</h1>
+<p>There are several options that you can specify when configuring Berkeley DB.
+While only the Berkeley DB specific ones are described here, most of the
+standard GNU autoconf options are available and supported. To see a
+complete list of the options, specify the --help flag to the configure
+program.
+<p>The Berkeley DB specific options are as follows:
+<p><dl compact>
+ <a name="4"><!--meow--></a>
+<p><dt><a name="--disable-bigfile">--disable-bigfile</a><dd>Some systems, notably versions of HP/UX and Solaris, require special
+compile-time options in order to create files larger than 2^32 bytes.
+These options are automatically enabled when Berkeley DB is compiled. For this
+reason, binaries built on current versions of these systems may not run
+on earlier versions of the system, as the library and system calls
+necessary for large files are not available. To disable building with
+these compile-time options, enter --disable-bigfile as an argument to
+configure.
+ <a name="5"><!--meow--></a>
+<p><dt><a name="--enable-compat185">--enable-compat185</a><dd>To compile or load Berkeley DB 1.85 applications against this release of the
+Berkeley DB library, enter --enable-compat185 as an argument to configure.
+This will include Berkeley DB 1.85 API compatibility code in the library.
+ <a name="6"><!--meow--></a>
+<p><dt><a name="--enable-cxx">--enable-cxx</a><dd>To build the Berkeley DB C++ API, enter --enable-cxx as an argument to
+configure.
+<p><dt><a name="--enable-debug">--enable-debug</a><dd>To build Berkeley DB with <b>-g</b> as a compiler flag and with <b>DEBUG</b>
+#defined during compilation, enter --enable-debug as an argument to
+configure. This will create a Berkeley DB library with debugging symbols, as
+well as load various routines that can be called from a debugger to
+display pages, cursor queues and so forth. This option should not be
+specified when configuring to build production binaries, although there
+shouldn't be any significant performance degradation.
+<p><dt><a name="--enable-debug_rop">--enable-debug_rop</a><dd>To build Berkeley DB to output log records for read operations, enter
+--enable-debug_rop as an argument to configure. This option should not
+be specified when configuring to build production binaries, as you will
+lose a significant amount of performance.
+<p><dt><a name="--enable-debug_wop">--enable-debug_wop</a><dd>To build Berkeley DB to output log records for write operations, enter
+--enable-debug_wop as an argument to configure. This option should not
+be specified when configuring to build production binaries, as you will
+lose a significant amount of performance.
+<p><dt><a name="--enable-diagnostic">--enable-diagnostic</a><dd>To build Berkeley DB with debugging run-time sanity checks, enter
+--enable-diagnostic as an argument to configure. This will cause a
+number of special checks to be performed when Berkeley DB is running. This
+option should not be specified when configuring to build production
+binaries, as you will lose a significant amount of performance.
+ <a name="7"><!--meow--></a>
+<p><dt><a name="--enable-dump185">--enable-dump185</a><dd>To convert Berkeley DB 1.85 (or earlier) databases to this release of Berkeley DB,
+enter --enable-dump185 as an argument to configure. This will build the
+<a href="../../utility/db_dump.html">db_dump185</a> utility which can dump Berkeley DB 1.85 and 1.86 databases
+in a format readable by the Berkeley DB <a href="../../utility/db_load.html">db_load</a> utility.
+<p>The system libraries with which you are loading the <a href="../../utility/db_dump.html">db_dump185</a>
+utility must already contain the Berkeley DB 1.85 library routines for this to
+work, as the Berkeley DB distribution does not include them. If you are using
+a non-standard library for the Berkeley DB 1.85 library routines, you will have
+to change the Makefile that the configuration step creates to load the
+<a href="../../utility/db_dump.html">db_dump185</a> utility with that library.
+ <a name="8"><!--meow--></a>
+ <a name="9"><!--meow--></a>
+<p><dt><a name="--enable-dynamic">--enable-dynamic</a><dd>To build a dynamic shared library version of Berkeley DB, instead of the default
+static library, specify --enable-dynamic. Dynamic libraries are built
+using <a href="http://www.gnu.org/software/libtool/libtool.html">the
+GNU Project's Libtool</a> distribution, which supports shared library builds
+on many, although not all, systems.
+<p>Berkeley DB can be configured to build either a static or a dynamic library,
+but not both at once. You should not attempt to build both library
+types in the same directory, as they have incompatible object file
+formats. To build both static and dynamic libraries, create two
+separate build directories, and configure and build them separately.
+ <a name="10"><!--meow--></a>
+<p><dt><a name="--enable-java">--enable-java</a><dd>To build the Berkeley DB Java API, enter --enable-java as an argument to
+configure. To build Java, you must also configure the option
+--enable-dynamic. Before configuring, you must set your PATH environment
+variable to include javac. Note, it is not sufficient to include a
+symbolic link to javac in your PATH, because the configuration process
+uses the location of javac to determine the location of the Java include
+files (e.g., jni.h). On some systems, additional include directories may
+be needed to process jni.h; see <a href="flags.html">Changing compile or load
+options</a> for more information.
+<p><dt><a name="--enable-posixmutexes">--enable-posixmutexes</a><dd>To force Berkeley DB to use the POSIX pthread mutex interfaces for underlying
+mutex support, enter --enable-posixmutexes as an argument to configure.
+The Berkeley DB library requires that the POSIX pthread implementation support
+mutexes shared between multiple processes, as described for the
+pthread_condattr_setpshared and pthread_mutexattr_setpshared interfaces.
+In addition, this configuration option requires that Berkeley DB be linked with
+the -lpthread library. On systems where POSIX mutexes are the preferred
+mutex support (e.g., HP-UX), they will be selected automatically.
+ <a name="11"><!--meow--></a>
+<p><dt><a name="--enable-rpc">--enable-rpc</a><dd>To build the Berkeley DB RPC client code and server utility, enter --enable-rpc
+as an argument to configure. The --enable-rpc option requires that RPC
+libraries already be installed on your system.
+<p><dt><a name="--enable-shared">--enable-shared</a><dd>The --enable-shared configure argument is an alias for --enable-dynamic.
+ <a name="12"><!--meow--></a>
+<p><dt><a name="--enable-tcl">--enable-tcl</a><dd>To build the Berkeley DB Tcl API, enter --enable-tcl as an argument to
+configure. This configuration option expects to find Tcl's tclConfig.sh
+file in the <b>/usr/local/lib</b> directory. See the --with-tcl
+option for instructions on specifying a non-standard location for the
+Tcl installation. See <a href="../../ref/tcl/intro.html">Loading Berkeley DB
+with Tcl</a> for information on sites from which you can download Tcl and
+which Tcl versions are compatible with Berkeley DB. To configure the Berkeley DB
+Tcl API, you must also specify the --enable-dynamic option.
+ <a name="13"><!--meow--></a>
+<p><dt><a name="--enable-test">--enable-test</a><dd>To build the Berkeley DB test suite, enter --enable-test as an argument to
+configure. To run the Berkeley DB test suite, you must also specify the
+--enable-dynamic and --enable-tcl options.
+<p><dt><a name="--enable-uimutexes">--enable-uimutexes</a><dd>To force Berkeley DB to use the UNIX International (UI) mutex interfaces for
+underlying mutex support, enter --enable-uimutexes as an argument to
+configure. This configuration option requires that Berkeley DB be linked with
+the -lthread library. On systems where UI mutexes are the preferred mutex
+support (e.g., SCO's UnixWare 2), they will be selected automatically.
+<p><dt><a name="--enable-umrw">--enable-umrw</a><dd>Rational Software's Purify product and other run-time tools complain
+about uninitialized reads/writes of structure fields whose only purpose
+is padding, as well as when heap memory that was never initialized is
+written to disk. Specify the --enable-umrw option during configuration
+to mask these errors. This option should not be specified when
+configuring to build production binaries, as you will lose a significant
+amount of performance.
+<p><dt><a name="--with-tcl=DIR">--with-tcl=DIR</a><dd>To build the Berkeley DB Tcl API, enter --with-tcl=DIR, replacing DIR with
+the directory in which the Tcl tclConfig.sh file may be found. See
+<a href="../../ref/tcl/intro.html">Loading Berkeley DB with Tcl</a> for information
+on sites from which you can download Tcl and which Tcl versions are
+compatible with Berkeley DB. To configure the Berkeley DB Tcl API, you must also
+specify the --enable-dynamic option.
+</dl>
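+<p>As an example of combining several of these options, the following sketch
+configures a dynamic build with the C++ and Tcl APIs and the test suite
+enabled; the Tcl installation directory shown is illustrative only:
+<p><blockquote><pre>prompt: cd build_unix
+prompt: ../dist/configure --enable-dynamic --enable-cxx \
+    --enable-tcl --with-tcl=/usr/local/lib --enable-test
+prompt: make</pre></blockquote>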
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/flags.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/flags.html b/bdb/docs/ref/build_unix/flags.html
new file mode 100644
index 00000000000..5b70b3d8d64
--- /dev/null
+++ b/bdb/docs/ref/build_unix/flags.html
@@ -0,0 +1,60 @@
+<!--$Id: flags.so,v 10.6 2000/12/01 00:19:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Changing compile or load options</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/conf.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/install.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Changing compile or load options</h1>
+<p>You can specify compiler and/or compile and load time flags by using
+environment variables during Berkeley DB configuration. For example, if you
+want to use a specific compiler, specify the CC environment variable
+before running configure:
+<p><blockquote><pre>prompt: env CC=gcc ../dist/configure</pre></blockquote>
+<p>Using anything other than the native compiler will almost certainly mean
+that you'll want to check the flags specified to the compiler and
+loader, too.
+<p>To specify debugging and optimization options for the C compiler,
+use the CFLAGS environment variable:
+<p><blockquote><pre>prompt: env CFLAGS=-O2 ../dist/configure</pre></blockquote>
+<p>To specify header file search directories and other miscellaneous options
+for the C preprocessor and compiler, use the CPPFLAGS environment variable:
+<p><blockquote><pre>prompt: env CPPFLAGS=-I/usr/contrib/include ../dist/configure</pre></blockquote>
+<p>To specify debugging and optimization options for the C++ compiler,
+use the CXXFLAGS environment variable:
+<p><blockquote><pre>prompt: env CXXFLAGS=-Woverloaded-virtual ../dist/configure</pre></blockquote>
+<p>To specify miscellaneous options or additional library directories for
+the linker, use the LDFLAGS environment variable:
+<p><blockquote><pre>prompt: env LDFLAGS="-N32 -L/usr/local/lib" ../dist/configure</pre></blockquote>
+<p>If you want to specify additional libraries, set the LIBS environment
+variable before running configure. For example:
+<p><blockquote><pre>prompt: env LIBS="-lposix -lsocket" ../dist/configure</pre></blockquote>
+<p>would specify two additional libraries to load, "posix" and "socket".
+<p>Make sure that you prepend -L to any library directory names and that you
+prepend -I to any include file directory names! Also, if the arguments
+you specify contain blank or tab characters, be sure to quote them as
+shown above, i.e., with single or double quotes around the values you're
+specifying for LIBS.
+<p>The env command is available on most systems, and simply sets one or more
+environment variables before running a command. If the env command is
+not available to you, you can set the environment variables in your shell
+before running configure. For example, in sh or ksh, you could do:
+<p><blockquote><pre>prompt: LIBS="-lposix -lsocket" ../dist/configure</pre></blockquote>
+<p>and in csh or tcsh, you could do:
+<p><blockquote><pre>prompt: setenv LIBS "-lposix -lsocket"
+prompt: ../dist/configure</pre></blockquote>
+<p>See your command shell's manual page for further information.
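+<p>These environment variables may also be combined in a single configuration
+command. For example, the following sketch selects gcc and sets compiler and
+preprocessor flags at the same time (the flag values are illustrative only):
+<p><blockquote><pre>prompt: env CC=gcc CFLAGS="-O2 -g" CPPFLAGS=-I/usr/contrib/include ../dist/configure</pre></blockquote>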
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/conf.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/install.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/freebsd.html b/bdb/docs/ref/build_unix/freebsd.html
new file mode 100644
index 00000000000..3d3ff81161c
--- /dev/null
+++ b/bdb/docs/ref/build_unix/freebsd.html
@@ -0,0 +1,57 @@
+<!--$Id: freebsd.so,v 11.12 2000/03/18 21:43:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: FreeBSD</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/aix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/hpux.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>FreeBSD</h1>
+<p><ol>
+<p><li><b>I can't compile and run multi-threaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on FreeBSD. If you are compiling a threaded application,
+you must compile with the _THREAD_SAFE and -pthread flags:
+<p><blockquote><pre>cc -D_THREAD_SAFE -pthread ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+<hr size=1 noshade>
+<p><li><b>I get occasional failures when running RPC-based programs under FreeBSD clients.</b>
+<p>There is a known bug in the XDR implementation in the FreeBSD C library,
+from Version 2.2 up to version 4.0-RELEASE, that causes certain sized
+messages to fail and return a zero-filled reply to the client. A bug
+report (#16028) has been filed with FreeBSD. The following patch is the
+FreeBSD fix:
+<p><blockquote><pre>*** /usr/src/lib/libc/xdr/xdr_rec.c.orig Mon Jan 10 10:20:42 2000
+--- /usr/src/lib/libc/xdr/xdr_rec.c Wed Jan 19 10:53:45 2000
+***************
+*** 558,564 ****
+ * but we don't have any way to be certain that they aren't
+ * what the client actually intended to send us.
+ */
+! if ((header & (~LAST_FRAG)) == 0)
+ return(FALSE);
+ rstrm-&gt;fbtbc = header & (~LAST_FRAG);
+ return (TRUE);
+--- 558,564 ----
+ * but we don't have any way to be certain that they aren't
+ * what the client actually intended to send us.
+ */
+! if (header == 0)
+ return(FALSE);
+ rstrm-&gt;fbtbc = header & (~LAST_FRAG);
+ return (TRUE);
+</pre></blockquote>
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/aix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/hpux.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/hpux.html b/bdb/docs/ref/build_unix/hpux.html
new file mode 100644
index 00000000000..3fc50d73cc9
--- /dev/null
+++ b/bdb/docs/ref/build_unix/hpux.html
@@ -0,0 +1,89 @@
+<!--$Id: hpux.so,v 11.11 2000/12/14 17:04:02 krinsky Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: HP-UX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/freebsd.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/irix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>HP-UX</h1>
+<p><ol>
+<p><li><b>I can't specify the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>.</b>
+<p>The <b>shmget</b>(2) interfaces are not always used on HP-UX, even
+though they exist, as anonymous memory allocated using <b>shmget</b>(2)
+cannot be used to store the standard HP-UX msemaphore semaphores. For
+this reason, it may not be possible to specify the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a>
+flag on some versions of HP-UX. (We have only seen this problem on HP-UX
+10.XX, so the simplest workaround may be to upgrade your HP-UX release.)
+<hr size=1 noshade>
+<p><li><b>I can't specify both the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> and <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>
+flags to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>.</b>
+<p>It is not possible to store the standard HP-UX msemaphore semaphores in
+memory returned by <b>malloc</b>(3) in some versions of HP-UX. For
+this reason, it may not be possible to specify both the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a>
+and <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flags on some versions of HP-UX. (We have only seen
+this problem on HP-UX 10.XX, so the simplest workaround may be to upgrade
+your HP-UX release.)
+<hr size=1 noshade>
+<p><li><b>During configuration I see a message that large file support has
+been turned off.</b>
+<p>Some HP-UX system include files redefine "open" when big-file support (the
+HAVE_FILE_OFFSET_BITS and _FILE_OFFSET_BITS #defines) is enabled. This
+causes problems when compiling for C++, where "open" is a legal
+identifier, used in the Berkeley DB C++ API. For this reason, we automatically
+turn off big-file support when Berkeley DB is configured with a C++ API. This
+should not be a problem for applications unless there is a need to create
+databases larger than 2GB.
+<hr size=1 noshade>
+<p><li><b>I can't compile and run multi-threaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on HP-UX. If you are compiling a threaded application, you
+must compile with the _REENTRANT flag:
+<p><blockquote><pre>cc -D_REENTRANT ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+<hr size=1 noshade>
+<p><li><b>An ENOMEM error is returned from <a href="../../api_c/env_open.html">DBENV-&gt;open</a> or
+<a href="../../api_c/env_remove.html">DBENV-&gt;remove</a>.</b>
+<p>Due to the constraints of the PA-RISC memory architecture, HP-UX does not
+allow a process to map a file into its address space multiple times.
+For this reason, each Berkeley DB environment may be opened only once by a
+process on HP-UX, i.e., calls to <a href="../../api_c/env_open.html">DBENV-&gt;open</a> will fail if the
+specified Berkeley DB environment has been opened and not subsequently closed.
+<hr size=1 noshade>
+<p><li><b>When compiling with gcc, I see the following error:
+<p><blockquote><pre>#error "Large Files (ILP32) not supported in strict ANSI mode."</pre></blockquote></b>
+<p>We believe this is an error in the HP-UX include files, but we don't
+really understand it. The only workaround we have found is to add
+-D__STDC_EXT__ to the C preprocessor defines as part of compilation.
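+<p>One way to do this is with the CPPFLAGS environment variable described in
+<a href="flags.html">Changing compile or load options</a>, for example (a
+sketch only):
+<p><blockquote><pre>prompt: env CPPFLAGS=-D__STDC_EXT__ ../dist/configure
+prompt: make</pre></blockquote>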
+<hr size=1 noshade>
+<p><li><b>When using the Tcl or Perl APIs (including running the test suite) I
+see the error "Can't shl_load() a library containing Thread Local Storage".</b>
+<p>This problem happens when HP-UX has been configured to use pthread mutex
+locking and an attempt is made to call Berkeley DB using the Tcl or Perl APIs. We
+have never found any way to fix this problem as part of the Berkeley DB build
+process. To work around the problem, rebuild tclsh or perl and modify its build
+process to explicitly link it against the HP-UX pthread library (currently
+/usr/lib/libpthread.a).
+<hr size=1 noshade>
+<p><li><b>When running an executable that has been dynamically linked
+against the Berkeley DB library, I see the error "Can't find path for shared library"
+even though I've correctly set the SHLIB_PATH environment variable.</b>
+<p>By default, some versions of HP-UX ignore the dynamic library search path
+specified by the SHLIB_PATH environment variable. To work around this, specify
+the "+s" flag to ld when linking, or run
+<p><blockquote><pre>chatr +s enable -l /full/path/to/libdb-3.2.sl ...</pre></blockquote>
+<p>on the executable that is not working.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/freebsd.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/irix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/install.html b/bdb/docs/ref/build_unix/install.html
new file mode 100644
index 00000000000..7beb6f705f3
--- /dev/null
+++ b/bdb/docs/ref/build_unix/install.html
@@ -0,0 +1,60 @@
+<!--$Id: install.so,v 10.12 2000/12/01 00:19:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Installing Berkeley DB</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/flags.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/shlib.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Installing Berkeley DB</h1>
+<p>Berkeley DB installs the following files into the following locations, with the
+following default values:
+<p><table border=1 align=center>
+<tr><th>Configuration Variables</th><th>Default value</th></tr>
+<tr><td>--prefix</td><td>/usr/local/BerkeleyDB.<b>Major</b>.<b>Minor</b></td></tr>
+<tr><td>--exec_prefix</td><td>$(prefix)</td></tr>
+<tr><td>--bindir</td><td>$(exec_prefix)/bin</td></tr>
+<tr><td>--includedir</td><td>$(prefix)/include</td></tr>
+<tr><td>--libdir</td><td>$(exec_prefix)/lib</td></tr>
+<tr><td>docdir</td><td>$(prefix)/docs</td></tr>
+<tr><th>Files</th><th>Default location</th></tr>
+<tr><td>include files</td><td>$(includedir)</td></tr>
+<tr><td>libraries</td><td>$(libdir)</td></tr>
+<tr><td>utilities</td><td>$(bindir)</td></tr>
+<tr><td>documentation</td><td>$(docdir)</td></tr>
+</table>
+<p>With one exception, this follows the GNU Autoconf and GNU Coding
+Standards installation guidelines; please see that documentation for
+more information and rationale.
+<p>The single exception is the Berkeley DB documentation. The Berkeley DB
+documentation is provided in HTML format, not in UNIX-style man or GNU
+info format. For this reason, Berkeley DB configuration does not support
+<b>--infodir</b> or <b>--mandir</b>. To change the default
+installation location for the Berkeley DB documentation, modify the Makefile
+variable, <b>docdir</b>.
+<p>To move the entire installation tree to somewhere besides
+<b>/usr/local</b>, change the value of <b>prefix</b>.
+<p>To move the binaries and libraries to a different location, change the
+value of <b>exec_prefix</b>. The values of <b>includedir</b> and
+<b>libdir</b> may be similarly changed.
+<p>Any of these values except for <b>docdir</b> may be set as part
+of configuration:
+<p><blockquote><pre>prompt: ../dist/configure --bindir=/usr/local/bin</pre></blockquote>
+<p>Any of these values, including <b>docdir</b>, may be changed when doing
+the install itself:
+<p><blockquote><pre>prompt: make prefix=/usr/contrib/bdb install</pre></blockquote>
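+<p>For example, to relocate only the documentation while leaving the other
+locations at their defaults, override <b>docdir</b> at install time (the
+path shown is illustrative only):
+<p><blockquote><pre>prompt: make docdir=/usr/local/share/doc/BerkeleyDB install</pre></blockquote>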
+<p>The Berkeley DB installation process will attempt to create any directories that
+do not already exist on the system.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/flags.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/shlib.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/intro.html b/bdb/docs/ref/build_unix/intro.html
new file mode 100644
index 00000000000..b2c0d613bfd
--- /dev/null
+++ b/bdb/docs/ref/build_unix/intro.html
@@ -0,0 +1,60 @@
+<!--$Id: intro.so,v 10.18 2000/12/04 18:05:41 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building for UNIX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/debug/common.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/conf.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building for UNIX</h1>
+<p>The Berkeley DB distribution builds up to four separate libraries: the base C
+API Berkeley DB library and the optional C++, Java and Tcl API libraries. For
+portability reasons, each library is standalone and contains the full Berkeley DB
+support necessary to build applications; that is, the C++ API Berkeley DB
+library does not require any other Berkeley DB libraries to build and run C++
+applications.
+<p>The Berkeley DB distribution uses the Free Software Foundation's
+<a href="http://sourceware.cygnus.com/autoconf">autoconf</a> and
+<a href="http://www.gnu.org/software/libtool/libtool.html">libtool</a>
+tools to build on UNIX platforms. In general, the standard configuration
+and installation options for these tools apply to the Berkeley DB distribution.
+<p>To perform the default UNIX build of Berkeley DB, first change to the
+<b>build_unix</b> directory, and then enter the following two commands:
+<p><blockquote><pre>../dist/configure
+make</pre></blockquote>
+<p>This will build the Berkeley DB library.
+<p>To install the Berkeley DB library, enter:
+<p><blockquote><pre>make install</pre></blockquote>
+<p>To rebuild Berkeley DB, enter:
+<p><blockquote><pre>make clean
+make</pre></blockquote>
+<p>If you change your mind about how Berkeley DB is to be configured, you must start
+from scratch by entering:
+<p><blockquote><pre>make realclean
+../dist/configure
+make</pre></blockquote>
+<p>To build multiple UNIX versions of Berkeley DB in the same source tree, create a
+new directory at the same level as the build_unix directory, and then
+configure and build in that directory:
+<p><blockquote><pre>mkdir build_bsdos3.0
+cd build_bsdos3.0
+../dist/configure
+make</pre></blockquote>
+<p>If you have trouble with any of these commands, please send email to the
+addresses found in the Sleepycat Software contact information. In that
+email, please provide a complete copy of the commands that you entered
+and any output, along with a copy of any <b>config.log</b> or
+<b>config.cache</b> files created during configuration.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/debug/common.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/conf.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/irix.html b/bdb/docs/ref/build_unix/irix.html
new file mode 100644
index 00000000000..af31b6e6811
--- /dev/null
+++ b/bdb/docs/ref/build_unix/irix.html
@@ -0,0 +1,30 @@
+<!--$Id: irix.so,v 11.4 2000/03/18 21:43:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: IRIX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/hpux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/linux.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>IRIX</h1>
+<p><ol>
+<p><li><b>I can't compile and run multi-threaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on IRIX. If you are compiling a threaded application, you
+must compile with the _SGI_MP_SOURCE flag:
+<p><blockquote><pre>cc -D_SGI_MP_SOURCE ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/hpux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/linux.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/linux.html b/bdb/docs/ref/build_unix/linux.html
new file mode 100644
index 00000000000..b6e2b93fb14
--- /dev/null
+++ b/bdb/docs/ref/build_unix/linux.html
@@ -0,0 +1,30 @@
+<!--$Id: linux.so,v 11.4 2000/03/18 21:43:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Linux</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/irix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/osf1.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Linux</h1>
+<p><ol>
+<p><li><b>I can't compile and run multi-threaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on Linux. If you are compiling a threaded application, you
+must compile with the _REENTRANT flag:
+<p><blockquote><pre>cc -D_REENTRANT ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/irix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/osf1.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/notes.html b/bdb/docs/ref/build_unix/notes.html
new file mode 100644
index 00000000000..dcb975e3c9e
--- /dev/null
+++ b/bdb/docs/ref/build_unix/notes.html
@@ -0,0 +1,138 @@
+<!--$Id: notes.so,v 10.42 2001/01/09 18:49:53 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Architecture independent FAQs</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/test.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/aix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Architecture independent FAQs</h1>
+<p><ol>
+<p><li><b>When compiling with gcc, I get unreferenced symbols, e.g.,:
+<p><blockquote><pre>symbol __muldi3: referenced symbol not found
+symbol __cmpdi2: referenced symbol not found</pre></blockquote></b>
+<p>On systems where they're available (e.g., HP-UX, Solaris), Berkeley DB uses
+64-bit integral types. As far as we can tell, some versions of gcc
+don't support these types. The simplest workaround is to reconfigure
+Berkeley DB using the --disable-bigfile configuration option, and then rebuild.
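+<p>A sketch of that reconfiguration and rebuild, run from the build directory
+(<b>make realclean</b> discards the results of the previous configuration):
+<p><blockquote><pre>prompt: make realclean
+prompt: ../dist/configure --disable-bigfile
+prompt: make</pre></blockquote>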
+<hr size=1 noshade>
+<p><li><b>My C++ program traps during a failure in a DB call on my
+gcc-based system.</b>
+<p>We believe there are some severe bugs in the implementation of exceptions
+for some gcc compilers. Exceptions require some interaction between
+compiler, assembler, runtime libraries, and we're not sure exactly what
+is at fault, but one failing combination is gcc 2.7.2.3 running on SuSE
+Linux 6.0. The problem on this system can be seen with a rather simple
+test case of an exception thrown from a shared library and caught in the
+main program.
+<p>A variation of this problem seems to occur on AIX, although we believe it
+does not necessarily involve shared libraries on that platform.
+<p>If you see a trap that occurs when an exception might be thrown by the DB
+runtime, we suggest that you use static libraries instead of dynamic
+(shared) libraries. See the documentation for configuration. If this
+doesn't work, and you have a choice of compilers, try using a more recent
+gcc or a non-gcc based compiler to build Berkeley DB.
+<p>Finally, you can disable the use of exceptions in the C++ runtime for
+Berkeley DB by using the <a href="../../api_c/db_create.html#DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a> flag with
+<a href="../../api_c/env_create.html">db_env_create</a> or <a href="../../api_c/db_create.html">db_create</a>. When this flag is on, all
+C++ methods fail by returning an error code rather than throwing an
+exception.
+<hr size=1 noshade>
+<p><li><b>I get unexpected results and database corruption when running
+threaded programs.</b>
+<p><b>I get error messages that mutex (e.g., pthread_mutex_XXX or
+mutex_XXX) functions are undefined when linking applications with Berkeley DB.</b>
+<p>On some architectures, the Berkeley DB library uses the ISO POSIX standard
+pthreads and UNIX International (UI) threads interfaces for underlying
+mutex support, e.g., Solaris and HP-UX. To resolve the undefined references,
+you can specify particular compilers or compiler flags, or link with the
+appropriate thread library when loading your application:
+<p><blockquote><pre>cc ... -lpthread ...
+cc ... -lthread ...
+xlc_r ...
+cc ... -mt ...</pre></blockquote>
+<p>See the appropriate architecture-specific Reference Guide pages for more
+information.
+<p>On systems where more than one type of mutex is available, it may be
+necessary for applications to use the same threads package from which
+Berkeley DB draws its mutexes. For example, if Berkeley DB was built to use the
+POSIX pthreads mutex calls for mutex support, the application may need to
+be written to use the POSIX pthreads interfaces for its threading model.
+While this is only conjecture at this time and we know of no systems that
+actually have this requirement, it's not unlikely that some exist.
+<p>In a few cases, Berkeley DB can be configured to use specific underlying mutex
+interfaces. You can use the <a href="../../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a> and
+<a href="../../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a> configuration options to specify the POSIX and Unix
+International (UI) threads packages. This should not, however, be
+necessary in most cases.
+<p>In some cases, it is vitally important to make sure that you load
+the correct library. For example, on Solaris systems, there are POSIX
+pthread interfaces in the C library, and so applications can link Berkeley DB
+using only the C library and not see any undefined symbols. However, the C
+library POSIX pthread mutex support is insufficient for Berkeley DB and Berkeley DB
+cannot detect that fact. Similar errors can arise when applications
+(e.g., tclsh) use dlopen to dynamically load Berkeley DB as a library.
+<p>If you are seeing problems in this area after you've confirmed that you're
+linking with the correct libraries, there are two other things you can
+try. First, if your platform supports inter-library dependencies, we
+recommend that you change the Berkeley DB Makefile to specify the appropriate
+threads library when creating the Berkeley DB dynamic library, as an
+inter-library dependency. Second, if your application is using dlopen to
+dynamically load Berkeley DB, specify the appropriate thread library on the link
+line when you load the application itself.
+<hr size=1 noshade>
+<p><li><b>I get core dumps when running programs that fork children.</b>
+<p>Berkeley DB handles should not be shared across process forks; each forked
+child should acquire its own Berkeley DB handles.
+<hr size=1 noshade>
+<p><li><b>I get reports of uninitialized memory reads and writes when
+running software analysis tools (e.g., Rational Software Corp.'s Purify
+tool).</b>
+<p>For performance reasons, Berkeley DB does not write the unused portions of
+database pages or fill in unused structure fields. To turn off these
+errors when running software analysis tools, build with the
+--enable-umrw configuration option.
+<hr size=1 noshade>
+<p><li><b>Berkeley DB programs or the test suite fail unexpectedly.</b>
+<p>The Berkeley DB architecture does not support placing the shared memory regions
+on remote filesystems, e.g., the Network File System (NFS) or the Andrew
+File System (AFS). For this reason, the shared memory regions (normally
+located in the database home directory) must reside on a local filesystem.
+See <a href="../../ref/env/region.html">Shared Memory Regions</a> for more
+information.
+<p>With respect to running the test suite, always check to make sure that
+TESTDIR is not on a remotely mounted filesystem.
+<hr size=1 noshade>
+<p><li><b>The <a href="../../utility/db_dump.html">db_dump185</a> utility fails to build.</b>
+<p>The <a href="../../utility/db_dump.html">db_dump185</a> utility is the utility that supports conversion
+of Berkeley DB 1.85 and earlier databases to current database formats. If
+the errors look something like:
+<p><blockquote><pre>cc -o db_dump185 db_dump185.o
+ld:
+Unresolved:
+dbopen</pre></blockquote>
+<p>it means that the Berkeley DB 1.85 code was not found in the standard
+libraries. To build <a href="../../utility/db_dump.html">db_dump185</a>, the Berkeley DB version 1.85 code
+must have already been built and installed on the system. If the Berkeley DB
+1.85 header file is not found in a standard place, or the library is
+not part of the standard libraries used for loading, you will need to
+edit your Makefile, and change the lines:
+<p><blockquote><pre>DB185INC=
+DB185LIB=</pre></blockquote>
+<p>so that the system Berkeley DB 1.85 header file and library are found, e.g.,
+<p><blockquote><pre>DB185INC=/usr/local/include
+DB185LIB=-ldb185</pre></blockquote>
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/test.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/aix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/osf1.html b/bdb/docs/ref/build_unix/osf1.html
new file mode 100644
index 00000000000..42ac8e767ef
--- /dev/null
+++ b/bdb/docs/ref/build_unix/osf1.html
@@ -0,0 +1,30 @@
+<!--$Id: osf1.so,v 11.6 2000/10/30 20:46:06 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: OSF/1</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/linux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/qnx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>OSF/1</h1>
+<p><ol>
+<p><li><b>I can't compile and run multi-threaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on OSF/1. If you are compiling a threaded application, you
+must compile with the _REENTRANT flag:
+<p><blockquote><pre>cc -D_REENTRANT ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/linux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/qnx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/qnx.html b/bdb/docs/ref/build_unix/qnx.html
new file mode 100644
index 00000000000..29c90dc98cb
--- /dev/null
+++ b/bdb/docs/ref/build_unix/qnx.html
@@ -0,0 +1,58 @@
+<!--$Id: qnx.so,v 11.5 2000/11/29 15:03:24 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: QNX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/osf1.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/sco.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>QNX</h1>
+<p><ol>
+<p><li><b>To what versions of QNX has Berkeley DB been ported?</b>
+<p>Berkeley DB has been ported to the QNX Neutrino technology, which is commonly
+referred to as the QNX RTP (Real-Time Platform). Berkeley DB has not been
+ported to earlier versions of QNX, such as QNX 4.25.
+<p><li><b>What is the impact of QNX's use of <b>shm_open</b>(2) for
+shared memory regions?</b>
+<p>QNX requires the use of the POSIX <b>shm_open</b>(2) and
+<b>shm_unlink</b>(2) calls for shared memory regions that will later
+be mapped into memory using <b>mmap</b>(2). QNX's implementation
+of the shared memory functions requires that the name given begin
+with a slash and that no other slash appear in the name.
+<p>In order to comply with those requirements and allow relative pathnames
+to find the same environment, Berkeley DB uses only the last component of the
+home directory path and the name of the shared memory file, separated
+by a colon, as the name specified to the shared memory functions. For
+example, if an application specifies a home directory of
+<b>/home/db/DB_DIR</b>, Berkeley DB will use <b>/DB_DIR:__db.001</b> as
+the name for the shared memory area argument to <b>shm_open</b>(2).
+<p>The impact of this decision is that the last component of all
+environment home directory pathnames on QNX must be unique with respect
+to each other. Additionally, on QNX, Berkeley DB requires that environments
+specify a home directory so that a reasonable name can be generated for the
+shared memory area.
+<p><li><b>What are the implications of QNX's requirement to use
+<b>shm_open</b>(2) in order to use <b>mmap</b>(2)?</b>
+<p>QNX requires that files mapped with <b>mmap</b>(2) be opened using
+<b>shm_open</b>(2). In addition to the environment shared memory
+regions, there are other places where Berkeley DB tries to memory-map files
+if it can.
+<p>The memory pool subsystem normally attempts to use <b>mmap</b>(2)
+even when using private memory, as indicated by the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a>
+flag to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>. In the case of QNX, if an application is
+using private memory, Berkeley DB will not attempt to map the memory and will
+instead use the local cache.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/osf1.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/sco.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/sco.html b/bdb/docs/ref/build_unix/sco.html
new file mode 100644
index 00000000000..dda8e6d1d01
--- /dev/null
+++ b/bdb/docs/ref/build_unix/sco.html
@@ -0,0 +1,29 @@
+<!--$Id: sco.so,v 11.7 2000/10/30 20:46:06 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: SCO</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/qnx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/solaris.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>SCO</h1>
+<p><ol>
+<p><li><b>If I build with gcc, programs such as db_dump and db_stat core dump immediately
+when invoked.</b>
+<p>We suspect gcc or the runtime loader may have a bug, but we haven't
+tracked it down. If you want to use gcc, we suggest building static
+libraries.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/qnx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/solaris.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/shlib.html b/bdb/docs/ref/build_unix/shlib.html
new file mode 100644
index 00000000000..2819651cd1d
--- /dev/null
+++ b/bdb/docs/ref/build_unix/shlib.html
@@ -0,0 +1,94 @@
+<!--$Id: shlib.so,v 10.9 2000/03/18 21:43:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Dynamic shared libraries</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/install.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/test.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Dynamic shared libraries</h1>
+<p><b>Warning</b>: the following information is intended to be generic and
+is likely to be correct for most UNIX systems. Unfortunately, dynamic
+shared libraries are not standard between UNIX systems, so there may be
+information here that is not correct for your system. If you have
+problems, consult your compiler and linker manual pages or your system
+administrator.
+<p>The Berkeley DB dynamic shared libraries are created with the name
+libdb-<b>major</b>.<b>minor</b>.so, where <b>major</b> is the major
+version number and <b>minor</b> is the minor version number. Other
+shared libraries are created if Java and Tcl support are enabled,
+specifically libdb_java-<b>major</b>.<b>minor</b>.so and
+libdb_tcl-<b>major</b>.<b>minor</b>.so.
+<p>On most UNIX systems, when any shared library is created, the linker
+stamps it with a "SONAME". In the case of Berkeley DB, the SONAME is
+libdb-<b>major</b>.<b>minor</b>.so. It is important to realize that
+applications linked against a shared library remember the SONAMEs of the
+libraries they use and not the underlying names in the filesystem.
+<p>When the Berkeley DB shared library is installed, links are created in the
+install lib directory so that libdb-<b>major</b>.<b>minor</b>.so,
+libdb-<b>major</b>.so and libdb.so all reference the same library. This
+library will have an SONAME of libdb-<b>major</b>.<b>minor</b>.so.
+<p>Any previous versions of the Berkeley DB libraries that are present in the
+install directory (such as libdb-2.7.so or libdb-2.so) are left unchanged.
+(Removing or moving old shared libraries is one drastic way to identify
+applications that have been linked against those vintage releases.)
+<p>Once you have installed the Berkeley DB libraries, unless they are installed in
+a directory where the linker normally looks for shared libraries, you will
+need to specify the installation directory as part of compiling and
+linking against Berkeley DB. Consult your system manuals or system
+administrator for ways to specify a shared library directory when
+compiling and linking applications with the Berkeley DB libraries. Many systems
+support environment variables (e.g., LD_LIBRARY_PATH, LD_RUN_PATH), or
+system configuration files (e.g., /etc/ld.so.conf) for this purpose.
+<p><b>Warning</b>: some UNIX installations may already have an existing
+<b>/usr/lib/libdb.so</b>, and this library may be an incompatible
+version of Berkeley DB.
+<p>We recommend that applications link against libdb.so (e.g., using -ldb).
+Even though the linker uses the file named libdb.so, the executable file
+for the application remembers the library's SONAME
+(libdb-<b>major</b>.<b>minor</b>.so). This has the effect of marking
+the applications with the versions they need at link time. Because
+applications locate their needed SONAMEs when they are executed, all
+previously linked applications will continue to run using the library they
+were linked with, even when a new version of Berkeley DB is installed and the
+file <b>libdb.so</b> is replaced with a new version.
+<p>Applications that know they are using features specific to a particular
+Berkeley DB release can be linked to that release. For example, an application
+wanting to link to Berkeley DB major release "3" can link using -ldb-3, and
+applications that know about a particular minor release number can specify
+both major and minor release numbers, for example, -ldb-3.5.
+<p>If you want to link with Berkeley DB before performing library installation,
+the "make" command will have created a shared library object in the
+<b>.libs</b> subdirectory of the build directory, such as
+<b>build_unix/.libs/libdb-major.minor.so</b>. If you want to link a
+program against this library with, for example, a major number of "3" and
+a minor number of "5", you should be able to do something like:
+<p><blockquote><pre>cc -L BUILD_DIRECTORY/.libs -o testprog testprog.o -ldb-3.5
+env LD_LIBRARY_PATH="BUILD_DIRECTORY/.libs:$LD_LIBRARY_PATH" ./testprog</pre></blockquote>
+<p>where <b>BUILD_DIRECTORY</b> is the full directory path to the directory
+where you built Berkeley DB.
+<p>The libtool program (which is configured in the build_unix directory) can
+be used to set the shared library path and run a program. For example,
+<p><blockquote><pre>libtool gdb db_dump</pre></blockquote>
+<p>runs the gdb debugger on the db_dump utility after setting the appropriate
+paths. Libtool may not know what to do with arbitrary commands (it is
+hardwired to recognize "gdb" and some other commands). If it complains,
+specifying the mode argument will usually resolve the problem:
+<p><blockquote><pre>libtool --mode=execute my_debugger db_dump</pre></blockquote>
+<p>On most systems, using libtool in this way is exactly equivalent to
+setting the LD_LIBRARY_PATH environment variable and then executing the
+program. On other systems, libtool has the advantage of knowing whatever
+additional details are needed on systems that don't behave in this typical way.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/install.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/test.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/solaris.html b/bdb/docs/ref/build_unix/solaris.html
new file mode 100644
index 00000000000..8239537a825
--- /dev/null
+++ b/bdb/docs/ref/build_unix/solaris.html
@@ -0,0 +1,90 @@
+<!--$Id: solaris.so,v 11.14 2000/09/13 17:22:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Solaris</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/sco.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/sunos.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Solaris</h1>
+<p><ol>
+<p><li><b>I can't compile and run multi-threaded applications.</b>
+<p>Special compile-time flags and additional libraries are required when
+compiling threaded applications on Solaris. If you are compiling a
+threaded application, you must compile with the _REENTRANT flag and link
+with the libpthread.a or libthread.a libraries:
+<p><blockquote><pre>cc -mt ...
+cc -D_REENTRANT ... -lthread
+cc -D_REENTRANT ... -lpthread</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+<hr size=1 noshade>
+<p><li><b>I've installed gcc on my Solaris system, but configuration
+fails because the compiler doesn't work.</b>
+<p>On some versions of Solaris, there is a cc executable in the user's path,
+but all it does is display an error message and fail:
+<p><blockquote><pre>% which cc
+/usr/ucb/cc
+% cc
+/usr/ucb/cc: language optional software package not installed</pre></blockquote>
+<p>As Berkeley DB always uses the native compiler in preference to gcc, this is a
+fatal error. If the error message you're seeing is:
+<p><blockquote><pre>checking whether the C compiler (cc -O ) works... no
+configure: error: installation or configuration problem: C compiler cannot create executables.</pre></blockquote>
+<p>then this may be the problem you're seeing. The simplest workaround is
+to set your CC environment variable to the compiler you want to use (for example, gcc):
+<p><blockquote><pre>env CC=gcc ../dist/configure</pre></blockquote>
+<p>and reconfigure.
+<p>If you are using the --enable-cxx option, you may also want to specify
+a C++ compiler, e.g.:
+<p><blockquote><pre>env CC=gcc CCC=g++ ../dist/configure</pre></blockquote>
+<hr size=1 noshade>
+<p><li><b>I get the error
+"libc internal error: _rmutex_unlock: rmutex not held", followed by a core
+dump, when running threaded or Java programs.</b>
+<p>This is a known bug in Solaris 2.5 and it is fixed by Sun patch 103187-25.
+<hr size=1 noshade>
+<p><li><b>I get error reports of non-existent files, corrupted metadata
+pages and core dumps.</b>
+<p>Solaris 7 contains a bug in the threading libraries (-lpthread, -lthread)
+that causes the wrong version of the pwrite routine to be linked into
+the application if the thread library is linked in after the C
+library. The result is that the pwrite function is called rather than
+pwrite64. To work around the problem, use an explicit link order
+when creating your application.
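+<p>For example (an illustrative link line only; substitute the libraries
+your application actually needs), name the thread library explicitly
+before the C library:
+<p><blockquote><pre>cc -D_REENTRANT -o myapp myapp.o -ldb -lthread -lc</pre></blockquote>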
+<p>Sun Microsystems is tracking this problem with Bug IDs 4291109 and 4267207,
+and patch 106980-09 to Solaris 7 fixes the problem.
+<p><blockquote><pre>Bug Id: 4291109
+Duplicate of: 4267207
+Category: library
+Subcategory: libthread
+State: closed
+Synopsis: pwrite64 mapped to pwrite
+Description:
+When libthread is linked after libc, there is a table of functions in
+libthread that gets "wired into" libc via _libc_threads_interface().
+The table in libthread is wrong in both Solaris 7 and on28_35 for the
+TI_PWRITE64 row (see near the end).</pre></blockquote>
+<hr size=1 noshade>
+<p><li><b>During configuration I see a message that large file support has
+been turned off.</b>
+<p>The Solaris 8 system include files redefine "open" when big-file support (the
+HAVE_FILE_OFFSET_BITS and _FILE_OFFSET_BITS #defines) is enabled. This
+causes problems when compiling for C++, where "open" is a legal
+identifier, used in the Berkeley DB C++ API. For this reason, we automatically
+turn off big-file support when Berkeley DB is configured with a C++ API. This
+should not be a problem for applications unless there is a need to create
+databases larger than 2GB.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/sco.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/sunos.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/sunos.html b/bdb/docs/ref/build_unix/sunos.html
new file mode 100644
index 00000000000..cecccaefb94
--- /dev/null
+++ b/bdb/docs/ref/build_unix/sunos.html
@@ -0,0 +1,30 @@
+<!--$Id: sunos.so,v 11.4 2000/03/18 21:43:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: SunOS</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/solaris.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/ultrix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>SunOS</h1>
+<p><ol>
+<p><li><b>I can't specify the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>.</b>
+<p>The <b>shmget</b>(2) interfaces are not used on SunOS releases prior
+to 5.0, even though they apparently exist, as the distributed include
+files did not allow them to be compiled. For this reason, it will not be
+possible to specify the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag on those versions of
+SunOS.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/solaris.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/ultrix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/test.html b/bdb/docs/ref/build_unix/test.html
new file mode 100644
index 00000000000..9ae398980f6
--- /dev/null
+++ b/bdb/docs/ref/build_unix/test.html
@@ -0,0 +1,49 @@
+<!--$Id: test.so,v 10.19 2000/06/28 14:33:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Running the test suite under UNIX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/shlib.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Running the test suite under UNIX</h1>
+<p>The Berkeley DB test suite is built if you specify --enable-test as an
+argument when configuring Berkeley DB.
+<p>Before running the tests for the first time, you may need to edit the
+<b>include.tcl</b> file in your build directory. The Berkeley DB
+configuration assumes you intend to use the version of the tclsh utility
+included in the Tcl installation with which Berkeley DB was configured to run
+the test suite, and further assumes that the test suite will be run with
+the libraries pre-built in the Berkeley DB build directory. If either of these
+assumptions is incorrect, you will need to edit the <b>include.tcl</b>
+file and change the line that reads:
+<p><blockquote><pre>set tclsh_path ...</pre></blockquote>
+<p>to correctly specify the full path to the version of tclsh with which you
+are going to run the test suite. You may also need to change the line
+that reads:
+<p><blockquote><pre>set test_path ...</pre></blockquote>
+<p>to correctly specify the path from the directory where you are running
+the test suite to the location of the Berkeley DB Tcl API library you built.
+It may not be necessary that this be a full path if you have configured
+your system's dynamic shared library mechanisms to search the directory
+where you built or installed the Tcl library.
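+<p>For example (the pathnames shown are illustrative only), the edited
+lines might read:
+<p><blockquote><pre>set tclsh_path /usr/local/bin/tclsh8.3
+set test_path .libs</pre></blockquote>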
+<p>All Berkeley DB tests are run from within <b>tclsh</b>. After starting tclsh,
+you must source the file <b>test.tcl</b> in the test directory. For
+example, if you built in the <b>build_unix</b> directory of the
+distribution, this would be done using the command:
+<p><blockquote><pre>% source ../test/test.tcl</pre></blockquote>
+<p>Once you have executed that command and the "%" prompt has returned
+without errors, you are ready to run tests in the test suite.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/shlib.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_unix/ultrix.html b/bdb/docs/ref/build_unix/ultrix.html
new file mode 100644
index 00000000000..e71946c8825
--- /dev/null
+++ b/bdb/docs/ref/build_unix/ultrix.html
@@ -0,0 +1,27 @@
+<!--$Id: ultrix.so,v 11.4 2000/03/18 21:43:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Ultrix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/sunos.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Ultrix</h1>
+<p><ol>
+<p><li><b>Configuration complains that mmap(2) interfaces aren't being used.</b>
+<p>The <b>mmap</b>(2) interfaces are not used on Ultrix, even though
+they exist, as they are known not to work correctly.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/sunos.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_vxworks/faq.html b/bdb/docs/ref/build_vxworks/faq.html
new file mode 100644
index 00000000000..cea733d7fb2
--- /dev/null
+++ b/bdb/docs/ref/build_vxworks/faq.html
@@ -0,0 +1,85 @@
+<!--$Id: faq.so,v 1.12 2000/12/21 18:33:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: VxWorks FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for VxWorks systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_vxworks/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade/process.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>VxWorks FAQ</h1>
+<p><ol>
+<p><li><b>Can I run the test suite under VxWorks?</b>
+<p>The test suite requires the Berkeley DB Tcl library. In turn, this library
+requires Tcl 8.1 or greater. In order to run the test suite, you would
+need to port Tcl 8.1 or greater to VxWorks. The Tcl shell included in
+<i>windsh</i> is not adequate for two reasons. First, it is based on
+Tcl 8.0. Second, it does not include the necessary Tcl components for
+adding a Tcl extension.
+<p><li><b>Are all Berkeley DB features available for VxWorks?</b>
+<p>All Berkeley DB features are available for VxWorks with the exception of the
+<a href="../../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a> flag for <a href="../../api_c/db_open.html">DB-&gt;open</a>. The underlying mechanism
+needed for that flag is not available consistently across different file
+systems for VxWorks.
+<p><li><b>Are there any constraints using particular file system drivers?</b>
+<p>There are constraints on using the dosFs file systems with Berkeley DB. Namely,
+you must configure your dosFs file system to support long file names if
+you are using Berkeley DB logging in your application. The VxWorks dosFs
+1.0 file system, by default, uses the old MS-DOS 8.3 file naming
+constraints, restricting file names to 8 characters with a 3-character
+extension. If you have configured VxWorks dosFs 2.0, you should
+be compatible with the Windows FAT32 file system, which supports long
+file names.
+<p><li><b>Are there any dependencies on particular file system drivers?</b>
+<p>There is one dependency on specifics of file system drivers in the port
+of Berkeley DB to VxWorks. Berkeley DB synchronizes data using the FIOSYNC function
+to ioctl() (another option would have been to use the FIOFLUSH function
+instead). The FIOSYNC function was chosen because the NFS client driver,
+nfsDrv, supports only FIOSYNC and not FIOFLUSH. All local file
+systems, as of VxWorks 5.4, support FIOSYNC with the exception of
+rt11fsLib, which only supports FIOFLUSH. To use rt11fsLib, you will need
+to modify the os/os_fsync.c file to use the FIOFLUSH function; note that
+rt11fsLib cannot work with NFS clients.
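+<p>As an illustration only (the actual change belongs in the os/os_fsync.c
+source, whose surrounding code differs), the substitution amounts to
+issuing FIOFLUSH rather than FIOSYNC through ioctl():
+<p><blockquote><pre>#include &lt;ioLib.h&gt;
+
+int
+flush_fd(int fd)
+{
+	/* Originally: return (ioctl(fd, FIOSYNC, 0)); */
+	return (ioctl(fd, FIOFLUSH, 0));
+}</pre></blockquote>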
+<p><li><b>Are there any known file system problems?</b>
+<p>During the course of our internal testing we came across two problems
+with the dosFs 2.0 file system that warranted patches from Wind River Systems.
+You should ask Wind River Systems for the patches to these
+problems if you encounter them.
+<p>The first problem is that files will seem to disappear. You should
+look at <b>SPR 31480</b> in the Wind River Systems' Support pages for
+a more detailed description of this problem.
+<p>The second problem is a semaphore deadlock within the dosFs file system
+code. Looking at a stack trace via CrossWind, you will see two or more of
+your application's tasks waiting in semaphore code within dosFs. The patch
+for this problem is under <b>SPR 33221</b> at Wind River Systems.
+<p><li><b>Are there any file systems I cannot use?</b>
+<p>The Target Server File System (TSFS) uses the netDrv driver. This driver
+does not support any ioctl that allows flushing to the disk and therefore
+cannot be used with Berkeley DB.
+<p><li><b>Why aren't the utility programs part of the project?</b>
+<p>The utility programs, in their Unix-style form, are not ported to VxWorks.
+The reasoning is that the utility programs are essentially wrappers for the
+specific Berkeley DB interface they call. Their interface and generic model
+are not the appropriate paradigm for VxWorks. It is most likely that
+specific applications will want to spawn tasks that call the appropriate
+Berkeley DB function to perform the actions of some utility programs, using
+VxWorks native functions. For example, an application that spawns several
+tasks that all may operate on the same database would also want to spawn
+a task that calls <a href="../../api_c/lock_detect.html">lock_detect</a> for deadlock detection, but specific
+to the environment used for that application.
+<p><li><b>What VxWorks primitives are used for mutual exclusion in Berkeley DB?</b>
+<p>Mutexes inside of Berkeley DB use the basic binary semaphores in VxWorks. The
+mutexes are created using the FIFO queue type.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_vxworks/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade/process.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_vxworks/intro.html b/bdb/docs/ref/build_vxworks/intro.html
new file mode 100644
index 00000000000..593b8a1e64c
--- /dev/null
+++ b/bdb/docs/ref/build_vxworks/intro.html
@@ -0,0 +1,86 @@
+<!--$Id: intro.so,v 1.7 2000/08/10 17:54:49 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building for VxWorks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for VxWorks systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_win/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building for VxWorks</h1>
+<p>The build_vxworks directory in the Berkeley DB distribution contains a workspace
+and project files for Tornado 2.0.
+<p><table border=1 align=center>
+<tr><th>File</th><th>Description</th></tr>
+<tr> <td align=center>BerkeleyDB.wsp</td> <td align=center>Berkeley DB Workspace file</td> </tr>
+<tr> <td align=center>BerkeleyDB.wpj</td> <td align=center>Berkeley DB Project file</td> </tr>
+<tr> <td align=center>ex_*/*.wpj</td> <td align=center>Example programs project files</td> </tr>
+</table>
+<h3>Building With Tornado 2.0</h3>
+<p>Open the workspace <b>BerkeleyDB.wsp</b>. The list of projects
+in this workspace will be shown. These projects were created for
+the x86 BSP for VxWorks.
+<p>The remainder of this document assumes you already have a
+VxWorks target and a target server, both up and running.
+<p>First, you'll need to set the include directories.
+To do this, go to the <i>Builds</i> tab for the workspace.
+Open up <i>Berkeley DB Builds</i>. You will see several different
+builds, containing different configurations. All of the projects
+in the Berkeley DB workspace are created to be downloadable applications.
+<p><table border=1 align=center>
+<tr><th>Build</th><th>Description</th></tr>
+<tr> <td align=left>PENTIUM_RPCdebug</td> <td align=left>x86 BSP with RPC and debugging</td> </tr>
+<tr> <td align=left>PENTIUM_RPCnodebug</td> <td align=left>x86 BSP with RPC no debugging</td> </tr>
+<tr> <td align=left>PENTIUM_debug</td> <td align=left>x86 BSP no RPC with debugging</td> </tr>
+<tr> <td align=left>PENTIUM_nodebug</td> <td align=left>x86 BSP no RPC no debugging</td> </tr>
+<tr> <td align=left>SIMSPARCSOLARISgnu</td> <td align=left>VxSim BSP no RPC with debugging</td> </tr>
+</table>
+<p>You will have to add a new build specification if you are using a
+different BSP or wish to customize further. For instance, if you have
+the Power PC (PPC) BSP, you will need to add a new build for the PPC tool
+chain. To do so, select the "Builds" tab and then select the Berkeley DB
+project name and right click. Choose the <i>New Build...</i>
+selection and create the new build target. For your new build target,
+you will need to decide if you want it configured to support RPC and
+whether it should be built for debugging. See the properties of the
+Pentium builds for how to configure for each case. After you have added
+this build you still need to correctly configure the include directories
+as described below.
+<p>Select the build you are interested in and right click. Choose the
+<i>Properties...</i> selection. At this point, a tabbed dialogue
+should appear. In this new window, choose the <i>C/C++ compiler</i>
+tab. In the edit box, you need to add the full pathname of the
+<i>build_vxworks</i> subdirectory of Berkeley DB, followed by the full
+pathname of the <i>include</i> subdirectory of Berkeley DB. Then click
+OK.
+<p>If the architecture for this new build is big-endian (that is, it stores
+the most significant byte first), you will also need to edit the
+<i>db_config.h</i> file in the build directory and define <b>WORDS_BIGENDIAN</b>.
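+<p>That is, add a line such as the following to <i>db_config.h</i>:
+<p><blockquote><pre>#define WORDS_BIGENDIAN 1</pre></blockquote>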
+<p>Building and downloading the Berkeley DB downloadable application for the
+first time requires several steps:
+<p><ol>
+<p><li>Select the build you are interested in and right click.
+Choose the <i>Set ... as Active Build</i> selection.
+<p><li>Select the build you are interested in and right click.
+Choose the <i>Dependencies ...</i> selection.
+Run dependencies over all files in the Berkeley DB project.
+<p><li>Select the build you are interested in and right click.
+Choose the <i>Rebuild All (BerkeleyDB.out)</i> selection.
+<p><li>Select the Berkeley DB project name and right click.
+Choose the <i>Download 'BerkeleyDB.out'</i> selection.
+</ol>
+<p>You will need to repeat this procedure for
+all builds you are interested in building, as well as for
+all of the example project builds you wish to run.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_win/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_vxworks/notes.html b/bdb/docs/ref/build_vxworks/notes.html
new file mode 100644
index 00000000000..83de255119b
--- /dev/null
+++ b/bdb/docs/ref/build_vxworks/notes.html
@@ -0,0 +1,56 @@
+<!--$Id: notes.so,v 1.6 2000/08/09 15:45:52 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: VxWorks notes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for VxWorks systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_vxworks/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>VxWorks notes</h1>
+<p>Berkeley DB currently disallows the <a href="../../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a> flag to <a href="../../api_c/db_open.html">DB-&gt;open</a>.
+The operations this flag represents are not fully supported under
+VxWorks 5.4.
+<p>The memory on VxWorks is always resident and fully shared among all tasks
+running on the target. For this reason, the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag
+is implied for any application that does not specify the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag. Additionally, applications must use a
+segment ID to ensure different applications do not overwrite each other's
+database environments.
+See the <a href="../../api_c/env_set_shm_key.html">DBENV-&gt;set_shm_key</a> function for more information.
+Also, the <a href="../../api_c/env_open.html#DB_LOCKDOWN">DB_LOCKDOWN</a> flag has no effect.
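+<p>A minimal sketch (error handling and the segment ID value shown are
+illustrative only) of assigning a segment ID before opening the
+environment:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+create_env(DB_ENV **dbenvp)
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+		return (ret);
+	/* Choose a segment ID no other application on the target uses. */
+	if ((ret = dbenv-&gt;set_shm_key(dbenv, 10)) != 0) {
+		(void)dbenv-&gt;close(dbenv, 0);
+		return (ret);
+	}
+	*dbenvp = dbenv;
+	/* The caller then opens the environment with DBENV-&gt;open as usual. */
+	return (0);
+}</pre></blockquote>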
+<p>The <a href="../../api_c/db_sync.html">DB-&gt;sync</a> function is implemented using an ioctl call into the
+file system driver with the FIOSYNC command. Most, but not all, file
+system drivers support this call. Berkeley DB requires the use of a file system
+supporting FIOSYNC.
+<h3>Building and Running the Example Programs</h3>
+<p>Each example program can be downloaded and run by calling the function
+equivalent to the example's name. You may have to edit the pathnames to
+the environments and the database names in the examples' sources. The
+examples included are:
+<p><table border=1 align=center>
+<tr><th>Name</th><th>Description</th></tr>
+<tr> <td align=left>ex_access</td> <td align=left>Simple access method example.</td> </tr>
+<tr> <td align=left>ex_btrec</td> <td align=left>Example using Btree and record numbers.</td> </tr>
+<tr> <td align=left>ex_dbclient</td> <td align=left>Example running an RPC client. Takes a hostname as an argument, e.g.,
+<i>ex_dbclient "myhost"</i>.</td> </tr>
+<tr> <td align=left>ex_env</td> <td align=left>Example using an environment.</td> </tr>
+<tr> <td align=left>ex_mpool</td> <td align=left>Example using mpools.</td> </tr>
+<tr> <td align=left>ex_tpcb</td> <td align=left>Example using transactions. This example requires two invocations both
+taking an integer identifier as an argument. This identifier allows for
+multiple sets of databases to be used within the same environment. The
+first is to initialize the databases, e.g., <i>ex_tpcb_init 1</i>. The
+second is to run the program on those databases, e.g., <i>ex_tpcb 1</i>.</td> </tr>
+</table>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_vxworks/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_win/faq.html b/bdb/docs/ref/build_win/faq.html
new file mode 100644
index 00000000000..2c185b6daa2
--- /dev/null
+++ b/bdb/docs/ref/build_win/faq.html
@@ -0,0 +1,49 @@
+<!--$Id: faq.so,v 10.20 2000/06/28 15:43:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Windows FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for Windows systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_win/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Windows FAQ</h1>
+<p><ol>
+<p><li><b>My Win* C/C++ application crashes in the Berkeley DB library when Berkeley DB calls
+fprintf (or some other standard C library function).</b>
+<p>You should be using the "Debug Multithreaded DLL" compiler option in
+your application when you link with the
+build_win32/Debug/libdb32d.lib library (this .lib file
+is actually a stub for libdb32d.DLL). To check this
+setting in Visual C++, choose the "Project/Settings" menu item, and
+under the tab marked "C/C++", select "Code Generation" and see the box
+marked "Use runtime library". This should be set to "Debug
+Multithreaded DLL". If your application is linked against the static
+library, build_win32/Debug/libdb32sd.lib, then you
+will want to set "Use runtime library" to "Debug Multithreaded".
+<p>Setting this option incorrectly can cause multiple versions of the
+standard libraries to be linked into your application (one on behalf
+of your application, and one on behalf of the Berkeley DB library). That
+violates assumptions made by these libraries, and traps can result.
+<p><li><b>Why are the build options for DB_DLL marked as "Use MFC in a Shared DLL"?
+Does Berkeley DB use MFC?</b>
+<p>Berkeley DB does not use MFC at all. It does, however, call malloc and free and
+other facilities provided by the Microsoft C runtime library. We've found
+in our work that many applications and libraries are built assuming MFC,
+and specifying this for Berkeley DB solves various interoperation issues, and
+guarantees that the right runtime libraries are selected. Note that since
+we do not use MFC facilities, the MFC library DLL is not marked as a
+dependency for libdb.dll, but the appropriate Microsoft C runtime is.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_win/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_win/intro.html b/bdb/docs/ref/build_win/intro.html
new file mode 100644
index 00000000000..6f5e0d4bbf4
--- /dev/null
+++ b/bdb/docs/ref/build_win/intro.html
@@ -0,0 +1,143 @@
+<!--"@(#)intro.so 10.26 (Sleepycat) 11/18/99"-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building for Win32</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for Win32 platforms</dl></h3></td>
+<td width="1%"><a href="../../ref/build_unix/ultrix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/test.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building for Win32</h1>
+<p>The build_win32 directory in the Berkeley DB distribution contains project files
+for both MSVC 5.0 and 6.0:
+<p><table border=1 align=center>
+<tr><th>Project File</th><th>Description</th></tr>
+<tr> <td align=center>Berkeley_DB.dsw</td> <td align=center>Visual C++ 5.0 project (compatible with 6.0)</td> </tr>
+<tr> <td align=center>*.dsp</td> <td align=center>Visual C++ 5.0 subprojects (compatible with 6.0)</td> </tr>
+</table>
+<p>These project files can be used to build Berkeley DB for any Win32 platform:
+Windows 2000, Windows NT, Windows 98 and Windows 95.
+<h3>Building With Visual C++ 6.0</h3>
+<p>Open the file <b>Berkeley_DB.dsw</b>. You will be told that the project
+was generated by a previous version of Developer Studio, and asked if you
+want to convert the project. Select Yes, and all projects will be
+converted. Then continue on with the instructions for building with
+Visual C++ 5.0.
+<p>Note that when you build a release version, you may receive a warning
+about an unknown compiler option <i>/Ob2</i>. This is apparently a
+flaw in the project conversion for Visual C++ and can be ignored.
+<p>Each release of Berkeley DB is built and tested with this procedure using
+Microsoft Visual C++ 6.0, Standard Edition.
+<h3>Building With Visual C++ 5.0</h3>
+<p>Open the file <b>Berkeley_DB.dsw</b>. This workspace includes a number
+of subprojects needed to build Berkeley DB.
+<p>First, you'll need to set the include directories. To do this, select
+<i>Options...</i> from the <i>Tools</i> pull-down menu. At this
+point, a tabbed dialogue should appear. In this new window, choose the
+<i>Directories</i> tab. For the <i>Platform</i>, select
+<i>Win32</i> and for <i>Show directories for</i> select
+<i>Include files</i>. Below these options in the list of directories,
+you should add two directories: the full pathname of the
+<i>build_win32</i> subdirectory of Berkeley DB, followed by the full
+pathname of the <i>include</i> subdirectory of Berkeley DB. Then click OK.
+<p>Then, select <i>Active Project Configuration</i> under the
+<i>Build</i> pull-down menu. For a debug version of the libraries,
+tools and examples, select <i>db_buildall - Win32 Debug</i>.
+Results from this build are put into <b>build_win32/Debug</b>.
+For a release version, select <i>db_buildall - Win32 Release</i>;
+results are put into <b>build_win32/Release</b>.
+For a debug version that has all tools and examples built with
+static libraries, select <i>db_buildall - Win32 Debug Static</i>;
+results are put into <b>build_win32/Debug_static</b>.
+For a release version of the same,
+select <i>db_buildall - Win32 Release Static</i>;
+results are put into <b>build_win32/Release_static</b>.
+Finally, to build, select <i>Build db_buildall.exe</i> under the
+<i>Build</i> pull-down menu.
+<p>When building your application, you should normally use compile options
+"debug multithreaded dll" and link against
+<b>build_win32/Debug/libdb32d.lib</b>. If you want
+to link against a static (non-DLL) version of the library, use the
+"debug multithreaded" compile options and link against
+<b>build_win32/Debug_static/libdb32sd.lib</b>. You can
+also build using a release version of the libraries and tools, which will be
+placed in <b>build_win32/Release/libdb32.lib</b>.
+The static version will be in
+<b>build_win32/Release_static/libdb32s.lib</b>.
+<p>Each release of Berkeley DB is maintained, built and tested using Microsoft
+Visual C++ 5.0 and 6.0.
+<h3>Including the C++ API</h3>
+<p>C++ support is built automatically on Win32.
+<h3>Including the Java API</h3>
+<p>Java support is not built automatically. The following instructions
+assume you have installed the Sun Java Development Kit in
+<b>d:/java</b>. Of course, if you've installed elsewhere, or have
+different Java software, you will need to adjust the pathnames
+accordingly. First, use the instructions above for Visual C++ 5.0 or 6.0
+to open the Tools/Options tabbed dialog for adding include directories.
+In addition to the directories specified above, add
+<b>d:/java/include</b> and <b>d:/java/include/win32</b>. These are
+the directories needed when including <b>jni.h</b>. Now, before
+clicking OK, under <i>Show directories for</i>, choose
+<i>Executable files</i>. Add <b>d:/java/bin</b>. That directory
+is needed to find javac. Now select OK.
+<p>Select <i>Active Project Configuration</i> under the
+<i>Build</i> pull-down menu. Choose <i>db_java - Win32
+Release</i>. To build, select <i>Build
+libdb_java32.dll</i> under the <i>Build</i> pull-down
+menu. This builds the Java support library for Berkeley DB and compiles all
+the java files, placing the class files in the <b>java/classes</b>
+subdirectory of Berkeley DB. Set your environment variable CLASSPATH to
+include this directory, your environment variable PATH to include the
+<b>build_win32/Release</b> subdirectory, and as a test, try running
+the command:
+<p><blockquote><pre>java com.sleepycat.examples.AccessExample</pre></blockquote>
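+<p>Before running that command, the environment might be set from the
+command prompt as follows (the pathnames are illustrative only and depend
+on where you installed Berkeley DB):
+<p><blockquote><pre>set CLASSPATH=%CLASSPATH%;D:\db\java\classes
+set PATH=%PATH%;D:\db\build_win32\Release</pre></blockquote>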
+<h3>Including the Tcl API</h3>
+<p>Tcl support is not built automatically. See
+<a href="../../ref/tcl/intro.html">Loading Berkeley DB with Tcl</a> for information
+on sites from which you can download Tcl and which Tcl versions are
+compatible with Berkeley DB.
+<p>The Tcl library must be built with the same build type as the Berkeley DB
+library (both Release or both Debug). We have found that the binary
+release of Tcl can be used with the Release configuration of Berkeley DB, but
+for the Debug configuration, you will need to build Tcl from
+sources. Before building Tcl, you will need to modify its makefile to
+make sure you are building a debug version, including thread support.
+This is because the set of DLLs linked into the Tcl executable must
+match the corresponding set of DLLs used by Berkeley DB.
+<p>These notes assume Tcl is installed as <b>d:/tcl</b>, but you can
+change that if you wish. If you run using a different version of Tcl
+than the one currently being used by Sleepycat Software, you will need
+to change the name of the Tcl library used in the build (e.g.,
+tcl83d.lib) to the appropriate name. See
+Projects-&gt;Settings-&gt;Link in the db_tcl subproject.
+<p>Use the instructions above for
+Visual C++ 5.0 or 6.0 to open the <i>Tools/Options</i> tabbed dialog
+for adding include directories. In addition to the directories specified
+above, add <b>d:/tcl/include</b>. This is the directory that contains
+<b>tcl.h</b>.
+Then, in that same dialog, show directories for "Library Files".
+Add <b>d:/tcl/lib</b> (or whatever directory contains
+<b>tcl83d.lib</b> in your distribution) to the list. Now select OK.
+<p>Select <i>Active Project Configuration</i> under the
+<i>Build</i> pull-down menu. Choose <i>db_tcl - Win32
+Release</i>. To build, select <i>Build
+libdb_tcl32.dll</i> under the <i>Build</i> pull-down
+menu. This builds the Tcl support library for Berkeley DB, placing the result
+into <b>build_win32/Release/libdb_tcl32.dll</b>.
+Selecting an Active Configuration of <i>db_tcl - Win32 Debug</i>
+will build a debug version, placing the result into
+<b>build_win32/Debug/libdb_tcl32d.dll</b>.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_unix/ultrix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/test.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_win/notes.html b/bdb/docs/ref/build_win/notes.html
new file mode 100644
index 00000000000..483b101ecc2
--- /dev/null
+++ b/bdb/docs/ref/build_win/notes.html
@@ -0,0 +1,56 @@
+<!--$Id: notes.so,v 10.17 2000/11/02 16:46:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Windows notes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for Windows systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_win/test.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Windows notes</h1>
+<ul type=disc>
+<li>Various Berkeley DB interfaces take a <b>mode</b> argument, intended to specify
+the underlying file permissions for created files. Berkeley DB currently ignores
+this argument on Windows systems.
+<p>It would be possible to construct a set of security attributes to pass to
+<b>CreateFile</b> that accurately represents the mode. In the worst
+case, this would involve looking up user and all group names and creating
+an entry for each. Alternatively, we could call the <b>_chmod</b>
+(partial emulation) function after file creation, although this leaves us
+with an obvious race.
+<p>Practically speaking, however, these efforts would be largely meaningless
+on FAT, the most common file system, which only has a "readable" and
+"writeable" flag, applying to all users.
+<li>When using the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag, Berkeley DB shared regions are
+created without ACLs, which means that the regions are only accessible
+to a single user. If wider sharing is appropriate (e.g., both user
+applications and Windows/NT service applications need to access the
+Berkeley DB regions), the Berkeley DB code will need to be modified to create the
+shared regions with the correct ACLs. Alternatively, by not specifying
+the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag, file-system backed regions will be
+created instead, and the permissions on those files may be directly
+specified through the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> interface.
+<li>On Windows/9X, files opened by multiple processes do not share data
+correctly. For this reason, the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag is implied
+for any application that does not specify the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag,
+causing the system paging file to be used for sharing data. However,
+paging file memory is freed on last close, implying that multiple
+processes sharing an environment must arrange for at least one process
+to always have the environment open, or, alternatively, that any process
+joining the environment be prepared to re-create it. If a shared
+environment is closed by all processes, a subsequent open without
+specifying the <a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a> flag will result in the return of a
+system EAGAIN error code.
+</ul>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_win/test.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/build_win/test.html b/bdb/docs/ref/build_win/test.html
new file mode 100644
index 00000000000..e3230ca84a4
--- /dev/null
+++ b/bdb/docs/ref/build_win/test.html
@@ -0,0 +1,77 @@
+<!--$Id: test.so,v 10.29 2001/01/17 14:42:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Running the test suite under Windows</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for Windows systems</dl></h3></td>
+<td width="1%"><a href="../../ref/build_win/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Running the test suite under Windows</h1>
+<p>To build the test suite on Win32 platforms you will need to configure
+Tcl support. You will also need sufficient main memory and disk.
+Something around 100MB of disk will be sufficient. For memory, 32MB is
+too small; we recommend at least 64MB.
+<h3>Building the software needed by the tests</h3>
+<p>There are bugs in some versions of Tcl that may cause the test suite
+to hang on Windows/NT 4.0. Tcl version 8.4 (currently available as an
+alpha release) has fixed the problem, or there are patches available
+for Tcl 8.3.2 (see bug #119188 in the Tcl SourceForge database). Note
+that if you want to run the test suite against a Debug version of Berkeley DB,
+you need to build a debug version of Tcl. This involves building Tcl
+from its source.
+<p>To build, perform the following steps. Note that steps #1, #4 and #5
+are part of the normal build process for building Berkeley DB; #2 and #3 are part
+of including the Tcl API.
+<p><ol>
+<p><li>Open the <b>build_win32/Berkeley_DB.dsw</b> workspace.
+<p><li>Add the pathname for the Tcl include subdirectory to your
+include path. To do this, under the "Tools" menu item, select "Options".
+In the dialog, select the "Directories" tab, and choose directories
+for "Include Files". Add <b>d:/tcl/include</b> (or whatever directory
+contains <b>tcl.h</b> in your distribution) to the list.
+<p><li>Add the pathname for the Tcl library subdirectory to your
+library path. To do this, under the "Tools" menu item, select "Options".
+In the dialog, select the "Directories" tab, and choose directories for
+"Library Files". Add <b>d:/tcl/lib</b> (or whatever directory contains
+<b>tcl83d.lib</b> in your distribution) to the list.
+<p><li>Set the active configuration to db_test -- Debug. To set an
+active configuration, under the "Build" menu item in the IDE, select "Set
+Active Configuration". Then choose "db_test -- Debug".
+<p><li>Build. The IDE menu for this is called "build dbkill.exe",
+even though dbkill is just one of the things that is built.
+This step builds the base Berkeley DB .dll, tcl support,
+and various tools that are needed by the test suite.
+</ol>
+<h3>Running the test suite under Windows</h3>
+<p>Before running the tests for the first time, you must edit the file
+<b>include.tcl</b> in your build directory and change the line
+that reads:
+<p><blockquote><pre>set tclsh_path SET_YOUR_TCLSH_PATH</pre></blockquote>
+<p>You will want to use the location of the <b>tclsh</b> program. For
+example, if Tcl is installed as <b>d:/tcl</b>, this line should be:
+<p><blockquote><pre>set tclsh_path d:/tcl/bin/tclsh83d.exe</pre></blockquote>
+<p>Then, in a shell of your choice enter the following commands:
+<p><ol>
+<p><li>cd build_win32
+<p><li>run <b>d:/tcl/bin/tclsh83d.exe</b>, or the equivalent name of
+the Tcl shell for your distribution.
+<p>You should get a "%" prompt.
+<p><li>% source ../test/test.tcl.
+<p>You should get a "%" prompt with no errors.
+</ol>
+<p>You are now ready to run tests in the test suite; see
+<a href="../../ref/test/run.html">Running the test suite</a> for more
+information.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_win/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/cam/intro.html b/bdb/docs/ref/cam/intro.html
new file mode 100644
index 00000000000..7a02ea87f93
--- /dev/null
+++ b/bdb/docs/ref/cam/intro.html
@@ -0,0 +1,72 @@
+<!--$Id: intro.so,v 10.21 2001/01/18 19:50:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building Berkeley DB Concurrent Data Store applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Concurrent Data Store Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/env/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building Berkeley DB Concurrent Data Store applications</h1>
+<p>It is often desirable to have concurrent read-write access to a database
+when there is no need for full recoverability or transaction semantics.
+For this class of applications, Berkeley DB provides an interface supporting
+deadlock-free, multiple-reader/single-writer access to the database.
+This means that, at any instant in time, there may be either multiple
+readers accessing data or a single writer modifying data. The
+application is entirely unaware of which is happening, and Berkeley DB
+implements the necessary locking and blocking to ensure this behavior.
+<p>In order to create Berkeley DB Concurrent Data Store applications, you must first initialize an
+environment by calling <a href="../../api_c/env_open.html">DBENV-&gt;open</a>. You must specify the
+<a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> and <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a> flags to that interface.
+It is an error to specify any of the other <a href="../../api_c/env_open.html">DBENV-&gt;open</a> subsystem
+or recovery configuration flags, e.g., <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>,
+<a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> or <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a>.
+<p>All databases must, of course, be created in this environment, by using
+the <a href="../../api_c/db_create.html">db_create</a> interface and specifying the correct environment
+as an argument.
+<p>The Berkeley DB access method calls used to support concurrent access are
+unchanged from the normal access method calls, with one exception: the
+<a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> interface. In Berkeley DB Concurrent Data Store, each cursor must encapsulate
+the idea of being used for read-only access or for read-write access.
+There may only be one read-write cursor active at any one time. When your
+application creates a cursor, if that cursor will ever be used for
+writing, the <a href="../../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag must be specified when the cursor
+is created.
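+<p>As an illustration only, the following fragment sketches how such an
+environment, database and write cursor might be created using the C API.
+The pathnames, file names and error handling are hypothetical, and the
+open call signatures shown are those of the 3.x releases:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+DB_ENV *dbenv;
+DB *dbp;
+DBC *dbc;
+int ret;
+
+/* Create and open a Concurrent Data Store environment. */
+ret = db_env_create(&amp;dbenv, 0);
+ret = dbenv-&gt;open(dbenv, "/path/to/home",
+    DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL, 0);
+
+/* Create the database handle within that environment. */
+ret = db_create(&amp;dbp, dbenv, 0);
+ret = dbp-&gt;open(dbp, "data.db", NULL, DB_BTREE, DB_CREATE, 0664);
+
+/* Any cursor that will ever write must specify DB_WRITECURSOR. */
+ret = dbp-&gt;cursor(dbp, NULL, &amp;dbc, DB_WRITECURSOR);</pre></blockquote>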
+<p>No deadlock detector needs to be run in a Berkeley DB Concurrent Data Store database environment.
+<p>Only a single thread of control may write the database at a time. For
+this reason, care must be taken to ensure that applications do not
+inadvertently block themselves, causing the application to hang, unable
+to proceed. Some common mistakes include:
+<p><ol>
+<p><li>Leaving a cursor open while issuing a <a href="../../api_c/db_put.html">DB-&gt;put</a> or <a href="../../api_c/db_del.html">DB-&gt;del</a>
+access method call.
+<p><li>Attempting to open a cursor for read-write access while already holding
+a cursor open for read-write access.
+<p><li>Not testing Berkeley DB error return codes (if any cursor operation returns an
+unexpected error, that cursor should be closed).
+<p><li>By default, Berkeley DB Concurrent Data Store does locking on a per-database basis. For this reason,
+accessing multiple databases in different orders in different threads
+or processes, or leaving cursors open on one database while accessing
+another database, can cause an application to hang. If this behavior
+is a requirement for the application, Berkeley DB can be configured to do
+locking on an environment wide basis. See the <a href="../../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a> flag
+of the <a href="../../api_c/env_set_flags.html">DBENV-&gt;set_flags</a> function for more information.
+</ol>
+<p>Note that it is correct operation for two different threads of control
+(actual threads or processes) to have multiple read-write cursors open,
+or for one thread to issue a <a href="../../api_c/db_put.html">DB-&gt;put</a> call while another thread
+has a read-write cursor open, and it is only a problem if these things
+are done within a single thread of control.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/env/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/debug/common.html b/bdb/docs/ref/debug/common.html
new file mode 100644
index 00000000000..6374307f133
--- /dev/null
+++ b/bdb/docs/ref/debug/common.html
@@ -0,0 +1,109 @@
+<!--$Id: common.so,v 10.13 2000/12/05 18:04:26 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Common errors</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/debug/printlog.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Common errors</h1>
+<p>This page outlines some of the most common problems that people encounter
+and some suggested courses of action.
+<p><dl compact>
+<p><dt><b>Symptom:</b><dd>Core dumps or garbage returns from random Berkeley DB operations.
+<p><dt>Possible Cause:<dd>Failure to zero out DBT structure before issuing request.
+<p><dt>Fix:<dd>Before using a <a href="../../api_c/dbt.html">DBT</a>, you must initialize all its elements
+to 0 and then set the ones you are using explicitly.
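+<p>For example (a sketch only; it assumes <b>db.h</b> and <b>string.h</b>
+have been included, and the key values are hypothetical):
+<p><blockquote><pre>DBT key, data;
+
+/* Zero out both DBTs, then set only the fields you need. */
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");</pre></blockquote>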
+<p><dt><b>Symptom:</b><dd>Random crashes and/or database corruption.
+<p><dt>Possible Cause:<dd>Running multiple threads, but did not specify <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>
+to <a href="../../api_c/db_open.html">DB-&gt;open</a> or <a href="../../api_c/env_open.html">DBENV-&gt;open</a>.
+<p><dt>Fix:<dd>Any time you are sharing a handle across multiple threads, you must
+specify <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> when you open that handle.
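+<p>For example (a sketch only, using the 3.x-style open calls; handle and
+file names are hypothetical):
+<p><blockquote><pre>/* Handles shared among threads must be opened with DB_THREAD. */
+ret = dbenv-&gt;open(dbenv, "/path/to/home",
+    DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL | DB_THREAD, 0);
+ret = dbp-&gt;open(dbp, "data.db", NULL, DB_BTREE,
+    DB_CREATE | DB_THREAD, 0664);</pre></blockquote>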
+<p><dt><b>Symptom:</b><dd><a href="../../api_c/env_open.html">DBENV-&gt;open</a> returns EINVAL.
+<p><dt>Possible Cause:<dd>The environment home directory is a remote mounted filesystem.
+<p><dt>Fix:<dd>Use a locally mounted filesystem instead.
+<p><dt><b>Symptom:</b><dd><a href="../../api_c/db_get.html">DB-&gt;get</a> calls are returning EINVAL.
+<p><dt>Possible Cause:<dd>The application is running with threads, but did not specify the
+<a href="../../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or <a href="../../api_c/dbt.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a>
+flags in the <a href="../../api_c/dbt.html">DBT</a> structures used in the call.
+<p><dt>Fix:<dd>When running with threaded handles (i.e., specifying <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>
+to <a href="../../api_c/env_open.html">DBENV-&gt;open</a> or <a href="../../api_c/db_open.html">DB-&gt;open</a>), you must specify one of those
+flags for all <a href="../../api_c/dbt.html">DBT</a> structures in which Berkeley DB is returning data.
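+<p>For example (a sketch only; the key value is hypothetical and error
+handling is omitted):
+<p><blockquote><pre>DBT key, data;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+/* Have Berkeley DB allocate memory for the returned data item. */
+data.flags = DB_DBT_MALLOC;
+if ((ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0)) == 0) {
+    /* ... use data.data ... */
+    free(data.data);
+}</pre></blockquote>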
+<p><dt><b>Symptom:</b><dd>Running multiple threads or processes, and the database appears to be
+getting corrupted.
+<p><dt>Possible Cause:<dd>Locking is not enabled.
+<p><dt>Fix:<dd>Make sure that you are acquiring locks in your access methods. You
+must specify <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> to your <a href="../../api_c/env_open.html">DBENV-&gt;open</a> call and then
+pass that environment to <a href="../../api_c/db_open.html">DB-&gt;open</a>.
+<p><dt><b>Symptom:</b><dd>Locks are accumulating or threads and/or processes are
+deadlocking even though there is no concurrent access to the database.
+<p><dt>Possible Cause:<dd>Failure to close a cursor.
+<p><dt>Fix:<dd>Cursors retain locks between calls. Everywhere the application uses
+a cursor, the cursor should be explicitly closed as soon as possible after
+it is used.
+<p><dt><b>Symptom:</b><dd>The system locks up.
+<p><dt>Possible Cause:<dd>Application not checking for <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>.
+<p><dt>Fix:<dd>Unless you are using the Concurrent Data Store product, whenever you
+have multiple threads and/or processes and at least one of them is
+writing, you have the potential for deadlock. As a result, you must
+test for the <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> return on every Berkeley DB call. In
+general, updates should take place in a transaction or you might leave
+the database in an inconsistent state. Reads may take place outside
+the context of a transaction under common conditions.
+<p>Whenever you get a <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> return, you should:
+<p><ol>
+<p><li>If you are running in a transaction, abort the transaction, first closing
+any cursors opened in the transaction.
+<p><li>If you are not running in a transaction, simply close the cursor that got
+the <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> (if it was a cursor operation) and retry.
+</ol>
+<p>See <a href="../../ref/transapp/put.html">Recoverability and deadlock
+avoidance</a> for further information.
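+<p>A minimal sketch of this retry logic (handle and item names are
+hypothetical, and error reporting is omitted) might look as follows:
+<p><blockquote><pre>DB_TXN *tid;
+int ret;
+
+/* Retry the update whenever DB_LOCK_DEADLOCK is returned. */
+for (;;) {
+    if ((ret = txn_begin(dbenv, NULL, &amp;tid, 0)) != 0)
+        break;
+    if ((ret = dbp-&gt;put(dbp, tid, &amp;key, &amp;data, 0)) == 0) {
+        ret = txn_commit(tid, 0);
+        break;
+    }
+    (void)txn_abort(tid);
+    if (ret != DB_LOCK_DEADLOCK)
+        break;    /* A real error; do not retry. */
+}</pre></blockquote>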
+<p><dt><b>Symptom:</b><dd>An inordinately high number of deadlocks.
+<p><dt>Possible Cause:<dd>Read-Modify-Write pattern without using the RMW flag.
+<p><dt>Fix:<dd>If you frequently read a piece of data, modify it and then write
+it, you may be inadvertently causing a large number of deadlocks. Try
+specifying the <a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a> flag on your get calls.
+<p>Or, if the application is doing a large number of updates in a small
+database, turning off Btree splits may help (see <a href="../../api_c/db_set_flags.html#DB_REVSPLITOFF">DB_REVSPLITOFF</a>
+for more information).
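+<p>For example (a sketch only; valid within a transaction in a locking
+environment, with hypothetical handle names):
+<p><blockquote><pre>/* Acquire a write lock at read time to avoid the
+ * classic read-modify-write deadlock. */
+ret = dbp-&gt;get(dbp, tid, &amp;key, &amp;data, DB_RMW);</pre></blockquote>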
+<p><dt><b>Symptom:</b><dd>I run recovery and it exits cleanly, but my database changes are missing.
+<p><dt>Possible Cause:<dd>Failure to enable logging and transactions in the database environment,
+failure to specify DB_ENV handle when creating DB handle,
+transaction handle not passed to Berkeley DB interface, failure to commit
+transaction.
+<p><dt>Fix:<dd>Make sure that the environment and database handles are properly
+created, and that the application passes the transaction handle returned
+by <a href="../../api_c/txn_begin.html">txn_begin</a> to the appropriate Berkeley DB interfaces, and that each
+transaction is eventually committed.
+<p><dt><b>Symptom:</b><dd>Recovery fails.
+<p><dt>Possible Cause:<dd>A database was updated in a transactional environment both with and
+without transactional handles.
+<p><dt>Fix:<dd>If any database write operation is done using a transaction handle,
+every write operation must be done in the context of a transaction.
+<p><dt><b>Symptom:</b><dd>A database environment locks up, sometimes gradually.
+<p><dt>Possible Cause:<dd>A thread of control exited unexpectedly, holding Berkeley DB resources.
+<p><dt>Fix:<dd>Whenever a thread of control exits holding Berkeley DB resources, all threads
+of control must exit the database environment, and recovery must be run.
+<p><dt><b>Symptom:</b><dd>A database environment locks up, sometimes gradually.
+<p><dt>Possible Cause:<dd>Cursors are not being closed before transaction abort.
+<p><dt>Fix:<dd>Before an application aborts a transaction, any cursors opened within
+the context of that transaction must be closed.
+<p><dt><b>Symptom:</b><dd>Transaction abort or recovery fails, or database corruption occurs.
+<p><dt>Possible Cause:<dd>Log files were removed before it was safe.
+<p><dt>Fix:<dd>Do not remove any log files from a database environment until Berkeley DB
+declares it safe.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/debug/printlog.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/debug/compile.html b/bdb/docs/ref/debug/compile.html
new file mode 100644
index 00000000000..504d5d3ecd6
--- /dev/null
+++ b/bdb/docs/ref/debug/compile.html
@@ -0,0 +1,43 @@
+<!--$Id: compile.so,v 10.10 2000/12/01 20:15:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Compile-time configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging</dl></h3></td>
+<td width="1%"><a href="../../ref/debug/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/runtime.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Compile-time configuration</h1>
+<p>There are two compile-time configuration options that assist in debugging
+Berkeley DB and Berkeley DB applications.
+<p><dl compact>
+<p><dt>--enable-debug<dd>If you want to build Berkeley DB with <b>-g</b> as the C and C++ compiler
+flag, enter --enable-debug as an argument to configure. This will create
+Berkeley DB with debugging symbols, as well as load various Berkeley DB routines
+that can be called directly from a debugger to display database page
+content, cursor queues and so forth. (Note that the <b>-O</b>
+optimization flag will still be specified. To compile with only the
+<b>-g</b> flag, explicitly set the <b>CFLAGS</b> environment variable
+before configuring.)
+<p><dt>--enable-diagnostic<dd>If you want to build Berkeley DB with debugging run-time sanity checks and with
+DIAGNOSTIC #defined during compilation, enter --enable-diagnostic as an
+argument to configure. This will cause a number of special checks to be
+performed when Berkeley DB is running. This flag should not be defined when
+configuring to build production binaries, as it degrades performance.
+<p>In addition, when compiling Berkeley DB for use in run-time memory consistency
+checkers (in particular, programs that look for reads and writes of
+uninitialized memory), use --enable-diagnostic as an argument to configure.
+This guarantees that Berkeley DB will completely initialize allocated pages
+rather than only initializing the minimum necessary amount.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/debug/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/runtime.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/debug/intro.html b/bdb/docs/ref/debug/intro.html
new file mode 100644
index 00000000000..0ea0afcfb22
--- /dev/null
+++ b/bdb/docs/ref/debug/intro.html
@@ -0,0 +1,58 @@
+<!--$Id: intro.so,v 10.15 2000/12/04 18:05:41 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/install/file.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/compile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>As Berkeley DB is an embedded library, debugging applications that use Berkeley DB
+is both harder and easier than debugging a separate server. Debugging
+can be harder, because, when a problem arises, it is not always readily
+apparent whether the problem is in the application, in the database
+library, or is a result of an unexpected interaction between the two.
+Debugging can be easier, as it is easier to track down a problem when
+you can review a stack trace rather than deciphering inter-process
+communication messages. This chapter is intended to assist you in
+debugging applications and in reporting bugs to us in a manner such that
+we can provide you with the correct answer or fix as quickly as
+possible.
+<p>When you encounter a problem, there are a few general actions you can
+take:
+<p><dl compact>
+<p><dt>Review the Berkeley DB error output<dd>If an error output mechanism has been configured in the Berkeley DB
+environment, additional run-time error messages are made available to
+the applications. If you are not using an environment, it is well worth
+modifying your application to create one so that you can get more
+detailed error messages. See <a href="runtime.html">Run-time error
+information</a> for more information on configuring Berkeley DB to output these
+error messages.
+<p><dt>Review <a href="../../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a><dd>Check the list of flags for the <a href="../../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a> function, and
+see if any of them will produce additional information that might help
+understand the problem.
+<p><dt>Add run-time diagnostics<dd>You can configure and build Berkeley DB to perform run-time diagnostics.
+(These checks are not done by default as they can seriously impact
+performance. See <a href="compile.html">Compile-time configuration</a> for more
+information.)
+<p><dt>Apply all available patches<dd>Before reporting a problem to Sleepycat Software, please upgrade to the
+latest Sleepycat Software release of Berkeley DB, if possible, or at least
+make sure you have applied any updates available for your release from
+the <a href="http://www.sleepycat.com/update/index.html">Sleepycat
+Software web site</a>.
+<p><dt>Run the test suite<dd>If you are seeing repeated failures, or failures of simple test cases,
+run the Berkeley DB test suite to determine if the distribution of Berkeley DB you
+are using was built and configured correctly.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/install/file.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/compile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/debug/printlog.html b/bdb/docs/ref/debug/printlog.html
new file mode 100644
index 00000000000..e533a88d21c
--- /dev/null
+++ b/bdb/docs/ref/debug/printlog.html
@@ -0,0 +1,160 @@
+<!--$Id: printlog.so,v 10.20 2000/12/01 20:15:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Reviewing Berkeley DB log files</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/debug/runtime.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/common.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Reviewing Berkeley DB log files</h1>
+<p>If you are running with transactions and logging, the <a href="../../utility/db_printlog.html">db_printlog</a>
+utility can be a useful debugging aid. The <a href="../../utility/db_printlog.html">db_printlog</a> utility
+will display the contents of your log files in a human-readable (and
+machine-processable) format.
+<p>The <a href="../../utility/db_printlog.html">db_printlog</a> utility will attempt to display any and all
+logfiles present in a designated db_home directory. For each log record,
+<a href="../../utility/db_printlog.html">db_printlog</a> will display a line of the form:
+<p><blockquote><pre>[22][28]db_big: rec: 43 txnid 80000963 prevlsn [21][10483281]</pre></blockquote>
+<p>The opening numbers in square brackets are the log sequence number (LSN)
+of the log record being displayed. The first number indicates the log
+file in which the record appears, and the second number indicates the
+offset in that file of the record.
+<p>The first character string identifies the particular log operation being
+reported. The log records corresponding to particular operations are
+described below. The rest of the line consists of name/value pairs.
+<p>The rec field indicates the record type (this is used to dispatch records
+in the log to appropriate recovery functions).
+<p>The txnid field identifies the transaction for which this record was
+written. A txnid of 0 means that the record was written outside the
+context of any transaction. You will see these most frequently for
+checkpoints.
+<p>Finally, the prevlsn contains the LSN of the last record for this
+transaction. By following prevlsn fields, you can accumulate all the
+updates for a particular transaction. During normal abort processing,
+this field is used to quickly access all the records for a particular
+transaction.
+<p>After the initial line identifying the record type, each field of the log
+record is displayed, one item per line. There are several fields that
+appear in many different records and a few fields that appear only in
+some records.
+<p>The list below presents each log record type currently produced with a brief
+description of the operation it describes.
+<!--START LOG RECORD TYPES-->
+<p><table border=1>
+<tr><th>Log Record Type</th><th>Description</th></tr>
+<tr><td>bam_adj</td><td>Used when we insert/remove an index into/from the page header of a Btree page.</td></tr>
+<tr><td>bam_cadjust</td><td>Keeps track of record counts in a Btree or Recno database.</td></tr>
+<tr><td>bam_cdel</td><td>Used to mark a record on a page as deleted.</td></tr>
+<tr><td>bam_curadj</td><td>Used to adjust a cursor location when a nearby record changes in a Btree database.</td></tr>
+<tr><td>bam_pg_alloc</td><td>Indicates that we allocated a page to a Btree.</td></tr>
+<tr><td>bam_pg_free</td><td>Indicates that we freed a page in the Btree (freed pages are added to a freelist and reused).</td></tr>
+<tr><td>bam_rcuradj</td><td>Used to adjust a cursor location when a nearby record changes in a Recno database.</td></tr>
+<tr><td>bam_repl</td><td>Describes a replace operation on a record.</td></tr>
+<tr><td>bam_root</td><td>Describes an assignment of a root page.</td></tr>
+<tr><td>bam_rsplit</td><td>Describes a reverse page split.</td></tr>
+<tr><td>bam_split</td><td>Describes a page split.</td></tr>
+<tr><td>crdel_delete</td><td>Describes the removal of a Berkeley DB file.</td></tr>
+<tr><td>crdel_fileopen</td><td>Describes a Berkeley DB file create attempt.</td></tr>
+<tr><td>crdel_metapage</td><td>Describes the creation of a meta-data page for a new file.</td></tr>
+<tr><td>crdel_metasub</td><td>Describes the creation of a meta data page for a subdatabase.</td></tr>
+<tr><td>crdel_rename</td><td>Describes a file rename operation.</td></tr>
+<tr><td>db_addrem</td><td>Add or remove an item from a page of duplicates.</td></tr>
+<tr><td>db_big</td><td>Add an item to an overflow page (overflow pages contain items too large to place on the main page).</td></tr>
+<tr><td>db_debug</td><td>Log debugging message.</td></tr>
+<tr><td>db_noop</td><td>This marks an operation that did nothing but update the LSN on a page.</td></tr>
+<tr><td>db_ovref</td><td>Increment or decrement the reference count for a big item.</td></tr>
+<tr><td>db_relink</td><td>Fix prev/next chains on duplicate pages because a page was added or removed.</td></tr>
+<tr><td>ham_chgpg</td><td>Used to adjust a cursor location when a Hash page is removed, and its elements are moved to a different Hash page.</td></tr>
+<tr><td>ham_copypage</td><td>Used when we empty a bucket page, but there are overflow pages for the bucket; one needs to be copied back into the actual bucket.</td></tr>
+<tr><td>ham_curadj</td><td>Used to adjust a cursor location when a nearby record changes in a Hash database.</td></tr>
+<tr><td>ham_groupalloc</td><td>Allocate some number of contiguous pages to the Hash database.</td></tr>
+<tr><td>ham_insdel</td><td>Insert/Delete an item on a Hash page.</td></tr>
+<tr><td>ham_metagroup</td><td>Update the metadata page to reflect the allocation of a sequence of contiguous pages.</td></tr>
+<tr><td>ham_newpage</td><td>Adds or removes overflow pages from a Hash bucket.</td></tr>
+<tr><td>ham_replace</td><td>Handle updates to records that are on the main page.</td></tr>
+<tr><td>ham_splitdata</td><td>Record the page data for a split.</td></tr>
+<tr><td>log_register</td><td>Records an open of a file (mapping the file name to a log-id that is used in subsequent log operations).</td></tr>
+<tr><td>qam_add</td><td>Describes the actual addition of a new record to a Queue.</td></tr>
+<tr><td>qam_del</td><td>Delete a record in a Queue.</td></tr>
+<tr><td>qam_delete</td><td>Remove a Queue extent file.</td></tr>
+<tr><td>qam_inc</td><td>Increments the maximum record number allocated in a Queue indicating that we've allocated another space in the file.</td></tr>
+<tr><td>qam_incfirst</td><td>Increments the record number that refers to the first record in the database.</td></tr>
+<tr><td>qam_mvptr</td><td>Indicates that we changed the reference to either or both of the first and current records in the file.</td></tr>
+<tr><td>qam_rename</td><td>Rename a Queue extent file.</td></tr>
+<tr><td>txn_child</td><td>Commit a child transaction.</td></tr>
+<tr><td>txn_ckp</td><td>Transaction checkpoint.</td></tr>
+<tr><td>txn_regop</td><td>Logs a regular (non-child) transaction commit.</td></tr>
+<tr><td>txn_xa_regop</td><td>Logs a prepare message.</td></tr>
+</table>
+<!--END LOG RECORD TYPES-->
+<h3>Augmenting the Log for Debugging</h3>
+<p>When debugging applications, it is sometimes useful to log, not only the
+actual operations that modify pages, but also the underlying Berkeley DB
+functions being executed. This form of logging can add significant bulk
+to your log, but can permit debugging application errors that are almost
+impossible to find any other way. To turn on these log messages, specify
+the --enable-debug_rop and --enable-debug_wop configuration options when
+configuring Berkeley DB. See <a href="../../ref/build_unix/conf.html">Configuring
+Berkeley DB</a> for more information.
+<h3>Extracting Committed Transactions and Transaction Status</h3>
+<p>Sometimes it is useful to use the human-readable log output to determine
+which transactions committed and aborted. The awk script commit.awk,
+found in the db_printlog directory of the Berkeley DB distribution, allows you
+to do just that. The command:
+<p><blockquote><pre>awk -f commit.awk log_output</pre></blockquote>
+where log_output is the output of db_printlog, will display a list of
+the transaction IDs of all committed transactions found in the log.
+<p>If you need a complete list of both committed and aborted transactions,
+then the script status.awk will produce that. The syntax is:
+<p><blockquote><pre>awk -f status.awk log_output</pre></blockquote>
+<h3>Extracting Transaction Histories</h3>
+<p>Another useful debugging aid is to print out the complete history of a
+transaction. The awk script txn.awk allows you to do that. The
+command line:
+<p><blockquote><pre>awk -f txn.awk TXN=txnlist log_output</pre></blockquote>
+where log_output is the output of <a href="../../utility/db_printlog.html">db_printlog</a> and txnlist is
+a comma-separated list of transaction IDs, will display all log records
+associated with the designated transaction ids.
+<h3>Extracting File Histories</h3>
+<p>The awk script fileid.awk allows you to extract all log records that
+affect particular files. The syntax for the fileid.awk script is:
+<p><blockquote><pre>awk -f fileid.awk PGNO=fids log_output</pre></blockquote>
+<p>where log_output is the output of db_printlog and fids is a
+comma-separated list of fileids. The script will output all log
+records that reference the designated file.
+<h3>Extracting Page Histories</h3>
+<p>The awk script pgno.awk allows you to extract all log records that
+affect particular pages. As currently designed, however, it will
+extract records of all files with the designated page number, so this
+script is most useful in conjunction with the fileid script. The syntax
+for the pgno.awk script is:
+<p><blockquote><pre>awk -f pgno.awk PGNO=pgnolist log_output</pre></blockquote>
+<p>where log_output is the output of db_printlog and pgnolist is a
+comma-separated list of page numbers. The script will output all log
+records that reference the designated page numbers.
+<h3>Other log processing tools</h3>
+<p>The awk script count.awk will print out the number of log records
+encountered that belonged to some transaction (that is, the number of log
+records excluding those for checkpoints and non-transaction protected
+operations).
+<p>The script range.awk will extract a subset of a log. This is useful
+when the output of <a href="../../utility/db_printlog.html">db_printlog</a> is too large to be reasonably
+manipulated with an editor or other tool.
+<p>The syntax for range.awk is:
+<p><blockquote><pre>awk -f range.awk START_FILE=sf START_OFFSET=so END_FILE=ef END_OFFSET=eo log_output</pre></blockquote>
+<p>where <b>sf</b> and <b>so</b> represent the log sequence number
+(LSN) of the beginning of the sublog you wish to extract and <b>ef</b>
+and <b>eo</b> represent the LSN of the end of the sublog you wish to
+extract.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/debug/runtime.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/common.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/debug/runtime.html b/bdb/docs/ref/debug/runtime.html
new file mode 100644
index 00000000000..40fec7e82dd
--- /dev/null
+++ b/bdb/docs/ref/debug/runtime.html
@@ -0,0 +1,47 @@
+<!--$Id: runtime.so,v 10.16 2000/12/01 20:15:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Run-time error information</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging</dl></h3></td>
+<td width="1%"><a href="../../ref/debug/compile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/printlog.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Run-time error information</h1>
+<p>Normally, when an error occurs in the Berkeley DB library, an integer value
+(either a Berkeley DB specific value, or a system <b>errno</b> value) is
+returned by the function. In some cases, however, this value may be
+insufficient to completely describe the cause of the error, especially
+during initial application debugging.
+<p>There are four interfaces intended to provide applications with
+additional run-time error information. They are
+<a href="../../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>, <a href="../../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a>,
+<a href="../../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a> and <a href="../../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>.
+<p>If the environment is configured with these interfaces, many Berkeley DB errors
+will result in additional information being written to a file or passed
+as an argument to an application function.
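+<p>For example, a sketch of such a configuration (the prefix string shown
+is arbitrary) might be:
+<p><blockquote><pre>/* Write Berkeley DB error messages to stderr, prefixed with
+ * the application name, in addition to the integer returns. */
+dbenv-&gt;set_errfile(dbenv, stderr);
+dbenv-&gt;set_errpfx(dbenv, "my_application");</pre></blockquote>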
+<p>The Berkeley DB error reporting facilities do not slow performance or
+significantly increase application size, and may be run during normal
+operation as well as during debugging. Where possible, we recommend that
+these options always be configured and the output saved in the filesystem.
+We have found that this often saves time when debugging installation
+or other system integration problems.
+<p>In addition, there are three routines to assist applications in
+displaying their own error messages: <a href="../../api_c/env_strerror.html">db_strerror</a>,
+<a href="../../api_c/db_err.html">DBENV-&gt;err</a> and <a href="../../api_c/db_err.html">DBENV-&gt;errx</a>. The first is a superset of
+the ANSI C strerror interface, and returns a descriptive string for
+any error return from the Berkeley DB library. The <a href="../../api_c/db_err.html">DBENV-&gt;err</a> and
+<a href="../../api_c/db_err.html">DBENV-&gt;errx</a> functions use the error message configuration options
+described above to format and display error messages to appropriate
+output devices.
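+<p>For example (a sketch only; the database name and 3.x-style open call are
+hypothetical), an application might report an open failure either way:
+<p><blockquote><pre>if ((ret = dbp-&gt;open(dbp,
+    "data.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+    /* Report through the configured error channels ... */
+    dbenv-&gt;err(dbenv, ret, "DB-&gt;open: data.db");
+    /* ... or format the message directly. */
+    fprintf(stderr, "open failed: %s\n", db_strerror(ret));
+}</pre></blockquote>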
+<table><tr><td><br></td><td width="1%"><a href="../../ref/debug/compile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/printlog.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/distrib/layout.html b/bdb/docs/ref/distrib/layout.html
new file mode 100644
index 00000000000..b851f62a04c
--- /dev/null
+++ b/bdb/docs/ref/distrib/layout.html
@@ -0,0 +1,74 @@
+<!--$Id: layout.so,v 10.25 2000/12/22 15:35:32 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Source code layout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Distribution</dl></h3></td>
+<td width="1%"><a href="../../ref/test/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/refs/refs.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Source code layout</h1>
+<p><table border=1 align=center>
+<tr><th>Directory</th><th>Description</th></tr>
+<tr><td>LICENSE</td><td>Berkeley DB Copyright</td></tr>
+<tr><td>btree</td><td>Btree access method source code.</td></tr>
+<tr><td>build_unix</td><td>UNIX build directory.</td></tr>
+<tr><td>build_vxworks</td><td>VxWorks build directory.</td></tr>
+<tr><td>build_win32</td><td>Windows build directory.</td></tr>
+<tr><td>clib</td><td>C library replacement functions.</td></tr>
+<tr><td>common</td><td>Common Berkeley DB functions.</td></tr>
+<tr><td>cxx</td><td>C++ API.</td></tr>
+<tr><td>db</td><td>Berkeley DB database interfaces.</td></tr>
+<tr><td>db185</td><td>Berkeley DB version 1.85 compatibility API</td></tr>
+<tr><td>db_archive</td><td>The db_archive utility.</td></tr>
+<tr><td>db_checkpoint</td><td>The db_checkpoint utility.</td></tr>
+<tr><td>db_deadlock</td><td>The db_deadlock utility.</td></tr>
+<tr><td>db_dump</td><td>The db_dump utility.</td></tr>
+<tr><td>db_dump185</td><td>The db_dump185 utility.</td></tr>
+<tr><td>db_load</td><td>The db_load utility.</td></tr>
+<tr><td>db_printlog</td><td>The db_printlog debugging utility.</td></tr>
+<tr><td>db_recover</td><td>The db_recover utility.</td></tr>
+<tr><td>db_stat</td><td>The db_stat utility.</td></tr>
+<tr><td>db_upgrade</td><td>The db_upgrade utility.</td></tr>
+<tr><td>db_verify</td><td>The db_verify utility.</td></tr>
+<tr><td>dbm</td><td>The dbm/ndbm compatibility APIs.</td></tr>
+<tr><td>dist</td><td>Berkeley DB administration/distribution tools.</td></tr>
+<tr><td>docs</td><td>Documentation.</td></tr>
+<tr><td>env</td><td>Berkeley DB environment interfaces.</td></tr>
+<tr><td>examples_c</td><td>C API example programs.</td></tr>
+<tr><td>examples_cxx</td><td>C++ API example programs.</td></tr>
+<tr><td>examples_java</td><td>Java API example programs.</td></tr>
+<tr><td>hash</td><td>Hash access method.</td></tr>
+<tr><td>hsearch</td><td>The hsearch compatibility API.</td></tr>
+<tr><td>include</td><td>Include files.</td></tr>
+<tr><td>java</td><td>Java API.</td></tr>
+<tr><td>libdb_java</td><td>The libdb_java shared library.</td></tr>
+<tr><td>lock</td><td>Lock manager.</td></tr>
+<tr><td>log</td><td>Log manager.</td></tr>
+<tr><td>mp</td><td>Shared memory buffer pool.</td></tr>
+<tr><td>mutex</td><td>Mutexes.</td></tr>
+<tr><td>os</td><td>POSIX 1003.1 operating-system specific functionality.</td></tr>
+<tr><td>os_vxworks</td><td>VxWorks operating-system specific functionality.</td></tr>
+<tr><td>os_win32</td><td>Windows operating-system specific functionality.</td></tr>
+<tr><td>perl.BerkeleyDB</td><td>BerkeleyDB Perl module.</td></tr>
+<tr><td>perl.DB_File</td><td>DB_File Perl module.</td></tr>
+<tr><td>qam</td><td>Queue access method source code.</td></tr>
+<tr><td>rpc_client</td><td>RPC client interface.</td></tr>
+<tr><td>rpc_server</td><td>RPC server utility.</td></tr>
+<tr><td>tcl</td><td>Tcl API.</td></tr>
+<tr><td>test</td><td>Test suite.</td></tr>
+<tr><td>txn</td><td>Transaction manager.</td></tr>
+<tr><td>xa</td><td>X/Open Distributed Transaction Processing XA interface.</td></tr>
+</table>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/test/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/refs/refs.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/dumpload/format.html b/bdb/docs/ref/dumpload/format.html
new file mode 100644
index 00000000000..fd52e530a02
--- /dev/null
+++ b/bdb/docs/ref/dumpload/format.html
@@ -0,0 +1,69 @@
+<!--$Id: format.so,v 10.14 2000/03/22 21:56:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Dump output formats</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Dumping and Reloading</dl></h3></td>
+<td width="1%"><a href="../../ref/dumpload/utility.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/text.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Dump output formats</h1>
+<p>There are two output formats used by <a href="../../utility/db_dump.html">db_dump</a> and <a href="../../utility/db_dump.html">db_dump185</a>.
+<p>In both output formats, the first few lines of the output contain header
+information describing the underlying access method, filesystem page size
+and other bookkeeping information.
+<p>The header information starts with a single line VERSION=N, where N is
+the version number of the dump output format.
+<p>The header information is then output in name=value pairs, where name may
+be any of the keywords listed in the <a href="../../utility/db_load.html">db_load</a> manual page, and
+value will be its value. While this header information can be manually
+edited before the database is reloaded, there is rarely any reason to do
+so, as all of this information can also be specified or overridden by
+command-line arguments to <a href="../../utility/db_load.html">db_load</a>.
+<p>The header information ends with a single line HEADER=END.
+<p>Following the header information are the key/data pairs from the database.
+If the database being dumped is of type Btree or Hash, or if the
+<b>-k</b> option has been specified, the output will be paired lines of
+text, where the first line of the pair is the key item, and the second
+line of the pair is its corresponding data item. If the database being
+dumped is of type Queue or Recno and the <b>-k</b> option has not been
+specified, the output will be lines of text, where each line is the next
+data item for the database. Each of these lines will be preceded by a
+single space.
+<p>If the <b>-p</b> option to <a href="../../utility/db_dump.html">db_dump</a> or <a href="../../utility/db_dump.html">db_dump185</a> was
+specified, the key/data lines will consist of single characters
+representing any characters from the database that are <i>printing
+characters</i> and backslash (<b>\</b>) escaped characters
+for any that were not. Backslash characters appearing in the output mean
+one of two things: if the backslash character precedes another backslash
+character, it means that a literal backslash character occurred in the
+key or data item. If the backslash character precedes any other
+character, the next two characters must be interpreted as hexadecimal
+specification of a single character, e.g., <b>\0a</b> is
+a newline character in the ASCII character set.
+<p>Although some care should be exercised, it is perfectly reasonable to use
+standard text editors and tools to edit databases dumped using the
+<b>-p</b> option before re-loading them using the <a href="../../utility/db_load.html">db_load</a>
+utility.
+<p>Note that the definition of a printing character may vary from system to
+system, and so database representations created using the <b>-p</b>
+option may be less portable than those created without it.
+<p>If the <b>-p</b> option to <a href="../../utility/db_dump.html">db_dump</a> or <a href="../../utility/db_dump.html">db_dump185</a> is
+not specified, each output line will consist of paired hexadecimal values,
+e.g., the line <b>726f6f74</b> is the string <b>root</b> in the ASCII
+character set.
+<p>In all output formats, the key and data items are ended by a single line
+DATA=END.
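+<p>For example, a hypothetical dump of a small Btree database in the default
+hexadecimal format might look like the following (the header keywords and
+values shown are illustrative only):
+<p><blockquote><pre>VERSION=3
+format=bytevalue
+type=btree
+db_pagesize=8192
+HEADER=END
+ 6b657931
+ 76616c756531
+ 6b657932
+ 76616c756532
+DATA=END</pre></blockquote>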
+<p>Where multiple databases have been dumped from a file, the overall output
+will repeat, i.e., a new set of headers and a new set of data items.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/dumpload/utility.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/text.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/dumpload/text.html b/bdb/docs/ref/dumpload/text.html
new file mode 100644
index 00000000000..569980a1957
--- /dev/null
+++ b/bdb/docs/ref/dumpload/text.html
@@ -0,0 +1,32 @@
+<!--$Id: text.so,v 10.14 2000/12/04 20:49:18 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Loading text into databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Dumping and Reloading</dl></h3></td>
+<td width="1%"><a href="../../ref/dumpload/format.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/install/file.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Loading text into databases</h1>
+<p>The <a href="../../utility/db_load.html">db_load</a> utility can be used to load text into databases.
+The <b>-T</b> option permits non-database applications to create
+flat-text files that are then loaded into databases for fast,
+highly-concurrent access. For example, the following command loads the
+standard UNIX <b>/etc/passwd</b> file into a database, with the login
+name as the key item and the entire password entry as the data item:
+<p><blockquote><pre>awk -F: '{print $1; print $0}' &lt; /etc/passwd |\
+ sed 's/\\/\\\\/g' | db_load -T -t hash passwd.db</pre></blockquote>
+<p>Note that backslash characters naturally occurring in the text are escaped
+to avoid interpretation as escape characters by <a href="../../utility/db_load.html">db_load</a>.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/dumpload/format.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/install/file.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/dumpload/utility.html b/bdb/docs/ref/dumpload/utility.html
new file mode 100644
index 00000000000..f9cb51c11a9
--- /dev/null
+++ b/bdb/docs/ref/dumpload/utility.html
@@ -0,0 +1,45 @@
+<!--$Id: utility.so,v 10.15 2000/12/04 20:49:18 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: The db_dump and db_load utilities</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Dumping and Reloading</dl></h3></td>
+<td width="1%"><a href="../../ref/sendmail/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/format.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>The db_dump and db_load utilities</h1>
+<p>There are three utilities used for dumping and loading Berkeley DB
+databases: <a href="../../utility/db_dump.html">db_dump</a>, <a href="../../utility/db_dump.html">db_dump185</a> and <a href="../../utility/db_load.html">db_load</a>.
+<p>The <a href="../../utility/db_dump.html">db_dump</a> and <a href="../../utility/db_dump.html">db_dump185</a> utilities dump Berkeley DB
+databases into a flat-text representation of the data that can
+be read by <a href="../../utility/db_load.html">db_load</a>. The only difference between them
+is that <a href="../../utility/db_dump.html">db_dump</a> reads Berkeley DB version 2 and greater
+database formats, while <a href="../../utility/db_dump.html">db_dump185</a> reads Berkeley DB version
+1.85 and 1.86 database formats.
+<p>The <a href="../../utility/db_load.html">db_load</a> utility reads either the output format used
+by the dump utilities or, optionally, a flat-text representation
+created using other tools, and stores it into a Berkeley DB database.
+<p>Dumping and reloading Hash databases that use user-defined hash functions
+will result in new databases that use the default hash function. While
+using the default hash function may not be optimal for the new database,
+it will continue to work correctly.
+<p>Dumping and reloading Btree databases that use user-defined prefix or
+comparison functions will result in new databases that use the default
+prefix and comparison functions. In this case, it is quite likely that
+applications will be unable to retrieve records, and possible that the
+load process itself will fail.
+<p>The only available workaround for either Hash or Btree databases is to
+modify the sources for the <a href="../../utility/db_load.html">db_load</a> utility to load the database
+using the correct hash, prefix and comparison functions.
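+<p>As an illustrative sketch of the common case (the file names
+<b>original.db</b>, <b>dump.txt</b> and <b>new.db</b> are placeholders),
+the following commands dump a database to a flat-text file with the
+<b>-f</b> option and then reload that file into a new database:
+<p><blockquote><pre>db_dump -f dump.txt original.db
+db_load -f dump.txt new.db</pre></blockquote>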
+<table><tr><td><br></td><td width="1%"><a href="../../ref/sendmail/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/format.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/env/create.html b/bdb/docs/ref/env/create.html
new file mode 100644
index 00000000000..374c7a6e005
--- /dev/null
+++ b/bdb/docs/ref/env/create.html
@@ -0,0 +1,73 @@
+<!--$Id: create.so,v 10.23 2000/12/04 18:05:41 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Creating an Environment</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td width="1%"><a href="../../ref/env/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/naming.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Creating an Environment</h1>
+<p>The <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function is the standard function for creating or
+joining a database environment. Transaction-protected or multi-process
+applications should call <a href="../../api_c/env_open.html">DBENV-&gt;open</a> before making any other calls
+to the Berkeley DB library. Applications must obtain an environment handle from
+the <a href="../../api_c/env_create.html">db_env_create</a> function before calling <a href="../../api_c/env_open.html">DBENV-&gt;open</a>.
+There are a large number of options that you can set to customize
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> for your environment. These options fall into four
+broad categories:
+<p><dl compact>
+<p><dt>Subsystem Initialization:<dd>These flags indicate which Berkeley DB subsystems will be initialized for the
+environment, and what operations will happen automatically when
+databases are accessed within the environment. The flags include
+<a href="../../api_c/env_open.html#DB_JOINENV">DB_JOINENV</a>, <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a>, <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>,
+<a href="../../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a>, <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a> and <a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a>.
+The <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag does initialization for Berkeley DB Concurrent Data Store
+applications; see <a href="../../ref/cam/intro.html">Building Berkeley DB Concurrent Data Store
+applications</a> for more information. The rest of the flags initialize
+a single subsystem, e.g., when <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> is specified,
+applications reading and writing databases opened in this environment
+will be using locking to ensure that they do not overwrite each other's
+changes.
+<p><dt>Recovery options:<dd>These flags indicate what recovery is to be performed on the environment
+before it is opened for normal use, and include <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> and
+<a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a>.
+<p><dt>Naming options:<dd>These flags modify how file naming happens in the environment, and include
+<a href="../../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> and <a href="../../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a>.
+<p><dt>Miscellaneous:<dd>Finally, there are a number of miscellaneous flags, such as <a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a>,
+which causes underlying files to be created as necessary.  See the
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> manual page for further information.
+</dl>
+<p>Most applications either specify only the <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a> flag or
+they specify all four subsystem initialization flags
+(<a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>, <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>, <a href="../../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a> and
+<a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a>). The former configuration is for applications that
+simply want to use the basic Access Method interfaces with a shared
+underlying buffer pool, but don't care about recoverability after
+application or system failure. The latter is for applications that need
+recoverability. There are situations where other combinations of the
+initialization flags make sense, but they are rare.
+<p>The <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> flag is specified by applications that want to
+perform any necessary database recovery when they start running, i.e., if
+there was a system or application failure the last time they ran, they
+want the databases to be made consistent before they start running again.
+It is not an error to specify this flag when no recovery needs to be
+done.
+<p>The <a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a> flag is more special-purpose. It performs
+catastrophic database recovery, and normally requires that some initial
+arrangements be made, i.e., archived log files be brought back into the
+filesystem. Applications should not normally specify this flag. Instead,
+under these rare conditions, the <a href="../../utility/db_recover.html">db_recover</a> utility should be
+used.
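+<p>As a rough sketch of the recoverable configuration described above
+(the environment home path and the error handling shown are illustrative
+assumptions, not part of the interface itself):
+<p><blockquote><pre>DB_ENV *dbenv;
+int ret;
+/* Allocate an environment handle. */
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+	return (1);
+/* Open or create the environment, running recovery if needed. */
+if ((ret = dbenv-&gt;open(dbenv, "/a/database", NULL,
+    DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
+    DB_INIT_LOG | DB_INIT_TXN | DB_RECOVER)) != 0) {
+	fprintf(stderr, "environment open: %s\n", db_strerror(ret));
+	return (1);
+}</pre></blockquote>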
+<table><tr><td><br></td><td width="1%"><a href="../../ref/env/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/naming.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/env/error.html b/bdb/docs/ref/env/error.html
new file mode 100644
index 00000000000..1a79d8fe550
--- /dev/null
+++ b/bdb/docs/ref/env/error.html
@@ -0,0 +1,57 @@
+<!--$Id: error.so,v 10.13 2001/01/11 15:23:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Error support</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td width="1%"><a href="../../ref/env/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/cam/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Error support</h1>
+<p>Berkeley DB offers programmatic support for displaying error return values.
+The <a href="../../api_c/env_strerror.html">db_strerror</a> interface returns a pointer to the error
+message corresponding to any Berkeley DB error return, similar to the ANSI C
+strerror interface, but able to handle both system error returns and
+Berkeley DB specific return values.
+<p>For example:
+<p><blockquote><pre>int ret;
+if ((ret = dbenv-&gt;set_cachesize(dbenv, 0, 32 * 1024, 1)) != 0) {
+ fprintf(stderr, "set_cachesize failed: %s\n", db_strerror(ret));
+ return (1);
+}</pre></blockquote>
+<p>There are also two additional error functions, <a href="../../api_c/db_err.html">DBENV-&gt;err</a> and
+<a href="../../api_c/db_err.html">DBENV-&gt;errx</a>. These functions work like the ANSI C printf
+interface, taking a printf-style format string and argument list, and
+writing a message constructed from the format string and arguments.
+<p>The <a href="../../api_c/db_err.html">DBENV-&gt;err</a> function appends the standard error string to the
+constructed message and the <a href="../../api_c/db_err.html">DBENV-&gt;errx</a> function does not.
+<p>Error messages can be configured always to include a prefix (e.g., the
+program name) using the <a href="../../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a> interface.
+<p>These functions provide simpler ways of displaying Berkeley DB error messages:
+<p><blockquote><pre>int ret;
+dbenv-&gt;set_errpfx(dbenv, argv0);
+if ((ret = dbenv-&gt;open(dbenv, home, NULL,
+ DB_CREATE | DB_INIT_LOG | DB_INIT_TXN | DB_USE_ENVIRON))
+ != 0) {
+ dbenv-&gt;err(dbenv, ret, "open: %s", home);
+ dbenv-&gt;errx(dbenv,
+ "contact your system administrator: session ID was %d",
+ session_id);
+ return (1);
+}</pre></blockquote>
+<p>For example, if the program was called "my_app", attempting to open an
+environment home directory in "/tmp/home", and the open call returned a
+permission error, the error messages shown would look like:
+<p><blockquote><pre>my_app: open: /tmp/home: Permission denied.
+my_app: contact your system administrator: session ID was 2</pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/env/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/cam/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/env/intro.html b/bdb/docs/ref/env/intro.html
new file mode 100644
index 00000000000..a555c2f0f9e
--- /dev/null
+++ b/bdb/docs/ref/env/intro.html
@@ -0,0 +1,56 @@
+<!--$Id: intro.so,v 10.25 2000/03/18 21:43:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td width="1%"><a href="../../ref/arch/utilities.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/create.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>A Berkeley DB environment is an encapsulation of one or more databases, log
+files and shared information about the database environment such as shared
+memory buffer cache pages.
+<p>The simplest way to administer a Berkeley DB application environment is to
+create a single <b>home</b> directory that stores the files for the
+applications that will share the environment. The environment home
+directory must be created before any Berkeley DB applications are run. Berkeley DB
+itself never creates the environment home directory. The environment can
+then be identified by the name of that directory.
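+For example, an administrator might create the home directory before the
+first application run (the pathname is illustrative, and the directory
+permissions should be chosen to match the application's security
+requirements):
+<p><blockquote><pre>mkdir /a/database</pre></blockquote>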
+<p>An environment may be shared by any number of applications as well as by
+any number of threads within the applications. It is possible for an
+environment to include resources from other directories on the system,
+and applications often choose to distribute resources to other directories
+or disks for performance or other reasons. However, by default, the
+databases, shared regions (the locking, logging, memory pool, and
+transaction shared memory areas) and log files will be stored in a single
+directory hierarchy.
+<p>It is important to realize that all applications sharing a database
+environment implicitly trust each other. They have access to each other's
+data as it resides in the shared regions and they will share resources
+such as buffer space and locks. At the same time, any applications using
+the same databases <b>must</b> share an environment if consistency is
+to be maintained between them.
+<p>The Berkeley DB environment is created and described by the <a href="../../api_c/env_create.html">db_env_create</a>
+and <a href="../../api_c/env_open.html">DBENV-&gt;open</a> interfaces. In situations where customization is
+desired, such as storing log files on a separate disk drive, applications
+must describe the customization by either creating an environment
+configuration file in the environment home directory or by arguments
+passed to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> interface. See the documentation on that
+function for details on this procedure.
+<p>Once an environment has been created, database files specified using
+relative pathnames will be named relative to the home directory. Using
+pathnames relative to the home directory allows the entire environment
+to be easily moved to facilitate restoring and recovering a database in
+a different directory or on a different system.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/arch/utilities.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/create.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/env/naming.html b/bdb/docs/ref/env/naming.html
new file mode 100644
index 00000000000..fd575396210
--- /dev/null
+++ b/bdb/docs/ref/env/naming.html
@@ -0,0 +1,145 @@
+<!--$Id: naming.so,v 10.36 2001/01/09 15:36:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: File naming</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td width="1%"><a href="../../ref/env/create.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/security.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>File naming</h1>
+<p>The most important task of the environment is to structure file naming
+within Berkeley DB.
+<p>Each of the locking, logging, memory pool and transaction subsystems of
+Berkeley DB requires shared memory regions, backed by the filesystem.  Further,
+cooperating applications (or multiple invocations of the same application)
+must agree on the location of the shared memory regions and other files
+used by the Berkeley DB subsystems, the log files used by the logging subsystem,
+and, of course, the data files. Although it is possible to specify full
+pathnames to all Berkeley DB functions, this is cumbersome and requires
+that applications be recompiled when database files are moved.
+<p>Applications are normally expected to specify a single directory home for
+their database. This can be done easily in the call to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>
+by specifying a value for the <b>db_home</b> argument. There are more
+complex configurations where it may be desirable to override
+<b>db_home</b> or provide supplementary path information.
+<h3>Specifying file naming to Berkeley DB</h3>
+<p>The following describes the possible ways in which file naming information
+may be specified to the Berkeley DB library. The specific circumstances and
+order in which these ways are applied are described in the following
+section.
+<p><dl compact>
+<p><dt><b><a name="db_home">db_home</a></b><dd>If the <b>db_home</b> argument to <a href="../../api_c/env_open.html">DBENV-&gt;open</a> is non-NULL, its
+value may be used as the database home, and files named relative to its
+path.
+<p><dt><a name="DB_HOME">DB_HOME</a><dd>If the DB_HOME environment variable is set when <a href="../../api_c/env_open.html">DBENV-&gt;open</a> is
+called, its value may be used as the database home, and files named
+relative to its path.
+<p>The DB_HOME environment variable is intended to permit users and system
+administrators to override application and installation defaults, e.g.:
+<p><blockquote><pre>env DB_HOME=/database/my_home application</pre></blockquote>
+<p>Application writers are encouraged to support the <b>-h</b> option
+found in the supporting Berkeley DB utilities to let users specify a database
+home.
+<p><dt>DB_ENV methods<dd>There are three DB_ENV methods that affect file naming. The
+<a href="../../api_c/env_set_data_dir.html">DBENV-&gt;set_data_dir</a> function specifies a directory to search for database
+files. The <a href="../../api_c/env_set_lg_dir.html">DBENV-&gt;set_lg_dir</a> function specifies a directory in which to
+create logging files. The <a href="../../api_c/env_set_tmp_dir.html">DBENV-&gt;set_tmp_dir</a> function specifies a
+directory in which to create backing temporary files. These methods are
+intended to permit applications to customize file location for a database.
+For example, an application writer can place data files and log files in
+different directories, or instantiate a new log directory each time the
+application runs.
+<p><dt><a name="DB_CONFIG">DB_CONFIG</a><dd>The same information specified to the above DB_ENV methods may also
+be specified using a configuration file. If an environment home directory
+has been specified (either by the application specifying a non-NULL
+<b>db_home</b> argument to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>, or by the application
+setting the DB_USE_ENVIRON or DB_USE_ENVIRON_ROOT flags and the DB_HOME
+environment variable being set), any file named <b>DB_CONFIG</b> in the
+database home directory will be read for lines of the format <b>NAME
+VALUE</b>.
+<p>The characters delimiting the two parts of the entry may be one or more
+whitespace characters, and trailing whitespace characters are discarded.
+All empty lines or lines whose first character is a whitespace or hash
+(<b>#</b>) character will be ignored. Each line must specify both
+the NAME and the VALUE of the pair. The specific NAME VALUE pairs are
+documented in the <a href="../../api_c/env_set_data_dir.html">DBENV-&gt;set_data_dir</a>,
+<a href="../../api_c/env_set_lg_dir.html">DBENV-&gt;set_lg_dir</a> and <a href="../../api_c/env_set_tmp_dir.html">DBENV-&gt;set_tmp_dir</a> manual pages.
+<p>The DB_CONFIG configuration file is intended to permit systems to
+customize file location for an environment independent of applications
+using that database. For example, a database administrator can move the
+database log and data files to a different location without application
+recompilation.  A sample DB_CONFIG file is sketched following this list.
+</dl>
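+<p>The following is a hypothetical DB_CONFIG file; the directory names are
+illustrative, and the NAME strings shown (DB_DATA_DIR, DB_LOG_DIR and
+DB_TMP_DIR) should be checked against the manual pages referenced above
+for the release in use:
+<p><blockquote><pre># Store data files in datadir and log files in logdir,
+# both relative to the environment home directory.
+DB_DATA_DIR datadir
+DB_LOG_DIR logdir
+# Create temporary backing files in /b/temporary.
+DB_TMP_DIR /b/temporary</pre></blockquote>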
+<h3>File name resolution in Berkeley DB</h3>
+<p>The following describes the specific circumstances and order in which the
+different ways of specifying file naming information are applied. Berkeley DB
+file name processing proceeds sequentially through the following steps:
+<p><dl compact>
+<p><dt>absolute pathnames<dd>If the file name specified to a Berkeley DB function is an absolute pathname,
+that file name is used without modification by Berkeley DB.
+<p>On UNIX systems, an absolute pathname is defined as any pathname that
+begins with a leading slash (<b>/</b>).
+<p>On Windows systems, an absolute pathname is any pathname that begins with
+a leading slash or leading backslash (<b>\</b>), or any
+pathname beginning with a single alphabetic character, a colon and a
+leading slash or backslash, e.g., <b>C:/tmp</b>.
+<p><dt>DB_ENV methods, DB_CONFIG<dd>If a relevant configuration string (e.g., set_data_dir) is specified
+either by calling a DB_ENV method or as a line in the DB_CONFIG
+configuration file, the VALUE from the <b>NAME VALUE</b> pair is
+prepended to the current file name. If the resulting file name is an
+absolute pathname, the file name is used without further modification by
+Berkeley DB.
+<p><dt><b>db_home</b><dd>If the application specified a non-NULL <b>db_home</b> argument to
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> its value is prepended to the current file name. If
+the resulting file name is an absolute pathname, the file name is used
+without further modification by Berkeley DB.
+<p><dt>DB_HOME<dd>If the <b>db_home</b> argument is NULL, the DB_HOME environment variable
+is set, and the application has specified the appropriate DB_USE_ENVIRON
+or DB_USE_ENVIRON_ROOT flag, the environment variable's value is
+prepended to the current file name.  If the resulting file name is an
+absolute pathname, the file name is used without further modification by
+Berkeley DB.
+<p><dt>(nothing)<dd>Finally, all file names are interpreted relative to the current working
+directory of the process.
+</dl>
+<p>The common model for a Berkeley DB environment is one where only the DB_HOME
+environment variable, or the <b>db_home</b> argument, is specified. In
+this case, all data file names are relative to that directory, and all
+files created by the Berkeley DB subsystems will be created in that directory.
+<p>The more complex model for a transaction environment might be one where
+a database home is specified, using either the DB_HOME environment
+variable or the <b>db_home</b> argument to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>, and then
+the data directory and logging directory are set to the relative path
+names of directories underneath the environment home.
+<h3>Examples</h3>
+Store all files in the directory <b>/a/database</b>:
+<p><blockquote><pre>DBENV-&gt;open(DBENV, "/a/database", ...);</pre></blockquote>
+Create temporary backing files in <b>/b/temporary</b>, and all other files
+in <b>/a/database</b>:
+<p><blockquote><pre>DBENV-&gt;set_tmp_dir(DBENV, "/b/temporary");
+DBENV-&gt;open(DBENV, "/a/database", ...);</pre></blockquote>
+Store data files in <b>/a/database/datadir</b>, log files in
+<b>/a/database/logdir</b>, and all other files in the directory
+<b>/a/database</b>:
+<p><blockquote><pre>DBENV-&gt;set_lg_dir(DBENV, "logdir");
+DBENV-&gt;set_data_dir(DBENV, "datadir");
+DBENV-&gt;open(DBENV, "/a/database", ...);</pre></blockquote>
+<p>Store data files in <b>/a/database/data1</b> and <b>/b/data2</b>, and
+all other files in the directory <b>/a/database</b>. Any data files
+that are created will be created in <b>/b/data2</b>, because it is the
+first DB_DATA_DIR directory specified:
+<p><blockquote><pre>DBENV-&gt;set_data_dir(DBENV, "/b/data2");
+DBENV-&gt;set_data_dir(DBENV, "data1");
+DBENV-&gt;open(DBENV, "/a/database", ...);</pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/env/create.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/security.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/env/open.html b/bdb/docs/ref/env/open.html
new file mode 100644
index 00000000000..f13675c7365
--- /dev/null
+++ b/bdb/docs/ref/env/open.html
@@ -0,0 +1,30 @@
+<!--$Id: open.so,v 10.14 2000/03/18 21:43:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening databases within the environment</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td width="1%"><a href="../../ref/env/remote.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening databases within the environment</h1>
+<p>Once the environment has been created, database handles may be created
+and then opened within the environment. This is done by calling the
+<a href="../../api_c/db_create.html">db_create</a> interface and specifying the appropriate environment
+as an argument.
+<p>File naming, database operations and error handling will all be done as
+specified for the environment, e.g., if the <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> or
+<a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flags were specified when the environment was created
+or joined, database operations will automatically perform all necessary
+locking operations for the application.
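+<p>As a minimal sketch (assuming an environment handle <b>dbenv</b> that
+has already been opened as described previously; the database file name,
+type and mode shown are illustrative):
+<p><blockquote><pre>DB *dbp;
+int ret;
+/* Create a database handle associated with the environment. */
+if ((ret = db_create(&amp;dbp, dbenv, 0)) != 0)
+	return (1);
+/* Open (or create) a Btree database within the environment. */
+if ((ret = dbp-&gt;open(dbp,
+    "mydatabase.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "open: mydatabase.db");
+	return (1);
+}</pre></blockquote>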
+<table><tr><td><br></td><td width="1%"><a href="../../ref/env/remote.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/env/region.html b/bdb/docs/ref/env/region.html
new file mode 100644
index 00000000000..0dfa19672e5
--- /dev/null
+++ b/bdb/docs/ref/env/region.html
@@ -0,0 +1,66 @@
+<!--$Id: region.so,v 10.23 2000/08/09 15:45:52 sue Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Shared Memory Regions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td width="1%"><a href="../../ref/env/security.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/remote.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Shared Memory Regions</h1>
+<p>Each of the Berkeley DB subsystems within an environment is described by one or
+more regions. The regions contain all of the per-process and per-thread
+shared information, including mutexes, that comprises a Berkeley DB environment.
+These regions are created in one of three areas, depending on the flags
+specified to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function:
+<p><ol>
+<p><li>If the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag is specified to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>, regions
+are created in per-process heap memory, i.e., memory returned by
+<b>malloc</b>(3). In this case, the Berkeley DB environment may only be
+accessed by a single process, although that process may be
+multi-threaded.
+<p><li>If the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag is specified to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>,
+regions are created in system memory. When regions are created in system
+memory, the Berkeley DB environment may be accessed by both multiple processes
+and multiple threads within processes.
+<p>The system memory used by Berkeley DB is potentially useful past the lifetime
+of any particular process. Therefore, additional cleanup may be necessary
+after an application fails, as there may be no way for Berkeley DB to ensure
+that system resources backing the shared memory regions are returned to
+the system.
+<p>The system memory that is used is architecture-dependent. For example,
+on systems supporting X/Open-style shared memory interfaces, e.g., UNIX
+systems, the <b>shmget</b>(2) and related System V IPC interfaces are
+used. Additionally, VxWorks systems use system memory.
+In these cases, an initial segment ID must be specified by the
+application to ensure that applications do not overwrite each other's
+database environments, and so that the number of segments created does
+not grow without bound. See the <a href="../../api_c/env_set_shm_key.html">DBENV-&gt;set_shm_key</a> function for more
+information.
+<p><li>If no memory-related flags are specified to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>, then
+memory backed by the filesystem is used to store the regions. On UNIX
+systems, the Berkeley DB library will use the POSIX mmap interface. If mmap is
+not available, the UNIX shmget interfaces will be used, assuming they are
+available.
+</ol>
+<p>Any files created in the filesystem to back the regions are created in
+the environment home directory specified to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> call.
+These files are named __db.###, e.g., __db.001, __db.002 and so on.
+When region files are backed by the filesystem, one file per region is
+created. When region files are backed by system memory, a single file
+will still be created, as there must be a well-known name in the
+filesystem so that multiple processes can locate the system shared memory
+that is being used by the environment.
+<p>Statistics about the shared memory regions in the environment can be
+displayed using the <b>-e</b> option to the <a href="../../utility/db_stat.html">db_stat</a> utility.
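+For example (the environment home directory shown is illustrative):
+<p><blockquote><pre>db_stat -e -h /a/database</pre></blockquote>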
+<table><tr><td><br></td><td width="1%"><a href="../../ref/env/security.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/remote.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/env/remote.html b/bdb/docs/ref/env/remote.html
new file mode 100644
index 00000000000..3cd44a539bc
--- /dev/null
+++ b/bdb/docs/ref/env/remote.html
@@ -0,0 +1,48 @@
+<!--$Id: remote.so,v 11.5 2000/03/18 21:43:12 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Remote filesystems</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td width="1%"><a href="../../ref/env/region.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Remote filesystems</h1>
+<p>When regions are backed by the filesystem, it is a common error to attempt
+to create Berkeley DB environments backed by remote file systems such as the
+Network File System (NFS) or the Andrew File System (AFS). Remote
+filesystems rarely support mapping files into process memory, and even
+more rarely support correct semantics for mutexes after the attempt
+succeeds. For this reason, we strongly recommend that the database
+environment directory reside in a local filesystem.
+<p>For remote file systems that do allow system files to be mapped into
+process memory, home directories accessed via remote file systems cannot
+be used simultaneously from multiple clients. None of the commercial
+remote file systems available today implement coherent, distributed shared
+memory for remote-mounted files. As a result, different machines will
+see different versions of these shared regions and the system behavior is
+undefined.
+<p>Databases, log files and temporary files may be placed on remote
+filesystems, <b>as long as the remote filesystem fully supports
+standard POSIX filesystem semantics</b>, although the application may incur
+a performance penalty for doing so. Obviously, NFS-mounted databases
+cannot be accessed from more than one Berkeley DB environment (and therefore
+from more than one system) at a time, since no Berkeley DB database may be
+accessed from more than one Berkeley DB environment at a time.
+<p><dl compact>
+<p><dt>Linux note:<dd>Some Linux releases are known to not support complete semantics for the
+POSIX fsync call on NFS-mounted filesystems. No Berkeley DB files should be
+placed on NFS-mounted filesystems on these systems.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/env/region.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/env/security.html b/bdb/docs/ref/env/security.html
new file mode 100644
index 00000000000..84dab59b260
--- /dev/null
+++ b/bdb/docs/ref/env/security.html
@@ -0,0 +1,54 @@
+<!--$Id: security.so,v 10.15 2000/05/23 21:12:06 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Security</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td width="1%"><a href="../../ref/env/naming.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/region.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Security</h1>
+<p>The following are security issues that should be considered when writing
+Berkeley DB applications:
+<p><dl compact>
+<p><dt>Database environment permissions<dd>The directory used as the Berkeley DB database environment should have its
+permissions set to ensure that files in the environment are not accessible
+to users without appropriate permissions.  Applications that add to the
+user's permissions (e.g., UNIX setuid or setgid applications) must be
+carefully checked so that they do not permit illegal use of those
+permissions, such as general file access in the environment directory.
+<p><dt>Environment variables<dd>Setting the <a href="../../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> and <a href="../../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags
+and allowing the use of environment variables during file naming can be
+dangerous. Setting those flags in Berkeley DB applications with additional
+permissions (e.g., UNIX setuid or setgid applications) could potentially
+allow users to read and write databases to which they would not normally
+have access.
+<p><dt>File permissions<dd>By default, Berkeley DB always creates files readable and writeable by the owner
+and the group (i.e., S_IRUSR, S_IWUSR, S_IRGRP and S_IWGRP, or octal mode
+0660 on historic UNIX systems). The group ownership of created files is
+based on the system and directory defaults, and is not further specified
+by Berkeley DB.
+<p><dt>Temporary backing files<dd>If an unnamed database is created and the cache is too small to hold the
+database in memory, Berkeley DB will create a temporary physical file to enable
+it to page the database to disk as needed. In this case, environment
+variables such as <b>TMPDIR</b> may be used to specify the location of
+that temporary file. While temporary backing files are created readable
+and writeable by the owner only (i.e., S_IRUSR and S_IWUSR, or octal mode
+0600 on historic UNIX systems), some filesystems may not sufficiently
+protect temporary files created in random directories from improper
+access. Applications storing sensitive data in unnamed databases should
+use the <a href="../../api_c/env_set_tmp_dir.html">DBENV-&gt;set_tmp_dir</a> method to specify a temporary directory
+with known permissions, to be absolutely safe.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/env/naming.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/region.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/install/file.html b/bdb/docs/ref/install/file.html
new file mode 100644
index 00000000000..2ecb240e242
--- /dev/null
+++ b/bdb/docs/ref/install/file.html
@@ -0,0 +1,37 @@
+<!--$Id: file.so,v 10.16 2000/12/04 18:05:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: File utility /etc/magic information</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>System Installation Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/dumpload/text.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>File utility /etc/magic information</h1>
+<p>The <b>file</b>(1) utility is a UNIX utility that examines and
+classifies files, based on information found in its database of file
+types, the /etc/magic file. The following information may be added
+to your system's /etc/magic file to enable <b>file</b>(1) to
+correctly identify Berkeley DB database files.
+<p>The <b>file</b>(1) utility <b>magic</b>(5) information for the
+standard System V UNIX implementation of the <b>file</b>(1) utility
+is included in the Berkeley DB distribution for both
+<a href="magic.s5.be.txt">big-endian</a> (e.g., Sparc) and
+<a href="magic.s5.le.txt">little-endian</a> (e.g., x86) architectures.
+<p>The <b>file</b>(1) utility <b>magic</b>(5) information for
+Release 3.X of Ian Darwin's implementation of the file utility (as
+distributed by FreeBSD and most Linux distributions) is included in the
+Berkeley DB distribution. This <a href="magic.txt">magic.txt</a> information
+is correct for both big-endian and little-endian architectures.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/dumpload/text.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/install/magic.s5.be.txt b/bdb/docs/ref/install/magic.s5.be.txt
new file mode 100644
index 00000000000..1b8fcc1089e
--- /dev/null
+++ b/bdb/docs/ref/install/magic.s5.be.txt
@@ -0,0 +1,87 @@
+# Berkeley DB
+# $Id: magic.s5.be.txt,v 10.4 2000/07/07 21:02:22 krinsky Exp $
+#
+# System V /etc/magic files: big-endian version.
+#
+# Hash 1.85/1.86 databases store metadata in network byte order.
+# Btree 1.85/1.86 databases store the metadata in host byte order.
+# Hash and Btree 2.X and later databases store the metadata in host byte order.
+
+0 long 0x00053162 Berkeley DB 1.85/1.86 (Btree,
+>4 long 0x00000002 version 2,
+>4 long 0x00000003 version 3,
+>0 long 0x00053162 native byte-order)
+
+0 long 0x62310500 Berkeley DB 1.85/1.86 (Btree,
+>4 long 0x02000000 version 2,
+>4 long 0x03000000 version 3,
+>0 long 0x62310500 little-endian)
+
+12 long 0x00053162 Berkeley DB (Btree,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00053162 native byte-order)
+
+12 long 0x62310500 Berkeley DB (Btree,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x62310500 little-endian)
+
+0 long 0x00061561 Berkeley DB
+>4 long >2 1.86
+>4 long <3 1.85
+>0 long 0x00061561 (Hash,
+>4 long 2 version 2,
+>4 long 3 version 3,
+>8 long 0x000004D2 little-endian)
+>8 long 0x000010E1 native byte-order)
+
+12 long 0x00061561 Berkeley DB (Hash,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00061561 native byte-order)
+
+12 long 0x61150600 Berkeley DB (Hash,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x61150600 little-endian)
+
+12 long 0x00042253 Berkeley DB (Queue,
+>16 long 0x00000001 version 1,
+>16 long 0x00000002 version 2,
+>16 long 0x00000003 version 3,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00042253 native byte-order)
+
+12 long 0x53220400 Berkeley DB (Queue,
+>16 long 0x01000000 version 1,
+>16 long 0x02000000 version 2,
+>16 long 0x03000000 version 3,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x53220400 little-endian)
diff --git a/bdb/docs/ref/install/magic.s5.le.txt b/bdb/docs/ref/install/magic.s5.le.txt
new file mode 100644
index 00000000000..c8871fedf9a
--- /dev/null
+++ b/bdb/docs/ref/install/magic.s5.le.txt
@@ -0,0 +1,87 @@
+# Berkeley DB
+# $Id: magic.s5.le.txt,v 10.4 2000/07/07 21:02:22 krinsky Exp $
+#
+# System V /etc/magic files: little-endian version.
+#
+# Hash 1.85/1.86 databases store metadata in network byte order.
+# Btree 1.85/1.86 databases store the metadata in host byte order.
+# Hash and Btree 2.X and later databases store the metadata in host byte order.
+
+0 long 0x00053162 Berkeley DB 1.85/1.86 (Btree,
+>4 long 0x00000002 version 2,
+>4 long 0x00000003 version 3,
+>0 long 0x00053162 native byte-order)
+
+0 long 0x62310500 Berkeley DB 1.85/1.86 (Btree,
+>4 long 0x02000000 version 2,
+>4 long 0x03000000 version 3,
+>0 long 0x62310500 big-endian)
+
+12 long 0x00053162 Berkeley DB (Btree,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00053162 native byte-order)
+
+12 long 0x62310500 Berkeley DB (Btree,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x62310500 big-endian)
+
+0 long 0x61150600 Berkeley DB
+>4 long >0x02000000 1.86
+>4 long <0x03000000 1.85
+>0	long	0x61150600	(Hash,
+>4 long 0x02000000 version 2,
+>4 long 0x03000000 version 3,
+>8 long 0xD2040000 native byte-order)
+>8 long 0xE1100000 big-endian)
+
+12 long 0x00061561 Berkeley DB (Hash,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00061561 native byte-order)
+
+12 long 0x61150600 Berkeley DB (Hash,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x61150600 big-endian)
+
+12 long 0x00042253 Berkeley DB (Queue,
+>16 long 0x00000001 version 1,
+>16 long 0x00000002 version 2,
+>16 long 0x00000003 version 3,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00042253 native byte-order)
+
+12 long 0x53220400 Berkeley DB (Queue,
+>16 long 0x01000000 version 1,
+>16 long 0x02000000 version 2,
+>16 long 0x03000000 version 3,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x53220400 big-endian)
diff --git a/bdb/docs/ref/install/magic.txt b/bdb/docs/ref/install/magic.txt
new file mode 100644
index 00000000000..c28329f4078
--- /dev/null
+++ b/bdb/docs/ref/install/magic.txt
@@ -0,0 +1,56 @@
+# Berkeley DB
+# $Id: magic.txt,v 10.10 2000/07/07 21:02:22 krinsky Exp $
+#
+# Ian Darwin's file /etc/magic files: big/little-endian version.
+#
+# Hash 1.85/1.86 databases store metadata in network byte order.
+# Btree 1.85/1.86 databases store the metadata in host byte order.
+# Hash and Btree 2.X and later databases store the metadata in host byte order.
+
+0 long 0x00061561 Berkeley DB
+>8 belong 4321
+>>4 belong >2 1.86
+>>4 belong <3 1.85
+>>4 belong >0 (Hash, version %d, native byte-order)
+>8 belong 1234
+>>4 belong >2 1.86
+>>4 belong <3 1.85
+>>4 belong >0 (Hash, version %d, little-endian)
+
+0 belong 0x00061561 Berkeley DB
+>8 belong 4321
+>>4 belong >2 1.86
+>>4 belong <3 1.85
+>>4 belong >0 (Hash, version %d, big-endian)
+>8 belong 1234
+>>4 belong >2 1.86
+>>4 belong <3 1.85
+>>4 belong >0 (Hash, version %d, native byte-order)
+
+0 long 0x00053162 Berkeley DB 1.85/1.86
+>4 long >0 (Btree, version %d, native byte-order)
+0 belong 0x00053162 Berkeley DB 1.85/1.86
+>4 belong >0 (Btree, version %d, big-endian)
+0 lelong 0x00053162 Berkeley DB 1.85/1.86
+>4 lelong >0 (Btree, version %d, little-endian)
+
+12 long 0x00061561 Berkeley DB
+>16 long >0 (Hash, version %d, native byte-order)
+12 belong 0x00061561 Berkeley DB
+>16 belong >0 (Hash, version %d, big-endian)
+12 lelong 0x00061561 Berkeley DB
+>16 lelong >0 (Hash, version %d, little-endian)
+
+12 long 0x00053162 Berkeley DB
+>16 long >0 (Btree, version %d, native byte-order)
+12 belong 0x00053162 Berkeley DB
+>16 belong >0 (Btree, version %d, big-endian)
+12 lelong 0x00053162 Berkeley DB
+>16 lelong >0 (Btree, version %d, little-endian)
+
+12 long 0x00042253 Berkeley DB
+>16 long >0 (Queue, version %d, native byte-order)
+12 belong 0x00042253 Berkeley DB
+>16 belong >0 (Queue, version %d, big-endian)
+12 lelong 0x00042253 Berkeley DB
+>16 lelong >0 (Queue, version %d, little-endian)
diff --git a/bdb/docs/ref/intro/data.html b/bdb/docs/ref/intro/data.html
new file mode 100644
index 00000000000..e9d6ead064d
--- /dev/null
+++ b/bdb/docs/ref/intro/data.html
@@ -0,0 +1,54 @@
+<!--$Id: data.so,v 10.1 2000/09/22 18:23:58 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: An introduction to data management</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/terrain.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>An introduction to data management</h1>
+<p>Cheap, powerful computing and networking have created countless new
+applications that could not have existed a decade ago. The advent of
+the World-Wide Web, and its influence in driving the Internet into homes
+and businesses, is one obvious example. Equally important, though, is
+the shift away from large, general-purpose desktop and server computers
+toward smaller, special-purpose devices with built-in processing and
+communications services.
+<p>As computer hardware has spread into virtually every corner of our
+lives, of course, software has followed. Software developers today are
+building applications not just for conventional desktop and server
+environments, but also for handheld computers, home appliances,
+networking hardware, cars and trucks, factory floor automation systems,
+and more.
+<p>While these operating environments are diverse, the problems that
+software engineers must solve in them are often strikingly similar. Most
+systems must deal with the outside world, whether that means
+communicating with users or controlling machinery. As a result, most
+need some sort of I/O system. Even a simple, single-function system
+generally needs to handle multiple tasks, and so needs some kind of
+operating system to schedule and manage control threads. Also, many
+computer systems must store and retrieve data to track history, record
+configuration settings, or manage access.
+<p>Data management can be very simple. In some cases, just recording
+configuration in a flat text file is enough. More often, though,
+programs need to store and search a large amount of data, or
+structurally complex data. Database management systems are tools that
+programmers can use to do this work quickly and efficiently using
+off-the-shelf software.
+<p>Of course, database management systems have been around for a long time.
+Data storage is a problem dating back to the earliest days of computing.
+Software developers can choose from hundreds of good,
+commercially-available database systems. The problem is selecting the
+one that best solves the problems that their applications face.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/terrain.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/intro/dbis.html b/bdb/docs/ref/intro/dbis.html
new file mode 100644
index 00000000000..10c4abd9585
--- /dev/null
+++ b/bdb/docs/ref/intro/dbis.html
@@ -0,0 +1,159 @@
+<!--$Id: dbis.so,v 10.5 2001/01/19 17:30:29 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What is Berkeley DB?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/terrain.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/dbisnot.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What is Berkeley DB?</h1>
+<p>So far, we've discussed database systems in general terms. It's time
+now to consider Berkeley DB in particular and see how it fits into the
+framework we have introduced. The key question is, what kinds of
+applications should use Berkeley DB?
+<p>Berkeley DB is an open source embedded database library that provides
+scalable, high-performance, transaction-protected data management
+services to applications. Berkeley DB provides a simple function-call API
+for data access and management.
+<p>By "open source," we mean that Berkeley DB is distributed under a license that
+conforms to the <a href="http://www.opensource.org/osd.html">Open
+Source Definition</a>. This license guarantees that Berkeley DB is freely
+available for use and redistribution in other open source products.
+<a href="http://www.sleepycat.com">Sleepycat Software</a> sells
+commercial licenses for redistribution in proprietary applications, but
+in all cases the complete source code for Berkeley DB is freely available for
+download and use.
+<p>Berkeley DB is embedded because it links directly into the application. It
+runs in the same address space as the application. As a result, no
+inter-process communication, either over the network or between
+processes on the same machine, is required for database operations.
+Berkeley DB provides a simple function-call API for a number of programming
+languages, including C, C++, Java, Perl, Tcl, Python, and PHP. All
+database operations happen inside the library. Multiple processes, or
+multiple threads in a single process, can all use the database at the
+same time as each uses the Berkeley DB library. Low-level services like
+locking, transaction logging, shared buffer management, memory
+management, and so on are all handled transparently by the library.
+<p>The library is extremely portable. It runs under almost all UNIX and
+Linux variants, Windows, and a number of embedded real-time operating
+systems. It runs on both 32-bit and 64-bit systems.
+It has been deployed on high-end Internet servers, desktop machines,
+palmtop computers, set-top boxes, network switches, and elsewhere.
+Once Berkeley DB is linked into
+the application, the end user generally does not know that there's a
+database present at all.
+<p>Berkeley DB is scalable in a number of respects. The database library itself
+is quite compact (under 300 kilobytes of text space on common
+architectures), but it can manage databases up to 256 terabytes in size.
+It also supports high concurrency, with thousands of users operating on
+the same database at the same time. Berkeley DB is small enough to run in
+tightly constrained embedded systems, but can take advantage of
+gigabytes of memory and terabytes of disk on high-end server machines.
+<p>Berkeley DB generally outperforms relational and object-oriented database
+systems in embedded applications for a couple of reasons. First, because
+the library runs in the same address space, no inter-process
+communication is required for database operations. The cost of
+communicating between processes on a single machine, or among machines
+on a network, is much higher than the cost of making a function call.
+Second, because Berkeley DB uses a simple function-call interface for all
+operations, there is no query language to parse, and no execution plan
+to produce.
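+<p>To make the function-call style concrete, here is a minimal sketch in
+C. It is written against the 3.x-era API, and the file name
+<b>example.db</b> and the key and value strings are invented for
+illustration. The application creates a handle, opens a B-tree database,
+and stores and fetches a record entirely through in-process calls:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+main()
+{
+    DB *dbp;
+    DBT key, data;
+
+    /* Create a database handle and open (or create) a B-tree database. */
+    if (db_create(&amp;dbp, NULL, 0) != 0)
+        return (1);
+    if (dbp-&gt;open(dbp, "example.db", NULL, DB_BTREE, DB_CREATE, 0664) != 0)
+        return (1);
+
+    /* Store one key/value pair, then read it back. */
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = "fruit";
+    key.size = sizeof("fruit");
+    data.data = "apple";
+    data.size = sizeof("apple");
+    (void)dbp-&gt;put(dbp, NULL, &amp;key, &amp;data, 0);
+
+    memset(&amp;data, 0, sizeof(data));
+    if (dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0) == 0)
+        printf("%s -&gt; %s\n", (char *)key.data, (char *)data.data);
+
+    (void)dbp-&gt;close(dbp, 0);
+    return (0);
+}</pre></blockquote>
+<p>No server is contacted anywhere in this fragment; every call executes
+inside the library, in the application's own address space.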
+<h3>Data Access Services</h3>
+<p>Berkeley DB applications can choose the storage structure that best suits the
+application. Berkeley DB supports hash tables, B-trees, simple
+record-number-based storage, and persistent queues. Programmers can
+create tables using any of these storage structures, and can mix
+operations on different kinds of tables in a single application.
+<p>Hash tables are generally good for very large databases that need
+predictable search and update times for random-access records. Hash
+tables allow users to ask, "Does this key exist?" or to fetch a record
+with a known key. Hash tables do not allow users to ask for records
+with keys that are close to a known key.
+<p>B-trees are better for range-based searches, as when the application
+needs to find all records with keys between some starting and ending
+value. B-trees also do a better job of exploiting <i>locality
+of reference</i>. If the application is likely to touch keys near each
+other at the same time, B-trees work well. The tree structure keeps
+keys that are close together near one another in storage, so fetching
+nearby values usually doesn't require a disk access.
+<p>Record-number-based storage is natural for applications that need
+to store and fetch records, but that do not have a simple way to
+generate keys of their own. In a record number table, the record
+number is the key for the record. Berkeley DB can generate these
+record numbers automatically.
+<p>Queues are well-suited for applications that create records, and then
+must deal with those records in creation order. A good example is an
+on-line purchasing system. Orders can enter the system at any time,
+but should generally be filled in the order in which they were placed.
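+<p>The choice of storage structure is made when each database is opened.
+In the C API, the access method is simply a parameter to the open call.
+The following fragment is a sketch only; the file names are invented,
+and each call assumes its own DB handle created as in the earlier
+example:
+<p><blockquote><pre>/*
+ * Each call below assumes its own freshly created DB handle (a handle
+ * is opened only once); error handling is omitted for brevity.
+ */
+(void)dbp-&gt;open(dbp, "by_key.db", NULL, DB_BTREE, DB_CREATE, 0664);    /* range searches */
+(void)dbp-&gt;open(dbp, "lookup.db", NULL, DB_HASH, DB_CREATE, 0664);     /* exact-match lookups */
+(void)dbp-&gt;open(dbp, "numbered.db", NULL, DB_RECNO, DB_CREATE, 0664);  /* record numbers as keys */
+
+/* Queue databases hold fixed-length records; set the length before opening. */
+(void)dbp-&gt;set_re_len(dbp, 64);
+(void)dbp-&gt;open(dbp, "orders.db", NULL, DB_QUEUE, DB_CREATE, 0664);    /* first-in, first-out */</pre></blockquote>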
+<h3>Data management services</h3>
+<p>Berkeley DB offers important data management services, including concurrency,
+transactions, and recovery. All of these services work on all of the
+storage structures.
+<p>Many users can work on the same database concurrently. Berkeley DB handles
+locking transparently, ensuring that two users working on the same
+record do not interfere with one another.
+<p>The library provides strict ACID transaction semantics. Some systems
+allow the user to relax, for example, the isolation guarantees that the
+database system makes. Berkeley DB ensures that all applications can see only
+committed updates.
+<p>Multiple operations can be grouped into a single transaction, and can
+be committed or rolled back atomically. Berkeley DB uses a technique called
+<i>two-phase locking</i> to be sure that concurrent transactions
+are isolated from one another, and a technique called
+<i>write-ahead logging</i> to guarantee that committed changes
+survive application, system, or hardware failures.
+<p>When an application starts up, it can ask Berkeley DB to run recovery.
+Recovery restores the database to a clean state, with all committed
+changes present, even after a crash. The database is guaranteed to be
+consistent and all committed changes are guaranteed to be present when
+recovery completes.
+<p>An application can specify, when it starts up, which data management
+services it will use. Some applications need fast,
+single-user, non-transactional B-tree data storage. In that case, the
+application can disable the locking and transaction systems, and will
+not incur the overhead of locking or logging. If an application needs
+to support multiple concurrent users, but doesn't need transactions, it
+can turn on locking without transactions. Applications that need
+concurrent, transaction-protected database access can enable all of the
+subsystems.
+<p>In all these cases, the application uses the same function-call API to
+fetch and update records.
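+<p>As a sketch of how this selection might look in the 3.x-era C API (the
+environment home directory is invented for illustration), an application
+enables only the subsystems it needs when it opens its environment. The
+three calls below are alternatives; a real application would make
+exactly one of them:
+<p><blockquote><pre>DB_ENV *dbenv;
+
+(void)db_env_create(&amp;dbenv, 0);
+
+/* Single-user, non-transactional storage: only the shared buffer pool. */
+(void)dbenv-&gt;open(dbenv, "/var/myapp/db", DB_CREATE | DB_INIT_MPOOL, 0);
+
+/* Concurrent users without transactions: add the locking subsystem. */
+(void)dbenv-&gt;open(dbenv, "/var/myapp/db",
+    DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK, 0);
+
+/* Full transactional operation: locking, logging and transactions. */
+(void)dbenv-&gt;open(dbenv, "/var/myapp/db",
+    DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN, 0);</pre></blockquote>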
+<h3>Design</h3>
+<p>Berkeley DB was designed to provide industrial-strength database services to
+application developers, without requiring them to become database
+experts. It is a classic C-library style <i>toolkit</i>, providing
+a broad base of functionality to application writers. Berkeley DB was designed
+by programmers, for programmers: its modular design surfaces simple,
+orthogonal interfaces to core services, and it provides mechanism (for
+example, good thread support) without imposing policy (for example, the
+use of threads is not required). Just as importantly, Berkeley DB allows
+developers to balance performance against the need for crash recovery
+and concurrent use. An application can use the storage structure that
+provides the fastest access to its data and can request only the degree
+of logging and locking that it needs.
+<p>Because of the tool-based approach and separate interfaces for each
+Berkeley DB subsystem, you can support a complete transaction environment for
+other system operations. Berkeley DB even allows you to wrap transactions
+around the standard UNIX file read and write operations! Further, Berkeley DB
+was designed to interact correctly with the native system's toolset, a
+feature no other database package offers. For example, Berkeley DB supports
+hot backups (database backups while the database is in use), using
+standard UNIX system utilities, e.g., dump, tar, cpio, pax or even cp.
+<p>Finally, because scripting language interfaces are available for Berkeley DB
+(notably Tcl and Perl), application writers can build incredibly powerful
+database engines with little effort. You can build transaction-protected
+database applications using your favorite scripting languages, an
+increasingly important feature in a world using CGI scripts to deliver
+HTML.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/terrain.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/dbisnot.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/intro/dbisnot.html b/bdb/docs/ref/intro/dbisnot.html
new file mode 100644
index 00000000000..a55fa71763e
--- /dev/null
+++ b/bdb/docs/ref/intro/dbisnot.html
@@ -0,0 +1,146 @@
+<!--$Id: dbisnot.so,v 10.3 2000/12/14 20:52:03 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What is Berkeley DB not?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/dbis.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/need.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What is Berkeley DB not?</h1>
+<p>In contrast to most other database systems, Berkeley DB provides relatively
+simple data access services.
+<p>Records in Berkeley DB are (<i>key</i>, <i>value</i>) pairs. Berkeley DB
+supports only a few logical operations on records. They are:
+<ul type=disc>
+<li>Insert a record in a table.
+<li>Delete a record from a table.
+<li>Find a record in a table by looking up its key.
+<li>Update a record that has already been found.
+</ul>
+<p>Notice that Berkeley DB never operates on the value part of a record.
+Values are simply payload, to be
+stored with keys and reliably delivered back to the application on
+demand.
+<p>Both keys and values can be arbitrary bit strings, either fixed-length
+or variable-length. As a result, programmers can put native programming
+language data structures into the database without converting them to
+a foreign record format first. Storage and retrieval are very simple,
+but the application needs to know what the structure of a key and a
+value is in advance. It cannot ask Berkeley DB, because Berkeley DB doesn't know.
+<p>This is an important feature of Berkeley DB, and one worth considering more
+carefully. On the one hand, Berkeley DB cannot provide the programmer with
+any information on the contents or structure of the values that it
+stores. The application must understand the keys and values that it
+uses. On the other hand, there is literally no limit to the data types
+that can be stored in a Berkeley DB database. The application never needs to
+convert its own program data into the data types that Berkeley DB supports.
+Berkeley DB can store and retrieve any data type the application uses, no
+matter how complex.
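+<p>For example (a sketch only; the <b>struct customer</b> type and the
+values are invented for illustration, and <b>dbp</b> is an open DB
+handle), a C application can use one of its own structures directly as
+the value part of a record:
+<p><blockquote><pre>struct customer {
+    int  id;
+    char name[64];
+    char email[64];
+};
+
+struct customer c;
+DBT key, data;
+
+memset(&amp;c, 0, sizeof(c));
+c.id = 42;
+strcpy(c.name, "A. Customer");
+strcpy(c.email, "ac@example.com");
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = &amp;c.id;       /* The key is the raw bytes of the id field... */
+key.size = sizeof(c.id);
+data.data = &amp;c;         /* ...and the value is the whole structure. */
+data.size = sizeof(c);
+
+(void)dbp-&gt;put(dbp, NULL, &amp;key, &amp;data, 0);</pre></blockquote>
+<p>Berkeley DB stores and returns exactly those bytes; only the application
+knows that the record is a <b>struct customer</b>.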
+<p>Because both keys and values can be up to four gigabytes in length, a
+single record can store images, audio streams, or other large data
+values. Large values are not treated specially in Berkeley DB. They are
+simply broken into page-sized chunks, and reassembled on demand when
+the application needs them. Unlike some other database systems, Berkeley DB
+offers no special support for binary large objects (BLOBs).
+<h3>Not a relational database</h3>
+<p>Berkeley DB is not a relational database.
+<p>First, Berkeley DB does not support SQL queries. All access to data is through
+the Berkeley DB API. Developers must learn a new set of interfaces in order
+to work with Berkeley DB. Although the interfaces are fairly simple, they are
+non-standard.
+<p>SQL support is a double-edged sword. One big advantage of relational
+databases is that they allow users to write simple declarative queries
+in a high-level language. The database system knows everything about
+the data and can carry out the command. This means that it's simple to
+search for data in new ways, and to ask new questions of the database.
+No programming is required.
+<p>On the other hand, if a programmer can predict in advance how an
+application will access data, then writing a low-level program to get
+and store records can be faster. It eliminates the overhead of query
+parsing, optimization, and execution. The programmer must understand
+the data representation, and must write the code to do the work, but
+once that's done, the application can be very fast.
+<p>Second, Berkeley DB has no notion of <i>schema</i> in the way that
+relational systems do. Schema is the structure of records in tables,
+and the relationships among the tables in the database. For example, in
+a relational system the programmer can create a record from a fixed menu
+of data types. Because the record types are declared to the system, the
+relational engine can reach inside records and examine individual values
+in them. In addition, programmers can use SQL to declare relationships
+among tables, and to create indexes on tables. Relational engines
+usually maintain these relationships and indexes automatically.
+<p>In Berkeley DB, the key and value in a record are opaque
+to Berkeley DB. They may have a rich
+internal structure, but the library is unaware of it. As a result, Berkeley DB
+cannot decompose the value part of a record into its constituent parts,
+and cannot use those parts to find values of interest. Only the
+application, which knows the data structure, can do that.
+<p>Berkeley DB does allow programmers to create indexes on tables, and to use
+those indexes to speed up searches. However, the programmer has no way
+to tell the library how different tables and indexes are related. The
+application needs to make sure that they all stay consistent. In the
+case of indexes in particular, if the application puts a new record into
+a table, it must also put a new record in the index for it. It's
+generally simple to write a single function to make the required
+updates, but it is work that relational systems do automatically.
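+<p>A sketch of such a function follows, reusing the invented
+<b>struct customer</b> from the earlier fragment and an equally invented
+name index; both database handles are assumed to be open already:
+<p><blockquote><pre>/*
+ * store_customer --
+ *  Insert a customer record and keep a hand-maintained name index
+ *  consistent; Berkeley DB does not do this for the application.
+ */
+int
+store_customer(DB *customers, DB *name_index, struct customer *c)
+{
+    DBT key, data, ikey, ival;
+    int ret;
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = &amp;c-&gt;id;
+    key.size = sizeof(c-&gt;id);
+    data.data = c;
+    data.size = sizeof(*c);
+
+    /* Primary record: id as the key, the full structure as the value. */
+    if ((ret = customers-&gt;put(customers, NULL, &amp;key, &amp;data, 0)) != 0)
+        return (ret);
+
+    /* Index record: name as the key, id as the value, added by hand. */
+    memset(&amp;ikey, 0, sizeof(ikey));
+    memset(&amp;ival, 0, sizeof(ival));
+    ikey.data = c-&gt;name;
+    ikey.size = strlen(c-&gt;name) + 1;
+    ival.data = &amp;c-&gt;id;
+    ival.size = sizeof(c-&gt;id);
+    return (name_index-&gt;put(name_index, NULL, &amp;ikey, &amp;ival, 0));
+}</pre></blockquote>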
+<p>Berkeley DB is not a relational system. Relational database systems are
+semantically rich and offer high-level database access. Compared to such
+systems, Berkeley DB is a high-performance, transactional library for record
+storage. It's possible to build a relational system on top of Berkeley DB. In
+fact, the popular MySQL relational system uses Berkeley DB for
+transaction-protected table management, and takes care of all the SQL
+parsing and execution. It uses Berkeley DB for the storage level, and provides
+the semantics and access tools.
+<h3>Not an object-oriented database</h3>
+<p>Object-oriented databases are designed for very tight integration with
+object-oriented programming languages. Berkeley DB is written entirely in the
+C programming language. It includes language bindings for C++, Java,
+and other languages, but the library has no information about the
+objects created in any object-oriented application. Berkeley DB never makes
+method calls on any application object. It has no idea what methods are
+defined on user objects, and cannot see the public or private members
+of any instance. The key and value parts of all records are opaque to
+Berkeley DB.
+<p>Berkeley DB cannot automatically page in referenced objects, as some
+object-oriented databases do. The object-oriented application programmer
+must decide what records are required, and must fetch them by making
+method calls on Berkeley DB objects.
+<h3>Not a network database</h3>
+<p>Berkeley DB does not support network-style navigation among records, as
+network databases do. Records in a Berkeley DB table may move around over
+time, as new records are added to the table and old ones are deleted.
+Berkeley DB is able to do fast searches for records based on keys, but there
+is no way to create a persistent physical pointer to a record.
+Applications can only refer to records by key, not by address.
+<h3>Not a database server</h3>
+<p>Berkeley DB is not a standalone database server. It is a library, and runs in
+the address space of the application that uses it. If more than one
+application links in Berkeley DB, then all can use the same database at the
+same time; the library handles coordination among the applications, and
+guarantees that they do not interfere with one another.
+<p>Recent releases of Berkeley DB allow programmers to compile the library as a
+standalone process, and to use RPC stubs to connect to it and to carry
+out operations. However, there are some important limitations to this
+feature. The RPC stubs provide exactly the same API that the library
+itself does. There is no higher-level access provided by the standalone
+process. Tuning the standalone process is difficult, since Berkeley DB does
+no threading in the library (applications can be threaded, but the
+library never creates a thread on its own).
+<p>It is possible to build a server application that uses Berkeley DB for data
+management. For example, many commercial and open source Lightweight
+Directory Access Protocol (LDAP) servers use Berkeley DB for record storage.
+LDAP clients connect to these servers over the network. Individual
+servers make calls through the Berkeley DB API to find records and return them
+to clients. On its own, however, Berkeley DB is not a server.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/dbis.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/need.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/intro/distrib.html b/bdb/docs/ref/intro/distrib.html
new file mode 100644
index 00000000000..a5ff52263c2
--- /dev/null
+++ b/bdb/docs/ref/intro/distrib.html
@@ -0,0 +1,28 @@
+<!--$Id: distrib.so,v 10.16 2000/09/22 18:23:58 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What does the Berkeley DB distribution include?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/what.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/where.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What does the Berkeley DB distribution include?</h1>
+<p>The Berkeley DB distribution includes complete source code for the Berkeley DB
+library, including all three Berkeley DB products and their supporting
+utilities, as well as complete documentation in HTML format.
+<p>The distribution does not include pre-built binaries or libraries, or
+hard-copy documentation. Pre-built libraries and binaries for some
+architecture/compiler combinations are available as part of Sleepycat
+Software's Berkeley DB support services.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/what.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/where.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/intro/need.html b/bdb/docs/ref/intro/need.html
new file mode 100644
index 00000000000..771dd98908c
--- /dev/null
+++ b/bdb/docs/ref/intro/need.html
@@ -0,0 +1,60 @@
+<!--$Id: need.so,v 10.2 2000/12/08 23:59:06 mao Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Do you need Berkeley DB?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/dbisnot.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/what.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Do you need Berkeley DB?</h1>
+<p>Berkeley DB is an ideal database system for applications that need fast,
+scalable, and reliable embedded database management. For applications
+that need different services, however, it can be a poor choice.
+<p>First, do you need the ability to access your data in ways you cannot
+predict in advance? If your users want to be able to enter SQL
+queries to perform
+complicated searches that you cannot program into your application to
+begin with, then you should consider a relational engine instead. Berkeley DB
+requires a programmer to write code in order to run a new kind of query.
+<p>On the other hand, if you can predict your data access patterns up front
+-- and in particular if you need fairly simple key/value lookups -- then
+Berkeley DB is a good choice. The queries can be coded up once, and will then
+run very quickly because there is no SQL to parse and execute.
+<p>Second, are there political arguments for or against a standalone
+relational server? If you're building an application for your own use
+and have a relational system installed with administrative support
+already, it may be simpler to use that than to build and learn Berkeley DB.
+On the other hand, if you'll be shipping many copies of your application
+to customers, and don't want your customers to have to buy, install,
+and manage a separate database system, then Berkeley DB may be a better
+choice.
+<p>Third, are there any technical advantages to an embedded database? If
+you're building an application that will run unattended for long periods
+of time, or for end users who are not sophisticated administrators, then
+a separate server process may be too big a burden. It will require
+separate installation and management, and if it creates new ways for
+the application to fail, or new complexities to master in the field,
+then Berkeley DB may be a better choice.
+<p>The fundamental question is, how closely do your requirements match the
+Berkeley DB design? Berkeley DB was conceived and built to provide fast, reliable,
+transaction-protected record storage. The library itself was never
+intended to provide interactive query support, graphical reporting
+tools, or similar services that some other database systems provide. We
+have tried always to err on the side of minimalism and simplicity. By
+keeping the library small and simple, we create fewer opportunities for
+bugs to creep in, and we guarantee that the database system stays fast,
+because there is very little code to execute. If your application needs
+that set of features, then Berkeley DB is almost certainly the best choice
+for you.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/dbisnot.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/what.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/intro/products.html b/bdb/docs/ref/intro/products.html
new file mode 100644
index 00000000000..ce04135f03a
--- /dev/null
+++ b/bdb/docs/ref/intro/products.html
@@ -0,0 +1,69 @@
+<!--$Id: products.so,v 10.13 2000/12/04 18:05:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Sleepycat Software's Berkeley DB products</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/where.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Sleepycat Software's Berkeley DB products</h1>
+<p>Sleepycat Software licenses three different products that use the Berkeley DB
+technology. Each product offers a distinct level of database support.
+It is not possible to mix and match products; that is, each application
+or group of applications must use the same Berkeley DB product.
+<p>All three products are included in the single Open Source distribution of
+Berkeley DB from Sleepycat Software, and building that distribution
+automatically builds all three products. Each product adds services, and
+new interfaces, to the product that precedes it in the list. As a result,
+developers can download Berkeley DB and build an application that does only
+single-user, read-only database access, and later add support for more
+users and more complex database access patterns.
+<p>Users who distribute Berkeley DB must ensure that they are licensed for the
+Berkeley DB interfaces they use. Information on licensing is available directly
+from Sleepycat Software.
+<h3>Berkeley DB Data Store</h3>
+<p>The Berkeley DB Data Store product is an embeddable, high-performance data store. It
+supports multiple concurrent threads of control to read information
+managed by Berkeley DB. When updates are required, only a single process may
+be using the database. That process may be multi-threaded, but only one
+thread of control should be allowed to update the database at any time.
+The Berkeley DB Data Store does no locking, and so provides no guarantees of correct
+behavior if more than one thread of control is updating the database at
+a time.
+<p>The Berkeley DB Data Store product includes the <a href="../../api_c/db_create.html">db_create</a> interface, the
+DB handle methods, and the methods returned by <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a>.
+<p>The Berkeley DB Data Store is intended for use in single-user or read-only applications
+that can guarantee that no more than one thread of control will ever
+update the database at any time.
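+<p>As an illustration of those interfaces (a sketch, again using the
+3.x-era C API; <b>dbp</b> is an already-open DB handle), a cursor can
+walk every record in a database:
+<p><blockquote><pre>int
+dump_sizes(DB *dbp)
+{
+    DBC *dbc;
+    DBT key, data;
+    int ret;
+
+    /* No transaction handle is passed in this product. */
+    if ((ret = dbp-&gt;cursor(dbp, NULL, &amp;dbc, 0)) != 0)
+        return (ret);
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    while ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_NEXT)) == 0)
+        printf("key length %lu, data length %lu\n",
+            (unsigned long)key.size, (unsigned long)data.size);
+
+    (void)dbc-&gt;c_close(dbc);
+    return (ret == DB_NOTFOUND ? 0 : ret);
+}</pre></blockquote>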
+<h3>Berkeley DB Concurrent Data Store</h3>
+<p>The Berkeley DB Concurrent Data Store product adds multiple-reader, single-writer capabilities to
+the Berkeley DB Data Store product, supporting applications that need concurrent updates
+and do not want to implement their own locking protocols. The additional
+interfaces included with the Berkeley DB Concurrent Data Store product are <a href="../../api_c/env_create.html">db_env_create</a>, the
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> method (using the <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag), and the
+<a href="../../api_c/env_close.html">DBENV-&gt;close</a> method.
+<p>Berkeley DB Concurrent Data Store is intended for applications that require occasional write access
+to a database that is largely used for reading.
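+<p>A sketch of a Berkeley DB Concurrent Data Store configuration (the environment home
+directory and file name are invented for illustration): the environment
+is opened with the DB_INIT_CDB flag, and databases are then opened
+within that environment:
+<p><blockquote><pre>DB_ENV *dbenv;
+DB *dbp;
+
+(void)db_env_create(&amp;dbenv, 0);
+(void)dbenv-&gt;open(dbenv, "/var/myapp/db",
+    DB_CREATE | DB_INIT_MPOOL | DB_INIT_CDB, 0);
+
+/* Databases opened in this environment use multiple-reader/single-writer locking. */
+(void)db_create(&amp;dbp, dbenv, 0);
+(void)dbp-&gt;open(dbp, "orders.db", NULL, DB_BTREE, DB_CREATE, 0664);</pre></blockquote>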
+<h3>Berkeley DB Transactional Data Store</h3>
+<p>The Berkeley DB Transactional Data Store product adds full transactional support and recoverability
+to the Berkeley DB Data Store product. This product includes all of the interfaces
+in the Berkeley DB library.
+<p>Berkeley DB Transactional Data Store is intended for applications that require industrial-strength
+database services, including good performance under high-concurrency
+workloads with a mixture of readers and writers, the ability to commit
+or roll back multiple changes to the database at a single instant, and
+the guarantee that even in the event of a catastrophic system or hardware
+failure, any committed database changes will be preserved.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/where.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/intro/terrain.html b/bdb/docs/ref/intro/terrain.html
new file mode 100644
index 00000000000..f2a7089135c
--- /dev/null
+++ b/bdb/docs/ref/intro/terrain.html
@@ -0,0 +1,248 @@
+<!--$Id: terrain.so,v 10.3 2000/12/14 20:52:03 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Mapping the terrain: theory and practice</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/data.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/dbis.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Mapping the terrain: theory and practice</h1>
+<p>The first step in selecting a database system is figuring out what the
+choices are. Decades of research and real-world deployment have produced
+countless systems. We need to organize them somehow to reduce the number
+of options.
+<p>One obvious way to group systems is to use the common labels that
+vendors apply to them. The buzzwords here include "network,"
+"relational," "object-oriented," and "embedded," with some
+cross-fertilization like "object-relational" and "embedded network".
+Understanding the buzzwords is important. Each has some grounding in
+theory, but has also evolved into a practical label for categorizing
+systems that work in a certain way.
+<p>All database systems, regardless of the buzzwords that apply to them,
+provide a few common services. All of them store data, for example.
+We'll begin by exploring the common services that all systems provide,
+and then examine the differences among the different kinds of systems.
+<h3>Data access and data management</h3>
+<p>Fundamentally, database systems provide two services.
+<p>The first service is <i>data access</i>. Data access means adding
+new data to the database (inserting), finding data of interest
+(searching), changing data already stored (updating), and removing data
+from the database (deleting). All databases provide these services. How
+they work varies from category to category, and depends on the record
+structure that the database supports.
+<p>Each record in a database is a collection of values. For example, the
+record for a Web site customer might include a name, email address,
+shipping address, and payment information. Records are usually stored
+in tables. Each table holds records of the same kind. For example, the
+<b>customer</b> table at an e-commerce Web site might store the
+customer records for every person who shopped at the site. Often,
+database records have a different structure from the structures or
+instances supported by the programming language in which an application
+is written. As a result, working with records can mean:
+<ul type=disc>
+<li>using database operations like searches and updates on records; and
+<li>converting between programming language structures and database record
+types in the application.
+</ul>
+<p>The second service is <i>data management</i>. Data management is
+more complicated than data access. Providing good data management
+services is the hard part of building a database system. When you
+choose a database system to use in an application you build, making sure
+it supports the data management services you need is critical.
+<p>Data management services include allowing multiple users to work on the
+database simultaneously (concurrency), allowing multiple records to be
+changed instantaneously (transactions), and surviving application and
+system crashes (recovery). Different database systems offer different
+data management services. Data management services are entirely
+independent of the data access services listed above. For example,
+nothing about relational database theory requires that the system
+support transactions, but most commercial relational systems do.
+<p>Concurrency means that multiple users can operate on the database at
+the same time. Support for concurrency ranges from none (single-user
+access only) to complete (many readers and writers working
+simultaneously).
+<p>Transactions permit users to make multiple changes appear at once. For
+example, a transfer of funds between bank accounts needs to be a
+transaction because the balance in one account is reduced and the
+balance in the other increases. If the reduction happened before the
+increase, then a poorly-timed system crash could leave the customer
+poorer; if the bank used the opposite order, then the same system crash
+could make the customer richer. Obviously, both the customer and the
+bank are best served if both operations happen at the same instant.
+<p>Transactions have well-defined properties in database systems. They are
+<i>atomic</i>, so that the changes happen all at once or not at all.
+They are <i>consistent</i>, so that the database is in a legal state
+when the transaction begins and when it ends. They are typically
+<i>isolated</i>, which means that any other users in the database
+cannot interfere with them while they are in progress. And they are
+<i>durable</i>, so that if the system or application crashes after
+a transaction finishes, the changes are not lost. Together, the
+properties of <i>atomicity</i>, <i>consistency</i>,
+<i>isolation</i>, and <i>durability</i> are known as the ACID
+properties.
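+<p>To make the example concrete (a sketch only, using Berkeley DB's 3.x-era C
+interface; the <b>accounts</b> database and the <b>debit</b> and
+<b>credit</b> helpers are invented for illustration), the two updates
+can be grouped so that either both happen or neither does:
+<p><blockquote><pre>int
+transfer(DB_ENV *dbenv, DB *accounts, DBT *from, DBT *to, double amount)
+{
+    DB_TXN *tid;
+    int ret;
+
+    if ((ret = txn_begin(dbenv, NULL, &amp;tid, 0)) != 0)
+        return (ret);
+
+    /*
+     * debit() and credit() are invented helpers that read, adjust and
+     * rewrite a balance within the transaction tid.
+     */
+    if ((ret = debit(accounts, tid, from, amount)) != 0 ||
+        (ret = credit(accounts, tid, to, amount)) != 0) {
+        (void)txn_abort(tid);           /* Neither change becomes visible. */
+        return (ret);
+    }
+    return (txn_commit(tid, 0));        /* Both changes are made durable together. */
+}</pre></blockquote>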
+<p>As is the case for concurrency, support for transactions varies among
+databases. Some offer atomicity without making guarantees about
+durability. Some ignore isolation, especially in single-user
+systems; there's no need to isolate other users from the effects of
+changes when there are no other users.
+<p>Another important data management service is recovery. Strictly
+speaking, recovery is a procedure that the system carries out when it
+starts up. The purpose of recovery is to guarantee that the database is
+complete and usable. This is most important after a system or
+application crash, when the database may have been damaged. The recovery
+process guarantees that the internal structure of the database is good.
+Recovery usually means that any completed transactions are checked, and
+any lost changes are reapplied to the database. At the end of the
+recovery process, applications can use the database as if there had been
+no interruption in service.
+<p>Finally, there are a number of data management services that permit
+copying of data. For example, most database systems are able to import
+data from other sources, and to export it for use elsewhere. Also, most
+systems provide some way to back up databases and to restore them in the
+event of a system failure that damages the database. Many commercial
+systems allow <i>hot backups</i>, so that users can back up
+databases while they are in use. Many applications must run without
+interruption, and cannot be shut down for backups.
+<p>A particular database system may provide other data management services.
+Some provide browsers that show database structure and contents. Some
+include tools that enforce data integrity rules, such as the rule that
+no employee can have a negative salary. These data management services
+are not common to all systems, however. Concurrency, recovery, and
+transactions are the data management services that most database vendors
+support.
+<p>Deciding what kind of database to use means understanding the data
+access and data management services that your application needs. Berkeley DB
+is an embedded database that supports fairly simple data access with a
+rich set of data management services. To highlight its strengths and
+weaknesses, we can compare it to other database system categories.
+<h3>Relational databases</h3>
+<p>Relational databases are probably the best-known database variant,
+because of the success of companies like Oracle. Relational databases
+are based on the mathematical field of set theory. The term "relation"
+is really just a synonym for "set" -- a relation is just a set of
+records or, in our terminology, a table. One of the main innovations in
+early relational systems was to insulate the programmer from the
+physical organization of the database. Rather than walking through
+arrays of records or traversing pointers, programmers make statements
+about tables in a high-level language, and the system executes those
+statements.
+<p>Relational databases operate on <i>tuples</i>, or records, composed
+of values of several different data types, including integers, character
+strings, and others. Operations include searching for records whose
+values satisfy some criteria, updating records, and so on.
+<p>Virtually all relational databases use the Structured Query Language,
+or SQL. This language permits people and computer programs to work with
+the database by writing simple statements. The database engine reads
+those statements and determines how to satisfy them on the tables in
+the database.
+<p>SQL is the main practical advantage of relational database systems.
+Rather than writing a computer program to find records of interest, the
+relational system user can just type a query in a simple syntax, and
+let the engine do the work. This gives users enormous flexibility; they
+do not need to decide in advance what kind of searches they want to do,
+and they do not need expensive programmers to find the data they need.
+Learning SQL requires some effort, but it's much simpler than a
+full-blown high-level programming language for most purposes. And there
+are a lot of programmers who have already learned SQL.
+<h3>Object-oriented databases</h3>
+<p>Object-oriented databases are less common than relational systems, but
+are still fairly widespread. Most object-oriented databases were
+originally conceived as persistent storage systems closely wedded to
+particular high-level programming languages like C++. With the spread
+of Java, most now support more than one programming language, but
+object-oriented database systems fundamentally provide the same class
+and method abstractions as do object-oriented programming languages.
+<p>Many object-oriented systems allow applications to operate on objects
+uniformly, whether they are in memory or on disk. These systems create
+the illusion that all objects are in memory all the time. The advantage
+to object-oriented programmers who simply want object storage and
+retrieval is clear. They need never be aware of whether an object is in
+memory or not. The application simply uses objects, and the database
+system moves them between disk and memory transparently. All of the
+operations on an object, and all its behavior, are determined by the
+programming language.
+<p>Object-oriented databases aren't nearly as widely deployed as relational
+systems. In order to attract developers who understand relational
+systems, many of the object-oriented systems have added support for
+query languages very much like SQL. In practice, though, object-oriented
+databases are mostly used for persistent storage of objects in C++ and
+Java programs.
+<h3>Network databases</h3>
+<p>The "network model" is a fairly old technique for managing and
+navigating application data. Network databases are designed to make
+pointer traversal very fast. Every record stored in a network database
+is allowed to contain pointers to other records. These pointers are
+generally physical addresses, so fetching the referenced record just
+means reading it from disk by its disk address.
+<p>Network database systems generally permit records to contain integers,
+floating point numbers, and character strings, as well as references to
+other records. An application can search for records of interest. After
+retrieving a record, the application can fetch any referenced record
+quickly.
+<p>Pointer traversal is fast because most network systems use physical disk
+addresses as pointers. When the application wants to fetch a record,
+the database system uses the address to fetch exactly the right string
+of bytes from the disk. This requires only a single disk access in all
+cases. Other systems, by contrast, often must do more than one disk read
+to find a particular record.
+<p>The key advantage of the network model is also its main drawback. The
+fact that pointer traversal is so fast means that applications that do
+it will run well. On the other hand, storing pointers all over the
+database makes it very hard to reorganize the database. In effect, once
+you store a pointer to a record, it is difficult to move that record
+elsewhere. Some network databases handle this by leaving forwarding
+pointers behind, but this defeats the speed advantage of doing a single
+disk access in the first place. Other network databases find, and fix,
+all the pointers to a record when it moves, but this makes
+reorganization very expensive. Reorganization is often necessary in
+databases, since adding and deleting records over time will consume
+space that cannot be reclaimed without reorganizing. Without periodic
+reorganization to compact network databases, they can end up with a
+considerable amount of wasted space.
+<h3>Clients and servers</h3>
+<p>Database vendors have two choices for system architecture. They can
+build a server to which remote clients connect, and do all the database
+management inside the server. Alternatively, they can provide a module
+that links directly into the application, and does all database
+management locally. In either case, the application developer needs
+some way of communicating with the database (generally, an Application
+Programming Interface (API) that does work in the process or that
+communicates with a server to get work done).
+<p>Almost all commercial database products are implemented as servers, and
+applications connect to them as clients. Servers have several features
+that make them attractive.
+<p>First, because all of the data is managed by a separate process, and
+possibly on a separate machine, it's easy to isolate the database server
+from bugs and crashes in the application.
+<p>Second, because some database products (particularly relational engines)
+are quite large, splitting them off as separate server processes keeps
+applications small, which uses less disk space and memory. Relational
+engines include code to parse SQL statements, to analyze them and
+produce plans for execution, to optimize the plans, and to execute
+them.
+<p>Finally, by storing all the data in one place and managing it with a
+single server, it's easier for organizations to back up, protect, and
+set policies on their databases. The enterprise databases for large
+companies often have several full-time administrators caring for them,
+making certain that applications run quickly, granting and denying
+access to users, and making backups.
+<p>However, centralized administration can be a disadvantage in some cases.
+In particular, if a programmer wants to build an application that uses
+a database for storage of important information, then shipping and
+supporting the application is much harder. The end user needs to install
+and administer a separate database server, and the programmer must
+support not just one product, but two. Adding a server process to the
+application creates new opportunities for installation mistakes and
+run-time problems.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/data.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/dbis.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/intro/what.html b/bdb/docs/ref/intro/what.html
new file mode 100644
index 00000000000..c8d12069a57
--- /dev/null
+++ b/bdb/docs/ref/intro/what.html
@@ -0,0 +1,53 @@
+<!--$Id: what.so,v 10.22 2000/09/22 18:23:59 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What other services does Berkeley DB provide?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/need.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/distrib.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What other services does Berkeley DB provide?</h1>
+<p>Berkeley DB also provides core database services to developers. These
+services include:
+<p><dl compact>
+<p><dt>Page cache management:<dd>The page cache provides fast access to a cache of database pages,
+handling the I/O associated with the cache to ensure that dirty pages
+are written back to the file system and that new pages are allocated on
+demand. Applications may use the Berkeley DB shared memory buffer manager to
+serve their own files and pages.
+<p><dt>Transactions and logging:<dd>The transaction and logging systems provide recoverability and atomicity
+for multiple database operations. The transaction system uses two-phase
+locking and write-ahead logging protocols to ensure that database
+operations may be undone or redone in the case of application or system
+failure. Applications may use Berkeley DB transaction and logging subsystems
+to protect their own data structures and operations from application or
+system failure.
+<p><dt>Locking:<dd>The locking system provides multiple reader or single writer access to
+objects. The Berkeley DB access methods use the locking system to acquire
+the right to read or write database pages. Applications may use the
+Berkeley DB locking subsystem to support their own locking needs.
+</dl>
+<p>By combining the page cache, transaction, locking, and logging systems,
+Berkeley DB provides the same services found in much larger, more complex and
+more expensive database systems. Berkeley DB supports multiple simultaneous
+readers and writers and guarantees that all changes are recoverable, even
+in the case of a catastrophic hardware failure during a database update.
+<p>Developers may select some or all of the core database services for any
+access method or database. Therefore, it is possible to choose the
+appropriate storage structure and the right degrees of concurrency and
+recoverability for any application. In addition, some of the systems
+(e.g., the locking subsystem) can be called separately from the Berkeley DB
+access method. As a result, developers can integrate non-database
+objects into their transactional applications using Berkeley DB.
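+<p>For example (a sketch using the 3.x-era locking interfaces; the object
+name is invented, and <b>dbenv</b> is an environment opened with
+DB_INIT_LOCK), an application can lock one of its own, non-database
+resources:
+<p><blockquote><pre>DBT obj;
+DB_LOCK lock;
+u_int32_t locker;
+
+/* Obtain a locker ID, then lock an application-defined object by name. */
+(void)lock_id(dbenv, &amp;locker);
+
+memset(&amp;obj, 0, sizeof(obj));
+obj.data = "config-file";               /* Invented name for a non-database resource. */
+obj.size = sizeof("config-file");
+
+(void)lock_get(dbenv, locker, 0, &amp;obj, DB_LOCK_WRITE, &amp;lock);
+/* ... update the resource while holding the exclusive lock ... */
+(void)lock_put(dbenv, &amp;lock);</pre></blockquote>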
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/need.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/distrib.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/intro/where.html b/bdb/docs/ref/intro/where.html
new file mode 100644
index 00000000000..45d0dc3ae99
--- /dev/null
+++ b/bdb/docs/ref/intro/where.html
@@ -0,0 +1,39 @@
+<!--$Id: where.so,v 10.27 2000/12/04 18:05:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Where does Berkeley DB run?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/distrib.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/products.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Where does Berkeley DB run?</h1>
+<p>Berkeley DB requires only underlying IEEE/ANSI Std 1003.1 (POSIX) system calls and can be
+ported easily to new architectures by adding stub routines to connect
+the native system interfaces to the Berkeley DB POSIX-style system calls.
+<p>Berkeley DB will autoconfigure and run on almost any modern UNIX system, and
+even on most historical UNIX platforms. See
+<a href="../../ref/build_unix/intro.html">Building for UNIX systems</a> for
+more information.
+<p>The Berkeley DB distribution includes support for QNX Neutrino. See
+<a href="../../ref/build_unix/intro.html">Building for UNIX systems</a> for
+more information.
+<p>The Berkeley DB distribution includes support for VxWorks, via a workspace
+and project files for Tornado 2.0. See
+<a href="../../ref/build_vxworks/intro.html">Building for VxWorks</a> for more
+information.
+<p>The Berkeley DB distribution includes support for Windows/95, Windows/98,
+Windows/NT and Windows/2000, via the MSVC 5 and 6 development
+environments. See <a href="../../ref/build_win/intro.html">Building for
+Windows systems</a> for more information.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/distrib.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/products.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/java/compat.html b/bdb/docs/ref/java/compat.html
new file mode 100644
index 00000000000..4619ec55794
--- /dev/null
+++ b/bdb/docs/ref/java/compat.html
@@ -0,0 +1,34 @@
+<!--$Id: compat.so,v 10.11 2000/12/04 18:05:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Compatibility</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Java API</dl></h3></td>
+<td width="1%"><a href="../../ref/java/conf.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/program.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Compatibility</h1>
+<p>The Berkeley DB Java API has been tested with the
+<a href="http://www.javasoft.com">Sun Microsystems JDK 1.1.3</a> on SunOS
+5.5, and Sun's JDK 1.1.7, JDK 1.2.2 and JDK 1.3.0 on Linux and
+Windows/NT. It should work with any JDK 1.1, 1.2 or 1.3 (the latter
+two are known as Java 2) compatible environment. IBM's VM 1.3.0 has
+also been tested on Linux.
+<p>The primary requirement of the Berkeley DB Java API is that the target Java
+environment supports JNI (Java Native Interface), rather than another
+method for allowing native C/C++ code to interface to Java. The JNI was
+new in JDK 1.1, but is the most likely interface to be implemented across
+multiple platforms. However, using the JNI means that Berkeley DB will not be
+compatible with Microsoft Visual J++.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/java/conf.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/program.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/java/conf.html b/bdb/docs/ref/java/conf.html
new file mode 100644
index 00000000000..b7eedcaedba
--- /dev/null
+++ b/bdb/docs/ref/java/conf.html
@@ -0,0 +1,82 @@
+<!--$Id: conf.so,v 10.16 2000/12/04 21:21:51 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Java API</dl></h3></td>
+<td width="1%"><a href="../../ref/rpc/server.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/compat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuration</h1>
+<p>Building the Berkeley DB Java classes, the examples, and the native support
+library is integrated into the normal build process. See
+<a href="../../ref/build_unix/conf.html#--enable-java">Configuring
+Berkeley DB</a> and <a href="../../ref/build_win/intro.html">Building for Windows</a>
+for more information.
+<p>We expect that you have already installed the Java JDK or equivalent on
+your system. For the sake of discussion, we assume the Berkeley DB distribution
+is in a directory called db-VERSION; e.g., you extracted Berkeley DB version 2.3.12
+and did not change the top-level directory name. The files related
+to Java are in two subdirectories of db-VERSION: java, which holds the Java source
+files, and libdb_java, which holds the C++ files that provide the "glue" between
+Java and Berkeley DB. The directory tree looks like this:
+<p><blockquote><pre> db-VERSION
+ / \
+ java libdb_java
+ | |
+ src ...
+ |
+ com
+ |
+ sleepycat
+ / \
+ db examples
+ | |
+ ... ...
+</pre></blockquote>
+<p>This naming conforms to the emerging standard for naming Java packages.
+When the Java code is built, it is placed into a <b>classes</b>
+subdirectory that is parallel to the <b>src</b> subdirectory.
+<p>For your application to use Berkeley DB successfully, you must set your
+CLASSPATH environment variable to include db-VERSION/java/classes as
+well as the classes in your Java distribution. On UNIX, CLASSPATH is
+a colon-separated list of directories; on Windows it is separated by
+semicolons. Alternatively, you can set your CLASSPATH to include
+db-VERSION/java/classes/db.jar, which is created as a result of the
+build. The db.jar file contains the classes in com.sleepycat.db; it
+does not contain any classes in com.sleepycat.examples.
+<p>On Windows, you will want to set your PATH variable to include:
+<p><blockquote><pre>db-VERSION\build_win32\Release</pre></blockquote>
+<p>On UNIX, you will want to set the LD_LIBRARY_PATH environment variable
+to include the Berkeley DB library installation directory. Of course, the
+standard install directory may have been changed for your site; see your
+system administrator for details. Regardless, if you get a:
+<p><blockquote><pre>java.lang.UnsatisfiedLinkError</pre></blockquote>
+<p>exception when you run your application, chances are you do not have the
+library search path configured correctly. Different Java interpreters provide
+different error messages if the CLASSPATH value is incorrect; a typical
+error is:
+<p><blockquote><pre>java.lang.NoClassDefFoundError</pre></blockquote>
+<p>To ensure that everything is running correctly, you may want to try a
+simple test from the example programs in:
+<p><blockquote><pre>db-VERSION/java/src/com/sleepycat/examples</pre></blockquote>
+<p>For example, the sample program:
+<p><blockquote><pre>% java com.sleepycat.examples.AccessExample</pre></blockquote>
+<p>will prompt for lines of text input, which are then stored in a Btree
+database named "access.db" in your current directory. Try giving it a
+few lines of input text and then end-of-file. Before it exits, you
+should see the lines you entered displayed along with their data items.
+This is a simple check to make sure the fundamental configuration is
+working correctly.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/rpc/server.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/compat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/java/faq.html b/bdb/docs/ref/java/faq.html
new file mode 100644
index 00000000000..75b9e9f3bdb
--- /dev/null
+++ b/bdb/docs/ref/java/faq.html
@@ -0,0 +1,31 @@
+<!--$Id: faq.so,v 1.2 2001/01/09 20:55:54 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Frequently Asked Questions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Java API</dl></h3></td>
+<td width="1%"><a href="../../ref/java/program.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/perl/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Frequently Asked Questions</h1>
+<p><ol>
+<p><li><b>During one of the first calls to the Berkeley DB Java API, a
+DbException is thrown with a "Bad file number" or "Bad file descriptor"
+message.</b>
+<p>There are known large-file support bugs under JNI in various releases
+of the JDK. Please upgrade to the latest release of the JDK, and, if
+that does not help, disable big file support using the --disable-bigfile
+configuration option.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/java/program.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/perl/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/java/program.html b/bdb/docs/ref/java/program.html
new file mode 100644
index 00000000000..c454a0910ee
--- /dev/null
+++ b/bdb/docs/ref/java/program.html
@@ -0,0 +1,72 @@
+<!--$Id: program.so,v 10.21 2001/01/09 18:57:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Java Programming Notes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/java/compat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Java Programming Notes</h1>
+<p>The Java API closely parallels the Berkeley DB C++ and C interfaces. If you
+are currently using either of those APIs, there will be very little to
+surprise you in the Java API. We have even taken care to make the names
+of classes, constants, methods and arguments identical, where possible,
+across all three APIs.
+<p><ol>
+<p><li>The Java runtime does not automatically close Berkeley DB objects on
+finalization. There are a couple of reasons for this. First,
+finalization is generally run only when garbage collection occurs, and
+there is no guarantee that this happens at all, even on exit; allowing
+specific Berkeley DB actions to occur in ways that cannot be replicated seems
+wrong. Second, finalization of objects may happen in an arbitrary
+order, so we would have to do extra bookkeeping to make sure everything
+was closed in the proper order. The best advice is to always
+do a close() for any matching open() call. Specifically, the Berkeley DB
+package requires that you explicitly call close on each individual
+<a href="../../api_java/db_class.html">Db</a> and <a href="../../api_java/dbc_class.html">Dbc</a> object that you opened. Your database
+activity may not be synchronized to disk unless you do so.
+<p><li>Some methods in the Java API have no return type, and throw a
+<a href="../../api_java/except_class.html">DbException</a> when a severe error arises. There are some notable
+methods that do have a return value, and can also throw an exception.
+<a href="../../api_java/db_get.html">Db.get</a> and <a href="../../api_java/dbc_get.html">Dbc.get</a> both return 0 when a get succeeds,
+<a href="../../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a> when the key is not found, and throw an exception when
+a severe error occurs. This approach allows the programmer to check
+for typical data-driven errors by watching return values, without
+special-casing exceptions.
+<p>An object of type <a href="../../api_java/deadlock_class.html">DbDeadlockException</a> is thrown when a deadlock
+would occur.
+<p>An object of type <a href="../../api_java/mem_class.html">DbMemoryException</a> is thrown when the system
+cannot provide enough memory to complete the operation (the ENOMEM
+system error on UNIX).
+<p>An object of type <a href="../../api_java/runrec_class.html">DbRunRecoveryException</a>, a subclass of
+<a href="../../api_java/except_class.html">DbException</a>, is thrown when there is an error that requires a
+recovery of the database, using <a href="../../utility/db_recover.html">db_recover</a>.
+<p><li>There is no class corresponding to the C++ DbMpoolFile class in the Berkeley DB
+Java API. There is a subset of the memp_XXX methods in the <a href="../../api_java/dbenv_class.html">DbEnv</a>
+class. This has been provided to allow you to perform certain
+administrative actions on underlying memory pools opened as a consequence
+of <a href="../../api_java/env_open.html">DbEnv.open</a>. Direct access to other memory pool functionality
+is not appropriate for the Java environment.
+<p><li>Berkeley DB always turns on the <a href="../../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag since threads
+are expected in Java.
+<p><li>If there are embedded null entries in the <b>curslist</b> argument for
+<a href="../../api_java/db_join.html">Db.join</a>, they will be treated as the end of the list of
+cursors, even though you may have allocated a longer array. Fill in
+all the entries in your array unless you intend to cut it short.
+<p><li>The callback installed for <a href="../../api_java/env_set_errcall.html">DbEnv.set_errcall</a> will run in the same
+thread as the caller to <a href="../../api_java/env_set_errcall.html">DbEnv.set_errcall</a>. Make sure that thread
+remains running until your application exits or <a href="../../api_java/env_close.html">DbEnv.close</a> is
+called.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/java/compat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/am_conv.html b/bdb/docs/ref/lock/am_conv.html
new file mode 100644
index 00000000000..7dbe3e73d47
--- /dev/null
+++ b/bdb/docs/ref/lock/am_conv.html
@@ -0,0 +1,129 @@
+<!--$Id: am_conv.so,v 10.16 2000/03/18 21:43:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Access method locking conventions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/twopl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/cam_conv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Access method locking conventions</h1>
+<p>All the Berkeley DB access methods follow the same conventions for locking
+database objects. Applications that do their own locking and also do
+locking via the access methods must be careful to adhere to these
+conventions.
+<p>Whenever a Berkeley DB database is opened, the DB handle is
+assigned a unique locker ID. Unless transactions are specified,
+that ID is used as the locker for all calls that the Berkeley DB methods
+make to the lock subsystem. In order to lock a file, pages in
+the file, or records in the file, we must create a unique ID that
+can be used as the object to be locked in calls to the lock manager.
+Under normal operation, that object is a 28-byte value, created by
+the concatenation of a unique file identifier, a page or record number,
+and an object type (page or record).
+<p>In a transaction-protected environment, database create and delete
+operations are recoverable and single-threaded. This single-threading is
+achieved using a single lock for the entire environment that must be
+acquired before beginning a create or delete operation. In this case,
+the object on which Berkeley DB will lock is a 32-bit unsigned integer with a
+value of 0.
+<p>If applications are using the lock subsystem directly while they are also
+using locking via the access methods, they must take care not to
+inadvertently lock objects that happen to be equal to the unique file IDs
+used to lock files. This is most easily accomplished by using lock
+objects of a different length than the values used by Berkeley DB.
+<p>All of the access methods other than Queue use a simple
+multiple-reader/single-writer page locking scheme. The standard
+read/write locks (<b>DB_LOCK_READ</b> and <b>DB_LOCK_WRITE</b>) and
+conflict matrix, as described in <a href="../../ref/lock/stdmode.html">Standard lock modes</a>, are used. An operation that returns data (e.g.,
+<a href="../../api_c/db_get.html">DB-&gt;get</a>, <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>) obtains a read lock on all the pages
+accessed while locating the requested record. When an update operation
+is requested (e.g., <a href="../../api_c/db_put.html">DB-&gt;put</a>, <a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a>), the page containing
+the updated (or new) data is write-locked. As read-modify-write cycles
+are quite common and are deadlock-prone under normal circumstances, the
+Berkeley DB interfaces allow the application to specify the <a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a> flag,
+which causes operations to immediately obtain a writelock, even though
+they are only reading the data. While this may reduce concurrency
+somewhat, it reduces the probability of deadlock.
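+<p>As an illustration only, here is a minimal sketch of such a read-modify-write
+cycle, assuming the 3.x-era C interfaces, an already opened transaction-protected
+database handle, and a record that stores a simple integer counter:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+increment_counter(DB *dbp, DB_TXN *txn, DBT *key)
+{
+    DBT data;
+    int counter, ret;
+
+    memset(&amp;data, 0, sizeof(data));
+
+    /* Read the record, obtaining the write lock immediately. */
+    if ((ret = dbp-&gt;get(dbp, txn, key, &amp;data, DB_RMW)) != 0)
+        return (ret);
+
+    /* Modify the value and write it back under the same lock. */
+    memcpy(&amp;counter, data.data, sizeof(counter));
+    ++counter;
+    data.data = &amp;counter;
+    data.size = sizeof(counter);
+    return (dbp-&gt;put(dbp, txn, key, &amp;data, 0));
+}</pre></blockquote>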
+<p>The Queue access method does not hold long term page locks.
+Instead, page locks are held only long enough to locate records or to change
+metadata on a page, and record locks are held for the appropriate duration.
+In the presence of transactions, record locks are held until transaction
+commit.
+For Berkeley DB operations, record locks are held until operation
+completion; for DBC operations, record locks are held until
+subsequent records are returned or the cursor is closed.
+<p>Under non-transaction operation, the access methods do not normally hold
+locks across calls to the Berkeley DB interfaces. The one exception to this
+rule is when cursors are used. As cursors maintain a position in a file,
+they must hold locks across calls and will, in fact, hold locks until the
+cursor is closed. Furthermore, each cursor is assigned its own unique
+locker ID when it is created, so cursor operations can conflict with one
+another. (Each cursor is assigned its own locker ID because Berkeley DB handles
+may be shared by multiple threads of control. The Berkeley DB library cannot
+identify which operations are performed by which threads of control, and
+it must ensure that two different threads of control are not
+simultaneously modifying the same data structure. By assigning each
+cursor its own locker, two threads of control sharing a handle cannot
+inadvertently interfere with each other.)
+<p>This has important implications. If a single thread of control opens two
+cursors or uses a combination of cursor and non-cursor operations, these
+operations are performed on behalf of different lockers. Conflicts that
+arise between these different lockers may not cause actual deadlocks, but
+can, in fact, permanently block the thread of control. For example,
+assume that an application creates a cursor and uses it to read record A.
+Now assume a second cursor is opened and the application attempts to write
+record A using the second cursor. Unfortunately, the first cursor has a
+read lock so the second cursor cannot obtain its write lock. However,
+that read lock is held by the same thread of control, so if we block
+waiting for the write lock, the read lock can never be released. This
+might appear to be a deadlock from the application's perspective, but
+Berkeley DB cannot identify it as such because it has no knowledge of which
+lockers belong to which threads of control. For this reason, application
+designers are encouraged to close cursors as soon as they are done with
+them.
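+<p>As a sketch of the pitfall just described (assuming the 3.x-era C interfaces
+and a database opened with locking but without transactions), the second
+request below blocks on a read lock held by the same thread of control:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+void
+self_block(DB *dbp, DBT *key, DBT *data)
+{
+    DBC *c1, *c2;
+
+    /* Each cursor is assigned its own locker ID. */
+    (void)dbp-&gt;cursor(dbp, NULL, &amp;c1, 0);
+    (void)dbp-&gt;cursor(dbp, NULL, &amp;c2, 0);
+
+    /* The first cursor read-locks the page holding the record. */
+    (void)c1-&gt;c_get(c1, key, data, DB_SET);
+
+    /*
+     * The second cursor needs a write lock on the same page; it
+     * blocks, and the first cursor's read lock is never released.
+     */
+    (void)c2-&gt;c_put(c2, key, data, DB_KEYFIRST);
+
+    (void)c1-&gt;c_close(c1);
+    (void)c2-&gt;c_close(c2);
+}</pre></blockquote>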
+<p>Complicated operations that require multiple cursors (or combinations of
+cursor and non-cursor operations) can be performed in two ways. First,
+they may be performed within a transaction, in which case all operations
+lock on behalf of the designated locker ID. Alternatively, the
+<a href="../../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a> function duplicates a cursor, using the same locker ID as
+the originating cursor. There is no way to achieve this duplication
+functionality through the DB handle calls, but any DB call can be
+implemented by one or more calls through a cursor.
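+<p>For example, a minimal sketch (assuming the 3.x-era C interfaces) of
+duplicating a cursor at its current position so that both cursors share a
+single locker ID:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+dup_at_position(DBC *dbc, DBC **dupp)
+{
+    /* The duplicate inherits both the position and the locker ID. */
+    return (dbc-&gt;c_dup(dbc, dupp, DB_POSITION));
+}</pre></blockquote>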
+<p>When the access methods use transactions, many of these problems disappear.
+The transaction ID is used as the locker ID for all operations performed
+on behalf of the transaction. This means that the application may open
+multiple cursors on behalf of the same transaction and these cursors will
+all share a common locker ID. This is safe because transactions cannot
+span threads of control, so the library knows that two cursors in the same
+transaction cannot modify the database concurrently.
+<p>As mentioned earlier, most of the Berkeley DB access methods use page level
+locking. During Btree traversal, lock-coupling is used to traverse the
+tree. Note that the tree traversal that occurs during an update operation
+can also use lock-coupling; it is not necessary to retain locks on
+internal Btree pages even if the item finally referenced will be updated.
+Even in the presence of transactions, locks obtained on internal pages of
+the Btree may be safely released as the traversal proceeds. This greatly
+improves concurrency. The only time internal locks become crucial is when
+internal pages are split or merged. When traversing duplicate data items
+for a key, the lock on the key value also acts as a lock on all duplicates
+of that key. Therefore, two conflicting threads of control cannot access
+the same duplicate set simultaneously.
+<p>The Recno access method uses a Btree as its underlying data
+representation and follows similar locking conventions. However, as the
+Recno access method must keep track of the number of children for all
+internal pages, it must obtain write locks on all internal pages during
+read and write operations. In the presence of transactions, these locks
+are not released until transaction commit.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/twopl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/cam_conv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/cam_conv.html b/bdb/docs/ref/lock/cam_conv.html
new file mode 100644
index 00000000000..b37914890bc
--- /dev/null
+++ b/bdb/docs/ref/lock/cam_conv.html
@@ -0,0 +1,53 @@
+<!--$Id: cam_conv.so,v 10.10 2000/03/18 21:43:13 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB Concurrent Data Store locking conventions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/am_conv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/dead.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB Concurrent Data Store locking conventions</h1>
+<p>The Berkeley DB Concurrent Data Store product has a different set of conventions for locking. It
+provides multiple reader/single writer semantics, but not per-page locking
+or transaction recoverability. As such, it does its locking entirely at
+the interface to the access methods.
+<p>The object it locks is the file, identified by its unique file number.
+The locking matrix is not the standard two-mode conflict matrix; instead,
+a four-mode lock set is used, consisting of:
+<p><dl compact>
+<p><dt>DB_LOCK_NG<dd>not granted (always 0)
+<dt>DB_LOCK_READ<dd>read (shared)
+<dt>DB_LOCK_WRITE<dd>write (exclusive)
+<dt>DB_LOCK_IWRITE<dd>intention-to-write (shared with NG and READ, but conflicts with WRITE and IWRITE)
+</dl>
+<p>The IWRITE lock is used for cursors that will be used for updating (IWRITE
+locks are implicitly obtained for write operations through the Berkeley DB
+handles, e.g., <a href="../../api_c/db_put.html">DB-&gt;put</a>, <a href="../../api_c/db_del.html">DB-&gt;del</a>). While the cursor is
+reading, the IWRITE lock is held, but as soon as the cursor is about to
+modify the database, the IWRITE is upgraded to a WRITE lock. This upgrade
+blocks until all readers have exited the database. Because only one
+IWRITE lock is allowed at any one time, no two cursors can ever try to
+upgrade to a WRITE lock at the same time, and therefore deadlocks are
+prevented. This is essential, because Berkeley DB Concurrent Data Store does not include deadlock
+detection and recovery.
+<p>Applications that need to lock compatibly with Berkeley DB Concurrent Data Store must obey the
+following rules:
+<p><ol>
+<p><li>Use only lock modes DB_LOCK_NG, DB_LOCK_READ, DB_LOCK_WRITE,
+DB_LOCK_IWRITE.
+<p><li>Never attempt to acquire a WRITE lock on an object that is
+already locked with a READ lock.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/am_conv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/dead.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/config.html b/bdb/docs/ref/lock/config.html
new file mode 100644
index 00000000000..cc0b5248149
--- /dev/null
+++ b/bdb/docs/ref/lock/config.html
@@ -0,0 +1,46 @@
+<!--$Id: config.so,v 10.15 2000/12/08 20:43:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring locking</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/dead.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/max.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring locking</h1>
+<p>The <a href="../../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a> function specifies that the deadlock detector
+should be run whenever a lock blocks. This option provides for rapid
+detection of deadlocks at the expense of potentially frequent
+invocations of the deadlock detector. On a fast processor with a highly
+contentious application, where response time is critical, this is a good
+choice. An argument to the <a href="../../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a> function indicates which
+transaction to abort when a deadlock is detected. It can take on any
+one of the following values:
+<p><dl compact>
+<p><dt><a href="../../api_c/env_set_lk_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a><dd>Abort the most recently started transaction.
+<dt><a href="../../api_c/env_set_lk_detect.html#DB_LOCK_OLDEST">DB_LOCK_OLDEST</a><dd>Abort the longest lived transaction.
+<dt><a href="../../api_c/env_set_lk_detect.html#DB_LOCK_RANDOM">DB_LOCK_RANDOM</a><dd>Abort whatever transaction the deadlock detector happens to find first.
+<dt><a href="../../api_c/env_set_lk_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a><dd>Use the default policy (currently DB_RANDOM).
+</dl>
+<p>In general, <a href="../../api_c/env_set_lk_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a> is probably the correct choice. If
+an application has long-running transactions, then
+<a href="../../api_c/env_set_lk_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a> will guarantee that transactions eventually
+complete, but it may do so at the expense of a large number of aborts.
+<p>The alternative to using the <a href="../../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a> interface is
+to run the deadlock detector manually, using the Berkeley DB
+<a href="../../api_c/lock_detect.html">lock_detect</a> interface.
+<p>The <a href="../../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a> function allows you to specify your own locking
+conflicts matrix. This is an advanced configuration option, and rarely
+necessary.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/dead.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/max.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/dead.html b/bdb/docs/ref/lock/dead.html
new file mode 100644
index 00000000000..bb77e982285
--- /dev/null
+++ b/bdb/docs/ref/lock/dead.html
@@ -0,0 +1,93 @@
+<!--$Id: dead.so,v 10.13 2000/03/18 21:43:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deadlocks and deadlock avoidance</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/cam_conv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deadlocks and deadlock avoidance</h1>
+<p>Practically any application that uses locking may deadlock.
+In nearly all cases, in order to recover from a deadlock, transactions
+must be used, so that an operation that deadlocks mid-way through can
+be undone, leaving the database in a consistent state.
+As the access methods may perform updates on multiple pages during a
+single API call, transactions are necessary even when the application
+makes only single update calls into the database.
+The only exception to this rule is when all the threads accessing
+the database are doing so read-only or when the Concurrent Data Store
+product is used; this product guarantees deadlock-free operation at the
+expense of reduced concurrency.
+Since deadlocks cannot be prevented, Berkeley DB provides the ability to detect
+deadlocks and recover from them gracefully.
+<p>Deadlocks occur when two or more threads of control are blocked waiting
+on each other's forward progress. Consider two transactions, each of
+which wants to modify items A and B. Assume that transaction 1 modifies
+first A and then B, but transaction 2 modifies B then A. Now, assume
+that transaction 1 obtains its writelock on A, but before it obtains its
+writelock on B, it is descheduled and transaction 2 runs. Transaction 2
+successfully acquires its writelock on B, but then blocks when it tries
+to obtain its writelock on A, because transaction 1 already holds a
+writelock on it. This is a deadlock. Transaction 1 cannot make forward
+progress until Transaction 2 releases its lock on B, but Transaction 2
+cannot make forward progress until Transaction 1 releases its lock on A.
+<p>The <a href="../../api_c/lock_detect.html">lock_detect</a> function runs an instance of the Berkeley DB deadlock
+detector. The <a href="../../utility/db_deadlock.html">db_deadlock</a> utility performs deadlock detection by
+calling <a href="../../api_c/lock_detect.html">lock_detect</a> at regular intervals. When a deadlock exists
+in the system, all of the threads of control involved in the deadlock are,
+by definition, waiting on a lock. The deadlock detector examines the
+state of the lock manager, identifies the deadlock, and selects one of
+the participants to abort. (See <a href="../../ref/lock/config.html">Configuring locking</a> for a discussion of how a participant is selected.)
+The lock on which the selected participant is waiting is identified such
+that the <a href="../../api_c/lock_get.html">lock_get</a> (or <a href="../../api_c/lock_vec.html">lock_vec</a>) call in which that lock
+was requested will receive an error return of <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>.
+In the access methods, this error return is propagated back through the
+Berkeley DB interface as DB_LOCK_DEADLOCK.
+<p>When an application receives a DB_LOCK_DEADLOCK return, the correct action is
+to abort the current transaction and, optionally, retry it. Transaction
+support is necessary for recovery from deadlocks: when a deadlock occurs,
+the database may be left in an inconsistent or corrupted state, and any
+database changes already made must be undone before the
+application can proceed further.
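+<p>A minimal sketch of this abort-and-retry pattern, assuming the 3.x-era C
+interfaces; do_operation() is a hypothetical stand-in for the application's
+own transactional work:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+#define MAX_RETRIES 10
+
+extern int do_operation(DB *, DB_TXN *);   /* Hypothetical application work. */
+
+int
+run_transaction(DB_ENV *dbenv, DB *dbp)
+{
+    DB_TXN *txn;
+    int ret, tries;
+
+    for (tries = 0; tries &lt; MAX_RETRIES; ++tries) {
+        if ((ret = txn_begin(dbenv, NULL, &amp;txn, 0)) != 0)
+            return (ret);
+
+        if ((ret = do_operation(dbp, txn)) == 0)
+            return (txn_commit(txn, 0));
+
+        /* Abort releases the transaction's locks and undoes its changes. */
+        (void)txn_abort(txn);
+
+        if (ret != DB_LOCK_DEADLOCK)
+            return (ret);   /* A real error: give up. */
+        /* Deadlocked: retry the transaction from the beginning. */
+    }
+    return (DB_LOCK_DEADLOCK);
+}</pre></blockquote>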
+<p>The deadlock detector identifies deadlocks by looking for a cycle in what
+is commonly referred to as its "waits-for" graph. More precisely, the
+deadlock detector reads through the lock table, and finds each object
+currently locked. Each object has a list of transactions or operations
+(hereafter called lockers) that currently hold locks on the object and
+possibly a list of waiting lockers, waiting on the lockers holding it.
+Each object creates one or more partial orderings of lockers. That is,
+for a particular object, every waiting locker comes after every holding
+locker, because that holding locker must release its lock before the
+waiting locker can make forward progress. Conceptually, after each object
+has been examined, the partial orderings are topologically sorted (see
+tsort). If this topological sort reveals any cycles, then the lockers
+forming the cycle are involved in a deadlock, and one of them is
+selected to be aborted.
+<p>It is possible that aborting a single transaction involved in a deadlock
+is not enough to allow other transactions to make forward progress.
+In this case, the deadlock detector will be called repeatedly.
+Unfortunately, at the time a transaction is selected for abortion,
+there is not enough information available to determine if aborting
+that single transaction will allow forward progress or not. Since
+most applications have few deadlocks, Berkeley DB takes the conservative
+approach, aborting as few transactions as may be necessary to resolve
+the existing deadlocks. In particular, for each unique cycle found
+in the waits-for graph described in the previous paragraph, only one
+transaction is selected for abortion. However, if there are multiple
+cycles, then one transaction from each cycle is selected for abortion.
+Only after the selected transactions have received the deadlock return
+and actually aborted can it be determined whether it is necessary
+to abort other transactions in order to allow forward progress.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/cam_conv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/intro.html b/bdb/docs/ref/lock/intro.html
new file mode 100644
index 00000000000..b5c85af05b0
--- /dev/null
+++ b/bdb/docs/ref/lock/intro.html
@@ -0,0 +1,89 @@
+<!--$Id: intro.so,v 10.16 2000/03/18 21:43:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB and locking</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/program/runtime.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/page.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB and locking</h1>
+<p>The lock subsystem provides interprocess and intraprocess concurrency
+control mechanisms. While the locking system is used extensively by the
+Berkeley DB access methods and transaction system, it may also be used as a
+stand-alone subsystem to provide concurrency control to any set of
+designated resources.
+<p>The lock subsystem is created, initialized, and opened by calls to
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> with the <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> or <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a>
+flags specified.
+<p>The <a href="../../api_c/lock_detect.html">lock_detect</a> function provides the programmatic interface to
+the Berkeley DB deadlock detector. Whenever two threads of control issue lock
+requests that are not carefully ordered or that require upgrading locks
+(obtaining write locks on objects that are already read-locked), the
+possibility for deadlock arises. A deadlock occurs when two or more
+threads of control are blocked, waiting for actions that another one of
+these blocked threads must take. For example, assume that threads one
+and two have each obtained read locks on object A. Now suppose that both
+threads wish to obtain write locks on object A. Neither thread can be
+granted its writelock (because of the other thread's readlock). Both
+threads block and will never unblock because the event for which they are
+waiting can never happen.
+<p>The deadlock detector examines all the locks held in the environment and
+identifies situations where no thread can make forward progress. It then
+selects one of the participants in the deadlock (according to the argument
+that was specified to <a href="../../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>) and forces it to return
+the value DB_LOCK_DEADLOCK, which indicates that a deadlock occurred.
+The thread receiving such an error should abort its current transaction,
+or simply release all its locks if it is not running in a transaction,
+and retry the operation.
+<p>The <a href="../../api_c/lock_vec.html">lock_vec</a> interface is used to acquire and release locks.
+<p>Two additional interfaces, <a href="../../api_c/lock_get.html">lock_get</a> and <a href="../../api_c/lock_put.html">lock_put</a>, are
+provided. These interfaces are simpler front-ends to the <a href="../../api_c/lock_vec.html">lock_vec</a>
+functionality, where <a href="../../api_c/lock_get.html">lock_get</a> acquires a lock, and
+<a href="../../api_c/lock_put.html">lock_put</a> releases a lock that was acquired using <a href="../../api_c/lock_get.html">lock_get</a>
+or <a href="../../api_c/lock_vec.html">lock_vec</a>.
+<p>It is up to the application to specify lockers and objects appropriately.
+When used with the Berkeley DB access methods, these lockers and objects are
+handled completely internally, but an application using the lock manager
+directly must either use the same conventions as the access methods or
+define its own convention to which it adheres. If the application is
+using the access methods with locking at the same time that it is calling
+the lock manager directly, the application must follow a convention that
+is compatible with the access methods' use of the locking subsystem. See
+<a href="../../ref/lock/am_conv.html">Access method locking conventions</a>
+for more information.
+<p>The <a href="../../api_c/lock_id.html">lock_id</a> function returns a unique ID which may safely be used
+as the locker parameter to the <a href="../../api_c/lock_vec.html">lock_vec</a> interface. The access
+methods use <a href="../../api_c/lock_id.html">lock_id</a> to generate unique lockers for the cursors
+associated with a database.
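+<p>A minimal sketch of direct use of these interfaces, assuming the 3.x-era C
+signatures; the resource name is purely illustrative:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+lock_and_release(DB_ENV *dbenv, const char *resource)
+{
+    DBT obj;
+    DB_LOCK lock;
+    u_int32_t locker;
+    int ret;
+
+    /* Obtain a unique locker ID for this thread of control. */
+    if ((ret = lock_id(dbenv, &amp;locker)) != 0)
+        return (ret);
+
+    /* The object is simply a byte string naming the resource. */
+    memset(&amp;obj, 0, sizeof(obj));
+    obj.data = (void *)resource;
+    obj.size = (u_int32_t)strlen(resource);
+
+    /* Acquire a write lock, blocking until it is granted. */
+    if ((ret = lock_get(dbenv, locker, 0, &amp;obj, DB_LOCK_WRITE, &amp;lock)) != 0)
+        return (ret);
+
+    /* ... operate on the locked resource ... */
+
+    return (lock_put(dbenv, &amp;lock));
+}</pre></blockquote>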
+<p>The <a href="../../api_c/lock_vec.html">lock_vec</a> function performs any number of lock operations
+atomically. It also provides the ability to release all locks held by a
+particular locker and release all the locks on a particular object.
+Performing multiple lock operations atomically is useful in performing
+Btree traversals where you want to acquire a lock on a child page and,
+once acquired, immediately release the lock on its parent (this is
+traditionally referred to as "lock-coupling"). Using <a href="../../api_c/lock_vec.html">lock_vec</a>
+instead of separate calls to <a href="../../api_c/lock_put.html">lock_put</a> and <a href="../../api_c/lock_get.html">lock_get</a> reduces
+the synchronization overhead between multiple threads or processes.
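+<p>A sketch of such a coupled get-and-release in a single call, assuming the
+3.x-era <a href="../../api_c/lock_vec.html">lock_vec</a> interface and DB_LOCKREQ layout:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/*
+ * Atomically acquire a read lock on the child page's object and release
+ * the lock already held on its parent ("lock-coupling").
+ */
+int
+couple_locks(DB_ENV *dbenv, u_int32_t locker,
+    DBT *child_obj, DB_LOCK *parent_lock, DB_LOCK *child_lockp)
+{
+    DB_LOCKREQ req[2];
+    int ret;
+
+    memset(req, 0, sizeof(req));
+    req[0].op = DB_LOCK_GET;
+    req[0].mode = DB_LOCK_READ;
+    req[0].obj = child_obj;
+    req[1].op = DB_LOCK_PUT;
+    req[1].lock = *parent_lock;
+
+    if ((ret = lock_vec(dbenv, locker, 0, req, 2, NULL)) != 0)
+        return (ret);
+
+    *child_lockp = req[0].lock;
+    return (0);
+}</pre></blockquote>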
+<p>The three interfaces, <a href="../../api_c/lock_get.html">lock_get</a>, <a href="../../api_c/lock_put.html">lock_put</a> and <a href="../../api_c/lock_vec.html">lock_vec</a>,
+are fully compatible, and may be used interchangeably.
+<p>All locks explicitly requested by an application should be released via
+calls to <a href="../../api_c/lock_put.html">lock_put</a> or <a href="../../api_c/lock_vec.html">lock_vec</a>.
+<p>The <a href="../../api_c/lock_stat.html">lock_stat</a> function returns information about the status of
+the lock subsystem. It is the programmatic interface used by the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<p>The locking subsystem is closed by the call to <a href="../../api_c/env_close.html">DBENV-&gt;close</a>.
+<p>Finally, the entire locking subsystem may be discarded using the
+<a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> interface.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/runtime.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/page.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/max.html b/bdb/docs/ref/lock/max.html
new file mode 100644
index 00000000000..23622909035
--- /dev/null
+++ b/bdb/docs/ref/lock/max.html
@@ -0,0 +1,88 @@
+<!--$Id: max.so,v 10.2 2000/12/21 19:11:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring locking: sizing the system</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/nondb.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring locking: sizing the system</h1>
+<p>The lock system is sized using the following three functions:
+<p><blockquote><pre><a href="../../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>
+<a href="../../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>
+<a href="../../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a></pre></blockquote>
+<p>The <a href="../../api_c/env_set_lk_max_locks.html">DBENV-&gt;set_lk_max_locks</a>, <a href="../../api_c/env_set_lk_max_lockers.html">DBENV-&gt;set_lk_max_lockers</a>
+and <a href="../../api_c/env_set_lk_max_objects.html">DBENV-&gt;set_lk_max_objects</a> functions specify, respectively, the
+maximum number of locks, lockers and locked objects supported by the
+lock subsystem. The maximum number of locks is the number of locks that
+can be simultaneously requested in the system. The maximum number of
+lockers is the number of lockers that can simultaneously request locks
+in the system. The maximum number of lock objects is the number of
+objects that can simultaneously be locked in the system. Selecting
+appropriate values requires an understanding of your application and
+its databases. If the values are too small, then requests for locks in
+an application will fail. If the values are too large, then the locking
+subsystem will consume more resources than necessary. It is better
+to err in the direction of allocating too many locks, lockers and
+objects, because increasing these values does not require large amounts
+of additional resources.
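+<p>A minimal sizing sketch using the functions listed above; the numbers are
+placeholders to be replaced with values derived from the lock statistics:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+size_lock_region(DB_ENV *dbenv, const char *home)
+{
+    int ret;
+
+    /* Placeholder limits; derive real values from db_stat output. */
+    if ((ret = dbenv-&gt;set_lk_max_locks(dbenv, 5000)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;set_lk_max_lockers(dbenv, 1000)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;set_lk_max_objects(dbenv, 5000)) != 0)
+        return (ret);
+
+    /* The limits must be set before the environment is opened. */
+    return (dbenv-&gt;open(dbenv, home,
+        DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0));
+}</pre></blockquote>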
+<p>The recommended algorithm for selecting the maximum number of locks,
+lockers and lock objects is to run the application under stressful
+conditions and then review the lock system's statistics to determine
+the maximum number of locks, lockers and lock objects that were used.
+Then, double these values for safety. However, in some large
+applications, finer granularity of control is necessary in order to
+minimize the size of the lock subsystem.
+<p>The maximum number of lockers can be estimated as follows:
+<ul type=disc>
+<li>If the
+database environment is configured to use transactions, then the maximum
+number of lockers needed is the number of simultaneously active
+transactions and child transactions (where a child transaction remains active
+until its parent commits or aborts, not merely until the child itself does).
+<li>If the database environment is not configured to use transactions, then
+the maximum number of lockers needed is the number of simultaneous
+non-cursor operations plus an additional locker for every simultaneously
+open cursor.
+</ul>
+<p>The maximum number of lock objects needed can be estimated as follows:
+<ul type=disc>
+<li>For Btree and Recno access methods, you will need, at a minimum, one
+lock object per level of the database tree. (Unless keys are quite
+large with respect to the page size, neither Recno nor Btree database
+trees should ever be deeper than five levels.) Then, you will need one
+lock object for each leaf page of the database tree that will be
+simultaneously accessed.
+<li>For the Queue access method you will need one lock object per record
+that is simultaneously accessed. To this, add one lock object per page
+that will be simultaneously accessed. (Since the Queue access method
+uses fixed-length records, and the database page size is known, it is
+possible to calculate the number of pages and therefore, lock objects,
+required.) Deleted records skipped by a <a href="../../api_c/dbc_get.html#DB_NEXT">DB_NEXT</a> or
+<a href="../../api_c/dbc_get.html#DB_PREV">DB_PREV</a> operation do not require a separate lock object.
+Further, if your application is using transactions, then no database
+operation will ever use more than three lock objects at any time.
+<li>For the Hash access method you only need a single lock object.
+</ul>
+<p>For all access methods, you should then add an additional lock object
+per database, for the database's metadata page.
+<p>The maximum number of locks required by an application cannot be easily
+estimated. It is possible to calculate a maximum number of locks by
+multiplying the maximum number of lockers, times the maximum number of
+lock objects, times two (two for the two possible lock modes for each
+object, read and write). However, this is a pessimal value, and real
+applications are unlikely to actually need that many locks. Review of
+the lock subsystem statistics is the best way to determine this value.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/nondb.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/nondb.html b/bdb/docs/ref/lock/nondb.html
new file mode 100644
index 00000000000..4fb37d6d7b0
--- /dev/null
+++ b/bdb/docs/ref/lock/nondb.html
@@ -0,0 +1,50 @@
+<!--$Id: nondb.so,v 10.10 2000/12/08 20:43:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Locking and non-Berkeley DB applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/max.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Locking and non-Berkeley DB applications</h1>
+<p>The locking subsystem is useful outside the context of Berkeley DB. It can be
+used to manage concurrent access to any collection of either ephemeral or
+persistent objects. That is, the lock region can persist across
+invocations of an application, so it can be used to provide long-term
+locking (e.g., conference room scheduling).
+<p>In order to use the locking subsystem in such a general way, the
+applications must adhere to a convention for naming objects and lockers.
+Consider the conference room scheduling problem described above. Assume
+there are three conference rooms and that we wish to schedule them in
+half-hour intervals.
+<p>The scheduling application must then select a way to identify each
+conference room/time slot combination. In this case, we could describe
+the objects being locker as bytestrings consisting of the conference room
+name, the date on which it is needed, and the beginning of the appropriate
+half-hour slot.
+<p>Lockers are 32-bit numbers, so we might choose to use the User ID of the
+individual running the scheduling program. To schedule half-hour slots,
+all the application need do is issue a <a href="../../api_c/lock_get.html">lock_get</a> call for the
+appropriate locker/object pair. To schedule a longer slot, the
+application would issue a <a href="../../api_c/lock_vec.html">lock_vec</a> call with one <a href="../../api_c/lock_get.html">lock_get</a>
+operation per half-hour up to the total length. If the <a href="../../api_c/lock_vec.html">lock_vec</a>
+call fails, the application would have to release the parts of the time
+slot that were obtained.
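+<p>A sketch of reserving a single half-hour slot under this convention,
+assuming the 3.x-era C interfaces; the object encoding and the use of
+DB_LOCK_NOWAIT (fail rather than wait if the slot is already taken) are
+illustrative choices:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+
+/* Reserve one half-hour slot; the object is "room:date:start" text. */
+int
+reserve_slot(DB_ENV *dbenv, const char *room,
+    const char *date, const char *start, DB_LOCK *lockp)
+{
+    DBT obj;
+    char name[64];
+
+    (void)snprintf(name, sizeof(name), "%s:%s:%s", room, date, start);
+
+    memset(&amp;obj, 0, sizeof(obj));
+    obj.data = name;
+    obj.size = (u_int32_t)strlen(name);
+
+    /* Use the user ID as the locker, per the convention above. */
+    return (lock_get(dbenv,
+        (u_int32_t)getuid(), DB_LOCK_NOWAIT, &amp;obj, DB_LOCK_WRITE, lockp));
+}</pre></blockquote>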
+<p>To cancel a reservation, the application would make the appropriate
+<a href="../../api_c/lock_put.html">lock_put</a> calls. To reschedule a reservation, the <a href="../../api_c/lock_get.html">lock_get</a>
+and <a href="../../api_c/lock_put.html">lock_put</a> calls could all be made inside of a single
+<a href="../../api_c/lock_vec.html">lock_vec</a> call. The output of <a href="../../api_c/lock_stat.html">lock_stat</a> could be
+post-processed into a human-readable schedule of conference room use.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/max.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/notxn.html b/bdb/docs/ref/lock/notxn.html
new file mode 100644
index 00000000000..16b00cf66bf
--- /dev/null
+++ b/bdb/docs/ref/lock/notxn.html
@@ -0,0 +1,46 @@
+<!--$Id: notxn.so,v 10.10 2000/03/18 21:43:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Locking without transactions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/stdmode.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/twopl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Locking without transactions</h1>
+<p>If an application runs with locking specified, but not transactions (e.g.,
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> is called with <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> or
+<a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> specified, but not <a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a>), locks are
+normally acquired during each Berkeley DB operation and released before the
+operation returns to the caller. The only exception is in the case of
+cursor operations. As cursors identify a particular position in a file,
+a cursor must retain a read lock across cursor calls to make sure that
+the position remains uniquely identifiable during the next cursor call,
+because an operation using <a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a> must reference the same
+record as the previous cursor call. Such cursor locks cannot be released
+until either the cursor is reset using the <a href="../../api_c/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a>,
+<a href="../../api_c/dbc_get.html#DB_SET">DB_SET</a>, <a href="../../api_c/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a>, <a href="../../api_c/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a>, or
+<a href="../../api_c/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a> functionality, in which case a new cursor lock is
+established, or the cursor is closed. As a result, application designers
+are encouraged to close cursors as soon as possible.
+<p>It is important to realize that concurrent applications that use locking
+must ensure that two concurrent threads do not interfere with each other.
+However, as Btree and Hash access method page splits can occur at any
+time, there is virtually no way to guarantee that an application which
+writes the database cannot deadlock. Applications running without the
+protection of transactions may deadlock and, when they do, can leave
+the database in an inconsistent state. Applications that need concurrent
+access, but not transactions, are more safely implemented using the Berkeley DB Concurrent Data Store
+product.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/stdmode.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/twopl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/page.html b/bdb/docs/ref/lock/page.html
new file mode 100644
index 00000000000..a7e43b3af66
--- /dev/null
+++ b/bdb/docs/ref/lock/page.html
@@ -0,0 +1,62 @@
+<!--$Id: page.so,v 10.12 2000/03/18 21:43:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Page locks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/stdmode.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Page locks</h1>
+<p>Under normal operation, the access methods use page locking. The pagesize
+of a database is set when the database is created and may be specified by
+calling the <a href="../../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a> function. If not specified, the Berkeley DB
+package tries to select a pagesize that will provide the best I/O
+performance by setting the page size equal to the block size of the
+underlying file system.
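+<p>A minimal sketch of setting the page size explicitly at database-creation
+time, assuming the 3.x-era C interfaces; the 8KB value is only an example:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+create_with_pagesize(DB_ENV *dbenv, const char *file)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&amp;dbp, dbenv, 0)) != 0)
+        return (ret);
+
+    /* Must be set before creation; a power of two between 512B and 64KB. */
+    if ((ret = dbp-&gt;set_pagesize(dbp, 8 * 1024)) != 0)
+        goto err;
+
+    if ((ret = dbp-&gt;open(dbp, file, NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+        goto err;
+
+    return (dbp-&gt;close(dbp, 0));
+
+err:    (void)dbp-&gt;close(dbp, 0);
+    return (ret);
+}</pre></blockquote>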
+<p>In the Btree access method, Berkeley DB uses a technique called lock coupling
+to improve concurrency. The traversal of a Btree requires reading a page,
+searching that page to determine which page to search next and then
+repeating this process on the next page. Once a page has been searched,
+it will never be accessed again for this operation, unless a page split
+is required. To improve concurrency in the tree, once the next page to
+read/search has been determined, that page is locked, and then atomically
+(i.e., without relinquishing control of the lock manager) the original
+page lock is released.
+<p>As the Recno access method is built upon Btree, it too uses lock coupling
+for read operations. However, as the Recno access method must maintain
+a count of records on its internal pages, it cannot lock couple during
+write operations. Instead, it retains write locks on all internal pages
+during every update operation. For this reason, it is not possible to
+have high concurrency in the Recno access method in the presence of write
+operations.
+<p>The Queue access method uses only short-term page locks. That is, a page
+lock is released prior to requesting another page lock. Record locks are
+used for transaction isolation. This provides a high degree of concurrency
+for write operations. A metadata page is used to keep track of the head
+and tail of the queue. This page is never locked during other locking or
+I/O operations.
+<p>The Hash access method does not have such traversal issues, but because
+it implements dynamic hashing, it must always refer to its metadata while
+computing a hash function. This metadata is stored on a special page in
+the hash database. This page must therefore be read locked on every
+operation. Fortunately, it need only be write locked when new pages are
+allocated to the file, which happens in three cases: 1) a hash bucket
+becomes full and needs to split, 2) a key or data item is too large to
+fit on a normal page, and 3) the number of duplicate items for a fixed
+key becomes sufficiently large that they are moved to an auxiliary page.
+In these cases, the access method must obtain a write lock on the metadata
+page, thus requiring that all readers be blocked from entering the tree
+until the update completes.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/stdmode.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/stdmode.html b/bdb/docs/ref/lock/stdmode.html
new file mode 100644
index 00000000000..ca1cd6b0bdd
--- /dev/null
+++ b/bdb/docs/ref/lock/stdmode.html
@@ -0,0 +1,61 @@
+<!--$Id: stdmode.so,v 10.20 2000/03/18 21:43:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Standard lock modes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/page.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/notxn.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Standard lock modes</h1>
+<p>The Berkeley DB locking protocol is described by a conflict matrix. A conflict
+matrix is an n x n array where n is the number of different lock modes
+supported, and the (i, j)th entry of the array indicates whether a lock of
+mode i conflicts with a lock of mode j.
+<p>The Berkeley DB include files declare two commonly used conflict arrays:
+<p><dl compact>
+<p><dt>const u_int8_t db_rw_conflicts[ ];<dd>This is a conflict matrix for a simple scheme using shared and exclusive
+lock modes.
+<p><dt>const u_int8_t db_riw_conflicts[ ];<dd>This is a conflict matrix that involves various intent lock modes (e.g.,
+intent shared) that are used for multigranularity locking.
+</dl>
+<p>The numbers of modes associated with these matrices are DB_LOCK_RW_N and
+DB_LOCK_RIW_N, respectively.
+<p>In addition, the Berkeley DB include file defines the type <b>db_lockmode_t</b>,
+which is the type of the lock modes used with the standard tables above:
+<p><dl compact>
+<p><dt>DB_LOCK_NG<dd>not granted (always 0)
+<p><dt>DB_LOCK_READ<dd>read (shared)
+<p><dt>DB_LOCK_WRITE<dd>write (exclusive)
+</dl>
+<p>As an example, consider the basic multiple-reader/single-writer conflict
+matrix described by <b>db_rw_conflicts</b>. In the following
+example (and in the appropriate file), a 1 represents a conflict (i.e.,
+do not grant the lock if the indicated lock is held) and a 0 indicates
+that it is OK to grant the lock.
+<p>The rows indicate the lock that is held and the columns indicate the lock
+that is requested.
+<p><blockquote><pre> Notheld Read Write
+Notheld 0 0 0
+Read* 0 0 1
+Write** 0 1 1
+</pre></blockquote>
+<p><dl compact>
+<p><dt>*<dd>In this case, suppose that there is a read lock held on an object. A new
+request for a read lock would be granted, but a request for a write lock
+would not.
+<p><dt>**<dd>In this case, suppose that there is a write lock held on an object. A
+new request for either a read or write lock would be denied.
+</dl>
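+<p>For example, an application could install an equivalent conflict matrix
+in its own environment using the DBENV-&gt;set_lk_conflicts method (a
+minimal sketch; error handling and the DBENV-&gt;open call are omitted):
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* 0 = grant, 1 = conflict; rows are held locks, columns are requests. */
+u_int8_t conflicts[] = {
+    /*           Notheld  Read  Write */
+    /* Notheld */   0,     0,     0,
+    /* Read    */   0,     0,     1,
+    /* Write   */   0,     1,     1
+};
+
+DB_ENV *dbenv;
+
+(void)db_env_create(&amp;dbenv, 0);
+(void)dbenv-&gt;set_lk_conflicts(dbenv, conflicts, 3);</pre></blockquote>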
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/page.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/notxn.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/lock/twopl.html b/bdb/docs/ref/lock/twopl.html
new file mode 100644
index 00000000000..6cf112c0979
--- /dev/null
+++ b/bdb/docs/ref/lock/twopl.html
@@ -0,0 +1,50 @@
+<!--$Id: twopl.so,v 10.7 2000/03/18 21:43:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Locking with transactions: two-phase locking</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/notxn.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/am_conv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Locking with transactions: two-phase locking</h1>
+<p>Berkeley DB uses a locking protocol called two-phase locking. This is the
+traditional protocol used in conjunction with lock-based transaction
+systems.
+<p>In a two-phase locking (2PL) system, transactions are broken up into two
+distinct phases. During the first phase, the transaction only acquires
+locks. During the second phase, the transaction only releases locks.
+More formally, once a transaction releases a lock, it may not acquire any
+additional locks. Practically, this translates into a system where locks
+are acquired as they are needed throughout a transaction and retained
+until the transaction ends, either by committing or aborting. In Berkeley DB,
+locks are released during <a href="../../api_c/txn_abort.html">txn_abort</a> or <a href="../../api_c/txn_commit.html">txn_commit</a>. The
+only exception to this protocol occurs when we use lock-coupling to
+traverse a data structure. If the locks are held only for traversal
+purposes, then the locks may be released before transaction commit or
+abort.
+<p>For applications, the implications of 2PL are that long-running
+transactions will hold locks for a long time. When designing
+applications, consider lock contention. To reduce
+the probability of deadlock and achieve the best level of concurrency
+possible, the following guidelines are helpful.
+<p><ol>
+<p><li>When accessing multiple databases, design all transactions so
+that they access the files in the same order.
+<p><li>If possible, access your most hotly contested resources last
+(so that their locks are held for the shortest time possible).
+<p><li>If possible, use nested transactions to protect the parts of
+your transaction most likely to deadlock.
+</ol>
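+<p>For example, a transaction that updates two databases might always
+access them in the same order (a minimal sketch, assuming already-open
+dbenv, dbp_a and dbp_b handles and initialized key and data DBTs; error
+and deadlock handling are omitted, and the transaction interfaces shown
+are the standalone txn_* calls referenced above):
+<p><blockquote><pre>DB_TXN *tid;
+
+/* Locks are acquired as needed and held until commit or abort (2PL). */
+(void)txn_begin(dbenv, NULL, &amp;tid, 0);
+(void)dbp_a-&gt;put(dbp_a, tid, &amp;key, &amp;data, 0);    /* Always database A first, */
+(void)dbp_b-&gt;put(dbp_b, tid, &amp;key, &amp;data, 0);    /* then database B. */
+(void)txn_commit(tid, 0);</pre></blockquote>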
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/notxn.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/am_conv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/log/config.html b/bdb/docs/ref/log/config.html
new file mode 100644
index 00000000000..f3c94889312
--- /dev/null
+++ b/bdb/docs/ref/log/config.html
@@ -0,0 +1,40 @@
+<!--$Id: config.so,v 10.16 2001/01/18 20:31:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring logging</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Logging Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/log/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/limits.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring logging</h1>
+<p>The two aspects of logging that may be configured are the size of log
+files on disk and the size of the log buffer in memory. The
+<a href="../../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a> interface specifies the individual log file
+size for all of the applications sharing the Berkeley DB environment. Setting
+the log file size is largely a matter of convenience, and a reflection
+of the application's preferences in backup media and frequency.
+However, setting the log file size too low can cause problems, as it
+becomes possible to run out of log sequence numbers, which requires a
+full archival and an application restart to reset. See
+the <a href="../../ref/log/limits.html">Log file limits</a> section for more
+information.
+<p>The <a href="../../api_c/env_set_lg_bsize.html">DBENV-&gt;set_lg_bsize</a> interface specifies the size of the
+in-memory log buffer, in bytes. Log information is stored in memory
+until the buffer fills up or transaction commit forces the buffer to be
+written to disk. Larger buffer sizes can significantly increase
+throughput in the presence of long-running transactions, highly
+concurrent applications, or transactions producing large amounts of
+data. By default, the buffer is 32KB.
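+<p>For example, an application might select 20MB log files and a 256KB log
+buffer before opening its environment (a minimal sketch; error handling
+is omitted, and the exact DBENV-&gt;open argument list varies across
+releases):
+<p><blockquote><pre>DB_ENV *dbenv;
+
+(void)db_env_create(&amp;dbenv, 0);
+
+/* 20MB log files, 256KB in-memory log buffer. */
+(void)dbenv-&gt;set_lg_max(dbenv, 20 * 1024 * 1024);
+(void)dbenv-&gt;set_lg_bsize(dbenv, 256 * 1024);
+
+(void)dbenv-&gt;open(dbenv, "/path/to/env",
+    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0);</pre></blockquote>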
+<table><tr><td><br></td><td width="1%"><a href="../../ref/log/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/limits.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/log/intro.html b/bdb/docs/ref/log/intro.html
new file mode 100644
index 00000000000..0c41c17efa0
--- /dev/null
+++ b/bdb/docs/ref/log/intro.html
@@ -0,0 +1,58 @@
+<!--$Id: intro.so,v 10.16 2001/01/18 20:31:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB and logging</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Logging Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/lock/nondb.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB and logging</h1>
+<p>The logging subsystem is the logging facility used by Berkeley DB. It is
+largely Berkeley DB specific, although it is potentially useful outside of
+the Berkeley DB package for applications wanting write-ahead logging support.
+Applications wanting to use the log for purposes other than logging file
+modifications based on a set of open file descriptors will almost
+certainly need to make source code modifications to the Berkeley DB code
+base.
+<p>A log can be shared by any number of threads of control. The
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> interface is used to open a log. When the log is no
+longer in use, it should be closed, using the <a href="../../api_c/env_close.html">DBENV-&gt;close</a>
+interface.
+<p>Individual log entries are identified by log sequence numbers. Log
+sequence numbers are stored in an opaque object, a <a href="../../api_c/db_lsn.html">DB_LSN</a>.
+<p>The <a href="../../api_c/log_put.html">log_put</a> interface is used to append new log records to the
+log. Optionally, the <a href="../../api_c/log_put.html#DB_CHECKPOINT">DB_CHECKPOINT</a> flag can be used to output
+a checkpoint log record (indicating that the log is consistent to that
+point and recoverable after a system or application failure), as well
+as open-file information. The <a href="../../api_c/log_get.html">log_get</a> interface is used to
+retrieve log records from the log.
+<p>There are additional interfaces for integrating the log subsystem with a
+transaction processing system:
+<p><dl compact>
+<p><dt><a href="../../api_c/log_register.html">log_register</a> and <a href="../../api_c/log_unregister.html">log_unregister</a><dd>These interfaces associate files with identification numbers. These
+identification numbers are logged so that transactional recovery
+correctly associates log records with the appropriate files.
+<p><dt><a href="../../api_c/log_flush.html">log_flush</a><dd>Flushes the log up to a particular log sequence number.
+<p><dt><a href="../../api_c/log_compare.html">log_compare</a><dd>Allows applications to compare any two log sequence numbers.
+<p><dt><a href="../../api_c/log_file.html">log_file</a> <dd>Maps a log sequence number to the specific log file which contains it.
+<p><dt><a href="../../api_c/log_archive.html">log_archive</a><dd>Returns various sets of log file names. These interfaces are used for
+database administration, e.g., to determine if log files may safely be
+removed from the system.
+<p><dt><a href="../../api_c/log_stat.html">log_stat</a> <dd>The display <a href="../../utility/db_stat.html">db_stat</a> utility uses the <a href="../../api_c/log_stat.html">log_stat</a> interface
+to display statistics about the log.
+<p><dt><a href="../../api_c/env_remove.html">DBENV-&gt;remove</a><dd>The log meta-information (but not the log files themselves) may be
+removed using the <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> interface.
+</dl>
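+<p>For example, an application can walk the entire log with <a href="../../api_c/log_get.html">log_get</a>
+(a minimal sketch, assuming an already-open environment handle dbenv;
+error handling is omitted and the exact argument list may vary by
+release):
+<p><blockquote><pre>DB_LSN lsn;
+DBT rec;
+int ret;
+
+memset(&amp;rec, 0, sizeof(rec));
+
+/* Read each log record in turn until the end of the log is reached. */
+for (ret = log_get(dbenv, &amp;lsn, &amp;rec, DB_FIRST);
+    ret == 0;
+    ret = log_get(dbenv, &amp;lsn, &amp;rec, DB_NEXT)) {
+	/* rec.data and rec.size hold the log record stored at lsn. */
+}</pre></blockquote>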
+<table><tr><td><br></td><td width="1%"><a href="../../ref/lock/nondb.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/log/limits.html b/bdb/docs/ref/log/limits.html
new file mode 100644
index 00000000000..d34e5a81339
--- /dev/null
+++ b/bdb/docs/ref/log/limits.html
@@ -0,0 +1,47 @@
+<!--$Id: limits.so,v 10.23 2001/01/18 20:31:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Log file limits</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Logging Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/log/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/mp/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Log file limits</h1>
+<p>Log file names and sizes impose a limit on how long databases may be
+used in a Berkeley DB database environment. It is quite unlikely that an
+application will reach this limit; however, if the limit is reached,
+the Berkeley DB environment's databases must be dumped and reloaded.
+<p>The log file name consists of <b>log.</b> followed by 10 digits, with
+a maximum of 2,000,000,000 log files. Consider an application performing
+6000 transactions per second, for 24 hours a day, logged into 10MB log
+files, where each transaction is logging approximately 500 bytes of data.
+The calculation:
+<p><blockquote><pre>(10 * 2^20 * 2000000000) / (6000 * 500 * 365 * 60 * 60 * 24) = ~221</pre></blockquote>
+<p>indicates that the system will run out of log file names in roughly 221
+years.
+<p>There is no way to reset the log file name space in Berkeley DB. If your
+application is reaching the end of its log file name space, you must:
+<p><ol>
+<p><li>Archive your databases as if to prepare for catastrophic failure (see
+<a href="../../utility/db_archive.html">db_archive</a> for more information).
+<p><li>Dump and re-load all your databases (see <a href="../../utility/db_dump.html">db_dump</a> and
+<a href="../../utility/db_load.html">db_load</a> for more information).
+<p><li>Remove all of the log files from the database environment. Note that this
+is the only situation in which all of the log files are removed from an
+environment; in all other cases, at least a single log file is
+retained.
+<p><li>Restart your application.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/log/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/mp/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/mp/config.html b/bdb/docs/ref/mp/config.html
new file mode 100644
index 00000000000..cf311516de3
--- /dev/null
+++ b/bdb/docs/ref/mp/config.html
@@ -0,0 +1,55 @@
+<!--$Id: config.so,v 10.17 2000/10/03 17:17:35 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring the memory pool</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Memory Pool Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/mp/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring the memory pool</h1>
+<p>There are two interfaces used for configuring the memory pool.
+<p>The most important tuning parameter for almost all applications, including
+Berkeley DB applications, is the size of the pool. There are two ways to
+specify the pool size. First, calling the <a href="../../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a> function
+specifies the pool size for all of the applications sharing the Berkeley DB
+environment. Second, the <a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> function specifies
+a pool size only for the specific database. Note that it is
+meaningless to call <a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> for a database opened inside
+of a Berkeley DB environment, since the environment pool size will override any
+pool size specified for a single database. For information on tuning the
+Berkeley DB cache size, see <a href="../../ref/am_conf/cachesize.html">Selecting
+a cache size</a>.
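+<p>For example, to give all applications sharing an environment a 10MB
+cache in a single region (a minimal sketch; error handling and the
+DBENV-&gt;open call are omitted):
+<p><blockquote><pre>DB_ENV *dbenv;
+
+(void)db_env_create(&amp;dbenv, 0);
+
+/* 0 gigabytes plus 10 megabytes, in one cache region. */
+(void)dbenv-&gt;set_cachesize(dbenv, 0, 10 * 1024 * 1024, 1);</pre></blockquote>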
+<p>The second memory pool configuration interface specifies the maximum size
+of backing files to map into the process address space instead of copying
+pages through the local cache. Only read-only database files can be
+mapped into process memory. Because of the requirements of the Berkeley DB
+transactional implementation, log records describing database changes must
+be written to disk before the actual database changes. As mapping
+read-write database files into process memory would permit the underlying
+operating system to write modified database pages to disk at will, it is not
+supported.
+<p>Mapping files into the process address space can result in
+better-than-usual performance, as available virtual memory is normally
+much larger than the local cache, and page faults are faster than page
+copying on many systems. However, in the presence of limited virtual
+memory it can cause resource starvation, and in the presence of large
+databases, it can result in immense process sizes.
+<p>To specify that no files are to be mapped into the process address space,
+specify the <a href="../../api_c/env_open.html#DB_NOMMAP">DB_NOMMAP</a> flag to the <a href="../../api_c/env_set_flags.html">DBENV-&gt;set_flags</a> interface.
+To specify that any individual file should not be mapped into the process
+address space, specify the <a href="../../api_c/env_open.html#DB_NOMMAP">DB_NOMMAP</a> flag to the
+<a href="../../api_c/memp_fopen.html">memp_fopen</a> interface. To limit the size of files mapped into the
+process address space, use the <a href="../../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a> function.
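+<p>For example (a minimal sketch, assuming an unopened DB_ENV handle
+dbenv; error handling is omitted):
+<p><blockquote><pre>/* Never map any file into the process address space ... */
+(void)dbenv-&gt;set_flags(dbenv, DB_NOMMAP, 1);
+
+/* ... or, alternatively, map read-only files of no more than 8MB. */
+(void)dbenv-&gt;set_mp_mmapsize(dbenv, 8 * 1024 * 1024);</pre></blockquote>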
+<table><tr><td><br></td><td width="1%"><a href="../../ref/mp/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/mp/intro.html b/bdb/docs/ref/mp/intro.html
new file mode 100644
index 00000000000..2b52a5775ce
--- /dev/null
+++ b/bdb/docs/ref/mp/intro.html
@@ -0,0 +1,59 @@
+<!--$Id: intro.so,v 10.15 2001/01/18 20:31:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB and the memory pool</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Memory Pool Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/log/limits.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/mp/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB and the memory pool</h1>
+<p>The memory pool subsystem is the general-purpose shared memory buffer pool
+used by Berkeley DB. This module is useful outside of the Berkeley DB package for
+processes that require page-oriented, cached, shared file access.
+<p>A memory pool is a cache in shared memory, shared by any number of processes
+and by threads within those processes. The <a href="../../api_c/env_open.html">DBENV-&gt;open</a> interface opens, and
+optionally creates, a memory pool. When that pool is no longer in use,
+it should be closed, using the <a href="../../api_c/env_close.html">DBENV-&gt;close</a> interface.
+<p>The <a href="../../api_c/memp_fopen.html">memp_fopen</a> interface opens an underlying file within the
+memory pool. When that file is no longer in use, it should be closed,
+using the <a href="../../api_c/memp_fclose.html">memp_fclose</a> interface. The <a href="../../api_c/memp_fget.html">memp_fget</a> interface
+is used to retrieve pages from files in the pool. All retrieved pages
+must be subsequently returned using the <a href="../../api_c/memp_fput.html">memp_fput</a> interface. At
+the time that pages are returned, they may be marked <b>dirty</b>, which
+causes them to be written to the backing disk file before being discarded
+from the pool. If there is insufficient room to bring a new page into the
+pool, a page is selected to be discarded from the pool. If that page is
+dirty, it is first written to the backing file. The page is selected
+using a somewhat modified least-recently-used algorithm. Pages in files
+may also be explicitly marked clean or dirty using the <a href="../../api_c/memp_fset.html">memp_fset</a>
+interface. All dirty pages in the pool from any underlying file may also
+be flushed as a group using the <a href="../../api_c/memp_fsync.html">memp_fsync</a> interface.
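+<p>For example, a sketch of fetching, modifying and returning a single page
+(error handling is omitted, the exact memp_fopen argument list varies
+across releases, and dbenv is assumed to be an already-open environment
+handle):
+<p><blockquote><pre>DB_MPOOLFILE *mpf;
+db_pgno_t pgno;
+void *pagep;
+
+/* Open a file in the pool using a 4KB page size. */
+(void)memp_fopen(dbenv, "myfile", 0, 0, 4 * 1024, NULL, &amp;mpf);
+
+/* Fetch page 0, modify it, and return it marked dirty. */
+pgno = 0;
+(void)memp_fget(mpf, &amp;pgno, 0, &amp;pagep);
+/* ... modify the page in place ... */
+(void)memp_fput(mpf, pagep, DB_MPOOL_DIRTY);
+
+(void)memp_fclose(mpf);</pre></blockquote>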
+<p>There are additional interfaces for manipulating the entire memory pool:
+<ul type=disc>
+<li>It is possible to gradually flush buffers from the pool in order to
+maintain a consistent percentage of clean buffers in the pool using the
+<a href="../../api_c/memp_trickle.html">memp_trickle</a> interface.
+<li>The <a href="../../utility/db_stat.html">db_stat</a> utility uses the <a href="../../api_c/memp_stat.html">memp_stat</a> interface to
+display statistics about the efficiency of the pool.
+<li>As some conversion may be necessary when pages are read or written, the
+<a href="../../api_c/memp_register.html">memp_register</a> function allows applications to specify automatic
+input and output processing in these cases.
+<li>There is one additional interface that is intended for manipulating the
+memory pool, but which is specific to database systems. The
+<a href="../../api_c/memp_sync.html">memp_sync</a> interface flushes dirty pages from all files held in
+the pool up to a specified database log sequence number.
+<li>Finally, the entire pool may be discarded using the <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a>
+interface.
+</ul>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/log/limits.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/mp/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/perl/intro.html b/bdb/docs/ref/perl/intro.html
new file mode 100644
index 00000000000..da5d93a6af7
--- /dev/null
+++ b/bdb/docs/ref/perl/intro.html
@@ -0,0 +1,42 @@
+<!--$Id: intro.so,v 10.24 2001/01/09 18:57:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Using Berkeley DB with Perl</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Perl</dl></h3></td>
+<td width="1%"><a href="../../ref/java/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Using Berkeley DB with Perl</h1>
+<p>The original Perl module for Berkeley DB was DB_File, which was written to
+interface to Berkeley DB version 1.85. The newer Perl module for Berkeley DB is
+BerkeleyDB, which was written to interface to version 2.0 and subsequent
+releases. Because Berkeley DB version 2.X has a compatibility API for version
+1.85, you can (and should!) build DB_File using version 2.X of Berkeley DB,
+although DB_File will still only support the 1.85 functionality.
+<p>DB_File is distributed with the standard Perl source distribution (look
+in the directory "ext/DB_File"). You can find both DB_File and BerkeleyDB
+on CPAN, the Comprehensive Perl Archive Network of mirrored FTP sites.
+The master CPAN site is
+<a href="ftp://ftp.funet.fi/">ftp://ftp.funet.fi/</a>.
+<p>Versions of both BerkeleyDB and DB_File that are known to work correctly
+with each release of Berkeley DB are included in the distributed Berkeley DB source
+tree, in the subdirectories <b>perl.BerkeleyDB</b> and
+<b>perl.DB_File</b>. Each of those directories contains a
+<b>README</b> file with instructions on installing and using those
+modules.
+<p>The Perl interface is not maintained by Sleepycat Software. Questions
+about the DB_File and BerkeleyDB modules are best asked on the Usenet
+newsgroup comp.lang.perl.modules.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/java/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/pindex.src b/bdb/docs/ref/pindex.src
new file mode 100644
index 00000000000..0e122ceb29e
--- /dev/null
+++ b/bdb/docs/ref/pindex.src
@@ -0,0 +1,212 @@
+__APIREL__/ref/am/close.html#2 @closing a database
+__APIREL__/ref/am/count.html#2 @counting data items for a key
+__APIREL__/ref/am/curclose.html#2 @closing a cursor
+__APIREL__/ref/am/curclose.html#3 closing a @cursor
+__APIREL__/ref/am/curdel.html#2 @deleting records with a cursor
+__APIREL__/ref/am/curdel.html#3 deleting records with a @cursor
+__APIREL__/ref/am/curdup.html#2 @duplicating a cursor
+__APIREL__/ref/am/curdup.html#3 duplicating a @cursor
+__APIREL__/ref/am/curget.html#2 @retrieving records with a cursor
+__APIREL__/ref/am/curget.html#3 retrieving records with a @cursor
+__APIREL__/ref/am/curput.html#2 @storing records with a cursor
+__APIREL__/ref/am/curput.html#3 storing records with a @cursor
+__APIREL__/ref/am/cursor.html#2 database @cursors
+__APIREL__/ref/am/delete.html#2 @deleting records
+__APIREL__/ref/am/error.html#2 @error handling
+__APIREL__/ref/am/get.html#2 @retrieving records
+__APIREL__/ref/am/join.html#2 logical @join
+__APIREL__/ref/am/open.html#2 @opening a database
+__APIREL__/ref/am/partial.html#2 @partial record storage and retrieval
+__APIREL__/ref/am/put.html#2 @storing records
+__APIREL__/ref/am/stability.html#2 @cursor stability
+__APIREL__/ref/am/stability.html#3 cursor @stability
+__APIREL__/ref/am/stat.html#2 database @statistics
+__APIREL__/ref/am/sync.html#2 flushing the database @cache
+__APIREL__/ref/am/upgrade.html#2 @upgrading databases
+__APIREL__/ref/am/verify.html#2 database @verification
+__APIREL__/ref/am/verify.html#3 database @salvage
+__APIREL__/ref/am/verify.html#4 recovering @corrupted databases
+__APIREL__/ref/am_conf/bt_compare.html#2 specifying a Btree @comparison function
+__APIREL__/ref/am_conf/bt_recnum.html#2 retrieving Btree records by @number
+__APIREL__/ref/am_conf/byteorder.html#2 selecting a @byte order
+__APIREL__/ref/am_conf/cachesize.html#2 selecting a @cache size
+__APIREL__/ref/am_conf/dup.html#2 @duplicate data items
+__APIREL__/ref/am_conf/extentsize.html#2 selecting a Queue @extent size
+__APIREL__/ref/am_conf/h_ffactor.html#2 page @fill factor
+__APIREL__/ref/am_conf/h_hash.html#2 specifying a database @hash
+__APIREL__/ref/am_conf/h_nelem.html#2 @hash table size
+__APIREL__/ref/am_conf/intro.html#2 @access methods
+__APIREL__/ref/am_conf/logrec.html#2 logical @record numbers
+__APIREL__/ref/am_conf/pagesize.html#2 selecting a @page size
+__APIREL__/ref/am_conf/re_source.html#2 @text backing files
+__APIREL__/ref/am_conf/recno.html#2 managing @record-based databases
+__APIREL__/ref/am_conf/renumber.html#2 logically renumbering @records
+__APIREL__/ref/am_conf/select.html#2 selecting an @access method
+__APIREL__/ref/arch/apis.html#2 programmatic @APIs
+__APIREL__/ref/arch/utilities.html#2 @utilities
+__APIREL__/ref/build_unix/aix.html#2 @AIX
+__APIREL__/ref/build_unix/conf.html#2 @configuring Berkeley DB for UNIX systems
+__APIREL__/ref/build_unix/conf.html#3 configuring Berkeley DB for @UNIX systems
+__APIREL__/ref/build_unix/conf.html#4 configuring without large @file support
+__APIREL__/ref/build_unix/conf.html#--disable-bigfile Configuring Berkeley DB@--disable-bigfile
+__APIREL__/ref/build_unix/conf.html#5 configuring Berkeley DB @1.85 API compatibility
+__APIREL__/ref/build_unix/conf.html#--enable-compat185 Configuring Berkeley DB@--enable-compat185
+__APIREL__/ref/build_unix/conf.html#6 configuring the @C++ API
+__APIREL__/ref/build_unix/conf.html#--enable-cxx Configuring Berkeley DB@--enable-cxx
+__APIREL__/ref/build_unix/conf.html#--enable-debug Configuring Berkeley DB@--enable-debug
+__APIREL__/ref/build_unix/conf.html#--enable-debug_rop Configuring Berkeley DB@--enable-debug_rop
+__APIREL__/ref/build_unix/conf.html#--enable-debug_wop Configuring Berkeley DB@--enable-debug_wop
+__APIREL__/ref/build_unix/conf.html#--enable-diagnostic Configuring Berkeley DB@--enable-diagnostic
+__APIREL__/ref/build_unix/conf.html#7 building a utility to dump Berkeley DB @1.85 databases
+__APIREL__/ref/build_unix/conf.html#--enable-dump185 Configuring Berkeley DB@--enable-dump185
+__APIREL__/ref/build_unix/conf.html#8 configuring @shared libraries
+__APIREL__/ref/build_unix/conf.html#9 configuring @dynamic shared libraries
+__APIREL__/ref/build_unix/conf.html#--enable-dynamic Configuring Berkeley DB@--enable-dynamic
+__APIREL__/ref/build_unix/conf.html#10 configuring the @Java API
+__APIREL__/ref/build_unix/conf.html#--enable-java Configuring Berkeley DB@--enable-java
+__APIREL__/ref/build_unix/conf.html#--enable-posixmutexes Configuring Berkeley DB@--enable-posixmutexes
+__APIREL__/ref/build_unix/conf.html#11 configuring a @RPC client/server
+__APIREL__/ref/build_unix/conf.html#--enable-rpc Configuring Berkeley DB@--enable-rpc
+__APIREL__/ref/build_unix/conf.html#--enable-shared Configuring Berkeley DB@--enable-shared
+__APIREL__/ref/build_unix/conf.html#12 configuring the @Tcl API
+__APIREL__/ref/build_unix/conf.html#--enable-tcl Configuring Berkeley DB@--enable-tcl
+__APIREL__/ref/build_unix/conf.html#13 configuring the @test suite
+__APIREL__/ref/build_unix/conf.html#--enable-test Configuring Berkeley DB@--enable-test
+__APIREL__/ref/build_unix/conf.html#--enable-uimutexes Configuring Berkeley DB@--enable-uimutexes
+__APIREL__/ref/build_unix/conf.html#--enable-umrw Configuring Berkeley DB@--enable-umrw
+__APIREL__/ref/build_unix/conf.html#--with-tcl=DIR Configuring Berkeley DB@--with-tcl=DIR
+__APIREL__/ref/build_unix/flags.html#2 changing @compile or load options
+__APIREL__/ref/build_unix/flags.html#3 changing compile or @load options
+__APIREL__/ref/build_unix/freebsd.html#2 @FreeBSD
+__APIREL__/ref/build_unix/hpux.html#2 @HP-UX
+__APIREL__/ref/build_unix/install.html#2 @installing Berkeley DB for UNIX systems
+__APIREL__/ref/build_unix/intro.html#2 @building for UNIX
+__APIREL__/ref/build_unix/irix.html#2 @IRIX
+__APIREL__/ref/build_unix/linux.html#2 @Linux
+__APIREL__/ref/build_unix/notes.html#2 @building for UNIX FAQ
+__APIREL__/ref/build_unix/notes.html#3 building for @UNIX FAQ
+__APIREL__/ref/build_unix/osf1.html#2 @OSF/1
+__APIREL__/ref/build_unix/qnx.html#2 @QNX
+__APIREL__/ref/build_unix/sco.html#2 @SCO
+__APIREL__/ref/build_unix/shlib.html#2 @shared libraries
+__APIREL__/ref/build_unix/solaris.html#2 @Solaris
+__APIREL__/ref/build_unix/sunos.html#2 @SunOS
+__APIREL__/ref/build_unix/test.html#2 running the @test suite under UNIX
+__APIREL__/ref/build_unix/ultrix.html#2 @Ultrix
+__APIREL__/ref/build_vxworks/faq.html#2 @building for VxWorks FAQ
+__APIREL__/ref/build_vxworks/faq.html#3 building for @VxWorks FAQ
+__APIREL__/ref/build_vxworks/intro.html#2 @building for VxWorks
+__APIREL__/ref/build_vxworks/notes.html#2 @VxWorks notes
+__APIREL__/ref/build_win/faq.html#2 @building for Windows FAQ
+__APIREL__/ref/build_win/faq.html#3 building for @Windows FAQ
+__APIREL__/ref/build_win/intro.html#2 @building for Win32
+__APIREL__/ref/build_win/notes.html#2 @Windows notes
+__APIREL__/ref/build_win/test.html#2 running the @test suite under Windows
+__APIREL__/ref/build_win/test.html#3 running the test suite under @Windows
+__APIREL__/ref/cam/intro.html#2 @Concurrent Data Store
+__APIREL__/ref/debug/common.html#2 @debugging applications
+__APIREL__/ref/distrib/layout.html#2 @source code layout
+__APIREL__/ref/dumpload/text.html#2 loading @text into databases
+__APIREL__/ref/dumpload/utility.html#2 dumping/loading @text to/from databases
+__APIREL__/ref/env/create.html#2 database @environment
+__APIREL__/ref/env/naming.html#2 file @naming
+__APIREL__/ref/env/naming.html#db_home File naming@db_home
+__APIREL__/ref/env/naming.html#DB_HOME File naming@DB_HOME
+__APIREL__/ref/env/naming.html#DB_CONFIG File naming@DB_CONFIG
+__APIREL__/ref/env/remote.html#2 remote @filesystems
+__APIREL__/ref/env/security.html#2 @security
+__APIREL__/ref/intro/products.html#2 Sleepycat Software's Berkeley DB @products
+__APIREL__/ref/install/file.html#2 @/etc/magic
+__APIREL__/ref/install/file.html#3 @file utility
+__APIREL__/ref/java/compat.html#2 @Java compatibility
+__APIREL__/ref/java/conf.html#2 @Java configuration
+__APIREL__/ref/java/faq.html#2 Java @FAQ
+__APIREL__/ref/java/faq.html#3 @Java FAQ
+__APIREL__/ref/lock/am_conv.html#2 @locking conventions
+__APIREL__/ref/lock/cam_conv.html#2 Berkeley DB Concurrent Data Store @locking conventions
+__APIREL__/ref/lock/config.html#2 @locking configuration
+__APIREL__/ref/lock/dead.html#2 @deadlocks
+__APIREL__/ref/lock/intro.html#2 @locking introduction
+__APIREL__/ref/lock/max.html#2 sizing the @locking subsystem
+__APIREL__/ref/lock/nondb.html#2 @locking and non-Berkeley DB applications
+__APIREL__/ref/lock/notxn.html#2 @locking without transactions
+__APIREL__/ref/lock/page.html#2 page-level @locking
+__APIREL__/ref/lock/stdmode.html#2 standard @lock modes
+__APIREL__/ref/lock/twopl.html#2 two-phase @locking
+__APIREL__/ref/log/config.html#2 @logging configuration
+__APIREL__/ref/log/intro.html#2 @logging introduction
+__APIREL__/ref/log/limits.html#2 @log file limits
+__APIREL__/ref/mp/config.html#2 @memory pool configuration
+__APIREL__/ref/perl/intro.html#2 @Perl
+__APIREL__/ref/program/appsignals.html#2 application @signal handling
+__APIREL__/ref/program/byteorder.html#2 @byte ordering
+__APIREL__/ref/program/byteorder.html#3 byte @endian
+__APIREL__/ref/program/compatible.html#2 @interface compatibility
+__APIREL__/ref/program/dbsizes.html#2 database @limits
+__APIREL__/ref/program/diskspace.html#2 @disk space requirements
+__APIREL__/ref/program/environ.html#2 @environment variables
+__APIREL__/ref/program/errorret.html#2 @error returns
+__APIREL__/ref/program/errorret.html#3 @error name space
+__APIREL__/ref/program/errorret.html#DB_NOTFOUND Error returns to applications@DB_NOTFOUND
+__APIREL__/ref/program/errorret.html#DB_KEYEMPTY Error returns to applications@DB_KEYEMPTY
+__APIREL__/ref/program/errorret.html#DB_LOCK_DEADLOCK Error returns to applications@DB_LOCK_DEADLOCK
+__APIREL__/ref/program/errorret.html#DB_LOCK_NOTGRANTED Error returns to applications@DB_LOCK_NOTGRANTED
+__APIREL__/ref/program/errorret.html#DB_RUNRECOVERY Error returns to applications@DB_RUNRECOVERY
+__APIREL__/ref/program/mt.html#2 building @threaded applications
+__APIREL__/ref/program/namespace.html#2 Berkeley DB library @name spaces
+__APIREL__/ref/program/scope.html#2 Berkeley DB handle @scope
+__APIREL__/ref/program/scope.html#3 Berkeley DB @free-threaded handles
+__APIREL__/ref/rpc/client.html#2 @RPC client
+__APIREL__/ref/rpc/server.html#2 @RPC server
+__APIREL__/ref/sendmail/intro.html#2 @Sendmail
+__APIREL__/ref/tcl/intro.html#2 loading Berkeley DB with @Tcl
+__APIREL__/ref/tcl/faq.html#2 Tcl @FAQ
+__APIREL__/ref/tcl/faq.html#3 @Tcl FAQ
+__APIREL__/ref/tcl/program.html#2 @Tcl API programming notes
+__APIREL__/ref/tcl/using.html#2 using Berkeley DB with @Tcl
+__APIREL__/ref/test/run.html#2 running the @test suite
+__APIREL__/ref/transapp/admin.html#2 administering @transaction protected applications
+__APIREL__/ref/transapp/archival.html#2 archival in @transaction protected applications
+__APIREL__/ref/transapp/archival.html#3 @catastrophic recovery
+__APIREL__/ref/transapp/checkpoint.html#2 checkpoints in @transaction protected applications
+__APIREL__/ref/transapp/deadlock.html#2 deadlock detection in @transaction protected applications
+__APIREL__/ref/transapp/filesys.html#2 recovery and @filesystem operations
+__APIREL__/ref/transapp/intro.html#2 @Transactional Data Store
+__APIREL__/ref/transapp/logfile.html#2 @log file removal
+__APIREL__/ref/transapp/reclimit.html#2 Berkeley DB @recoverability
+__APIREL__/ref/transapp/recovery.html#2 recovery in @transaction protected applications
+__APIREL__/ref/transapp/throughput.html#2 @transaction throughput
+__APIREL__/ref/txn/config.html#2 @transaction configuration
+__APIREL__/ref/txn/intro.html#2 Berkeley DB and @transactions
+__APIREL__/ref/txn/limits.html#2 @transaction limits
+__APIREL__/ref/txn/nested.html#2 nested @transactions
+__APIREL__/ref/upgrade.2.0/intro.html#2 Upgrading to release @2.0
+__APIREL__/ref/upgrade.3.0/intro.html#2 Upgrading to release @3.0
+__APIREL__/ref/upgrade.3.1/intro.html#2 Upgrading to release @3.1
+__APIREL__/ref/upgrade.3.2/intro.html#2 Upgrading to release @3.2
+__APIREL__/ref/xa/config.html#2 configuring Berkeley DB with the @Tuxedo System
+__APIREL__/ref/xa/intro.html#2 @XA Resource Manager
+__APIREL__/utility/berkeley_db_svc.html#2 @berkeley_db_svc
+__APIREL__/utility/berkeley_db_svc.html#3 utility to support @RPC client/server
+__APIREL__/utility/db_archive.html#2 @db_archive
+__APIREL__/utility/db_archive.html#3 utility to @archive log files
+__APIREL__/utility/db_checkpoint.html#2 @db_checkpoint
+__APIREL__/utility/db_checkpoint.html#3 utility to take @checkpoints
+__APIREL__/utility/db_deadlock.html#2 @db_deadlock
+__APIREL__/utility/db_deadlock.html#3 utility to detect @deadlocks
+__APIREL__/utility/db_dump.html#2 @db_dump
+__APIREL__/utility/db_dump.html#3 utility to @dump databases as text files
+__APIREL__/utility/db_load.html#2 @db_load
+__APIREL__/utility/db_load.html#3 utility to @load text files into databases
+__APIREL__/utility/db_printlog.html#2 @db_printlog
+__APIREL__/utility/db_printlog.html#3 utility to display @log files as text
+__APIREL__/utility/db_recover.html#2 @db_recover
+__APIREL__/utility/db_recover.html#3 utility to @recover database environments
+__APIREL__/utility/db_stat.html#2 @db_stat
+__APIREL__/utility/db_stat.html#3 utility to display database and environment @statistics
+__APIREL__/utility/db_upgrade.html#2 @db_upgrade
+__APIREL__/utility/db_upgrade.html#3 utility to upgrade @database files
+__APIREL__/utility/db_upgrade.html#4 utility to @upgrade database files
+__APIREL__/utility/db_verify.html#2 @db_verify
+__APIREL__/utility/db_verify.html#3 utility to verify @database files
+__APIREL__/utility/db_verify.html#4 utility to @verify database files
diff --git a/bdb/docs/ref/program/appsignals.html b/bdb/docs/ref/program/appsignals.html
new file mode 100644
index 00000000000..2b1d99bd6f3
--- /dev/null
+++ b/bdb/docs/ref/program/appsignals.html
@@ -0,0 +1,35 @@
+<!--$Id: appsignals.so,v 10.25 2000/07/15 15:49:07 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Application signal handling</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/xa/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/errorret.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Application signal handling</h1>
+<p>When applications using Berkeley DB receive signals, it is important that they
+exit gracefully, discarding any Berkeley DB locks that they may hold. This is
+normally done by setting a flag when a signal arrives, and then checking
+for that flag periodically within the application. As Berkeley DB is not
+reentrant, the signal handler should not attempt to release locks or
+close database handles itself. Reentering Berkeley DB is not guaranteed to
+work correctly, and the results are undefined.
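+<p>For example, the flag-based approach might look like the following
+fragments (a minimal sketch; the application's main loop and Berkeley DB
+calls are elided):
+<p><blockquote><pre>#include &lt;signal.h&gt;
+
+static volatile sig_atomic_t got_signal;
+
+static void
+onint(int signo)
+{
+	/* Only record the signal; never call Berkeley DB from a handler. */
+	got_signal = signo;
+}
+
+/* At startup: */
+(void)signal(SIGINT, onint);
+
+/* Periodically, within the application's main loop: */
+if (got_signal) {
+	/* Close cursors and database handles, then exit gracefully. */
+}</pre></blockquote>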
+<p>If an application exits holding a lock, the situation is no different
+from the application having crashed: all applications participating in
+the database environment must be shut down, and then recovery must be
+performed. If this is not done, databases may be left in an
+inconsistent state, or locks the application held may cause unresolvable
+deadlocks inside the environment, causing applications to hang.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/xa/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/errorret.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/byteorder.html b/bdb/docs/ref/program/byteorder.html
new file mode 100644
index 00000000000..6569ba88b27
--- /dev/null
+++ b/bdb/docs/ref/program/byteorder.html
@@ -0,0 +1,31 @@
+<!--$Id: byteorder.so,v 10.20 2000/03/18 21:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Byte ordering</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/dbsizes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/diskspace.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Byte ordering</h1>
+<p>The database files created by Berkeley DB can be in either little- or
+big-endian format. By default, the native format of the machine on which
+the database is created will be used. Any format database can be used on
+a machine with a different native format, although it is possible that
+the application will incur a performance penalty for the run-time
+conversion.
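+<p>Applications that need a specific byte order can typically select it
+with the DB-&gt;set_lorder method before the database is created (a
+minimal sketch; error handling is omitted, and the exact DB-&gt;open
+argument list varies across releases):
+<p><blockquote><pre>DB *dbp;
+
+(void)db_create(&amp;dbp, NULL, 0);
+
+/* 4,321 selects big-endian; 1,234 selects little-endian. */
+(void)dbp-&gt;set_lorder(dbp, 4321);
+(void)dbp-&gt;open(dbp, "mydb.db", NULL, DB_BTREE, DB_CREATE, 0664);</pre></blockquote>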
+<p>No user-specified data is converted in any way at all. Key or data items
+stored on a machine of one format will be returned to the application
+exactly as they were stored, even when retrieved on a machine of the other format.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/dbsizes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/diskspace.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/compatible.html b/bdb/docs/ref/program/compatible.html
new file mode 100644
index 00000000000..72db97a5c36
--- /dev/null
+++ b/bdb/docs/ref/program/compatible.html
@@ -0,0 +1,32 @@
+<!--$Id: compatible.so,v 10.29 2000/07/25 16:31:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Compatibility with historic interfaces</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/diskspace.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/recimp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Compatibility with historic interfaces</h1>
+<p>The Berkeley DB version 2 library provides backward-compatible interfaces for
+the historic UNIX <a href="../../api_c/dbm.html">dbm</a>, <a href="../../api_c/dbm.html">ndbm</a> and <a href="../../api_c/hsearch.html">hsearch</a>
+interfaces. It also provides a backward-compatible interface for the
+historic Berkeley DB 1.85 release.
+<p>Berkeley DB version 2 does not provide database compatibility for any of the
+above interfaces, and existing databases must be converted manually. To
+convert existing databases from the Berkeley DB 1.85 format to the Berkeley DB version
+2 format, review the <a href="../../utility/db_dump.html">db_dump185</a> and <a href="../../utility/db_load.html">db_load</a> information.
+No utilities are provided to convert UNIX <a href="../../api_c/dbm.html">dbm</a>, <a href="../../api_c/dbm.html">ndbm</a> or
+<a href="../../api_c/hsearch.html">hsearch</a> databases.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/diskspace.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/recimp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/copy.html b/bdb/docs/ref/program/copy.html
new file mode 100644
index 00000000000..80b6f942a78
--- /dev/null
+++ b/bdb/docs/ref/program/copy.html
@@ -0,0 +1,63 @@
+<!--$Id: copy.so,v 10.4 2000/03/18 21:43:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Copying databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/namespace.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/version.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Copying databases</h1>
+<p>Because file identification cookies (file names, device and inode
+numbers, volume and file IDs, and so on) are not necessarily unique or
+maintained across system reboots, each Berkeley DB database file contains a
+20-byte file identification bytestring that is stored in the first page
+of the database at a page byte offset of 36 bytes. When multiple
+processes or threads open the same database file in Berkeley DB, it is this
+bytestring that is used to ensure that the same underlying pages are
+updated in the shared memory buffer pool no matter which Berkeley DB handle is
+used for the operation.
+<p>It is usually a bad idea to physically copy a database to a new name. In
+the few cases where copying is the best solution for your application,
+you must guarantee there are never two different databases with the same
+file identification bytestring in the memory pool at the same time.
+Copying databases is further complicated by the fact that the shared
+memory buffer pool does not discard all cached copies of pages for a
+database when the database is logically closed, that is, when
+<a href="../../api_c/db_close.html">DB-&gt;close</a> is called. Nor is there a Berkeley DB interface to explicitly
+discard pages from the shared memory buffer pool for any particular
+database.
+<p>Before copying a database, you must ensure that all modified pages have
+been written from the memory pool cache to the backing database file.
+This is done using the <a href="../../api_c/db_sync.html">DB-&gt;sync</a> or <a href="../../api_c/db_close.html">DB-&gt;close</a> interfaces.
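+<p>For example (a minimal sketch, assuming an open database handle dbp):
+<p><blockquote><pre>/* Flush all modified pages for this database to the backing file. */
+(void)dbp-&gt;sync(dbp, 0);
+
+/* The underlying file may now be copied. */</pre></blockquote>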
+<p>Before using a copy of a database from Berkeley DB, you must ensure that all
+pages from any database with the same bytestring have been removed from
+the memory pool cache. If the environment in which you intend to open
+the copy of the database potentially has pages from files with identical
+bytestrings to the copied database (which is likely to be the case), there
+are a few possible solutions:
+<p><ol>
+<p><li>Remove the environment, either explicitly or by calling <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a>.
+Note that this will not allow you to access both the original and copy of the
+database at the same time.
+<p><li>Overwrite the bytestring in the copied database with a new bytestring.
+This allows you to access both the original and copy of the database at
+the same time.
+<p><li>Create a new file that will have a new bytestring. The simplest
+way to do this is to call the
+<a href="../../utility/db_dump.html">db_dump</a> utility to dump out the contents of the database, and then
+use the <a href="../../utility/db_load.html">db_load</a> utility to load the dumped output into a new file
+name. This allows you to access both the original and copy of the
+database at the same time.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/namespace.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/version.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/dbsizes.html b/bdb/docs/ref/program/dbsizes.html
new file mode 100644
index 00000000000..69b45868d71
--- /dev/null
+++ b/bdb/docs/ref/program/dbsizes.html
@@ -0,0 +1,45 @@
+<!--$Id: dbsizes.so,v 10.22 2000/03/18 21:43:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database limits</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/version.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/byteorder.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database limits</h1>
+<p>The largest database file that Berkeley DB can handle depends on the page size
+selected by the application. Berkeley DB stores database file page numbers as
+unsigned 32-bit numbers and database file page sizes as unsigned 16-bit
+numbers. Using the maximum database page size of 65536, this results in
+a maximum database file size of 2<sup>48</sup> (256 terabytes). The
+minimum database page size is 512 bytes, which results in a minimum
+maximum database size of 2<sup>41</sup> (2 terabytes).
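+<p>Restated as a formula (an illustrative rearrangement of the numbers above):
+<p><blockquote><pre>maximum-database-size = page-size * maximum-page-count
+<p>
+2^48 (256TB) = 65536 * 2^32
+2^41   (2TB) =   512 * 2^32</pre></blockquote>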
+<p>The largest database file Berkeley DB can support is potentially further limited
+if the host system does not have filesystem support for files larger than
+2<sup>32</sup> bytes, including the ability to seek to absolute offsets within
+those files.
+<p>The largest key or data item that Berkeley DB can support is largely limited
+by available memory. Specifically, while key and data byte strings may
+be of essentially unlimited length, any one of them must fit into
+available memory so that it can be returned to the application. As some
+of the Berkeley DB interfaces return both key and data items to the application,
+those interfaces will require that any key/data pair fit simultaneously
+into memory. Further, as the access methods may need to compare key and
+data items with other key and data items, it may be a requirement that
+any two key or two data items fit into available memory. Finally, when
+writing applications supporting transactions, it may be necessary to have
+an additional copy of any data item in memory for logging purposes.
+<p>The maximum Btree depth is 255.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/version.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/byteorder.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/diskspace.html b/bdb/docs/ref/program/diskspace.html
new file mode 100644
index 00000000000..fb8425d8a26
--- /dev/null
+++ b/bdb/docs/ref/program/diskspace.html
@@ -0,0 +1,145 @@
+<!--$Id: diskspace.so,v 10.9 2000/03/22 21:56:11 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Disk space requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/byteorder.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/compatible.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Disk space requirements</h1>
+<p>It is possible to estimate the total database size based on the size of
+the data. Simply put, the following calculations attempt to figure out
+how many bytes you will need to hold a set of data and then how many pages
+it will take to actually store it on disk.
+<p>Space freed by deleting key/data pairs from a Btree or Hash database is
+never returned to the filesystem, although it is reused where possible.
+This means that the Btree and Hash databases are grow-only. If enough
+keys are deleted from a database that shrinking the underlying file is
+desirable, you should create a new database and insert the records from
+the old one into it.
+<p>These are rough estimates at best. For example, they do not take into
+account overflow records, filesystem metadata information, or real-life
+situations where the sizes of key and data items are wildly variable, and
+the page-fill factor changes over time.
+<h3>Btree</h3>
+<p>The formulas for the Btree access method are as follows:
+<p><blockquote><pre>useful-bytes-per-page = (page-size - page-overhead) * page-fill-factor
+<p>
+bytes-of-data = n-records *
+ (bytes-per-entry + page-overhead-for-two-entries)
+<p>
+n-pages-of-data = bytes-of-data / bytes-per-page
+<p>
+total-pages-on-disk = n-pages-of-data * page-size
+</pre></blockquote>
+<p>The <b>useful-bytes-per-page</b> is a measure of the bytes on each page
+that will actually hold the application data. It is computed as the total
+number of bytes on the page that are available to hold application data,
+corrected by the percentage of the page that is likely to contain data.
+The reason for this correction is that the percentage of a page that
+contains application data can vary from close to 50% after a page split,
+to almost 100% if the entries in the database were inserted in sorted
+order. Obviously, the <b>page-fill-factor</b> can drastically alter
+the amount of disk space required to hold any particular data set. The
+page-fill factor of any existing database can be displayed using the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<p>As an example, using an 8K page size, with an 85% page-fill factor, there
+are 6941 bytes of useful space on each page:
+<p><blockquote><pre>6941 = (8192 - 26) * .85</pre></blockquote>
+<p>The total <b>bytes-of-data</b> is an easy calculation: it is the number
+of key/data pairs plus the overhead required to store each pair on a page.
+The overhead to store a single item on a Btree page is 5 bytes. So,
+assuming 60,000,000 key/data pairs, each of which is 8 bytes long, there
+are 1560000000 bytes, or roughly 1.45GB, of total data:
+<p><blockquote><pre>1560000000 = 60000000 * ((8 * 2) + (5 * 2))</pre></blockquote>
+<p>The total pages of data, <b>n-pages-of-data</b>, is the
+<b>bytes-of-data</b> divided by the <b>useful-bytes-per-page</b>. In
+the example, there are 224751 pages of data.
+<p><blockquote><pre>224751 = 1560000000 / 6941</pre></blockquote>
+<p>The total bytes of disk space for the database is <b>n-pages-of-data</b>
+multiplied by the <b>page-size</b>. In the example, the result is
+1841160192 bytes, or roughly 1.71GB.
+<p><blockquote><pre>1841160192 = 224751 * 8192</pre></blockquote>
+<h3>Hash</h3>
+<p>The formulas for the Hash access method are as follows:
+<p><blockquote><pre>useful-bytes-per-page = (page-size - page-overhead)
+<p>
+bytes-of-data = n-records *
+ (bytes-per-entry + page-overhead-for-two-entries)
+<p>
+n-pages-of-data = bytes-of-data / bytes-per-page
+<p>
+total-pages-on-disk = n-pages-of-data * page-size
+</pre></blockquote>
+<p>The <b>useful-bytes-per-page</b> is a measure of the bytes on each page
+that will actually hold the application data. It is computed as the total
+number of bytes on the page that are available to hold application data.
+If the application has explicitly set a page fill factor, then pages will
+not necessarily be kept full. For databases with a preset fill factor,
+see the calculation below. The page-overhead for Hash databases is 26
+bytes and the page-overhead-for-two-entries is 6 bytes.
+<p>As an example, using an 8K page size, there are 8166 bytes of useful space
+on each page:
+<p><blockquote><pre>8166 = (8192 - 26)</pre></blockquote>
+<p>The total <b>bytes-of-data</b> is an easy calculation: it is the number
+of key/data pairs plus the overhead required to store each pair on a page.
+In this case that's 6 bytes per pair. So, assuming 60,000,000 key/data
+pairs, each of which is 8 bytes long, there are 1320000000 bytes, or
+roughly 1.23GB, of total data:
+<p><blockquote><pre>1320000000 = 60000000 * ((16 + 6))</pre></blockquote>
+<p>The total pages of data, <b>n-pages-of-data</b>, is the
+<b>bytes-of-data</b> divided by the <b>useful-bytes-per-page</b>. In
+this example, there are 161646 pages of data.
+<p><blockquote><pre>161646 = 1320000000 / 8166</pre></blockquote>
+<p>The total bytes of disk space for the database is <b>n-pages-of-data</b>
+multiplied by the <b>page-size</b>. In the example, the result is
+1324204032 bytes, or roughly 1.23GB.
+<p><blockquote><pre>1324204032 = 161646 * 8192</pre></blockquote>
+<p>Now, let's assume that the application specified a fill factor explicitly.
+The fill factor indicates the target number of items to place on a single
+page (a fill factor might reduce the utilization of each page, but it can
+be useful in avoiding splits and preventing buckets from becoming too
+large). Using our estimates above, each item is 22 bytes (16 + 6) and
+there are 8166 useful bytes on a page (8192 - 26). That means that, on
+average, you can fit 371 pairs per page.
+<p><blockquote><pre>371 = 8166 / 22</pre></blockquote>
+<p>However, let's assume that the application designer knows that while most
+items are 8 bytes, they can sometimes be as large as 10 and it's very
+important to avoid overflowing buckets and splitting. Then, the
+application might specify a fill factor of 314.
+<p><blockquote><pre>314 = 8166 / 26</pre></blockquote>
+<p>With a fill factor of 314, the formula for computing database size
+is:
+<p><blockquote><pre>npages = npairs / pairs-per-page</pre></blockquote>
+<p>or 191082.
+<p><blockquote><pre>191082 = 60000000 / 314</pre></blockquote>
+<p>At 191082 pages, the total database size would be 1565343744 or 1.46GB.
+<p><blockquote><pre>1565343744 = 191082 * 8192 </pre></blockquote>
+<p>There are a few additional caveats with respect to Hash databases. First,
+this discussion assumes that the hash function does a good job of evenly
+distributing keys among hash buckets. If the function does not do this,
+you may find your table growing significantly larger than you expected.
+Secondly, in order to provide support for Hash databases co-existing with
+other databases in a single file, pages within a Hash database are
+allocated in power-of-2 chunks. That means that a Hash database with 65
+buckets will take up as much space as a Hash database with 128 buckets;
+each time the Hash database grows beyond its current power-of-two number
+of buckets, it allocates space for the next power-of-two buckets. This
+space may be sparsely allocated in the file system, but the files will
+appear to be their full size. Finally, because of this need for
+contiguous allocation, overflow pages and duplicate pages can be allocated
+only at specific points in the file, and this too can lead to sparse hash
+tables.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/byteorder.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/compatible.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/environ.html b/bdb/docs/ref/program/environ.html
new file mode 100644
index 00000000000..7f56109b5d7
--- /dev/null
+++ b/bdb/docs/ref/program/environ.html
@@ -0,0 +1,33 @@
+<!--$Id: environ.so,v 10.17 2000/03/18 21:43:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Environment variables</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/errorret.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/mt.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Environment variables</h1>
+<p>The Berkeley DB library uses the following environment variables:
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the environment variable DB_HOME is set, it is used as part of
+<a href="../../ref/env/naming.html">File Naming</a>.
+Note, for the DB_HOME variable to take effect, either the
+<a href="../../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> or <a href="../../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags must be
+specified to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>.
+<p><dt>TMPDIR, TEMP, TMP, TempFolder<dd>The TMPDIR, TEMP, TMP and TempFolder environment variables are all
+checked as locations in which to create temporary files. See
+<a href="../../api_c/env_set_tmp_dir.html">DBENV-&gt;set_tmp_dir</a> for more information.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/errorret.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/mt.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/errorret.html b/bdb/docs/ref/program/errorret.html
new file mode 100644
index 00000000000..fc6ad650d3e
--- /dev/null
+++ b/bdb/docs/ref/program/errorret.html
@@ -0,0 +1,108 @@
+<!--$Id: errorret.so,v 10.34 2000/12/31 19:26:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Error returns to applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/appsignals.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/environ.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Error returns to applications</h1>
+<p>Except for the historic <a href="../../api_c/dbm.html">dbm</a>, <a href="../../api_c/dbm.html">ndbm</a> and <a href="../../api_c/hsearch.html">hsearch</a>
+interfaces, Berkeley DB does not use the global variable <b>errno</b> to
+return error values. The return values for all Berkeley DB functions are
+grouped into three categories:
+<p><dl compact>
+<p><dt>0<dd>A return value of 0 indicates that the operation was successful.
+<p><dt>&gt; 0<dd>A return value that is greater than 0 indicates that there was a system
+error. The <b>errno</b> value returned by the system is returned by
+the function, e.g., when a Berkeley DB function is unable to allocate memory,
+the return value from the function will be ENOMEM.
+<p><dt>&lt; 0<dd>A return value that is less than 0 indicates a condition that was not
+a system failure, but was not an unqualified success, either. For
+example, a routine to retrieve a key/data pair from the database may
+return DB_NOTFOUND when the key/data pair does not appear in
+the database, as opposed to the value of 0, which would be returned if
+the key/data pair were found in the database.
+<p> <a name="3"><!--meow--></a>
+All Berkeley DB-specific error values are less than 0 in order to avoid
+conflict with possible values of <b>errno</b>. Specifically, Berkeley DB
+reserves all values from -30,800 to -30,999 to itself as possible error
+values. There are a few Berkeley DB interfaces where it is possible for an
+application function to be called by a Berkeley DB function and subsequently
+fail with an application-specific return. Such failure returns will be
+passed back to the function that originally called a Berkeley DB interface.
+To avoid ambiguity as to the cause of the error, error values separate
+from the Berkeley DB error name space should be used.
+</dl>
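+<p>As an illustration of these three categories, a minimal sketch of checking
+a <a href="../../api_c/db_get.html">DB-&gt;get</a> return value (assuming the DB 3.x-era C API documented in this
+guide; the database handle, key and data <a href="../../api_c/dbt.html">DBT</a>s are assumed to have been
+initialized elsewhere):
+<p><blockquote><pre>switch (ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0)) {
+case 0:
+    /* Success: the key/data pair was found. */
+    break;
+case DB_NOTFOUND:
+    /* Berkeley DB-specific (negative) return: the key does not exist. */
+    break;
+default:
+    /* System error: ret is an errno value, e.g., ENOMEM. */
+    fprintf(stderr, "DB-&gt;get: %s\n", db_strerror(ret));
+    break;
+}</pre></blockquote>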
+While possible error returns are specified by each individual function's
+manual page, there are a few error returns that deserve special mention:
+<h3><a name="DB_NOTFOUND">DB_NOTFOUND</a> and <a name="DB_KEYEMPTY">DB_KEYEMPTY</a></h3>
+<p>There are two special return values that are similar in meaning, and that
+are returned in similar situations, and therefore might be confused:
+DB_NOTFOUND and DB_KEYEMPTY.
+<p>The DB_NOTFOUND error return indicates that the requested key/data
+pair did not exist in the database or that start- or end-of-file has been
+reached.
+<p>The DB_KEYEMPTY error return indicates that the requested key/data
+pair logically exists but was never explicitly created by the application
+(the Recno and Queue access methods will automatically create key/data
+pairs under some circumstances; see <a href="../../api_c/db_open.html">DB-&gt;open</a> for more
+information), or that the requested key/data pair was deleted and never
+re-created. In addition, the Queue access method will return
+DB_KEYEMPTY for records which were created as part of a
+transaction which was later aborted, and never re-created.
+<h3><a name="DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></h3>
+<p>When multiple threads of control are modifying the database, there is
+normally the potential for deadlock. In Berkeley DB, deadlock is signified by
+an error return from the Berkeley DB function of the value
+DB_LOCK_DEADLOCK. Whenever a Berkeley DB function returns
+DB_LOCK_DEADLOCK, the enclosing transaction should be aborted.
+<p>Any Berkeley DB function that attempts to acquire locks can potentially return
+DB_LOCK_DEADLOCK. Practically speaking, the safest way to deal
+with applications that can deadlock is to handle an
+DB_LOCK_DEADLOCK return from any Berkeley DB access method call.
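+<p>A minimal sketch of the abort-and-retry pattern (assuming the DB 3.x-era
+C API documented in this guide; the operation inside the transaction is
+illustrative only and error handling is abbreviated):
+<p><blockquote><pre>DB_TXN *txn;
+int ret;
+
+for (;;) {
+    if ((ret = txn_begin(dbenv, NULL, &amp;txn, 0)) != 0)
+        break;                          /* Unrecoverable error. */
+    ret = dbp-&gt;put(dbp, txn, &amp;key, &amp;data, 0);
+    if (ret == 0) {
+        ret = txn_commit(txn, 0);
+        break;
+    }
+    /* On deadlock, abort the enclosing transaction and retry. */
+    (void)txn_abort(txn);
+    if (ret != DB_LOCK_DEADLOCK)
+        break;                          /* Some other failure. */
+}</pre></blockquote>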
+<h3><a name="DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></h3>
+<p>When multiple threads of control are modifying the database, there is
+normally the potential for deadlock. In order to avoid deadlock,
+applications may specify, on a per-transaction basis, that if a lock is
+unavailable, the Berkeley DB operation should return immediately instead of
+waiting on the lock. The error return in this case will be
+DB_LOCK_NOTGRANTED. Whenever a Berkeley DB function returns
+DB_LOCK_NOTGRANTED, the enclosing transaction should be aborted.
+<h3><a name="DB_RUNRECOVERY">DB_RUNRECOVERY</a></h3>
+<p>There exists a class of errors that Berkeley DB considers fatal to an entire
+Berkeley DB environment. An example of this type of error is a corrupted
+database, or a log write failure because the disk is out of free space.
+The only way to recover from these failures is to have all threads of
+control exit the Berkeley DB environment, run recovery of the environment, and
+re-enter Berkeley DB. (It is not strictly necessary that the processes exit,
+although that is the only way to recover system resources, such as file
+descriptors and memory, allocated by Berkeley DB.)
+<p>When this type of error is encountered, the error value
+DB_RUNRECOVERY is returned. This error can be returned by any
+Berkeley DB interface. Once DB_RUNRECOVERY is returned by any
+interface, it will be returned from all subsequent Berkeley DB calls made by
+any threads or processes participating in the environment.
+<p>Optionally, applications may also specify a fatal-error callback function
+using the <a href="../../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a> function. This callback function will be
+called with two arguments: a reference to the DB_ENV structure associated
+with the environment, and the <b>errno</b> value associated with the
+underlying error that caused the problem.
+<p>Applications can handle such fatal errors in one of two ways: by checking
+for DB_RUNRECOVERY as part of their normal Berkeley DB error return
+checking, similarly to DB_LOCK_DEADLOCK or any other error, or,
+in applications that have no cleanup processing of their own, by simply
+exiting the application when the callback function is called.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/appsignals.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/environ.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/extending.html b/bdb/docs/ref/program/extending.html
new file mode 100644
index 00000000000..6f276d8dca5
--- /dev/null
+++ b/bdb/docs/ref/program/extending.html
@@ -0,0 +1,242 @@
+<!--$Id: extending.so,v 10.32 2000/07/25 16:31:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Application-specific logging and recovery</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/recimp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/runtime.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Application-specific logging and recovery</h1>
+<p>Berkeley DB includes tools to assist in the development of application-specific
+logging and recovery. Specifically, given a description of the
+information to be logged, these tools will automatically create logging
+functions (functions that take the values as parameters and construct a
+single record that is written to the log), read functions (functions that
+read a log record and unmarshall the values into a structure that maps
+onto the values you chose to log), a print function (for debugging),
+templates for the recovery functions, and automatic dispatching to your
+recovery functions.
+<h3>Defining Application-Specific Operations</h3>
+<p>Log records are described in files named XXX.src, where "XXX" is a
+unique prefix. The prefixes currently used in the Berkeley DB package are
+btree, crdel, db, hash, log, qam, and txn. These files contain interface
+definition language descriptions for each type of log record that
+is supported.
+<p>All lines beginning with a hash character in <b>.src</b> files are
+treated as comments.
+<p>The first non-comment line in the file should begin with the keyword
+PREFIX followed by a string that will be prepended to every function.
+Frequently, the PREFIX is either identical or similar to the name of the
+<b>.src</b> file.
+<p>The rest of the file consists of one or more log record descriptions.
+Each log record description begins with the line:
+<p><blockquote><pre>BEGIN RECORD_NAME RECORD_NUMBER</pre></blockquote>
+<p>and ends with the line:
+<p><blockquote><pre>END</pre></blockquote>
+<p>The RECORD_NAME keyword should be replaced with a unique record name for
+this log record. Record names need only be unique within a single <b>.src</b>
+file.
+<p>The RECORD_NUMBER keyword should be replaced with a record number. Record
+numbers must be unique for an entire application, that is, both
+application-specific and Berkeley DB log records must have unique values.
+Further, as record numbers are stored in log files, which often must be
+portable across application releases, no record number should ever be
+re-used. The record number space below 10,000 is reserved for Berkeley DB
+itself, applications should choose record number values equal to or
+greater than 10,000.
+<p>Between the BEGIN and END statements, there should be one line for each
+data item that will be logged in this log record. The format of these
+lines is as follows:
+<p><blockquote><pre>ARG | DBT | POINTER variable_name variable_type printf_format</pre></blockquote>
+<p>The keyword ARG indicates that the argument is a simple parameter of the
+type specified. The keyword DBT indicates that the argument is a DBT
+containing a length and pointer. The keyword POINTER indicates that the
+argument is a pointer to the data type specified and that the entire type
+should be logged.
+<p>The variable name is the field name within the structure that will be used
+to reference this item. The variable type is the C type of the variable,
+and the printf format should be "s", for string, "d" for signed integral
+type, or "u" for unsigned integral type.
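+<p>For example, a hypothetical application record description (the prefix
+"ex", the record name, number and fields are all illustrative only) might
+look like the following:
+<p><blockquote><pre># Example application-specific log record.
+PREFIX  ex
+<p>
+BEGIN   mkdir           10001
+ARG     dirmode         u_int32_t       u
+DBT     dirname         DBT             s
+END</pre></blockquote>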
+<h3>Automatically Generated Functions</h3>
+<p>For each log record description found in the file, the following structure
+declarations and #defines will be created in the file PREFIX_auto.h.
+<p><blockquote><pre><p>
+#define DB_PREFIX_RECORD_TYPE /* Integer ID number */
+<p>
+typedef struct _PREFIX_RECORD_TYPE_args {
+ /*
+ * These three fields are generated for every record.
+ */
+ u_int32_t type; /* Record type used for dispatch. */
+<p>
+ /*
+ * Transaction id that identifies the transaction on whose
+ * behalf the record is being logged.
+ */
+ DB_TXN *txnid;
+<p>
+ /*
+ * The LSN returned by the previous call to log for
+ * this transaction.
+ */
+ DB_LSN *prev_lsn;
+<p>
+ /*
+ * The rest of the structure contains one field for each of
+ * the entries in the record statement.
+ */
+};</pre></blockquote>
+<p>The DB_PREFIX_RECORD_TYPE will be described in terms of a value
+DB_PREFIX_BEGIN, which should be specified by the application writer in
+terms of the library provided DB_user_BEGIN macro (this is the value of
+the first identifier available to users outside the access method system).
+<p>In addition to the PREFIX_auto.h file, a file named PREFIX_auto.c is
+created, containing the following functions for each record type:
+<p><dl compact>
+<p><dt>The log function, with the following parameters:<dd><p><dl compact>
+<p><dt>dbenv<dd>The environment handle returned by <a href="../../api_c/env_create.html">db_env_create</a>.
+<p><dt>txnid<dd>The transaction identifier returned by <a href="../../api_c/txn_begin.html">txn_begin</a>.
+<p><dt>lsnp<dd>A pointer to storage for an LSN into which the LSN of the new log record
+will be returned.
+<p><dt>syncflag<dd>A flag indicating if the record must be written synchronously. Valid
+values are 0 and <a href="../../api_c/log_put.html#DB_FLUSH">DB_FLUSH</a>.
+</dl>
+<p>The log function marshalls the parameters into a buffer and calls
+<a href="../../api_c/log_put.html">log_put</a> on that buffer returning 0 on success and 1 on failure.
+<p><dt>The read function with the following parameters:<dd>
+<p><dl compact>
+<p><dt>recbuf<dd>A buffer.
+<p><dt>argp<dd>A pointer to a structure of the appropriate type.
+</dl>
+<p>The read function takes a buffer and unmarshalls its contents into a
+structure of the appropriate type. It returns 0 on success and non-zero
+on error. After the fields of the structure have been used, the pointer
+returned from the read function should be freed.
+<p><dt>The recovery function with the following parameters:<dd><p><dl compact>
+<p><dt>dbenv<dd>The handle returned from the <a href="../../api_c/env_create.html">db_env_create</a> call which identifies
+the environment in which recovery is running.
+<p><dt>rec<dd>The <b>rec</b> parameter is the record being recovered.
+<p><dt>lsn<dd>The log sequence number of the record being recovered.
+<p><dt>op<dd>A parameter of type db_recops which indicates what operation is being run
+(DB_TXN_OPENFILES, DB_TXN_ABORT, DB_TXN_BACKWARD_ROLL, DB_TXN_FORWARD_ROLL).
+<p><dt>info<dd>A structure passed by the dispatch function. It is used to contain a list
+of committed transactions and information about files that may have been
+deleted.
+</dl>
+<p>The recovery function is called on each record read from the log during
+system recovery or transaction abort.
+<p>A template for each recovery function is created in the file
+PREFIX_rtemp.c. The actual recovery functions
+must be written manually, but the templates usually provide a good starting
+point.
+<p><dt>The print function:<dd>The print function takes the same parameters as the recovery function, so
+that it is simple to dispatch both to the print functions and
+to the actual recovery functions. This is useful for debugging purposes
+and is used by the <a href="../../utility/db_printlog.html">db_printlog</a> utility to produce a human-readable
+version of the log. All parameters except the <b>rec</b> and
+<b>lsnp</b> parameters are ignored. The <b>rec</b> parameter contains
+the record to be printed.
+</dl>
+One additional function, an initialization function,
+is created for each <b>.src</b> file.
+<p><dl compact>
+<p><dt>The initialization function has the following parameters:<dd><p><dl compact>
+<p><dt>dbenv<dd>The environment handle returned by <a href="../../api_c/env_create.html">db_env_create</a>.
+</dl>
+<p>The recovery initialization function registers each log record type
+declared with the recovery system, so that the appropriate function is
+called during recovery.
+</dl>
+<h3>Using Automatically Generated Routines</h3>
+<p>Applications use the automatically generated functions as follows:
+<p><ol>
+<p><li>When the application starts,
+call the <a href="../../api_c/env_set_rec_init.html">DBENV-&gt;set_recovery_init</a> with your recovery
+initialization function so that the initialization function is called
+at the appropriate time.
+<p><li>Issue a <a href="../../api_c/txn_begin.html">txn_begin</a> call before any operations you wish
+to be transaction protected.
+<p><li>Before accessing any data, issue the appropriate lock call to
+lock the data (either for reading or writing).
+<p><li>Before modifying any data that is transaction protected, issue
+a call to the appropriate log function (see the sketch following this list).
+<p><li>Issue a <a href="../../api_c/txn_commit.html">txn_commit</a> to save all of the changes or a
+<a href="../../api_c/txn_abort.html">txn_abort</a> to cancel all of the modifications.
+</ol>
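+<p>A minimal sketch of steps 2 through 5 (assuming the DB 3.x-era C API
+documented in this guide; the generated log function name ex_mkdir_log, and
+the application values dirmode and dirname it logs, are hypothetical, and
+lock acquisition and error handling are abbreviated):
+<p><blockquote><pre>DB_TXN *txn;
+DB_LSN lsn;
+int ret;
+
+/* Begin the transaction that protects the operation. */
+if ((ret = txn_begin(dbenv, NULL, &amp;txn, 0)) != 0)
+    return (ret);
+
+/* Acquire any needed locks (omitted), then log before modifying the data. */
+if ((ret = ex_mkdir_log(dbenv, txn, &amp;lsn, 0, dirmode, &amp;dirname)) != 0) {
+    (void)txn_abort(txn);
+    return (ret);
+}
+
+/* ... perform the application's own modification here ... */
+
+/* Commit to make the changes permanent (or txn_abort to cancel them). */
+return (txn_commit(txn, 0));</pre></blockquote>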
+<p>The recovery functions (described below) can be called in two cases:
+<p><ol>
+<p><li>From the recovery daemon upon system failure, with op set to
+DB_TXN_FORWARD_ROLL or DB_TXN_BACKWARD_ROLL.
+<p><li>From <a href="../../api_c/txn_abort.html">txn_abort</a>, if it is called to abort a transaction, with
+op set to DB_TXN_ABORT.
+</ol>
+<p>For each log record type you declare, you must write the appropriate
+function to undo and redo the modifications. The shell of these functions
+will be generated for you automatically, but you must fill in the details.
+<p>Your code should be able to detect whether the described modifications
+have been applied to the data or not. The function will be called with
+the "op" parameter set to DB_TXN_ABORT when a transaction that wrote the
+log record aborts and with DB_TXN_FORWARD_ROLL and DB_TXN_BACKWARD_ROLL
+during recovery. The actions for DB_TXN_ABORT and DB_TXN_BACKWARD_ROLL
+should generally be the same. For example, in the access methods, each
+page contains the log sequence number of the most recent log record that
+describes a modification to the page. When the access method changes a
+page it writes a log record describing the change and including the
+LSN that was on the page before the change. This LSN is referred to as
+the previous LSN. The recovery functions read the page described by a
+log record and compare the log sequence number (LSN) on the page to the
+LSN they were passed. If the page LSN is less than the passed LSN and
+the operation is undo, no action is necessary (because the modifications
+have not been written to the page). If the page LSN is the same as the
+previous LSN and the operation is redo, then the actions described are
+reapplied to the page. If the page LSN is equal to the passed LSN and
+the operation is undo, the actions are removed from the page; if the page
+LSN is greater than the passed LSN and the operation is redo, no further
+action is necessary. If the action is a redo and the LSN on the page is
+less than the previous LSN in the log record this is an error, since this
+could only happen if some previous log record was not processed.
+<p>Please refer to the internal recovery functions in the Berkeley DB library
+(found in files named XXX_rec.c) for examples of how recovery functions
+should work.
+<h3>Non-conformant Logging</h3>
+<p>If your application cannot conform to the default logging and recovery
+structure, then you will have to create your own logging and recovery
+functions explicitly.
+<p>First, you must decide how you will dispatch your records. Encapsulate
+this algorithm in a dispatch function that is passed to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>.
+The arguments for the dispatch function are as follows:
+<p><dl compact>
+<p><dt>dbenv<dd>The environment handle returned by <a href="../../api_c/env_create.html">db_env_create</a>.
+<p><dt>rec<dd>The record being recovered.
+<p><dt>lsn<dd>The log sequence number of the record to be recovered.
+<p><dt>op<dd>Indicates what operation of recovery is needed (openfiles, abort, forward roll
+or backward roll).
+<p><dt>info<dd>An opaque value passed to your function during system recovery.
+</dl>
+<p>When you abort a transaction, <a href="../../api_c/txn_abort.html">txn_abort</a> will read the last log
+record written for the aborting transaction and will then call your
+dispatch function. It will continue looping, calling the dispatch
+function on the record whose LSN appears in the lsn parameter of the
+dispatch call (until a NULL LSN is placed in that field). The dispatch
+function will be called with the op set to DB_TXN_ABORT.
+<p>Your dispatch function can do any processing necessary. See the code
+in db/db_dispatch.c for an example dispatch function (that is based on
+the assumption that the transaction ID, previous LSN, and record type
+appear in every log record written).
+<p>If you do not use the default recovery system, you will need to construct
+your own recovery process based on the recovery program provided in
+db_recover/db_recover.c. Note that your recovery functions will need to
+correctly process the log records produced by calls to <a href="../../api_c/txn_begin.html">txn_begin</a>
+and <a href="../../api_c/txn_commit.html">txn_commit</a>.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/recimp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/runtime.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/mt.html b/bdb/docs/ref/program/mt.html
new file mode 100644
index 00000000000..31110920aa9
--- /dev/null
+++ b/bdb/docs/ref/program/mt.html
@@ -0,0 +1,95 @@
+<!--$Id: mt.so,v 10.37 2000/12/04 18:05:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building multi-threaded applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/environ.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/scope.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building multi-threaded applications</h1>
+<p>The Berkeley DB library is not itself multi-threaded. The library was
+deliberately architected to not use threads internally because of the
+portability problems that using threads within the library would
+introduce.
+<p>Berkeley DB supports multi-threaded applications with the caveat that it loads
+and calls functions that are commonly available in C language environments.
+Other than this usage, Berkeley DB has no static data and maintains no local
+context between calls to Berkeley DB functions.
+<p>Environment and database object handles returned from Berkeley DB library
+functions are free-threaded. No other object handles returned from
+the Berkeley DB library are free-threaded.
+<p>The following rules should be observed when using threads to
+access the Berkeley DB library:
+<p><ol>
+<p><li>The <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag must be specified to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a>
+and <a href="../../api_c/db_open.html">DB-&gt;open</a> functions if the Berkeley DB handles returned by those interfaces
+will be used in the context of more than one thread. Setting the
+<a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag inconsistently may result in database corruption.
+<p>Threading is assumed in the Java API, so no special flags are required,
+and Berkeley DB functions will always behave as if the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag
+was specified.
+<p>Only a single thread may call the <a href="../../api_c/env_close.html">DBENV-&gt;close</a> or <a href="../../api_c/db_close.html">DB-&gt;close</a> functions
+for a returned environment or database handle.
+<p>No other Berkeley DB handles are free-threaded, for example, cursors and
+transactions may not span threads as their returned handles are not
+free-threaded.
+<p><li>When using the non-cursor Berkeley DB calls to retrieve key/data items (e.g.,
+<a href="../../api_c/db_get.html">DB-&gt;get</a>), the memory referenced by the pointer stored into the
+Dbt is only valid until the next call to Berkeley DB using the DB handle
+returned by <a href="../../api_c/db_open.html">DB-&gt;open</a>. This includes any use of the returned
+DB handle, including by another thread of control within the
+process.
+<p>For this reason, if the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to the
+<a href="../../api_c/db_open.html">DB-&gt;open</a> function, either <a href="../../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a>
+or <a href="../../api_c/dbt.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> must be specified in the <a href="../../api_c/dbt.html">DBT</a> when
+performing any non-cursor key or data retrieval (see the example following this list).
+<p><li>The <a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a>, <a href="../../api_c/dbc_get.html#DB_NEXT">DB_NEXT</a> and <a href="../../api_c/dbc_get.html#DB_PREV">DB_PREV</a> flags to the
+<a href="../../api_c/log_get.html">log_get</a> function may not be used by a free-threaded handle. If
+such calls are necessary, a thread should explicitly create a unique
+environment handle by separately calling <a href="../../api_c/env_open.html">DBENV-&gt;open</a> without
+specifying <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>.
+<p><li>Each database operation (i.e., any call to a function underlying the
+handles returned by <a href="../../api_c/db_open.html">DB-&gt;open</a> and <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a>) is normally
+performed on behalf of a unique locker. If, within a single thread of
+control, multiple calls on behalf of the same locker are desired, then
+transactions must be used. For example, consider the case where a
+cursor scan locates a record, and then based on that record, accesses
+some other item in the database. If these operations are done using
+the default lockers for the handle, they may conflict. If the
+application wishes to guarantee that the operations do not conflict,
+locks must be obtained on behalf of a transaction, instead of the
+default locker ID, and a transaction must be specified to subsequent
+<a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> and other Berkeley DB calls.
+<p><li>Transactions may not span threads. Each transaction must begin and end
+in the same thread, and each transaction may only be used by a single
+thread.
+<p>Cursors may not span transactions or threads. Each cursor must be
+allocated and de-allocated within the same transaction and within
+the same thread.
+<p><li>User-level synchronization mutexes must have been implemented for the
+compiler/architecture combination. Attempting to specify the DB_THREAD
+flag will fail if fast mutexes are not available.
+<p>If blocking mutexes are available, for example POSIX pthreads, they will
+be used. Otherwise, the Berkeley DB library will make a system call to pause
+for some amount of time when it is necessary to wait on a lock. This may
+not be optimal, especially in a thread-only environment where it will be
+more efficient to explicitly yield the processor to another thread.
+<p>It is possible to specify a yield function on a per-application basis.
+See <a href="../../api_c/set_func_yield.html">db_env_set_func_yield</a> for more information.
+<p>It is possible to specify the number of attempts that will be made to
+acquire the mutex before waiting. See <a href="../../api_c/env_set_tas_spins.html">db_env_set_tas_spins</a> for
+more information.
+</ol>
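+<p>As an example of the <a href="../../api_c/dbt.html">DBT</a> memory rules described above, a minimal
+sketch of a retrieval using a free-threaded handle (assuming the DB 3.x-era
+C API documented in this guide; the database handle is assumed to be open
+and the key value is illustrative only):
+<p><blockquote><pre>DBT key, data;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+
+/* Have Berkeley DB allocate memory that this thread owns for the result. */
+data.flags = DB_DBT_MALLOC;
+
+if (dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0) == 0) {
+    /* ... use data.data and data.size ... */
+    free(data.data);
+}</pre></blockquote>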
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/environ.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/scope.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/namespace.html b/bdb/docs/ref/program/namespace.html
new file mode 100644
index 00000000000..519f5f61c74
--- /dev/null
+++ b/bdb/docs/ref/program/namespace.html
@@ -0,0 +1,44 @@
+<!--$Id: namespace.so,v 10.14 2000/08/01 21:51:23 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Name spaces</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/scope.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/copy.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Name spaces</h1>
+<p>The Berkeley DB library is careful to avoid C language programmer name spaces,
+but there are a few potential areas for concern, mostly in the Berkeley DB
+include file db.h. The db.h include file defines a number of types and
+strings. Where possible, all of these types and strings are prefixed with
+"DB_" or "db_". There are a few notable exceptions.
+<p>The Berkeley DB library uses a macro named "__P" to configure for systems that
+do not provide ANSI C function prototypes. This could potentially collide
+with other systems using a "__P" macro for similar or different purposes.
+<p>The Berkeley DB library needs information about specifically sized types for
+each architecture. If they are not provided by the system, they are
+typedef'd in the db.h include file. The types which may be typedef'd
+by db.h include the following: u_int8_t, int16_t, u_int16_t, int32_t,
+u_int32_t, u_char, u_short, u_int and u_long.
+<p>The Berkeley DB library declares a number of external routines. All of these
+routines are prefixed with the strings "db_", "lock_", "log_", "memp_"
+or "txn_". All internal routines are prefixed with the strings "__db_",
+"__lock_", "__log_", "__memp_" or "__txn_".
+<p>Berkeley DB environments create or use some number of files in environment home
+directories. These files are named <a href="../../ref/env/naming.html#DB_CONFIG">DB_CONFIG</a>, "log.NNNNNNNNNN"
+(e.g., log.0000000003), or with the string prefix "__db" (e.g., __db.001).
+Database files that match these names should not be created in the
+environment directory.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/scope.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/copy.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/recimp.html b/bdb/docs/ref/program/recimp.html
new file mode 100644
index 00000000000..240eccd8bc9
--- /dev/null
+++ b/bdb/docs/ref/program/recimp.html
@@ -0,0 +1,49 @@
+<!--$Id: recimp.so,v 11.2 2000/03/18 21:43:18 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Recovery implementation</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/filesys.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/reclimit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Recovery implementation</h1>
+<p>The physical recovery process works as follows:
+<p>First, find the last checkpoint that completed. Since the system may
+have crashed while writing a checkpoint, this implies finding the
+second-to-last checkpoint in the log files. Read forward from this
+checkpoint, opening any database files for which modifications are found
+in the log.
+<p>Then, read backward from the end of the log. For each commit record
+encountered, record its transaction ID. For every other data update
+record, find the transaction ID of the record. If that transaction ID
+appears in the list of committed transactions, do nothing; if it does not
+appear in the committed list, then call the appropriate recovery routine
+to undo the operation.
+<p>In the case of catastrophic recovery, this roll-backward pass continues
+through all the present log files. In the case of normal recovery, this
+pass continues until we find a checkpoint written before the second-to-last
+checkpoint described above.
+<p>When the roll-backward pass is complete, the roll-forward pass begins at
+the point where the roll-backward pass ended. Each record is read and if
+its transaction id is in the committed list, then the appropriate recovery
+routine is called to redo the operation if necessary.
+<p>In a distributed transaction environment, there may be transactions that
+are prepared, but not yet committed. If these transactions are XA
+transactions, then they are rolled forward to their current state, and an
+active transaction corresponding to each is entered in the transaction table
+so that the XA transaction manager may call either transaction abort or
+commit, depending on the outcome of the overall transaction. If the
+transaction is not an XA transaction, then it is aborted like any other
+transaction would be.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/filesys.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/reclimit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/runtime.html b/bdb/docs/ref/program/runtime.html
new file mode 100644
index 00000000000..a6f860bcac0
--- /dev/null
+++ b/bdb/docs/ref/program/runtime.html
@@ -0,0 +1,57 @@
+<!--$Id: runtime.so,v 10.23 2000/12/04 18:05:42 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Run-time configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/extending.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Run-time configuration</h1>
+<p>There are a few interfaces that support run-time configuration of Berkeley DB.
+First is a group of interfaces that allow applications to intercept
+Berkeley DB requests for underlying library or system call functionality:
+<p><blockquote><pre><a href="../../api_c/set_func_close.html">db_env_set_func_close</a>
+<a href="../../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>
+<a href="../../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>
+<a href="../../api_c/set_func_exists.html">db_env_set_func_exists</a>
+<a href="../../api_c/set_func_free.html">db_env_set_func_free</a>
+<a href="../../api_c/set_func_fsync.html">db_env_set_func_fsync</a>
+<a href="../../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>
+<a href="../../api_c/set_func_malloc.html">db_env_set_func_malloc</a>
+<a href="../../api_c/set_func_map.html">db_env_set_func_map</a>
+<a href="../../api_c/set_func_open.html">db_env_set_func_open</a>
+<a href="../../api_c/set_func_read.html">db_env_set_func_read</a>
+<a href="../../api_c/set_func_realloc.html">db_env_set_func_realloc</a>
+<a href="../../api_c/set_func_seek.html">db_env_set_func_seek</a>
+<a href="../../api_c/set_func_sleep.html">db_env_set_func_sleep</a>
+<a href="../../api_c/set_func_unlink.html">db_env_set_func_unlink</a>
+<a href="../../api_c/set_func_unmap.html">db_env_set_func_unmap</a>
+<a href="../../api_c/set_func_write.html">db_env_set_func_write</a>
+<a href="../../api_c/set_func_yield.html">db_env_set_func_yield</a></pre></blockquote>
+<p>These interfaces are only available from the Berkeley DB C language API.
+<p>In addition, there are a few interfaces that allow applications to
+re-configure, on an application-wide basis, Berkeley DB behaviors.
+<p><blockquote><pre><a href="../../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>
+<a href="../../api_c/env_set_pageyield.html">db_env_set_pageyield</a>
+<a href="../../api_c/env_set_panicstate.html">db_env_set_panicstate</a>
+<a href="../../api_c/env_set_region_init.html">db_env_set_region_init</a>
+<a href="../../api_c/env_set_tas_spins.html">db_env_set_tas_spins</a></pre></blockquote>
+<p>These interfaces are available from all of the Berkeley DB programmatic APIs.
+<p>A not-uncommon problem for applications is the new API in Solaris 2.6
+for manipulating large files. As this API was not part of Solaris 2.5,
+it is difficult to create a single binary that takes advantage of the
+large file functionality in Solaris 2.6 but which still runs on Solaris
+2.5. <a href="solaris.txt">Example code</a> that supports this is
+included in the Berkeley DB distribution.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/extending.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/scope.html b/bdb/docs/ref/program/scope.html
new file mode 100644
index 00000000000..19814793259
--- /dev/null
+++ b/bdb/docs/ref/program/scope.html
@@ -0,0 +1,71 @@
+<!--$Id: scope.so,v 10.3 2000/08/10 17:54:49 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB handles</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/mt.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/namespace.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB handles</h1>
+ <a name="3"><!--meow--></a>
+<p>The Berkeley DB library has a number of object handles. The following table
+lists those handles, their scope, and if they are free-threaded, that
+is, if multiple threads within a process can share them.
+<p><dl compact>
+<p><dt>DB_ENV<dd>The DB_ENV handle is created by the <a href="../../api_c/env_create.html">db_env_create</a> function and
+references a Berkeley DB database environment, a collection of
+databases and Berkeley DB subsystems. DB_ENV handles are free-threaded
+if the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag is specified to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function
+when the environment is opened. The handle should not be closed while
+any other handle remains open that is using it as a reference
+(e.g., DB or DB_TXN). Once either the <a href="../../api_c/env_close.html">DBENV-&gt;close</a> or
+<a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> functions are called, the handle may not be accessed again,
+regardless of the function's return.
+<p><dt>DB_TXN<dd>The DB_TXN handle is created by the <a href="../../api_c/txn_begin.html">txn_begin</a> function and
+references a single transaction. The handle is not free-threaded, and
+transactions may not span threads nor may transactions be used by more
+than a single thread.
+Once the
+<a href="../../api_c/txn_abort.html">txn_abort</a> or <a href="../../api_c/txn_commit.html">txn_commit</a> functions are called, the handle may
+not be accessed again, regardless of the function's return.
+In addition, a parent transaction may not issue
+any Berkeley DB operations, except for <a href="../../api_c/txn_begin.html">txn_begin</a>, <a href="../../api_c/txn_abort.html">txn_abort</a>
+and <a href="../../api_c/txn_commit.html">txn_commit</a>, while it has active child transactions (child
+transactions that have not yet been committed or aborted).
+<p><dt>DB_MPOOLFILE<dd>The DB_MPOOLFILE handle references an open file in the shared
+memory buffer pool of the database environment. The handle is not
+free-threaded. Once the <a href="../../api_c/memp_fclose.html">memp_fclose</a> function is called, the handle may
+not be accessed again, regardless of the function's return.
+<p><dt>DB<dd>The DB handle is created by the <a href="../../api_c/db_create.html">db_create</a> function and
+references a single Berkeley DB database, which may or may not be part of a
+database environment. DB handles are free-threaded if the
+<a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag is specified to the <a href="../../api_c/db_open.html">DB-&gt;open</a> function when the
+database is opened, or if the database environment in which the database
+is opened is free-threaded. The handle should not be closed while any
+other handle that references the database is in use, e.g., database
+handles must not be closed while cursor handles into the database remain
+open, or transactions which include operations on the database have not
+yet been committed or aborted. Once the <a href="../../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../../api_c/db_remove.html">DB-&gt;remove</a> or <a href="../../api_c/db_rename.html">DB-&gt;rename</a> functions are called, the handle may
+not be accessed again, regardless of the function's return.
+<p><dt>DBC<dd>The DBC handle references a cursor into a Berkeley DB database. The
+handle is not free-threaded and cursors may not span threads nor may
+cursors be used by more than a single thread. If the cursor is to be
+used to perform operations on behalf of a transaction, the cursor must
+be opened and closed within the context of that single transaction.
+Once <a href="../../api_c/dbc_close.html">DBcursor-&gt;c_close</a> has been called, the handle may not be accessed
+again, regardless of the function's return.
+</dl>
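+<p>The following fragment sketches the usual open and close ordering, with
+error handling omitted for brevity; the open calls and flags are assumed to
+behave as described in the manual pages linked above. The environment is
+opened first and closed last, and each handle is closed before the handle
+it depends on.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+void
+handle_ordering_sketch(char *home)
+{
+	DB_ENV *dbenv;
+	DB *dbp;
+	DBC *dbcp;
+
+	/* Environment first; DB_THREAD makes the handle free-threaded. */
+	(void)db_env_create(&amp;dbenv, 0);
+	(void)dbenv-&gt;open(dbenv, home,
+	    DB_CREATE | DB_INIT_MPOOL | DB_THREAD, 0);
+
+	/* Database handle opened within the environment. */
+	(void)db_create(&amp;dbp, dbenv, 0);
+	(void)dbp-&gt;open(dbp, "scope.db", NULL, DB_BTREE, DB_CREATE, 0664);
+
+	/* Cursor handle opened within the database. */
+	(void)dbp-&gt;cursor(dbp, NULL, &amp;dbcp, 0);
+
+	/* Close handles in the reverse order of creation. */
+	(void)dbcp-&gt;c_close(dbcp);
+	(void)dbp-&gt;close(dbp, 0);
+	(void)dbenv-&gt;close(dbenv, 0);
+}</pre></blockquote>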
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/mt.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/namespace.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/program/solaris.txt b/bdb/docs/ref/program/solaris.txt
new file mode 100644
index 00000000000..d2ec3168242
--- /dev/null
+++ b/bdb/docs/ref/program/solaris.txt
@@ -0,0 +1,213 @@
+#ifdef OS_solaris
+/*
+ * This is all for Solaris 2.6.
+ *
+ * Sun defined a new API in Solaris2.6 to be used when manipulating large
+ * (>2Gbyte) files. This API isn't present in 2.5.x, so we can't simply
+ * call it -- that would mean two binaries, one for 2.5.x and the other for
+ * 2.6. Not pretty. So, what we do here is determine the OS on which we're
+ * running at runtime, and adjust the underlying Berkeley DB calls to use
+ * the new API if it's there.
+ */
+
+/* This must match the definition of stat64 in Solaris2.6 */
+struct our_stat64 {
+ dev_t st_dev;
+ long st_pad1[3]; /* reserve for dev expansion */
+ u_longlong_t st_ino;
+ mode_t st_mode;
+ nlink_t st_nlink;
+ uid_t st_uid;
+ gid_t st_gid;
+ dev_t st_rdev;
+ long st_pad2[2];
+ longlong_t st_size;
+ timestruc_t mst_atime;
+ timestruc_t mst_mtime;
+ timestruc_t mst_ctime;
+ long st_blksize;
+ longlong_t st_blocks; /* large file support */
+ char st_fstype[_ST_FSTYPSZ];
+ long st_pad4[8]; /* expansion area */
+};
+
+#define MEGABYTE (1024 * 1024)
+
+typedef int (*open_fn)(const char *path, int flags, ...);
+typedef longlong_t (*lseek64_fn)(int fildes, longlong_t offset, int whence);
+typedef longlong_t (*fstat64_fn)(int fildes, struct our_stat64 *s);
+typedef void* (*mmap64_fn)(void* addr, size_t len, int prot, int flags,
+int filedes, longlong_t off);
+
+static fstat64_fn os_fstat64_fn = NULL;
+static lseek64_fn os_lseek64_fn = NULL;
+static mmap64_fn os_mmap64_fn = NULL;
+static open_fn os_open64_fn = NULL;
+
+static int dblayer_load_largefile_fns()
+{
+ void *lib_handle = NULL;
+ void *function_found = NULL;
+ int ret = 0;
+
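+	/*
+	 * dlopen(NULL) returns a handle for the running program itself,
+	 * so dlsym can probe it for the 64-bit entry points at runtime.
+	 */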
+ lib_handle = dlopen(NULL, RTLD_NOW);
+ if (NULL == lib_handle)
+ return (-1);
+
+ function_found = dlsym(lib_handle,"open64");
+ if (NULL == function_found)
+ return (-1);
+ os_open64_fn = (open_fn)function_found;
+
+ function_found = dlsym(lib_handle,"lseek64");
+ if (NULL == function_found)
+ return (-1);
+ os_lseek64_fn = (lseek64_fn)function_found;
+
+ function_found = dlsym(lib_handle,"fstat64");
+ if (NULL == function_found)
+ return (-1);
+ os_fstat64_fn = (fstat64_fn)function_found;
+
+ function_found = dlsym(lib_handle,"mmap64");
+ if (NULL == function_found)
+ return (-1);
+ os_mmap64_fn = (mmap64_fn)function_found;
+
+ return 0;
+}
+
+/* Helper function for large seeks */
+static int dblayer_seek_fn_solaris(int fd,
+ size_t pgsize, db_pgno_t pageno, u_long relative, int whence)
+{
+ longlong_t offset = 0;
+ longlong_t ret = 0;
+
+ if (NULL == os_lseek64_fn) {
+ return -1;
+ }
+
+ offset = (longlong_t)pgsize * pageno + relative;
+
+ ret = (*os_lseek64_fn)(fd,offset,whence);
+
+ return (ret == -1) ? errno : 0;
+}
+
+/* Helper function for large file mmap */
+static int dblayer_map_solaris(fd, len, is_private, is_rdonly, addr)
+ int fd, is_private, is_rdonly;
+ size_t len;
+ void **addr;
+{
+ void *p;
+ int flags, prot;
+
+ flags = is_private ? MAP_PRIVATE : MAP_SHARED;
+ prot = PROT_READ | (is_rdonly ? 0 : PROT_WRITE);
+
+ if ((p = (*os_mmap64_fn)(NULL,
+ len, prot, flags, fd, (longlong_t)0)) == (void *)MAP_FAILED)
+ return (errno);
+
+ *addr = p;
+ return (0);
+}
+
+/* Helper function for large fstat */
+static int dblayer_ioinfo_solaris(const char *path,
+ int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep)
+{
+ struct our_stat64 sb;
+
+ if (NULL == os_fstat64_fn) {
+ return -1;
+ }
+
+ if ((*os_fstat64_fn)(fd, &sb) == -1)
+ return (errno);
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t) (sb.st_size / (longlong_t)MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t) (sb.st_size % (longlong_t)MEGABYTE);
+
+ /*
+ * Return the underlying filesystem blocksize, if available. Default
+ * to 8K on the grounds that most OS's use less than 8K as their VM
+ * page size.
+ */
+ if (iosizep != NULL)
+ *iosizep = sb.st_blksize;
+ return (0);
+}
+#endif
+
+#ifdef irix
+/*
+ * A similar mess to Solaris: a new API added in IRIX 6.2 to support large
+ * files. We always build on 6.2 or later, so no need to do the same song
+ * and dance as on Solaris -- we always have the header files for the
+ * 64-bit API.
+ */
+
+/* Helper function for large seeks */
+static int dblayer_seek_fn_irix(int fd,
+ size_t pgsize, db_pgno_t pageno, u_long relative, int whence)
+{
+ off64_t offset = 0;
+ off64_t ret = 0;
+
+ offset = (off64_t)pgsize * pageno + relative;
+
+ ret = lseek64(fd,offset,whence);
+
+ return (ret == -1) ? errno : 0;
+}
+
+/* Helper function for large fstat */
+static int dblayer_ioinfo_irix(const char *path,
+ int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep)
+{
+ struct stat64 sb;
+
+ if (fstat64(fd, &sb) == -1) {
+ return (errno);
+ }
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t) (sb.st_size / (off64_t)MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t) (sb.st_size % (off64_t)MEGABYTE);
+
+ if (iosizep != NULL)
+ *iosizep = sb.st_blksize;
+ return (0);
+}
+#endif /* irix */
+
+static int dblayer_override_libdb_functions(dblayer_private *priv)
+{
+#if defined(OS_solaris)
+ int ret = 0;
+
+ ret = dblayer_load_largefile_fns();
+ if (0 != ret) {
+ Debug("Not Solaris2.6: no large file support enabled\n");
+ } else {
+ /* Means we did get the XXX64 functions, so let's use them */
+ db_jump_set((void*)os_open64_fn, DB_FUNC_OPEN);
+ db_jump_set((void*)dblayer_seek_fn_solaris, DB_FUNC_SEEK);
+ db_jump_set((void*)dblayer_ioinfo_solaris, DB_FUNC_IOINFO);
+ db_jump_set((void*)dblayer_map_solaris, DB_FUNC_MAP);
+ Debug("Solaris2.6: selected 64-bit file handling.\n");
+ }
+#else
+#if defined (irix)
+ db_jump_set((void*)dblayer_seek_fn_irix, DB_FUNC_SEEK);
+ db_jump_set((void*)dblayer_ioinfo_irix, DB_FUNC_IOINFO);
+#endif /* irix */
+#endif /* OS_solaris */
+ return 0;
+}
diff --git a/bdb/docs/ref/program/version.html b/bdb/docs/ref/program/version.html
new file mode 100644
index 00000000000..d1b1254a178
--- /dev/null
+++ b/bdb/docs/ref/program/version.html
@@ -0,0 +1,45 @@
+<!--$Id: version.so,v 10.14 2000/03/18 21:43:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Library version information</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td width="1%"><a href="../../ref/program/copy.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/dbsizes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Library version information</h1>
+<p>Each release of the Berkeley DB library has a major version number, a minor
+version number, and a patch number.
+<p>The major version number changes only when major portions of the Berkeley DB
+functionality have been changed. In this case, it may be necessary to
+significantly modify applications in order to upgrade them to use the new
+version of the library.
+<p>The minor version number changes when Berkeley DB interfaces have changed, and
+the new release is not entirely backward compatible with previous releases.
+To upgrade applications to the new version, they must be recompiled and,
+potentially, minor modifications made (for example, the order of arguments
+to a function might have changed).
+<p>The patch number changes on each release. If only the patch number
+has changed in a release, applications do not need to be recompiled,
+and they can be upgraded to the new version by simply installing a
+new version of the shared library.
+<p>Internal Berkeley DB interfaces may change at any time and during any release,
+without warning. This means that the library must be entirely recompiled
+and reinstalled when upgrading to new releases of the library, as there
+is no guarantee that modules from the current version of the library will
+interact correctly with modules from a previous release.
+<p>To retrieve the Berkeley DB version information, applications should use the
+<a href="../../api_c/env_version.html">db_version</a> interface. In addition to the above information, the
+<a href="../../api_c/env_version.html">db_version</a> interface returns a string encapsulating the version
+information, suitable for display to a user.
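+<p>For example, a minimal sketch (the output formatting is arbitrary) that
+displays the version string and checks the major and minor numbers against
+the headers the application was compiled with:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+int
+show_version()
+{
+	int major, minor, patch;
+
+	/* db_version fills in the numbers and returns the version string. */
+	printf("%s\n", db_version(&amp;major, &amp;minor, &amp;patch));
+
+	/* Refuse to run against a library that does not match the headers. */
+	if (major != DB_VERSION_MAJOR || minor != DB_VERSION_MINOR) {
+		fprintf(stderr, "version mismatch\n");
+		return (1);
+	}
+	return (0);
+}</pre></blockquote>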
+<table><tr><td><br></td><td width="1%"><a href="../../ref/program/copy.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/dbsizes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/refs/bdb_usenix.html b/bdb/docs/ref/refs/bdb_usenix.html
new file mode 100644
index 00000000000..58e82b57314
--- /dev/null
+++ b/bdb/docs/ref/refs/bdb_usenix.html
@@ -0,0 +1,1120 @@
+<!--"@(#)usenix.html 1.2 4/26/99"-->
+<html>
+<head>
+<title>Berkeley DB</title>
+</head>
+<body bgcolor="white">
+<center>
+<h1>
+Berkeley DB
+</h1>
+<p>
+<i>
+Michael A. Olson
+<br>
+Keith Bostic
+<br>
+Margo Seltzer
+<br>&nbsp;
+<br>
+Sleepycat Software, Inc.
+<br>&nbsp;
+<br>
+</i>
+<b>
+Abstract
+</b>
+</center>
+<font size="-1">
+<blockquote>
+<p>
+Berkeley DB is an Open Source embedded database system with a number
+of key advantages over comparable systems. It is simple to use, supports
+concurrent access by multiple users, and provides industrial-strength
+transaction support, including surviving system and disk crashes. This
+paper describes the design and technical features of Berkeley DB, the
+distribution, and its license.
+</blockquote>
+</font>
+<h1>
+Introduction
+</h1>
+<p>
+The Berkeley Database (Berkeley DB) is an embedded database system
+that can be used in applications requiring high-performance
+concurrent storage and retrieval of key/value pairs. The software
+is distributed as a library that can be linked directly into an
+application.
+It provides a variety of programmatic interfaces,
+including callable APIs for C, C++, Perl, Tcl and Java.
+Users may download Berkeley DB from Sleepycat Software's Web site,
+at
+<a href="http://www.sleepycat.com">www.sleepycat.com</a>.
+<p>
+Sleepycat distributes Berkeley DB as an Open Source product. The company
+collects license fees for certain uses of the software and sells support
+and services.
+<h2>
+History
+</h2>
+<p>
+Berkeley DB began as a new implementation of a hash access method
+to replace both
+<tt>hsearch</tt>
+and the various
+<tt>dbm</tt>
+implementations
+(<tt>dbm</tt> from AT&T,
+<tt>ndbm</tt>
+from Berkeley, and
+<tt>gdbm</tt>
+from the GNU project).
+In 1990 Seltzer and Yigit produced a package called Hash to do this
+<a href="#Selt91">[Selt91]</a>.
+<p>
+The first general release of Berkeley DB, in 1991,
+included some interface changes and a new B+tree access method.
+At roughly the same time, Seltzer and Olson
+developed a prototype transaction
+system based on Berkeley DB, called LIBTP <a href="#Selt92">[Selt92]</a>,
+but never released the code.
+<p>
+The 4.4BSD UNIX release included Berkeley DB 1.85 in 1992.
+Seltzer and Bostic maintained the code in the early 1990s
+in Berkeley and in Massachusetts.
+Many users adopted the code during this period.
+<p>
+By mid-1996,
+users wanted commercial support for the software.
+In response, Bostic and Seltzer formed Sleepycat Software.
+The company enhances, distributes, and
+supports Berkeley DB and supporting software and documentation.
+Sleepycat released version 2.1 of Berkeley DB in mid-1997
+with important new features, including
+support for concurrent access to databases.
+The company makes about three commercial releases a year,
+and most recently shipped version 2.8.
+<h2>
+Overview of Berkeley DB
+</h2>
+<p>
+The C interfaces in Berkeley DB permit
+<tt>dbm</tt>-style
+record management
+for databases,
+with significant extensions to handle duplicate data items elegantly,
+to deal with concurrent access, and to provide transactional
+support so that multiple changes can be simultaneously committed
+(so that they are made permanent) or rolled back (so that the
+database is restored to its state at the beginning of the transaction).
+<p>
+C++ and Java interfaces provide a small set of classes for
+operating on a database. The main class in both cases is called
+<tt>Db</tt>,
+and provides methods that encapsulate the
+<tt>dbm</tt>-style
+interfaces that the C interfaces provide.
+<p>
+Tcl and Perl interfaces allow developers working in those languages
+to use Berkeley DB in their applications.
+Bindings for both languages are included in the distribution.
+<p>
+Developers may compile their applications and link in Berkeley DB
+statically or dynamically.
+<h2>
+How Berkeley DB is used
+</h2>
+<p>
+The Berkeley DB library supports concurrent access to databases.
+It can be linked
+into standalone applications, into a collection of cooperating applications,
+or into servers that handle requests and do database operations on
+behalf of clients.
+<p>
+Compared to using a standalone database management system, Berkeley
+DB is easy to understand and simple to use. The
+software stores and retrieves records, which consist of key/value pairs.
+Keys are used to locate items and can be any data type or structure
+supported by the programming language.
+<p>
+The programmer can provide the functions that Berkeley DB uses to
+operate on keys.
+For example,
+B+trees can use a custom comparison function,
+and the Hash access method can use a custom hash function.
+Berkeley DB uses default functions if none are supplied.
+Otherwise, Berkeley DB does not examine or interpret either keys
+or values in any way.
+Values may be arbitrarily long.
+<p>
+It is also important to understand what Berkeley DB is not.
+It is not a database server that handles network requests. It is not an
+SQL engine that executes queries. It is not a relational or object-oriented
+database management system.
+<p>
+It is possible to build any of those on top of Berkeley DB,
+but the package, as distributed,
+is an embedded database engine. It has been designed
+to be portable, small, fast, and reliable.
+<h2>
+Applications that use Berkeley DB
+</h2>
+<p>
+Berkeley DB is embedded in a variety of proprietary and Open Source
+software packages.
+This section highlights a few of the products that use it.
+<p>
+Directory servers, which do data storage and retrieval using the
+Local Directory Access Protocol (LDAP), provide naming and directory
+lookup service on local-area networks.
+This service is,
+essentially,
+database query and update,
+but uses a simple protocol rather than SQL or ODBC.
+Berkeley DB is the embedded data manager in the majority of deployed
+directory servers today,
+including LDAP servers from Netscape,
+MessageDirect (formerly Isode),
+and others.
+<p>
+Berkeley DB is also embedded in a large number of mail servers.
+Intermail,
+from Software.com,
+uses Berkeley DB as a message store
+and as the backing store for its directory server.
+The sendmail server
+(including both the commercial Sendmail Pro offering from Sendmail,
+Inc. and the version distributed by sendmail.org)
+uses Berkeley DB to store aliases and other information.
+Similarly,
+Postfix (formerly VMailer) uses Berkeley DB
+to store administrative information.
+<p>
+In addition,
+Berkeley DB is embedded in a wide variety of other software products.
+Example applications include managing access control lists,
+storing user keys in a public-key infrastructure,
+recording machine-to-network-address mappings in address servers,
+and storing configuration and device information in video
+post-production software.
+<p>
+Finally,
+Berkeley DB is a part of many other Open Source software packages
+available on the Internet.
+For example,
+the software is embedded in the Apache Web server and the Gnome desktop.
+<h1>
+Access Methods
+</h1>
+<p>
+In database terminology, an access method is the disk-based structure
+used to store data and the operations available on that structure.
+For example, many database systems support a B+tree access method.
+B+trees allow equality-based lookups (find keys equal to some constant),
+range-based lookups (find keys between two constants) and record
+insertion and deletion.
+<p>
+Berkeley DB supports three access methods: B+tree,
+Extended Linear Hashing (Hash),
+and Fixed- or Variable-length Records (Recno).
+All three operate on records composed of a key and a data value.
+In the B+tree and Hash access methods, keys can have arbitrary structure.
+In the Recno access method, each record is assigned a record number, which
+serves as the key.
+In all the access methods, the
+value can have arbitrary structure.
+The programmer can supply comparison or hashing functions for keys,
+and Berkeley DB stores and retrieves values without
+interpreting them.
+<p>
+All of the access methods use the host filesystem as a backing store.
+<h2>
+Hash
+</h2>
+<p>
+Berkeley DB includes a Hash access method that implements extended
+linear hashing <a href="#Litw80">[Litw80]</a>.
+Extended linear hashing adjusts the hash function as the hash
+table grows, attempting to keep all buckets underfull in the steady
+state.
+<p>
+The Hash access method supports insertion and deletion of records and
+lookup by exact match only. Applications may iterate over all records
+stored in a table, but the order in which they are returned is undefined.
+<h2>
+B+tree
+</h2>
+<p>
+Berkeley DB includes a B+tree <a href="#Come79">[Come79]</a> access method.
+B+trees store records of key/value pairs in leaf pages,
+and pairs of (key, child page address) at internal nodes.
+Keys in the tree are stored in sorted order,
+where the order is determined by the comparison function supplied when the
+database was created.
+Pages at the leaf level of the tree include pointers
+to their neighbors to simplify traversal. B+trees support lookup by
+exact match (equality) or range (greater than or equal to a key).
+Like Hash tables, B+trees support record insertion,
+deletion, and iteration over all records in the tree.
+<p>
+As records are inserted and pages in the B+tree fill up, they are split,
+with about half the keys going into a new peer page at the same level in
+the tree.
+Most B+tree implementations leave both nodes half-full after a split.
+This leads to poor performance in a common case, where the caller inserts
+keys in order.
+To handle this case, Berkeley DB keeps track of the insertion order,
+and splits pages unevenly to keep pages fuller.
+This reduces tree size, yielding better search performance and smaller
+databases.
+<p>
+On deletion, empty pages are coalesced by reverse splits
+into single pages.
+The access method does no other page balancing on insertion
+or deletion.
+Keys are not moved among pages at every update
+to keep the tree well-balanced. While this could improve search times
+in some cases, the additional code complexity leads to slower updates and
+is prone to deadlocks.
+<p>
+For simplicity, Berkeley DB B+trees do no prefix compression of keys
+at internal or leaf nodes.
+<h2>
+Recno
+</h2>
+<p>
+Berkeley DB includes a fixed- or variable-length record access method,
+called
+<i>Recno</i>.
+The Recno access method assigns logical record numbers to each
+record,
+and can search for and update records by record number.
+Recno is able,
+for example,
+to load a text file into a database,
+treating each line as a record.
+This permits fast searches by line number for applications like
+text editors <a href="#Ston82">[Ston82]</a>.
+<p>
+Recno is actually built
+on top of the B+tree access method and provides a simple interface
+for storing sequentially-ordered data values.
+The Recno access method generates keys internally.
+The programmer's view of the values is that
+they are numbered sequentially from one.
+Developers can choose to have records automatically renumbered
+when lower-numbered records are added or deleted.
+In this case, new keys can be inserted between existing keys.
+<h1>
+Features
+</h1>
+<p>
+This section describes important features of Berkeley DB.
+In general,
+developers can choose which features are useful to them,
+and use only those that are required by their application.
+<p>
+For example,
+when an application opens a database, it can declare the degree of
+concurrency and recovery that it requires. Simple stand-alone applications,
+and in particular ports of applications that used
+<tt>dbm</tt>
+or one of its
+variants, generally do not require concurrent access or crash recovery.
+Other applications, such as enterprise-class database management systems
+that store sales transactions or other critical data, need full
+transactional service. Single-user operation is faster than multi-user
+operation, since no overhead is incurred by locking. Running with
+the recovery system disabled is faster than running with it enabled,
+since log records need not be written when changes are made to the
+database.
+<p>
+In addition, some core subsystems, including the locking system and
+the logging facility,
+can be used outside the context of the access methods as well.
+Although few users have chosen to do so, it is possible to
+use only the lock manager in Berkeley DB to control concurrency
+in an application, without using any of the standard database services.
+Alternatively, the caller can integrate locking of non-database resources
+with Berkeley DB's transactional two-phase locking system, to impose
+transaction semantics on objects outside the database.
+<h2>
+Programmatic interfaces
+</h2>
+<p>
+Berkeley DB defines a simple API for database management.
+The package does not include industry-standard
+programmatic interfaces such as Open Database Connectivity (ODBC),
+Object Linking and Embedding for Databases (OleDB), or Structured
+Query Language (SQL). These interfaces, while useful, were
+designed to promote interoperability of database systems, and not
+simplicity or performance.
+<p>
+In response to customer demand,
+Berkeley DB 2.5 introduced support for the XA standard <a href="#Open94">[Open94]</a>.
+XA permits Berkeley DB to participate in distributed transactions
+under a transaction processing monitor like Tuxedo from BEA Systems.
+Like XA, other standard interfaces can be built on top of the
+core system.
+The standards do not belong inside Berkeley DB,
+since not all applications need them.
+<h2>
+Working with records
+</h2>
+<p>
+A database user may need to search for particular keys in a database,
+or may simply want to browse available records.
+Berkeley DB supports both keyed access,
+to find one or more records with a given key,
+and sequential access,
+to retrieve all the records in the database one at a time.
+The order of the records returned during sequential scans
+depends on the access method.
+B+tree and Recno databases return records in sort order,
+and Hash databases return them in apparently random order.
+<p>
+Similarly,
+Berkeley DB defines simple interfaces for inserting,
+updating,
+and deleting records in a database.
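+<p>
+As a brief sketch (the handle
+<tt>db</tt>
+is assumed to be an open database and error handling is omitted),
+storing, fetching, and deleting a record through the C interfaces
+looks roughly like this:
+<blockquote>
+<pre>
+DBT key, data;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+data.data = "apple";
+data.size = sizeof("apple");
+
+/* Insert or overwrite the record. */
+(void)db-&gt;put(db, NULL, &amp;key, &amp;data, 0);
+
+/* Fetch it back by key. */
+memset(&amp;data, 0, sizeof(data));
+(void)db-&gt;get(db, NULL, &amp;key, &amp;data, 0);
+
+/* Delete it. */
+(void)db-&gt;del(db, NULL, &amp;key, 0);
+</pre>
+</blockquote>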
+<h2>
+Long keys and values
+</h2>
+<p>
+Berkeley DB manages keys and values as large as
+2<sup>32</sup> bytes.
+Since the time required to copy a record is proportional to its size,
+Berkeley DB includes interfaces that operate on partial records.
+If an application requires only part of a large record,
+it requests partial record retrieval,
+and receives just the bytes that it needs.
+The smaller copy saves both time and memory.
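+<p>
+For example, a sketch of a partial read (again assuming an open handle
+<tt>db</tt>
+and omitting error handling) sets the partial fields of the
+<tt>DBT</tt>
+before calling the get interface, here asking for 100 bytes starting at
+byte offset 1000 of the stored value:
+<blockquote>
+<pre>
+DBT key, data;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "bigrecord";
+key.size = sizeof("bigrecord");
+
+data.flags = DB_DBT_PARTIAL;
+data.doff = 1000;	/* starting offset within the value */
+data.dlen = 100;	/* number of bytes to return */
+
+(void)db-&gt;get(db, NULL, &amp;key, &amp;data, 0);
+</pre>
+</blockquote>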
+<p>
+Berkeley DB allows the programmer to define the data types of
+keys and values.
+Developers use any type expressible in the programming language.
+<h2>
+Large databases
+</h2>
+<p>
+A single database managed by Berkeley DB can be up to 2<sup>48</sup>
+bytes,
+or 256 terabytes,
+in size.
+Berkeley DB uses the host filesystem as the backing store
+for the database,
+so large databases require big file support from the operating system.
+Sleepycat Software has customers using Berkeley DB
+to manage single databases in excess of 100 gigabytes.
+<h2>
+Main memory databases
+</h2>
+<p>
+Applications that do not require persistent storage can create
+databases that exist only in main memory.
+These databases bypass the overhead imposed by the I/O system
+altogether.
+<p>
+Some applications do need to use disk as a backing store,
+but run on machines with very large memory.
+Berkeley DB is able to manage very large shared memory regions
+for cached data pages,
+log records,
+and lock management.
+For example,
+the cache region used for data pages may be gigabytes in size,
+reducing the likelihood that any read operation will need to
+visit the disk in the steady state.
+The programmer declares the size of the cache region at
+startup.
+<p>
+Finally, many operating systems provide memory-mapped file services
+that are much faster than their general-purpose file system
+interfaces.
+Berkeley DB can memory-map its database files for read-only database use.
+The application operates on records stored directly on the pages,
+with no cache management overhead.
+Because the application gets pointers directly into the
+Berkeley DB pages,
+writes cannot be permitted.
+Otherwise,
+changes could bypass the locking and logging systems,
+and software errors could corrupt the database.
+Read-only applications can use Berkeley DB's memory-mapped
+file service to improve performance on most architectures.
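+<p>
+A minimal sketch of these two configurations, assuming the 3.x-style C
+interfaces and omitting error handling: the cache size is declared on the
+environment handle before it is opened, and a database opened with a null
+file name lives entirely in memory.
+<blockquote>
+<pre>
+/* Declare a 512MB cache in a single region before DBENV-&gt;open. */
+(void)dbenv-&gt;set_cachesize(dbenv, 0, 512 * 1024 * 1024, 1);
+
+/* A database opened with a NULL file name has no backing file. */
+(void)dbp-&gt;open(dbp, NULL, NULL, DB_BTREE, DB_CREATE, 0664);
+</pre>
+</blockquote>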
+<h2>
+Configurable page size
+</h2>
+<p>
+Programmers declare the size of the pages used by their access
+methods when they create a database.
+Although Berkeley DB provides reasonable defaults,
+developers may override them to control system performance.
+Small pages reduce the number of records that fit on a single page.
+Fewer records on a page means that fewer records are locked when
+the page is locked,
+improving concurrency.
+The per-page overhead is proportionally higher with smaller pages,
+of course,
+but developers can trade off space for time as an application requires.
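+<p>
+A one-call sketch, assuming the 3.x-style page size method and an
+illustrative value; the page size must be declared before the database
+is created:
+<blockquote>
+<pre>
+/* Small pages hold few records, so page locks cover fewer records. */
+(void)dbp-&gt;set_pagesize(dbp, 1024);
+(void)dbp-&gt;open(dbp, "small_pages.db", NULL, DB_BTREE, DB_CREATE, 0664);
+</pre>
+</blockquote>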
+<h2>
+Small footprint
+</h2>
+<p>
+Berkeley DB is a compact system.
+The full package, including all access methods, recoverability,
+and transaction support
+is roughly 175K of text space on common architectures.
+<h2>
+Cursors
+</h2>
+<p>
+In database terminology, a cursor is a pointer into an access method
+that can be called iteratively to return records in sequence. Berkeley
+DB includes cursor interfaces for all access methods. This permits,
+for example, users to traverse a B+tree and view records in order.
+Pointers to records in cursors are persistent, so that once fetched,
+a record may be updated in place. Finally, cursors support access to
+chains of duplicate data items in the various access methods.
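+<p>
+A sketch of a full traversal (the handle
+<tt>db</tt>
+is assumed to be an open database and error handling is omitted):
+<blockquote>
+<pre>
+DBC *dbcp;
+DBT key, data;
+int ret;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+
+/* Open a cursor and step through every record in order. */
+(void)db-&gt;cursor(db, NULL, &amp;dbcp, 0);
+while ((ret = dbcp-&gt;c_get(dbcp, &amp;key, &amp;data, DB_NEXT)) == 0)
+	printf("%.*s\n", (int)key.size, (char *)key.data);
+(void)dbcp-&gt;c_close(dbcp);
+</pre>
+</blockquote>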
+<h2>
+Joins
+</h2>
+<p>
+In database terminology,
+a join is an operation that spans multiple separate
+tables (or in the case of Berkeley DB, multiple separate DB files).
+For example, a company may store information about its customers
+in one table and information about sales in another. An application
+will likely want to look up sales information by customer name; this
+requires matching records in the two tables that share a common
+customer ID field.
+This combining of records from multiple tables is called a join.
+<p>
+Berkeley DB includes interfaces for joining two or more tables.
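+<p>
+A compressed sketch of the join interface: the names
+<tt>sales</tt>
+and
+<tt>by_customer_curs</tt>
+are illustrative, the index cursor is assumed to be already positioned on
+the desired customer ID, and error handling is omitted.
+<blockquote>
+<pre>
+DBC *curslist[2], *join_curs;
+DBT key, data;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+
+curslist[0] = by_customer_curs;	/* positioned index cursor */
+curslist[1] = NULL;		/* the cursor list is NULL-terminated */
+
+/* Visit every sales record matching the positioned cursor(s). */
+(void)sales-&gt;join(sales, curslist, &amp;join_curs, 0);
+while (join_curs-&gt;c_get(join_curs, &amp;key, &amp;data, 0) == 0)
+	;	/* process each matching record */
+(void)join_curs-&gt;c_close(join_curs);
+</pre>
+</blockquote>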
+<h2>
+Transactions
+</h2>
+<p>
+Transactions have four properties <a href="#Gray93">[Gray93]</a>:
+<ul>
+<li>
+They are atomic. That is, all of the changes made in a single
+transaction must be applied at the same instant or not at all.
+This permits, for example, the transfer of money between two
+accounts to be accomplished, by making the reduction of the
+balance in one account and the increase in the other into a
+single, atomic action.
+</li>
+<li>
+They must be consistent. That is, changes to the database
+by any transaction cannot leave the database in an illegal
+or corrupt state.
+</li>
+<li>
+They must be isolatable. Regardless of the number of users
+working in the database at the same time, every user must have
+the illusion that no other activity is going on.
+</li>
+<li>
+They must be durable. Even if the disk that stores the database
+is lost, it must be possible to recover the database to its last
+transaction-consistent state.
+</li>
+</ul>
+<p>
+This combination of properties -- atomicity, consistency, isolation, and
+durability -- is referred to as ACIDity in the literature. Berkeley DB,
+like most database systems, provides ACIDity using a collection of core
+services.
+<p>
+Programmers can choose to use Berkeley DB's transaction services
+for applications that need them.
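+<p>
+A minimal sketch of transactional use, assuming the 3.x-style
+<tt>txn_begin</tt>
+and
+<tt>txn_commit</tt>
+calls, open environment and database handles, and key and data
+<tt>DBT</tt>s
+prepared as in the earlier record example:
+<blockquote>
+<pre>
+DB_TXN *tid;
+int ret;
+
+/* The put becomes durable only if the commit succeeds. */
+if ((ret = txn_begin(dbenv, NULL, &amp;tid, 0)) != 0)
+	return (ret);
+if ((ret = db-&gt;put(db, tid, &amp;key, &amp;data, 0)) != 0) {
+	(void)txn_abort(tid);
+	return (ret);
+}
+return (txn_commit(tid, 0));
+</pre>
+</blockquote>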
+<h3>
+Write-ahead logging
+</h3>
+<p>
+Programmers can enable the logging system when they start up Berkeley DB.
+During a transaction,
+the application makes a series of changes to the database.
+Each change is captured in a log entry,
+which holds the state of the database record
+both before and after the change.
+The log record is guaranteed
+to be flushed to stable storage before any of the changed data pages
+are written.
+This behavior -- writing the log before the data pages -- is called
+<i>write-ahead logging</i>.
+<p>
+At any time during the transaction,
+the application can
+<i>commit</i>,
+making the changes permanent,
+or
+<i>roll back</i>,
+cancelling all changes and restoring the database to its
+pre-transaction state.
+If the application
+rolls back the transaction, then the log holds the state of all
+changed pages prior to the transaction, and Berkeley DB simply
+restores that state.
+If the application commits the transaction,
+Berkeley DB writes the log records to disk.
+In-memory copies of the data pages already reflect the changes,
+and will be flushed as necessary during normal processing.
+Since log writes are sequential, but data page
+writes are random, this improves performance.
+<h3>
+Crashes and recovery
+</h3>
+<p>
+Berkeley DB's write-ahead log is used by the transaction
+system to commit or roll back transactions.
+It also gives the recovery system the information that
+it needs to protect against data loss or corruption
+from crashes.
+Berkeley DB is able to survive application crashes,
+system crashes,
+and even catastrophic failures like the loss of a hard
+disk,
+without losing any data.
+<p>
+Surviving crashes requires data stored in several different places.
+During normal processing,
+Berkeley DB has copies of active log records and recently-used
+data pages in memory.
+Log records are flushed to the log disk when transactions commit.
+Data pages trickle out to the data disk as pages move through
+the buffer cache.
+Periodically,
+the system administrator backs up the data disk,
+creating a safe copy of the database at a particular instant.
+When the database is backed up,
+the log can be truncated.
+For maximum robustness,
+the log disk and data disk should be separate devices.
+<p>
+Different system failures can destroy memory,
+the log disk,
+or the data disk.
+Berkeley DB is able to survive the loss of any one
+of these repositories
+without losing any committed transactions.
+<p>
+If the computer's memory is lost,
+through an application or operating system crash,
+then the log holds all committed transactions.
+On restart,
+the recovery system rolls the log forward against
+the database,
+reapplying any changes to on-disk pages that were in memory at the
+time of the crash.
+Since the log contains pre- and post-change state for
+transactions,
+the recovery system also uses the log to restore any pages to
+their original state if they were modified by transactions
+that never committed.
+<p>
+If the data disk is lost,
+the system administrator can restore the most recent copy from backup.
+The recovery system will roll the entire log forward against
+the original database,
+reapplying all committed changes.
+When it finishes,
+the database will contain every change made by every
+transaction that ever committed.
+<p>
+If the log disk is lost,
+then the recovery system can use the in-memory copies of
+log entries to roll back any uncommitted transactions,
+flush all in-memory database pages to the data disk,
+and shut down gracefully.
+At that point,
+the system administrator can back up the database disk,
+install a new log disk,
+and restart the system.
+<h3>
+Checkpoints
+</h3>
+<p>
+Berkeley DB includes a checkpointing service that interacts
+with the recovery system.
+During normal processing,
+both the log and the database are changing continually.
+At any given instant,
+the on-disk versions of the two are not guaranteed to be consistent.
+The log probably contains changes that are not yet in the database.
+<p>
+When an application makes a
+<i>checkpoint</i>,
+all committed changes in the log up to that point
+are guaranteed to be present on the data disk,
+too.
+Checkpointing is moderately expensive during normal processing,
+but limits the time spent recovering from crashes.
+<p>
+After an application or operating system crash,
+the recovery system only needs to go back two checkpoints
+to start rolling the log forward.
+(One checkpoint is not far enough.
+The recovery system cannot be sure that the most recent
+checkpoint completed --
+it may have been interrupted by the crash that forced the
+recovery system to run in the first place.)
+Without checkpoints,
+there is no way to be sure how long restarting after a crash will take.
+With checkpoints,
+the restart interval can be fixed by the programmer.
+Recovery processing can be guaranteed to complete in a second or two.
+<p>
+Software crashes are much more common than disk failures.
+Many developers want to guarantee that software bugs do not destroy data,
+but are willing to restore from tape,
+and to tolerate a day or two of lost work,
+in the unlikely event of a disk crash.
+With Berkeley DB,
+programmers may truncate the log at checkpoints.
+As long as the two most recent checkpoints are present,
+the recovery system can guarantee that no committed transactions
+are lost after a software crash.
+In this case,
+the recovery system does not require that the log and the
+data be on separate devices,
+although separating them can still improve performance
+by spreading out writes.
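+<p>
+A sketch of a periodic checkpoint call, assuming a
+<tt>txn_checkpoint</tt>
+interface that takes kilobyte and minute thresholds plus a flags argument,
+and an open, transactional environment; the thresholds shown are arbitrary.
+<blockquote>
+<pre>
+int ret;
+
+/*
+ * Checkpoint if at least 1MB of log has been written or five minutes
+ * have passed since the last checkpoint.
+ */
+if ((ret = txn_checkpoint(dbenv, 1024, 5, 0)) != 0)
+	dbenv-&gt;err(dbenv, ret, "txn_checkpoint");
+</pre>
+</blockquote>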
+<h3>
+Two-phase locking
+</h3>
+<p>
+Berkeley DB provides a service known as two-phase locking.
+In order to reduce the likelihood of deadlocks and to guarantee ACID
+properties, database systems manage locks in two phases. First, during
+the operation of a transaction, they acquire locks, but never release
+them. Second, at the end of the transaction, they release locks, but
+never acquire them. In practice, most database systems, including Berkeley
+DB, acquire locks on demand over the course of the transaction, then
+flush the log, then release all locks.
+<p>
+Berkeley DB can lock entire database files, which correspond to tables,
+or individual pages in them.
+It does no record-level locking.
+By shrinking the page size,
+however,
+developers can guarantee that every page holds only a small
+number of records.
+This reduces contention.
+<p>
+If locking is enabled,
+then read and write operations on a database acquire two-phase locks,
+which are held until the transaction completes.
+Which objects are locked and the order of lock acquisition
+depend on the workload for each transaction.
+It is possible for two or more transactions to deadlock,
+so that each is waiting for a lock that is held by another.
+<p>
+Berkeley DB detects deadlocks and automatically rolls back
+one of the transactions.
+This releases the locks that it held
+and allows the other transactions to continue.
+The caller is notified that its transaction did not complete,
+and may restart it.
+Developers can specify the deadlock detection interval
+and the policy to use in choosing a transaction to roll back.
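+<p>
+A one-line sketch of that configuration, assuming the 3.x-style
+<tt>DBENV-&gt;set_lk_detect</tt>
+method called before the environment is opened:
+<blockquote>
+<pre>
+/* Run deadlock detection whenever a lock request conflicts. */
+(void)dbenv-&gt;set_lk_detect(dbenv, DB_LOCK_DEFAULT);
+</pre>
+</blockquote>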
+<p>
+The two-phase locking interfaces are separately callable by applications
+that link Berkeley DB, though few users have needed to use that facility
+directly.
+Using these interfaces,
+Berkeley DB provides a fast,
+platform-portable locking system for general-purpose use.
+It also lets users include non-database objects in a database transaction,
+by controlling access to them exactly as if they were inside the database.
+<p>
+The Berkeley DB two-phase locking facility is built on the fastest correct
+locking primitives that are supported by the underlying architecture.
+In the current implementation, this means that the locking system is
+different on the various UNIX platforms, and is still more different
+on Windows NT. In our experience, the most difficult aspect of performance
+tuning is finding the fastest locking primitives that work correctly
+on a particular architecture and then integrating the new
+interface with the several that we already support.
+<p>
+The world would be a better place if the operating systems community
+would uniformly implement POSIX locking primitives and would guarantee
+that acquiring an uncontested lock was a fast operation.
+Locks must work both among threads in a single process
+and among processes.
+<h2>
+Concurrency
+</h2>
+<p>
+Good performance under concurrent operation is a critical design point
+for Berkeley DB. Although Berkeley DB is itself not multi-threaded,
+it is thread-safe, and runs well in threaded applications.
+Philosophically,
+we view the use of threads and the choice of a threads package
+as a policy decision,
+and prefer to offer mechanism (the ability to run threaded or not),
+allowing applications to choose their own policies.
+<p>
+The locking, logging, and buffer pool subsystems all use shared memory
+or other OS-specific sharing facilities to communicate. Locks, buffer
+pool fetches, and log writes behave in the same way across threads in
+a single process as they do across different processes on a single
+machine.
+<p>
+As a result, concurrent database applications may start up a new process
+for every single user, may create a single server which spawns a new
+thread for every client request, or may choose any policy in between.
+<p>
+Berkeley DB has been carefully designed to minimize contention
+and maximize concurrency.
+The cache manager allows all threads or processes to benefit from
+I/O done by one.
+Shared resources must sometimes be locked for exclusive access
+by one thread of control.
+We have kept critical sections small,
+and are careful not to hold critical resource locks across
+system calls that could deschedule the locking thread or process.
+Sleepycat Software has customers with hundreds of concurrent
+users working on a single database in production.
+<h1>
+Engineering Philosophy
+</h1>
+<p>
+Fundamentally, Berkeley DB is a collection of access methods with
+important facilities, like logging, locking, and transactional access
+underlying them. In both the research and the commercial world,
+the techniques for building systems like Berkeley DB have been well-known
+for a long time.
+<p>
+The key advantage of Berkeley DB is the careful attention that has been
+paid to engineering details throughout its life. We have carefully
+designed the system so that the core facilities, like locking and I/O,
+surface the right interfaces and are otherwise opaque to the caller.
+As programmers, we understand the value of simplicity and have worked
+hard to simplify the interfaces we surface to users of the
+database system.
+<p>
+Berkeley DB avoids limits in the code. It places no practical limit
+on the size of keys, values, or databases; they may grow to occupy
+the available storage space.
+<p>
+The locking and logging subsystems have been carefully crafted to
+reduce contention and improve throughput by shrinking or eliminating
+critical sections, and reducing the sizes of locked regions and log
+entries.
+<p>
+There is nothing in the design or implementation of Berkeley DB that
+pushes the state of the art in database systems. Rather, we have been
+very careful to get the engineering right. The result is a system that
+is superior, as an embedded database system, to any other solution
+available.
+<p>
+Most database systems trade off simplicity for correctness. Either the
+system is easy to use, or it supports concurrent use and survives system
+failures. Berkeley DB, because of its careful design and implementation,
+offers both simplicity and correctness.
+<p>
+The system has a small footprint,
+makes simple operations simple to carry out (inserting a new record takes
+just a few lines of code), and behaves correctly in the face of heavy
+concurrent use, system crashes, and even catastrophic failures like loss
+of a hard disk.
+<h1>
+The Berkeley DB 2.x Distribution
+</h1>
+<p>
+Berkeley DB is distributed in source code form from
+<a href="http://www.sleepycat.com">www.sleepycat.com</a>.
+Users are free to download and build the software, and to use it in
+their applications.
+<h2>
+What is in the distribution
+</h2>
+<p>
+The distribution is a compressed archive file.
+It includes the source code for the Berkeley DB library,
+as well as documentation, test suites, and supporting utilities.
+<p>
+The source code includes build support for all supported platforms.
+On UNIX systems Berkeley DB uses the GNU autoconfiguration tool,
+<tt>autoconf</tt>,
+to identify the system and to build the library
+and supporting utilities.
+Berkeley DB includes specific build environments for other platforms,
+such as VMS and Windows.
+<h3>
+Documentation
+</h3>
+<p>
+The distributed system includes documentation in HTML format.
+The documentation is in two parts:
+a UNIX-style reference manual for use by programmers,
+and a reference guide which is tutorial in nature.
+<h3>
+Test suite
+</h3>
+<p>
+The software also includes a complete test suite, written in Tcl.
+We believe that the test suite is a key advantage of Berkeley DB
+over comparable systems.
+<p>
+First, the test suite allows users who download and build the software
+to be sure that it is operating correctly.
+<p>
+Second, the test suite allows us, like other commercial developers
+of database software, to exercise the system thoroughly at every
+release. When we learn of new bugs, we add them to the test suite.
+We run the test suite continually during development cycles, and
+always prior to release. The result is a much more reliable system
+by the time it reaches beta release.
+<h2>
+Binary distribution
+</h2>
+<p>
+Sleepycat makes compiled libraries and general binary distributions available
+to customers for a fee.
+<h2>
+Supported platforms
+</h2>
+<p>
+Berkeley DB runs on any operating system with a
+POSIX 1003.1 interface <a href="#IEEE96">[IEEE96]</a>,
+which includes virtually every UNIX system.
+In addition,
+the software runs on VMS,
+Windows/95,
+Windows/98,
+and Windows/NT.
+Sleepycat Software no longer supports deployment on sixteen-bit
+Windows systems.
+<h1>
+Berkeley DB 2.x Licensing
+</h1>
+<p>
+Berkeley DB 2.x is distributed as an Open Source product. The software
+is freely available from us at our Web site, and in other media. Users
+are free to download the software and build applications with it.
+<p>
+The 1.x versions of Berkeley DB were covered by the UC Berkeley copyright
+that covers software freely redistributable in source form. When
+Sleepycat Software was formed, we needed to draft a license consistent
+with the copyright governing the existing, older software. Because
+of important differences between the UC Berkeley copyright and the GPL,
+it was impossible for us to use the GPL.
+A second copyright, with
+terms contradictory to the first, simply would not have worked.
+<p>
+Sleepycat wanted to continue Open Source development of Berkeley DB
+for several reasons.
+We agree with Raymond <a href="#Raym98">[Raym98]</a> and others that Open
+Source software is typically of higher quality than proprietary,
+binary-only products.
+Our customers benefit from a community of developers who
+know and use Berkeley DB,
+and can help with application design,
+debugging,
+and performance tuning.
+Widespread distribution and use of the source code tends to
+isolate bugs early,
+and to get fixes back into the distributed system quickly.
+As a result,
+Berkeley DB is more reliable.
+Just as importantly,
+individual users are able to contribute new features
+and performance enhancements,
+to the benefit of everyone who uses Berkeley DB.
+From a business perspective,
+Open Source and free distribution of the
+software creates share for us, and gives us a market into which
+we can sell products and services.
+Finally, making the source code
+freely available reduces our support load, since customers can
+find and fix bugs without recourse to us, in many cases.
+<p>
+To preserve the Open Source heritage of the older Berkeley DB code,
+we drafted a new license governing the distribution of Berkeley DB
+2.x. We adopted terms from the GPL that make it impossible to
+turn our Open Source code into proprietary code owned by someone else.
+<p>
+Briefly, the terms governing the use and distribution of Berkeley DB
+are:
+<ul>
+<li>
+your application must be internal to your site, or
+</li>
+<li>
+your application must be freely redistributable in source form, or
+</li>
+<li>
+you must get a license from us.
+</li>
+</ul>
+<p>
+For customers who prefer not to distribute Open Source products,
+we sell licenses to use and extend Berkeley DB at a reasonable cost.
+<p>
+We work hard to accommodate the needs of the Open Source community.
+For example,
+we have crafted special licensing arrangements with Gnome
+to encourage its use and distribution of Berkeley DB.
+<p>
+Berkeley DB conforms to the Open Source definition <a href="#Open99">[Open99]</a>.
+The license has
+been carefully crafted to keep the product available as an Open Source
+offering,
+while providing enough of a return on our investment to fund continued
+development and support of the product. The current license has
+created a business capable of funding three years of development on
+the software that simply would not have happened otherwise.
+<h1>
+Summary
+</h1>
+<p>
+Berkeley DB offers a unique collection of features, targeted squarely
+at software developers who need simple, reliable database management
+services in their applications. Good design and implementation and
+careful engineering throughout make the software better than many
+other systems.
+<p>
+Berkeley DB is an Open Source product, available for download at
+<a href="http://www.sleepycat.com">www.sleepycat.com</a>.
+The distributed system includes everything needed to
+build and deploy the software or to port it to new systems.
+<p>
+Sleepycat Software distributes Berkeley DB under a license agreement
+that draws on both the UC Berkeley copyright and the GPL. The license
+guarantees that Berkeley DB will remain an Open Source product and
+provides Sleepycat with opportunities to make money to fund continued
+development on the software.
+<h1>
+References
+</h1>
+<table border=0 cellpadding=4 cellspacing=2>
+<tr>
+<td valign="top"><a name="Come79">[Come79]</a></td>
+<td>
+<p>
+Comer, D.,
+&quot;The Ubiquitous B-tree,&quot;
+<i>ACM Computing Surveys</i>
+Volume 11, number 2,
+June 1979.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Gray93">[Gray93]</a>
+</td>
+<td>
+<p>
+Gray, J., and Reuter, A.,
+<i>Transaction Processing: Concepts and Techniques</i>,
+Morgan-Kaufman Publishers,
+1993.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="IEEE96">[IEEE96]</a>
+</td>
+<td>
+<p>
+Institute for Electrical and Electronics Engineers,
+<i>IEEE/ANSI Std 1003.1</i>,
+1996 Edition.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Litw80">[Litw80]</a>
+</td>
+<td>
+<p>
+Litwin, W.,
+&quot;Linear Hashing: A New Tool for File and Table Addressing,&quot;
+<i>Proceedings of the 6th International Conference on Very Large Databases (VLDB)</i>,
+Montreal, Quebec, Canada,
+October 1980.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Open94">[Open94]</a>
+</td>
+<td>
+<p>
+The Open Group,
+<i>Distributed TP: The XA+ Specification, Version 2</i>,
+The Open Group, 1994.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Open99">[Open99]</a>
+</td>
+<td>
+<p>
+Opensource.org,
+&quot;Open Source Definition,&quot;
+<a href="http://www.opensource.org/osd.html"><i>www.opensource.org/osd.html</i></a>,
+version 1.4,
+1999.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Raym98">[Raym98]</a>
+</td>
+<td>
+<p>
+Raymond, E.S.,
+&quot;The Cathedral and the Bazaar,&quot;
+<a href="http://www.tuxedo.org/~esr/writings/cathedral-bazaar/cathedral-bazaar.html">
+www.tuxedo.org/~esr/writings/cathedral-bazaar/cathedral-bazaar.html</a>,
+January 1998.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Selt91">[Selt91]</a>
+</td>
+<td>
+<p>
+Seltzer, M., and Yigit, O.,
+&quot;A New Hashing Package for UNIX,&quot;
+<i>Proceedings 1991 Winter USENIX Conference</i>,
+Dallas, TX,
+January 1991.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Selt92">[Selt92]</a>
+</td>
+<td>
+<p>
+Seltzer, M., and Olson, M.,
+&quot;LIBTP: Portable Modular Transactions for UNIX,&quot;
+<i>Proceedings 1992 Winter USENIX Conference</i>,
+San Francisco, CA,
+January 1992.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Ston82">[Ston82]</a>
+</td>
+<td>
+<p>
+Stonebraker, M., Stettner, H., Kalash, J., Guttman, A., and Lynn, N.,
+&quot;Document Processing in a Relational Database System,&quot;
+Memorandum No. UCB/ERL M82/32,
+University of California at Berkeley,
+Berkeley, CA,
+May 1982.
+</td>
+</tr>
+</table>
+</body>
+</html>
diff --git a/bdb/docs/ref/refs/bdb_usenix.ps b/bdb/docs/ref/refs/bdb_usenix.ps
new file mode 100644
index 00000000000..82e6789719b
--- /dev/null
+++ b/bdb/docs/ref/refs/bdb_usenix.ps
@@ -0,0 +1,1441 @@
+%!PS-Adobe-3.0
+%%Creator: groff version 1.11
+%%CreationDate: Mon Apr 26 13:38:12 1999
+%%DocumentNeededResources: font Times-Bold
+%%+ font Times-Roman
+%%+ font Times-Italic
+%%+ font Courier
+%%DocumentSuppliedResources: procset grops 1.11 0
+%%Pages: 9
+%%PageOrder: Ascend
+%%Orientation: Portrait
+%%EndComments
+%%BeginProlog
+%%BeginResource: procset grops 1.11 0
+/setpacking where{
+pop
+currentpacking
+true setpacking
+}if
+/grops 120 dict dup begin
+/SC 32 def
+/A/show load def
+/B{0 SC 3 -1 roll widthshow}bind def
+/C{0 exch ashow}bind def
+/D{0 exch 0 SC 5 2 roll awidthshow}bind def
+/E{0 rmoveto show}bind def
+/F{0 rmoveto 0 SC 3 -1 roll widthshow}bind def
+/G{0 rmoveto 0 exch ashow}bind def
+/H{0 rmoveto 0 exch 0 SC 5 2 roll awidthshow}bind def
+/I{0 exch rmoveto show}bind def
+/J{0 exch rmoveto 0 SC 3 -1 roll widthshow}bind def
+/K{0 exch rmoveto 0 exch ashow}bind def
+/L{0 exch rmoveto 0 exch 0 SC 5 2 roll awidthshow}bind def
+/M{rmoveto show}bind def
+/N{rmoveto 0 SC 3 -1 roll widthshow}bind def
+/O{rmoveto 0 exch ashow}bind def
+/P{rmoveto 0 exch 0 SC 5 2 roll awidthshow}bind def
+/Q{moveto show}bind def
+/R{moveto 0 SC 3 -1 roll widthshow}bind def
+/S{moveto 0 exch ashow}bind def
+/T{moveto 0 exch 0 SC 5 2 roll awidthshow}bind def
+/SF{
+findfont exch
+[exch dup 0 exch 0 exch neg 0 0]makefont
+dup setfont
+[exch/setfont cvx]cvx bind def
+}bind def
+/MF{
+findfont
+[5 2 roll
+0 3 1 roll
+neg 0 0]makefont
+dup setfont
+[exch/setfont cvx]cvx bind def
+}bind def
+/level0 0 def
+/RES 0 def
+/PL 0 def
+/LS 0 def
+/MANUAL{
+statusdict begin/manualfeed true store end
+}bind def
+/PLG{
+gsave newpath clippath pathbbox grestore
+exch pop add exch pop
+}bind def
+/BP{
+/level0 save def
+1 setlinecap
+1 setlinejoin
+72 RES div dup scale
+LS{
+90 rotate
+}{
+0 PL translate
+}ifelse
+1 -1 scale
+}bind def
+/EP{
+level0 restore
+showpage
+}bind def
+/DA{
+newpath arcn stroke
+}bind def
+/SN{
+transform
+.25 sub exch .25 sub exch
+round .25 add exch round .25 add exch
+itransform
+}bind def
+/DL{
+SN
+moveto
+SN
+lineto stroke
+}bind def
+/DC{
+newpath 0 360 arc closepath
+}bind def
+/TM matrix def
+/DE{
+TM currentmatrix pop
+translate scale newpath 0 0 .5 0 360 arc closepath
+TM setmatrix
+}bind def
+/RC/rcurveto load def
+/RL/rlineto load def
+/ST/stroke load def
+/MT/moveto load def
+/CL/closepath load def
+/FL{
+currentgray exch setgray fill setgray
+}bind def
+/BL/fill load def
+/LW/setlinewidth load def
+/RE{
+findfont
+dup maxlength 1 index/FontName known not{1 add}if dict begin
+{
+1 index/FID ne{def}{pop pop}ifelse
+}forall
+/Encoding exch def
+dup/FontName exch def
+currentdict end definefont pop
+}bind def
+/DEFS 0 def
+/EBEGIN{
+moveto
+DEFS begin
+}bind def
+/EEND/end load def
+/CNT 0 def
+/level1 0 def
+/PBEGIN{
+/level1 save def
+translate
+div 3 1 roll div exch scale
+neg exch neg exch translate
+0 setgray
+0 setlinecap
+1 setlinewidth
+0 setlinejoin
+10 setmiterlimit
+[]0 setdash
+/setstrokeadjust where{
+pop
+false setstrokeadjust
+}if
+/setoverprint where{
+pop
+false setoverprint
+}if
+newpath
+/CNT countdictstack def
+userdict begin
+/showpage{}def
+}bind def
+/PEND{
+clear
+countdictstack CNT sub{end}repeat
+level1 restore
+}bind def
+end def
+/setpacking where{
+pop
+setpacking
+}if
+%%EndResource
+%%IncludeResource: font Times-Bold
+%%IncludeResource: font Times-Roman
+%%IncludeResource: font Times-Italic
+%%IncludeResource: font Courier
+grops begin/DEFS 1 dict def DEFS begin/u{.001 mul}bind def end/RES 72
+def/PL 792 def/LS false def/ENC0[/asciicircum/asciitilde/Scaron/Zcaron
+/scaron/zcaron/Ydieresis/trademark/quotesingle/.notdef/.notdef/.notdef
+/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef
+/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef
+/.notdef/.notdef/space/exclam/quotedbl/numbersign/dollar/percent
+/ampersand/quoteright/parenleft/parenright/asterisk/plus/comma/hyphen
+/period/slash/zero/one/two/three/four/five/six/seven/eight/nine/colon
+/semicolon/less/equal/greater/question/at/A/B/C/D/E/F/G/H/I/J/K/L/M/N/O
+/P/Q/R/S/T/U/V/W/X/Y/Z/bracketleft/backslash/bracketright/circumflex
+/underscore/quoteleft/a/b/c/d/e/f/g/h/i/j/k/l/m/n/o/p/q/r/s/t/u/v/w/x/y
+/z/braceleft/bar/braceright/tilde/.notdef/quotesinglbase/guillemotleft
+/guillemotright/bullet/florin/fraction/perthousand/dagger/daggerdbl
+/endash/emdash/ff/fi/fl/ffi/ffl/dotlessi/dotlessj/grave/hungarumlaut
+/dotaccent/breve/caron/ring/ogonek/quotedblleft/quotedblright/oe/lslash
+/quotedblbase/OE/Lslash/.notdef/exclamdown/cent/sterling/currency/yen
+/brokenbar/section/dieresis/copyright/ordfeminine/guilsinglleft
+/logicalnot/minus/registered/macron/degree/plusminus/twosuperior
+/threesuperior/acute/mu/paragraph/periodcentered/cedilla/onesuperior
+/ordmasculine/guilsinglright/onequarter/onehalf/threequarters
+/questiondown/Agrave/Aacute/Acircumflex/Atilde/Adieresis/Aring/AE
+/Ccedilla/Egrave/Eacute/Ecircumflex/Edieresis/Igrave/Iacute/Icircumflex
+/Idieresis/Eth/Ntilde/Ograve/Oacute/Ocircumflex/Otilde/Odieresis
+/multiply/Oslash/Ugrave/Uacute/Ucircumflex/Udieresis/Yacute/Thorn
+/germandbls/agrave/aacute/acircumflex/atilde/adieresis/aring/ae/ccedilla
+/egrave/eacute/ecircumflex/edieresis/igrave/iacute/icircumflex/idieresis
+/eth/ntilde/ograve/oacute/ocircumflex/otilde/odieresis/divide/oslash
+/ugrave/uacute/ucircumflex/udieresis/yacute/thorn/ydieresis]def
+/Courier@0 ENC0/Courier RE/Times-Italic@0 ENC0/Times-Italic RE
+/Times-Roman@0 ENC0/Times-Roman RE/Times-Bold@0 ENC0/Times-Bold RE
+%%EndProlog
+%%Page: 1 1
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 14/Times-Bold@0 SF(Berk)275.358 100.8 Q(eley DB)-.14 E/F1 12
+/Times-Roman@0 SF(Michael A. Olson)270.372 129.6 Q -.3(Ke)283.182 144 S
+(ith Bostic).3 E(Mar)279.15 158.4 Q(go Seltzer)-.216 E/F2 12
+/Times-Italic@0 SF(Sleepycat Softwar)255.492 174.24 Q .24 -.12(e, I)
+-.444 H(nc.).12 E/F3 12/Times-Bold@0 SF(Abstract)290.874 210.24 Q/F4 10
+/Times-Roman@0 SF(Berk)79.2 226.44 Q(ele)-.1 E 2.925(yD)-.15 G 2.925(Bi)
+-2.925 G 2.924(sa)-2.925 G 2.924(nO)-2.924 G .424
+(pen Source embedded database system with a number of k)-2.924 F .724
+-.15(ey a)-.1 H(dv).15 E .424(antages o)-.25 F -.15(ve)-.15 G 2.924(rc)
+.15 G .424(omparable sys-)-2.924 F 3.102(tems. It)79.2 238.44 R .602(is simple to use, supports concurrent access by multiple users, and pro)
+3.102 F .602(vides industrial-strength transaction)-.15 F 1.555
+(support, including survi)79.2 250.44 R 1.555
+(ving system and disk crashes.)-.25 F 1.554
+(This paper describes the design and technical features of)6.555 F(Berk)
+79.2 262.44 Q(ele)-.1 E 2.5(yD)-.15 G(B, the distrib)-2.5 E
+(ution, and its license.)-.2 E F3 3(1. Intr)79.2 286.44 R(oduction)-.216
+E F4 .691(The Berk)79.2 302.64 R(ele)-.1 E 3.191(yD)-.15 G .691
+(atabase \(Berk)-3.191 F(ele)-.1 E 3.191(yD)-.15 G .692
+(B\) is an embedded)-3.191 F .253
+(database system that can be used in applications requir)79.2 314.64 R
+(-)-.2 E 1.636(ing high-performance concurrent storage and retrie)79.2
+326.64 R -.25(va)-.25 G(l).25 E 2.619(of k)79.2 338.64 R -.15(ey)-.1 G
+(/v).15 E 2.619(alue pairs.)-.25 F 2.619(The softw)7.619 F 2.619
+(are is distrib)-.1 F 2.618(uted as a)-.2 F .057
+(library that can be link)79.2 350.64 R .058
+(ed directly into an application.)-.1 F(It)5.058 E(pro)79.2 362.64 Q
+1.454(vides a v)-.15 F 1.453(ariety of programmatic interf)-.25 F 1.453
+(aces, includ-)-.1 F .237
+(ing callable APIs for C, C++, Perl, Tcl and Ja)79.2 374.64 R -.25(va)
+-.2 G 5.237(.U).25 G(sers)-5.237 E .327(may do)79.2 386.64 R .327
+(wnload Berk)-.25 F(ele)-.1 E 2.827(yD)-.15 G 2.827(Bf)-2.827 G .326
+(rom Sleep)-2.827 F .326(ycat Softw)-.1 F(are')-.1 E(s)-.55 E -.8(We)
+79.2 398.64 S 2.5(bs).8 G(ite, at)-2.5 E/F5 10/Times-Italic@0 SF(www)2.5
+E(.sleepycat.com)-.74 E F4(.)A(Sleep)79.2 414.84 Q 1.33(ycat distrib)-.1
+F 1.33(utes Berk)-.2 F(ele)-.1 E 3.83(yD)-.15 G 3.83(Ba)-3.83 G 3.83(sa)
+-3.83 G 3.83(nO)-3.83 G 1.33(pen Source)-3.83 F 3.3(product. The)79.2
+426.84 R(compan)3.3 E 3.3(yc)-.15 G .8(ollects license fees for certain)
+-3.3 F(uses of the softw)79.2 438.84 Q
+(are and sells support and services.)-.1 E F3 3(1.1. History)79.2 468.84
+R F4(Berk)79.2 485.04 Q(ele)-.1 E 3.057(yD)-.15 G 3.057(Bb)-3.057 G
+-2.25 -.15(eg a)-3.057 H 3.058(na).15 G 3.058(san)-3.058 G 1.058 -.25
+(ew i)-3.058 H .558(mplementation of a hash).25 F .843
+(access method to replace both)79.2 497.04 R/F6 10/Courier@0 SF(hsearch)
+3.342 E F4 .842(and the v)3.342 F(ari-)-.25 E(ous)79.2 509.04 Q F6(dbm)
+5.466 E F4 2.967(implementations \()5.466 F F6(dbm)A F4 2.967(from A)
+5.467 F(T&T)-1.11 E(,)-.74 E F6(ndbm)5.467 E F4 1.334(from Berk)79.2
+521.04 R(ele)-.1 E 2.634 -.65(y, a)-.15 H(nd).65 E F6(gdbm)3.834 E F4
+1.334(from the GNU project\).)3.834 F(In)6.333 E .367
+(1990 Seltzer and Y)79.2 533.04 R .368
+(igit produced a package called Hash)-.55 F(to do this [Selt91].)79.2
+545.04 Q 3.106(The \214rst general release of Berk)79.2 561.24 R(ele)-.1
+E 5.606(yD)-.15 G 3.106(B, in 1991,)-5.606 F 3.038(included some interf)
+79.2 573.24 R 3.039(ace changes and a ne)-.1 F 5.539(wB)-.25 G(+tree)
+-5.539 E .887(access method.)79.2 585.24 R .886
+(At roughly the same time, Seltzer and)5.887 F 1.201(Olson de)79.2
+597.24 R -.15(ve)-.25 G 1.202
+(loped a prototype transaction system based).15 F 3.356(on Berk)79.2
+609.24 R(ele)-.1 E 5.856(yD)-.15 G 3.356(B, called LIBTP [Selt92], b)
+-5.856 F 3.355(ut ne)-.2 F -.15(ve)-.25 G(r).15 E(released the code.)
+79.2 621.24 Q .653(The 4.4BSD UNIX release included Berk)79.2 637.44 R
+(ele)-.1 E 3.153(yD)-.15 G 3.153(B1)-3.153 G(.85)-3.153 E .602(in 1992.)
+79.2 649.44 R .601(Seltzer and Bostic maintained the code in the)5.601 F
+1.545(early 1990s in Berk)79.2 661.44 R(ele)-.1 E 4.046(ya)-.15 G 1.546
+(nd in Massachusetts.)-4.046 F(Man)6.546 E(y)-.15 E
+(users adopted the code during this period.)79.2 673.44 Q .432
+(By mid-1996, users w)79.2 689.64 R .431
+(anted commercial support for the)-.1 F(softw)79.2 701.64 Q 7.033
+(are. In)-.1 F 4.533(response, Bostic and Seltzer formed)7.033 F(Sleep)
+79.2 713.64 Q 10.128(ycat Softw)-.1 F 12.628(are. The)-.1 F(compan)
+12.627 E 15.127(ye)-.15 G(nhances,)-15.127 E(distrib)323.2 286.44 Q
+1.623(utes, and supports Berk)-.2 F(ele)-.1 E 4.123(yD)-.15 G 4.124(Ba)
+-4.123 G 1.624(nd supporting)-4.124 F(softw)323.2 298.44 Q 2.2
+(are and documentation.)-.1 F(Sleep)7.2 E 2.2(ycat released v)-.1 F(er)
+-.15 E(-)-.2 E 1.677(sion 2.1 of Berk)323.2 310.44 R(ele)-.1 E 4.177(yD)
+-.15 G 4.178(Bi)-4.177 G 4.178(nm)-4.178 G 1.678(id-1997 with important)
+-4.178 F(ne)323.2 322.44 Q 2.56(wf)-.25 G .06
+(eatures, including support for concurrent access to)-2.56 F 4.176
+(databases. The)323.2 334.44 R(compan)4.176 E 4.177(ym)-.15 G(ak)-4.177
+E 1.677(es about three commer)-.1 F(-)-.2 E .958(cial releases a year)
+323.2 346.44 R 3.458(,a)-.4 G .957(nd most recently shipped v)-3.458 F
+(ersion)-.15 E(2.8.)323.2 358.44 Q F3 3(1.2. Ov)323.2 388.44 R(er)-.12 E
+(view of Berk)-.12 E(eley DB)-.12 E F4 3.094(The C interf)323.2 404.64 R
+3.094(aces in Berk)-.1 F(ele)-.1 E 5.594(yD)-.15 G 5.595(Bp)-5.594 G
+(ermit)-5.595 E F6(dbm)5.595 E F4(-style)A 4.586
+(record management for databases, with signi\214cant)323.2 416.64 R -.15
+(ex)323.2 428.64 S 1.273(tensions to handle duplicate data items ele).15
+F -.05(ga)-.15 G(ntly).05 E 3.773(,t)-.65 G(o)-3.773 E 2.427
+(deal with concurrent access, and to pro)323.2 440.64 R 2.427
+(vide transac-)-.15 F .71
+(tional support so that multiple changes can be simulta-)323.2 452.64 R
+1.273(neously committed \(so that the)323.2 464.64 R 3.773(ya)-.15 G
+1.273(re made permanent\))-3.773 F 1.848
+(or rolled back \(so that the database is restored to its)323.2 476.64 R
+(state at the be)323.2 488.64 Q(ginning of the transaction\).)-.15 E
+1.034(C++ and Ja)323.2 504.84 R 1.534 -.25(va i)-.2 H(nterf).25 E 1.033
+(aces pro)-.1 F 1.033(vide a small set of classes)-.15 F 1.961
+(for operating on a database.)323.2 516.84 R 1.961
+(The main class in both)6.961 F .587(cases is called)323.2 528.84 R F6
+(Db)3.086 E F4 3.086(,a)C .586(nd pro)-3.086 F .586
+(vides methods that encapsu-)-.15 F 1.128(late the)323.2 540.84 R F6
+(dbm)3.628 E F4 1.129(-style interf)B 1.129(aces that the C interf)-.1 F
+1.129(aces pro-)-.1 F(vide.)323.2 552.84 Q 2.565(Tcl and Perl interf)
+323.2 569.04 R 2.564(aces allo)-.1 F 5.064(wd)-.25 G -2.15 -.25(ev e)
+-5.064 H 2.564(lopers w).25 F 2.564(orking in)-.1 F 1.716
+(those languages to use Berk)323.2 581.04 R(ele)-.1 E 4.216(yD)-.15 G
+4.216(Bi)-4.216 G 4.217(nt)-4.216 G 1.717(heir applica-)-4.217 F 3.419
+(tions. Bindings)323.2 593.04 R .919
+(for both languages are included in the)3.419 F(distrib)323.2 605.04 Q
+(ution.)-.2 E(De)323.2 621.24 Q -.15(ve)-.25 G 1.069
+(lopers may compile their applications and link in).15 F(Berk)323.2
+633.24 Q(ele)-.1 E 2.5(yD)-.15 G 2.5(Bs)-2.5 G(tatically or dynamically)
+-2.5 E(.)-.65 E F3 3(1.3. Ho)323.2 663.24 R 3(wB)-.12 G(erk)-3 E
+(eley DB is used)-.12 E F4 .655(The Berk)323.2 679.44 R(ele)-.1 E 3.155
+(yD)-.15 G 3.154(Bl)-3.155 G .654(ibrary supports concurrent access to)
+-3.154 F 5.115(databases. It)323.2 691.44 R 2.616(can be link)5.115 F
+2.616(ed into standalone applica-)-.1 F 1.487
+(tions, into a collection of cooperating applications, or)323.2 703.44 R
+4.21(into serv)323.2 715.44 R 4.21
+(ers that handle requests and do database)-.15 F EP
+%%Page: 2 2
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF(operations on behalf of clients.)79.2 84 Q .858
+(Compared to using a standalone database management)79.2 100.2 R .846
+(system, Berk)79.2 112.2 R(ele)-.1 E 3.346(yD)-.15 G 3.346(Bi)-3.346 G
+3.346(se)-3.346 G .846(asy to understand and simple)-3.346 F 3.826
+(to use.)79.2 124.2 R 3.826(The softw)8.826 F 3.826
+(are stores and retrie)-.1 F -.15(ve)-.25 G 6.325(sr).15 G(ecords,)
+-6.325 E 2.77(which consist of k)79.2 136.2 R -.15(ey)-.1 G(/v).15 E
+2.77(alue pairs.)-.25 F -2.15 -.25(Ke y)7.77 H 5.27(sa).25 G 2.77
+(re used to)-5.27 F .698(locate items and can be an)79.2 148.2 R 3.198
+(yd)-.15 G .698(ata type or structure sup-)-3.198 F
+(ported by the programming language.)79.2 160.2 Q .813
+(The programmer can pro)79.2 176.4 R .813(vide the functions that Berk)
+-.15 F(e-)-.1 E(le)79.2 188.4 Q 3.264(yD)-.15 G 3.264(Bu)-3.264 G .763
+(ses to operate on k)-3.264 F -.15(ey)-.1 G 3.263(s. F).15 F .763(or e)
+-.15 F .763(xample, B+trees)-.15 F 1.72
+(can use a custom comparison function, and the Hash)79.2 200.4 R .519
+(access method can use a custom hash function.)79.2 212.4 R(Berk)5.518 E
+(e-)-.1 E(le)79.2 224.4 Q 5.222(yD)-.15 G 5.222(Bu)-5.222 G 2.722
+(ses def)-5.222 F 2.723(ault functions if none are supplied.)-.1 F .873
+(Otherwise, Berk)79.2 236.4 R(ele)-.1 E 3.373(yD)-.15 G 3.373(Bd)-3.373
+G .873(oes not e)-3.373 F .873(xamine or interpret)-.15 F .934(either k)
+79.2 248.4 R -.15(ey)-.1 G 3.434(so).15 G 3.434(rv)-3.434 G .934
+(alues in an)-3.684 F 3.434(yw)-.15 G(ay)-3.534 E 5.934(.V)-.65 G .934
+(alues may be arbi-)-7.044 F(trarily long.)79.2 260.4 Q .69
+(It is also important to understand what Berk)79.2 276.6 R(ele)-.1 E
+3.19(yD)-.15 G 3.19(Bi)-3.19 G(s)-3.19 E 4.365(not. It)79.2 288.6 R
+1.865(is not a database serv)4.365 F 1.866(er that handles netw)-.15 F
+(ork)-.1 E 2.797(requests. It)79.2 300.6 R .297
+(is not an SQL engine that e)2.797 F -.15(xe)-.15 G .296(cutes queries.)
+.15 F 1.547(It is not a relational or object-oriented database man-)79.2
+312.6 R(agement system.)79.2 324.6 Q 1.101(It is possible to b)79.2
+340.8 R 1.101(uild an)-.2 F 3.601(yo)-.15 G 3.601(ft)-3.601 G 1.101
+(hose on top of Berk)-3.601 F(ele)-.1 E(y)-.15 E 2.116(DB, b)79.2 352.8
+R 2.116(ut the package, as distrib)-.2 F 2.117(uted, is an embedded)-.2
+F 1.444(database engine.)79.2 364.8 R 1.444
+(It has been designed to be portable,)6.444 F(small, f)79.2 376.8 Q
+(ast, and reliable.)-.1 E/F1 12/Times-Bold@0 SF 3(1.4. A)79.2 406.8 R
+(pplications that use Berk)-.3 E(eley DB)-.12 E F0(Berk)79.2 423 Q(ele)
+-.1 E 4.248(yD)-.15 G 4.248(Bi)-4.248 G 4.249(se)-4.248 G 1.749
+(mbedded in a v)-4.249 F 1.749(ariety of proprietary)-.25 F 3.84
+(and Open Source softw)79.2 435 R 3.84(are packages.)-.1 F 3.84
+(This section)8.84 F(highlights a fe)79.2 447 Q 2.5(wo)-.25 G 2.5(ft)
+-2.5 G(he products that use it.)-2.5 E 1.467(Directory serv)79.2 463.2 R
+1.467(ers, which do data storage and retrie)-.15 F -.25(va)-.25 G(l).25
+E 2.823(using the Local Directory Access Protocol \(LD)79.2 475.2 R
+(AP\),)-.4 E(pro)79.2 487.2 Q .956
+(vide naming and directory lookup service on local-)-.15 F 2.837
+(area netw)79.2 499.2 R 5.337(orks. This)-.1 F 2.837
+(service is, essentially)5.337 F 5.336(,d)-.65 G(atabase)-5.336 E .039
+(query and update, b)79.2 511.2 R .039
+(ut uses a simple protocol rather than)-.2 F 2.202(SQL or ODBC.)79.2
+523.2 R(Berk)7.201 E(ele)-.1 E 4.701(yD)-.15 G 4.701(Bi)-4.701 G 4.701
+(st)-4.701 G 2.201(he embedded data)-4.701 F 1.288
+(manager in the majority of deplo)79.2 535.2 R 1.289(yed directory serv)
+-.1 F(ers)-.15 E(today)79.2 547.2 Q 4.855(,i)-.65 G 2.355(ncluding LD)
+-4.855 F 2.355(AP serv)-.4 F 2.355(ers from Netscape, Mes-)-.15 F
+(sageDirect \(formerly Isode\), and others.)79.2 559.2 Q(Berk)79.2 575.4
+Q(ele)-.1 E 4.385(yD)-.15 G 4.385(Bi)-4.385 G 4.385(sa)-4.385 G 1.886
+(lso embedded in a lar)-4.385 F 1.886(ge number of)-.18 F 5.302
+(mail serv)79.2 587.4 R 7.802(ers. Intermail,)-.15 F 5.302(from Softw)
+7.802 F 5.302(are.com, uses)-.1 F(Berk)79.2 599.4 Q(ele)-.1 E 4.613(yD)
+-.15 G 4.613(Ba)-4.613 G 4.613(sam)-4.613 G 2.114
+(essage store and as the backing)-4.613 F 3.597
+(store for its directory serv)79.2 611.4 R(er)-.15 E 8.597(.T)-.55 G
+3.597(he sendmail serv)-8.597 F(er)-.15 E 1.175
+(\(including both the commercial Sendmail Pro of)79.2 623.4 R(fering)
+-.25 E 3.283(from Sendmail, Inc. and the v)79.2 635.4 R 3.283
+(ersion distrib)-.15 F 3.282(uted by)-.2 F(sendmail.or)79.2 647.4 Q
+2.304(g\) uses Berk)-.18 F(ele)-.1 E 4.804(yD)-.15 G 4.804(Bt)-4.804 G
+4.804(os)-4.804 G 2.305(tore aliases and)-4.804 F 9.01
+(other information.)79.2 659.4 R(Similarly)14.01 E 11.51(,P)-.65 G 9.01
+(ost\214x \(formerly)-11.51 F 3.465(VMailer\) uses Berk)79.2 671.4 R
+(ele)-.1 E 5.965(yD)-.15 G 5.965(Bt)-5.965 G 5.965(os)-5.965 G 3.465
+(tore administrati)-5.965 F -.15(ve)-.25 G(information.)79.2 683.4 Q
+.134(In addition, Berk)79.2 699.6 R(ele)-.1 E 2.634(yD)-.15 G 2.633(Bi)
+-2.634 G 2.633(se)-2.633 G .133(mbedded in a wide v)-2.633 F(ariety)-.25
+E 4.994(of other softw)79.2 711.6 R 4.994(are products.)-.1 F 4.994
+(Example applications)9.994 F .373
+(include managing access control lists, storing user k)323.2 84 R -.15
+(ey)-.1 G(s).15 E 2.75(in a public-k)323.2 96 R 3.05 -.15(ey i)-.1 H
+2.75(nfrastructure, recording machine-to-).15 F(netw)323.2 108 Q .519
+(ork-address mappings in address serv)-.1 F .518(ers, and stor)-.15 F(-)
+-.2 E .411(ing con\214guration and de)323.2 120 R .412
+(vice information in video post-)-.25 F(production softw)323.2 132 Q
+(are.)-.1 E(Finally)323.2 148.2 Q 4.978(,B)-.65 G(erk)-4.978 E(ele)-.1 E
+4.978(yD)-.15 G 4.978(Bi)-4.978 G 4.978(sap)-4.978 G 2.478(art of man)
+-4.978 F 4.977(yo)-.15 G 2.477(ther Open)-4.977 F .005(Source softw)
+323.2 160.2 R .005(are packages a)-.1 F -.25(va)-.2 G .006
+(ilable on the Internet.).25 F -.15(Fo)5.006 G(r).15 E -.15(ex)323.2
+172.2 S .604(ample, the softw).15 F .604
+(are is embedded in the Apache W)-.1 F(eb)-.8 E(serv)323.2 184.2 Q
+(er and the Gnome desktop.)-.15 E F1 3(2. Access)323.2 214.2 R(Methods)3
+E F0 .828(In database terminology)323.2 230.4 R 3.329(,a)-.65 G 3.329
+(na)-3.329 G .829(ccess method is the disk-)-3.329 F 1.964
+(based structure used to store data and the operations)323.2 242.4 R -.2
+(av)323.2 254.4 S 6.053(ailable on that structure.)-.05 F -.15(Fo)11.053
+G 8.554(re).15 G 6.054(xample, man)-8.704 F(y)-.15 E 3.853
+(database systems support a B+tree access method.)323.2 266.4 R 1.203
+(B+trees allo)323.2 278.4 R 3.703(we)-.25 G 1.203
+(quality-based lookups \(\214nd k)-3.703 F -.15(ey)-.1 G 3.704(se).15 G
+(qual)-3.704 E 4(to some constant\), range-based lookups \(\214nd k)
+323.2 290.4 R -.15(ey)-.1 G(s).15 E 1.188(between tw)323.2 302.4 R 3.688
+(oc)-.1 G 1.189(onstants\) and record insertion and dele-)-3.688 F
+(tion.)323.2 314.4 Q(Berk)323.2 330.6 Q(ele)-.1 E 4.729(yD)-.15 G 4.729
+(Bs)-4.729 G 2.228(upports three access methods: B+tree,)-4.729 F 1.553
+(Extended Linear Hashing \(Hash\), and Fix)323.2 342.6 R 1.553(ed- or V)
+-.15 F(ari-)-1.11 E 3.639(able-length Records \(Recno\).)323.2 354.6 R
+3.638(All three operate on)8.638 F 1.956(records composed of a k)323.2
+366.6 R 2.256 -.15(ey a)-.1 H 1.956(nd a data v).15 F 4.456(alue. In)
+-.25 F(the)4.456 E 1.301(B+tree and Hash access methods, k)323.2 378.6 R
+-.15(ey)-.1 G 3.801(sc).15 G 1.301(an ha)-3.801 F 1.601 -.15(ve a)-.2 H
+(rbi-).15 E 3.595(trary structure.)323.2 390.6 R 3.596
+(In the Recno access method, each)8.595 F .266
+(record is assigned a record number)323.2 402.6 R 2.765(,w)-.4 G .265
+(hich serv)-2.765 F .265(es as the)-.15 F -.1(ke)323.2 414.6 S 4.106
+-.65(y. I)-.05 H 2.806(na).65 G .306(ll the access methods, the v)-2.806
+F .306(alue can ha)-.25 F .606 -.15(ve a)-.2 H(rbi-).15 E 1.417
+(trary structure.)323.2 426.6 R 1.417
+(The programmer can supply compari-)6.417 F 2.129
+(son or hashing functions for k)323.2 438.6 R -.15(ey)-.1 G 2.129
+(s, and Berk).15 F(ele)-.1 E 4.629(yD)-.15 G(B)-4.629 E
+(stores and retrie)323.2 450.6 Q -.15(ve)-.25 G 2.5(sv).15 G
+(alues without interpreting them.)-2.75 E 1.069
+(All of the access methods use the host \214lesystem as a)323.2 466.8 R
+(backing store.)323.2 478.8 Q F1 3(2.1. Hash)323.2 508.8 R F0(Berk)323.2
+525 Q(ele)-.1 E 6.485(yD)-.15 G 6.485(Bi)-6.485 G 3.986
+(ncludes a Hash access method that)-6.485 F 9.863(implements e)323.2 537
+R 9.862(xtended linear hashing [Litw80].)-.15 F .017
+(Extended linear hashing adjusts the hash function as the)323.2 549 R
+.507(hash table gro)323.2 561 R .506(ws, attempting to k)-.25 F .506
+(eep all b)-.1 F(uck)-.2 E .506(ets under)-.1 F(-)-.2 E
+(full in the steady state.)323.2 573 Q 1.649
+(The Hash access method supports insertion and dele-)323.2 589.2 R .259
+(tion of records and lookup by e)323.2 601.2 R .259(xact match only)-.15
+F 5.258(.A)-.65 G(ppli-)-5.258 E .038(cations may iterate o)323.2 613.2
+R -.15(ve)-.15 G 2.538(ra).15 G .038(ll records stored in a table, b)
+-2.538 F(ut)-.2 E(the order in which the)323.2 625.2 Q 2.5(ya)-.15 G
+(re returned is unde\214ned.)-2.5 E F1 3(2.2. B+tr)323.2 655.2 R(ee)
+-.216 E F0(Berk)323.2 671.4 Q(ele)-.1 E 7.184(yD)-.15 G 7.184(Bi)-7.184
+G 4.683(ncludes a B+tree [Come79] access)-7.184 F 2.502(method. B+trees)
+323.2 683.4 R .002(store records of k)2.502 F -.15(ey)-.1 G(/v).15 E
+.003(alue pairs in leaf)-.25 F .52(pages, and pairs of \(k)323.2 695.4 R
+-.15(ey)-.1 G 3.02(,c)-.5 G .52(hild page address\) at internal)-3.02 F
+5.384(nodes. K)323.2 707.4 R -.15(ey)-.25 G 5.384(si).15 G 5.384(nt)
+-5.384 G 2.885(he tree are stored in sorted order)-5.384 F(,)-.4 E EP
+%%Page: 3 3
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF .576
+(where the order is determined by the comparison func-)79.2 84 R .815
+(tion supplied when the database w)79.2 96 R .815(as created.)-.1 F -.15
+(Pa)5.815 G .815(ges at).15 F .389(the leaf le)79.2 108 R -.15(ve)-.25 G
+2.889(lo).15 G 2.889(ft)-2.889 G .389
+(he tree include pointers to their neigh-)-2.889 F 1.444
+(bors to simplify tra)79.2 120 R -.15(ve)-.2 G 3.944(rsal. B+trees).15 F
+1.445(support lookup by)3.944 F -.15(ex)79.2 132 S .068
+(act match \(equality\) or range \(greater than or equal to).15 F 2.891
+(ak)79.2 144 S -.15(ey)-2.991 G 2.891(\). Lik).15 F 2.891(eH)-.1 G .391
+(ash tables, B+trees support record inser)-2.891 F(-)-.2 E
+(tion, deletion, and iteration o)79.2 156 Q -.15(ve)-.15 G 2.5(ra).15 G
+(ll records in the tree.)-2.5 E .646
+(As records are inserted and pages in the B+tree \214ll up,)79.2 172.2 R
+(the)79.2 184.2 Q 2.722(ya)-.15 G .223(re split, with about half the k)
+-2.722 F -.15(ey)-.1 G 2.723(sg).15 G .223(oing into a ne)-2.723 F(w)
+-.25 E 1.603(peer page at the same le)79.2 196.2 R -.15(ve)-.25 G 4.103
+(li).15 G 4.103(nt)-4.103 G 1.603(he tree.)-4.103 F 1.603(Most B+tree)
+6.603 F .387(implementations lea)79.2 208.2 R .687 -.15(ve b)-.2 H .387
+(oth nodes half-full after a split.).15 F 2.763
+(This leads to poor performance in a common case,)79.2 220.2 R 1.522
+(where the caller inserts k)79.2 232.2 R -.15(ey)-.1 G 4.022(si).15 G
+4.022(no)-4.022 G(rder)-4.022 E 6.522(.T)-.55 G 4.023(oh)-7.322 G 1.523
+(andle this)-4.023 F 1.643(case, Berk)79.2 244.2 R(ele)-.1 E 4.143(yD)
+-.15 G 4.143(Bk)-4.143 G 1.642(eeps track of the insertion order)-4.243
+F(,)-.4 E 2.023(and splits pages une)79.2 256.2 R -.15(ve)-.25 G 2.024
+(nly to k).15 F 2.024(eep pages fuller)-.1 F 7.024(.T)-.55 G(his)-7.024
+E 2.3(reduces tree size, yielding better search performance)79.2 268.2 R
+(and smaller databases.)79.2 280.2 Q 3.177
+(On deletion, empty pages are coalesced by re)79.2 296.4 R -.15(ve)-.25
+G(rse).15 E 2.03(splits into single pages.)79.2 308.4 R 2.03
+(The access method does no)7.03 F .347
+(other page balancing on insertion or deletion.)79.2 320.4 R -2.15 -.25
+(Ke y)5.348 H 2.848(sa).25 G(re)-2.848 E 1.927(not mo)79.2 332.4 R -.15
+(ve)-.15 G 4.427(da).15 G 1.927(mong pages at e)-4.427 F -.15(ve)-.25 G
+1.926(ry update to k).15 F 1.926(eep the)-.1 F 2.206
+(tree well-balanced.)79.2 344.4 R 2.207(While this could impro)7.206 F
+2.507 -.15(ve s)-.15 H(earch).15 E 2.341
+(times in some cases, the additional code comple)79.2 356.4 R(xity)-.15
+E(leads to slo)79.2 368.4 Q(wer updates and is prone to deadlocks.)-.25
+E -.15(Fo)79.2 384.6 S 2.948(rs).15 G(implicity)-2.948 E 2.948(,B)-.65 G
+(erk)-2.948 E(ele)-.1 E 2.949(yD)-.15 G 2.949(BB)-2.949 G .449
+(+trees do no pre\214x com-)-2.949 F(pression of k)79.2 396.6 Q -.15(ey)
+-.1 G 2.5(sa).15 G 2.5(ti)-2.5 G(nternal or leaf nodes.)-2.5 E/F1 12
+/Times-Bold@0 SF 3(2.3. Recno)79.2 426.6 R F0(Berk)79.2 442.8 Q(ele)-.1
+E 2.736(yD)-.15 G 2.736(Bi)-2.736 G .236(ncludes a \214x)-2.736 F .236
+(ed- or v)-.15 F .235(ariable-length record)-.25 F 5.075
+(access method, called)79.2 454.8 R/F2 10/Times-Italic@0 SF(Recno)7.575
+E F0 10.075(.T)C 5.075(he Recno access)-10.075 F .896
+(method assigns logical record numbers to each record,)79.2 466.8 R .978
+(and can search for and update records by record num-)79.2 478.8 R(ber)
+79.2 490.8 Q 5.037(.R)-.55 G .037(ecno is able, for e)-5.037 F .037
+(xample, to load a te)-.15 F .036(xt \214le into a)-.15 F 1.514
+(database, treating each line as a record.)79.2 502.8 R 1.514
+(This permits)6.514 F -.1(fa)79.2 514.8 S 1.313
+(st searches by line number for applications lik).1 F 3.812(et)-.1 G
+-.15(ex)-3.812 G(t).15 E(editors [Ston82].)79.2 526.8 Q 2.59
+(Recno is actually b)79.2 543 R 2.59(uilt on top of the B+tree access)
+-.2 F 3.192(method and pro)79.2 555 R 3.191(vides a simple interf)-.15 F
+3.191(ace for storing)-.1 F 3.14(sequentially-ordered data v)79.2 567 R
+5.64(alues. The)-.25 F 3.14(Recno access)5.64 F 2.266
+(method generates k)79.2 579 R -.15(ey)-.1 G 4.766(si).15 G(nternally)
+-4.766 E 7.266(.T)-.65 G 2.266(he programmer')-7.266 F(s)-.55 E(vie)79.2
+591 Q 4.102(wo)-.25 G 4.102(ft)-4.102 G 1.602(he v)-4.102 F 1.602
+(alues is that the)-.25 F 4.102(ya)-.15 G 1.603(re numbered sequen-)
+-4.102 F .254(tially from one.)79.2 603 R(De)5.254 E -.15(ve)-.25 G .254
+(lopers can choose to ha).15 F .553 -.15(ve r)-.2 H(ecords).15 E 9
+(automatically renumbered when lo)79.2 615 R(wer)-.25 E(-numbered)-.2 E
+.041(records are added or deleted.)79.2 627 R .041(In this case, ne)
+5.041 F 2.541(wk)-.25 G -.15(ey)-2.641 G 2.541(sc).15 G(an)-2.541 E
+(be inserted between e)79.2 639 Q(xisting k)-.15 E -.15(ey)-.1 G(s.).15
+E F1 3(3. F)79.2 669 R(eatur)-.3 E(es)-.216 E F0 1.827
+(This section describes important features of Berk)79.2 685.2 R(ele)-.1
+E(y)-.15 E 3.456(DB. In)79.2 697.2 R .956(general, de)3.456 F -.15(ve)
+-.25 G .956(lopers can choose which features).15 F .488
+(are useful to them, and use only those that are required)79.2 709.2 R
+(by their application.)323.2 84 Q -.15(Fo)323.2 100.2 S 3.529(re).15 G
+1.029(xample, when an application opens a database, it)-3.679 F .101
+(can declare the de)323.2 112.2 R .101(gree of concurrenc)-.15 F 2.601
+(ya)-.15 G .102(nd reco)-2.601 F -.15(ve)-.15 G .102(ry that).15 F .049
+(it requires.)323.2 124.2 R .048
+(Simple stand-alone applications, and in par)5.049 F(-)-.2 E .491
+(ticular ports of applications that used)323.2 136.2 R/F3 10/Courier@0
+SF(dbm)2.991 E F0 .491(or one of its)2.991 F -.25(va)323.2 148.2 S 1.093
+(riants, generally do not require concurrent access or).25 F .975
+(crash reco)323.2 160.2 R -.15(ve)-.15 G(ry).15 E 5.975(.O)-.65 G .975
+(ther applications, such as enterprise-)-5.975 F 3.08
+(class database management systems that store sales)323.2 172.2 R 2.643
+(transactions or other critical data, need full transac-)323.2 184.2 R
+3.93(tional service.)323.2 196.2 R 3.93(Single-user operation is f)8.93
+F 3.93(aster than)-.1 F 1.175(multi-user operation, since no o)323.2
+208.2 R -.15(ve)-.15 G 1.176(rhead is incurred by).15 F 3.156
+(locking. Running)323.2 220.2 R .656(with the reco)3.156 F -.15(ve)-.15
+G .655(ry system disabled is).15 F -.1(fa)323.2 232.2 S 1.732
+(ster than running with it enabled, since log records).1 F 2.703
+(need not be written when changes are made to the)323.2 244.2 R
+(database.)323.2 256.2 Q .851
+(In addition, some core subsystems, including the lock-)323.2 272.4 R
+.345(ing system and the logging f)323.2 284.4 R(acility)-.1 E 2.844(,c)
+-.65 G .344(an be used outside)-2.844 F 1.772(the conte)323.2 296.4 R
+1.772(xt of the access methods as well.)-.15 F(Although)6.773 E(fe)323.2
+308.4 Q 4.284(wu)-.25 G 1.784(sers ha)-4.284 F 2.084 -.15(ve c)-.2 H
+1.784(hosen to do so, it is possible to use).15 F .939
+(only the lock manager in Berk)323.2 320.4 R(ele)-.1 E 3.439(yD)-.15 G
+3.439(Bt)-3.439 G 3.439(oc)-3.439 G .939(ontrol con-)-3.439 F(currenc)
+323.2 332.4 Q 4.743(yi)-.15 G 4.743(na)-4.743 G 4.743(na)-4.743 G 2.242
+(pplication, without using an)-4.743 F 4.742(yo)-.15 G 4.742(ft)-4.742 G
+(he)-4.742 E .158(standard database services.)323.2 344.4 R(Alternati)
+5.158 E -.15(ve)-.25 G(ly).15 E 2.658(,t)-.65 G .159(he caller can)
+-2.658 F(inte)323.2 356.4 Q .07
+(grate locking of non-database resources with Berk)-.15 F(e-)-.1 E(le)
+323.2 368.4 Q 5.201(yD)-.15 G(B')-5.201 E 5.201(st)-.55 G 2.702
+(ransactional tw)-5.201 F 2.702(o-phase locking system, to)-.1 F 2.892
+(impose transaction semantics on objects outside the)323.2 380.4 R
+(database.)323.2 392.4 Q F1 3(3.1. Pr)323.2 422.4 R
+(ogrammatic interfaces)-.216 E F0(Berk)323.2 438.6 Q(ele)-.1 E 4.008(yD)
+-.15 G 4.008(Bd)-4.008 G 1.509(e\214nes a simple API for database man-)
+-4.008 F 3.452(agement. The)323.2 450.6 R .952
+(package does not include industry-stan-)3.452 F 1.898
+(dard programmatic interf)323.2 462.6 R 1.898
+(aces such as Open Database)-.1 F(Connecti)323.2 474.6 Q .852
+(vity \(ODBC\), Object Linking and Embedding)-.25 F .817
+(for Databases \(OleDB\), or Structured Query Language)323.2 486.6 R
+4.027(\(SQL\). These)323.2 498.6 R(interf)4.027 E 1.527
+(aces, while useful, were designed)-.1 F 2.477
+(to promote interoperability of database systems, and)323.2 510.6 R
+(not simplicity or performance.)323.2 522.6 Q 3.192
+(In response to customer demand, Berk)323.2 538.8 R(ele)-.1 E 5.691(yD)
+-.15 G 5.691(B2)-5.691 G(.5)-5.691 E .538
+(introduced support for the XA standard [Open94].)323.2 550.8 R(XA)5.539
+E .52(permits Berk)323.2 562.8 R(ele)-.1 E 3.02(yD)-.15 G 3.02(Bt)-3.02
+G 3.02(op)-3.02 G .52(articipate in distrib)-3.02 F .52(uted trans-)-.2
+F 3.373(actions under a transaction processing monitor lik)323.2 574.8 R
+(e)-.1 E -.45(Tu)323.2 586.8 S -.15(xe).45 G 1.31(do from BEA Systems.)
+.15 F(Lik)6.31 E 3.81(eX)-.1 G 1.31(A, other standard)-3.81 F(interf)
+323.2 598.8 Q .99(aces can be b)-.1 F .99
+(uilt on top of the core system.)-.2 F(The)5.99 E .846
+(standards do not belong inside Berk)323.2 610.8 R(ele)-.1 E 3.346(yD)
+-.15 G .846(B, since not)-3.346 F(all applications need them.)323.2
+622.8 Q F1 3(3.2. W)323.2 652.8 R(orking with r)-.9 E(ecords)-.216 E F0
+3.134(Ad)323.2 669 S .634
+(atabase user may need to search for particular k)-3.134 F -.15(ey)-.1 G
+(s).15 E .908(in a database, or may simply w)323.2 681 R .908
+(ant to bro)-.1 F .907(wse a)-.25 F -.25(va)-.2 G(ilable).25 E 4.101
+(records. Berk)323.2 693 R(ele)-.1 E 4.101(yD)-.15 G 4.101(Bs)-4.101 G
+1.601(upports both k)-4.101 F -.15(ey)-.1 G 1.602(ed access, to).15 F
+.173(\214nd one or more records with a gi)323.2 705 R -.15(ve)-.25 G
+2.673(nk).15 G -.15(ey)-2.773 G 2.673(,o)-.5 G 2.673(rs)-2.673 G
+(equential)-2.673 E .53(access, to retrie)323.2 717 R .83 -.15(ve a)-.25
+H .53(ll the records in the database one at).15 F EP
+%%Page: 4 4
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF 6.34(at)79.2 84 S 6.34(ime. The)-6.34 F 3.84
+(order of the records returned during)6.34 F .208
+(sequential scans depends on the access method.)79.2 96 R(B+tree)5.209 E
+1.495(and Recno databases return records in sort order)79.2 108 R 3.995
+(,a)-.4 G(nd)-3.995 E .023
+(Hash databases return them in apparently random order)79.2 120 R(.)-.55
+E(Similarly)79.2 136.2 Q 4.959(,B)-.65 G(erk)-4.959 E(ele)-.1 E 4.959
+(yD)-.15 G 4.958(Bd)-4.959 G 2.458(e\214nes simple interf)-4.958 F 2.458
+(aces for)-.1 F
+(inserting, updating, and deleting records in a database.)79.2 148.2 Q
+/F1 12/Times-Bold@0 SF 3(3.3. Long)79.2 178.2 R -.12(ke)3 G(ys and v).12
+E(alues)-.12 E F0(Berk)79.2 194.4 Q(ele)-.1 E 3.553(yD)-.15 G 3.553(Bm)
+-3.553 G 1.053(anages k)-3.553 F -.15(ey)-.1 G 3.553(sa).15 G 1.053
+(nd v)-3.553 F 1.053(alues as lar)-.25 F 1.054(ge as 2)-.18 F/F2 8
+/Times-Roman@0 SF(32)-5 I F0 3.192(bytes. Since)79.2 206.4 R .692
+(the time required to cop)3.192 F 3.192(yar)-.1 G .692(ecord is pro-)
+-3.192 F 1.895(portional to its size, Berk)79.2 218.4 R(ele)-.1 E 4.396
+(yD)-.15 G 4.396(Bi)-4.396 G 1.896(ncludes interf)-4.396 F(aces)-.1 E
+4.507(that operate on partial records.)79.2 230.4 R 4.507
+(If an application)9.507 F 1.273(requires only part of a lar)79.2 242.4
+R 1.274(ge record, it requests partial)-.18 F .026(record retrie)79.2
+254.4 R -.25(va)-.25 G .026(l, and recei).25 F -.15(ve)-.25 G 2.526(sj)
+.15 G .025(ust the bytes that it needs.)-2.526 F(The smaller cop)79.2
+266.4 Q 2.5(ys)-.1 G -2.25 -.2(av e)-2.5 H 2.5(sb).2 G
+(oth time and memory)-2.5 E(.)-.65 E(Berk)79.2 282.6 Q(ele)-.1 E 3.206
+(yD)-.15 G 3.206(Ba)-3.206 G(llo)-3.206 E .706
+(ws the programmer to de\214ne the data)-.25 F 2.72(types of k)79.2
+294.6 R -.15(ey)-.1 G 5.22(sa).15 G 2.72(nd v)-5.22 F 5.22(alues. De)
+-.25 F -.15(ve)-.25 G 2.72(lopers use an).15 F 5.22(yt)-.15 G(ype)-5.22
+E -.15(ex)79.2 306.6 S(pressible in the programming language.).15 E F1 3
+(3.4. Lar)79.2 336.6 R(ge databases)-.12 E F0 3.255(As)79.2 352.8 S .755
+(ingle database managed by Berk)-3.255 F(ele)-.1 E 3.256(yD)-.15 G 3.256
+(Bc)-3.256 G .756(an be up)-3.256 F 1.716(to 2)79.2 364.8 R F2(48)-5 I
+F0 1.716(bytes, or 256 petabytes, in size.)4.216 5 N(Berk)6.715 E(ele)
+-.1 E 4.215(yD)-.15 G(B)-4.215 E 2.144
+(uses the host \214lesystem as the backing store for the)79.2 376.8 R
+2.668(database, so lar)79.2 388.8 R 2.667
+(ge databases require big \214le support)-.18 F 3.113
+(from the operating system.)79.2 400.8 R(Sleep)8.113 E 3.114(ycat Softw)
+-.1 F 3.114(are has)-.1 F 5.712(customers using Berk)79.2 412.8 R(ele)
+-.1 E 8.212(yD)-.15 G 8.212(Bt)-8.212 G 8.211(om)-8.212 G 5.711
+(anage single)-8.211 F(databases in e)79.2 424.8 Q(xcess of 100 gig)-.15
+E(abytes.)-.05 E F1 3(3.5. Main)79.2 454.8 R(memory databases)3 E F0
+1.171(Applications that do not require persistent storage can)79.2 471 R
+.119(create databases that e)79.2 483 R .119(xist only in main memory)
+-.15 F 5.118(.T)-.65 G(hese)-5.118 E .542(databases bypass the o)79.2
+495 R -.15(ve)-.15 G .543(rhead imposed by the I/O sys-).15 F
+(tem altogether)79.2 507 Q(.)-.55 E 2.144
+(Some applications do need to use disk as a backing)79.2 523.2 R 2.248
+(store, b)79.2 535.2 R 2.249(ut run on machines with v)-.2 F 2.249
+(ery lar)-.15 F 2.249(ge memory)-.18 F(.)-.65 E(Berk)79.2 547.2 Q(ele)
+-.1 E 2.799(yD)-.15 G 2.799(Bi)-2.799 G 2.799(sa)-2.799 G .299
+(ble to manage v)-2.799 F .299(ery lar)-.15 F .299(ge shared mem-)-.18 F
+.128(ory re)79.2 559.2 R .129
+(gions for cached data pages, log records, and lock)-.15 F 3.938
+(management. F)79.2 571.2 R 1.437(or e)-.15 F 1.437
+(xample, the cache re)-.15 F 1.437(gion used for)-.15 F .033
+(data pages may be gig)79.2 583.2 R .034
+(abytes in size, reducing the lik)-.05 F(eli-)-.1 E .639(hood that an)
+79.2 595.2 R 3.139(yr)-.15 G .639
+(ead operation will need to visit the disk)-3.139 F 1.201
+(in the steady state.)79.2 607.2 R 1.201
+(The programmer declares the size)6.201 F(of the cache re)79.2 619.2 Q
+(gion at startup.)-.15 E(Finally)79.2 635.4 Q 7.048(,m)-.65 G(an)-7.048
+E 7.048(yo)-.15 G 4.548(perating systems pro)-7.048 F 4.548
+(vide memory-)-.15 F 2.532(mapped \214le services that are much f)79.2
+647.4 R 2.533(aster than their)-.1 F 2.602
+(general-purpose \214le system interf)79.2 659.4 R 5.102(aces. Berk)-.1
+F(ele)-.1 E 5.102(yD)-.15 G(B)-5.102 E 5.118
+(can memory-map its database \214les for read-only)79.2 671.4 R 3.917
+(database use.)79.2 683.4 R 3.917(The application operates on records)
+8.917 F 2.069(stored directly on the pages, with no cache manage-)79.2
+695.4 R 1.557(ment o)79.2 707.4 R -.15(ve)-.15 G 4.057(rhead. Because)
+.15 F 1.556(the application gets pointers)4.057 F 1.265
+(directly into the Berk)323.2 84 R(ele)-.1 E 3.765(yD)-.15 G 3.765(Bp)
+-3.765 G 1.265(ages, writes cannot be)-3.765 F 3.775
+(permitted. Otherwise,)323.2 96 R 1.275(changes could bypass the lock-)
+3.775 F .23(ing and logging systems, and softw)323.2 108 R .23
+(are errors could cor)-.1 F(-)-.2 E 4.007(rupt the database.)323.2 120 R
+4.006(Read-only applications can use)9.007 F(Berk)323.2 132 Q(ele)-.1 E
+2.893(yD)-.15 G(B')-2.893 E 2.893(sm)-.55 G .393
+(emory-mapped \214le service to impro)-2.893 F -.15(ve)-.15 G
+(performance on most architectures.)323.2 144 Q F1 3
+(3.6. Con\214gurable)323.2 174 R(page size)3 E F0 .111
+(Programmers declare the size of the pages used by their)323.2 190.2 R
+.403(access methods when the)323.2 202.2 R 2.903(yc)-.15 G .403
+(reate a database.)-2.903 F(Although)5.403 E(Berk)323.2 214.2 Q(ele)-.1
+E 4.046(yD)-.15 G 4.046(Bp)-4.046 G(ro)-4.046 E 1.546
+(vides reasonable def)-.15 F 1.546(aults, de)-.1 F -.15(ve)-.25 G
+(lopers).15 E 3.64(may o)323.2 226.2 R -.15(ve)-.15 G 3.64
+(rride them to control system performance.).15 F .793
+(Small pages reduce the number of records that \214t on a)323.2 238.2 R
+.353(single page.)323.2 250.2 R(Fe)5.353 E .353
+(wer records on a page means that fe)-.25 F(wer)-.25 E .724
+(records are lock)323.2 262.2 R .724(ed when the page is lock)-.1 F .723
+(ed, impro)-.1 F(ving)-.15 E(concurrenc)323.2 274.2 Q 5.262 -.65(y. T)
+-.15 H 1.462(he per).65 F 1.462(-page o)-.2 F -.15(ve)-.15 G 1.462
+(rhead is proportionally).15 F 2.29
+(higher with smaller pages, of course, b)323.2 286.2 R 2.29(ut de)-.2 F
+-.15(ve)-.25 G(lopers).15 E(can trade of)323.2 298.2 Q 2.5(fs)-.25 G
+(pace for time as an application requires.)-2.5 E F1 3(3.7. Small)323.2
+328.2 R -.3(fo)3 G(otprint).3 E F0(Berk)323.2 344.4 Q(ele)-.1 E 3.973
+(yD)-.15 G 3.973(Bi)-3.973 G 3.974(sac)-3.973 G 1.474(ompact system.)
+-3.974 F 1.474(The full package,)6.474 F .832
+(including all access methods, reco)323.2 356.4 R -.15(ve)-.15 G
+(rability).15 E 3.331(,a)-.65 G .831(nd trans-)-3.331 F 1.235
+(action support is roughly 175K of te)323.2 368.4 R 1.236
+(xt space on com-)-.15 F(mon architectures.)323.2 380.4 Q F1 3
+(3.8. Cursors)323.2 410.4 R F0 1.57(In database terminology)323.2 426.6
+R 4.07(,ac)-.65 G 1.57(ursor is a pointer into an)-4.07 F 1.806
+(access method that can be called iterati)323.2 438.6 R -.15(ve)-.25 G
+1.807(ly to return).15 F 3.68(records in sequence.)323.2 450.6 R(Berk)
+8.68 E(ele)-.1 E 6.18(yD)-.15 G 6.18(Bi)-6.18 G 3.68(ncludes cursor)
+-6.18 F(interf)323.2 462.6 Q 2.814(aces for all access methods.)-.1 F
+2.815(This permits, for)7.814 F -.15(ex)323.2 474.6 S .34
+(ample, users to tra).15 F -.15(ve)-.2 G .34(rse a B+tree and vie).15 F
+2.84(wr)-.25 G .34(ecords in)-2.84 F(order)323.2 486.6 Q 6.233(.P)-.55 G
+1.234(ointers to records in cursors are persistent, so)-6.233 F 1.779
+(that once fetched, a record may be updated in place.)323.2 498.6 R
+(Finally)323.2 510.6 Q 4.438(,c)-.65 G 1.939
+(ursors support access to chains of duplicate)-4.438 F
+(data items in the v)323.2 522.6 Q(arious access methods.)-.25 E F1 3
+(3.9. J)323.2 552.6 R(oins)-.18 E F0 2.703(In database terminology)323.2
+568.8 R 5.203(,aj)-.65 G 2.702(oin is an operation that)-5.203 F .616
+(spans multiple separate tables \(or in the case of Berk)323.2 580.8 R
+(e-)-.1 E(le)323.2 592.8 Q 4.518(yD)-.15 G 2.018
+(B, multiple separate DB \214les\).)-4.518 F -.15(Fo)7.017 G 4.517(re)
+.15 G 2.017(xample, a)-4.667 F(compan)323.2 604.8 Q 3.372(ym)-.15 G .873
+(ay store information about its customers in)-3.372 F 1.545
+(one table and information about sales in another)323.2 616.8 R 6.545
+(.A)-.55 G(n)-6.545 E 1.498(application will lik)323.2 628.8 R 1.499
+(ely w)-.1 F 1.499(ant to look up sales informa-)-.1 F .933
+(tion by customer name; this requires matching records)323.2 640.8 R
+2.28(in the tw)323.2 652.8 R 4.78(ot)-.1 G 2.28
+(ables that share a common customer ID)-4.78 F 2.515(\214eld. This)323.2
+664.8 R .015(combining of records from multiple tables is)2.515 F
+(called a join.)323.2 676.8 Q(Berk)323.2 693 Q(ele)-.1 E 5.561(yD)-.15 G
+5.561(Bi)-5.561 G 3.061(ncludes interf)-5.561 F 3.062
+(aces for joining tw)-.1 F 5.562(oo)-.1 G(r)-5.562 E(more tables.)323.2
+705 Q EP
+%%Page: 5 5
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 12/Times-Bold@0 SF 3(3.10. T)79.2 84 R(ransactions)-.888 E/F1 10
+/Times-Roman@0 SF -.35(Tr)79.2 100.2 S(ansactions ha).35 E .3 -.15(ve f)
+-.2 H(our properties [Gray93]:).15 E/F2 8/Times-Roman@0 SF<83>84.2 116.4
+Q F1(The)17.2 E 5.489(ya)-.15 G 2.989(re atomic.)-5.489 F 2.989
+(That is, all of the changes)7.989 F 1.475
+(made in a single transaction must be applied at)104.2 128.4 R 1.31
+(the same instant or not at all.)104.2 140.4 R 1.31(This permits, for)
+6.31 F -.15(ex)104.2 152.4 S 3.565(ample, the transfer of mone).15 F
+6.065(yb)-.15 G 3.565(etween tw)-6.065 F(o)-.1 E 3.68
+(accounts to be accomplished, by making the)104.2 164.4 R 1.27
+(reduction of the balance in one account and the)104.2 176.4 R
+(increase in the other into a single, atomic action.)104.2 188.4 Q F2
+<83>84.2 204.6 Q F1(The)17.2 E 3.125(ym)-.15 G .625(ust be consistent.)
+-3.125 F .625(That is, changes to the)5.625 F 3.628(database by an)104.2
+216.6 R 6.128(yt)-.15 G 3.628(ransaction cannot lea)-6.128 F 3.929 -.15
+(ve t)-.2 H(he).15 E(database in an ille)104.2 228.6 Q -.05(ga)-.15 G
+2.5(lo).05 G 2.5(rc)-2.5 G(orrupt state.)-2.5 E F2<83>84.2 244.8 Q F1
+(The)17.2 E 3.006(ym)-.15 G .506(ust be isolatable.)-3.006 F(Re)5.506 E
+-.05(ga)-.15 G .505(rdless of the num-).05 F .8(ber of users w)104.2
+256.8 R .8(orking in the database at the same)-.1 F 1.88(time, e)104.2
+268.8 R -.15(ve)-.25 G 1.88(ry user must ha).15 F 2.18 -.15(ve t)-.2 H
+1.88(he illusion that no).15 F(other acti)104.2 280.8 Q
+(vity is going on.)-.25 E F2<83>84.2 297 Q F1(The)17.2 E 5.54(ym)-.15 G
+3.04(ust be durable.)-5.54 F(Ev)8.04 E 3.04(en if the disk that)-.15 F
+.877(stores the database is lost, it must be possible to)104.2 309 R
+(reco)104.2 321 Q -.15(ve)-.15 G 2.668(rt).15 G .168
+(he database to its last transaction-consis-)-2.668 F(tent state.)104.2
+333 Q 2.49(This combination of properties \212 atomicity)79.2 349.2 R
+4.99(,c)-.65 G(onsis-)-4.99 E(tenc)79.2 361.2 Q 4.542 -.65(y, i)-.15 H
+3.243(solation, and durability \212 is referred to as).65 F -.4(AC)79.2
+373.2 S 3.459(IDity in the literature.).4 F(Berk)8.459 E(ele)-.1 E 5.958
+(yD)-.15 G 3.458(B, lik)-5.958 F 5.958(em)-.1 G(ost)-5.958 E .993
+(database systems, pro)79.2 385.2 R .993(vides A)-.15 F .994
+(CIDity using a collection)-.4 F(of core services.)79.2 397.2 Q .257
+(Programmers can choose to use Berk)79.2 413.4 R(ele)-.1 E 2.757(yD)-.15
+G(B')-2.757 E 2.757(st)-.55 G(ransac-)-2.757 E
+(tion services for applications that need them.)79.2 425.4 Q F0 3
+(3.10.1. Write-ahead)79.2 455.4 R(logging)3 E F1 .479
+(Programmers can enable the logging system when the)79.2 471.6 R(y)-.15
+E .918(start up Berk)79.2 483.6 R(ele)-.1 E 3.418(yD)-.15 G 3.418
+(B. During)-3.418 F 3.417(at)3.417 G .917(ransaction, the appli-)-3.417
+F .493(cation mak)79.2 495.6 R .493
+(es a series of changes to the database.)-.1 F(Each)5.494 E .552
+(change is captured in a log entry)79.2 507.6 R 3.052(,w)-.65 G .552
+(hich holds the state)-3.052 F .207
+(of the database record both before and after the change.)79.2 519.6 R
+2.208(The log record is guaranteed to be \215ushed to stable)79.2 531.6
+R .871(storage before an)79.2 543.6 R 3.371(yo)-.15 G 3.371(ft)-3.371 G
+.871(he changed data pages are writ-)-3.371 F 3.989(ten. This)79.2 555.6
+R(beha)3.989 E 1.489(vior \212 writing the log before the data)-.2 F
+(pages \212 is called)79.2 567.6 Q/F3 10/Times-Italic@0 SF
+(write-ahead lo)2.5 E -.1(gg)-.1 G(ing).1 E F1(.)A .835(At an)79.2 583.8
+R 3.335(yt)-.15 G .835(ime during the transaction, the application can)
+-3.335 F F3(commit)79.2 595.8 Q F1 4.202(,m)C 1.702
+(aking the changes permanent, or)-4.202 F F3 -.45(ro)4.201 G 1.701
+(ll bac).45 F(k)-.2 E F1(,)A .852
+(cancelling all changes and restoring the database to its)79.2 607.8 R
+1.57(pre-transaction state.)79.2 619.8 R 1.57
+(If the application rolls back the)6.57 F 1.003
+(transaction, then the log holds the state of all changed)79.2 631.8 R
+.5(pages prior to the transaction, and Berk)79.2 643.8 R(ele)-.1 E 3(yD)
+-.15 G 3(Bs)-3 G(imply)-3 E .226(restores that state.)79.2 655.8 R .226
+(If the application commits the trans-)5.226 F .538(action, Berk)79.2
+667.8 R(ele)-.1 E 3.038(yD)-.15 G 3.038(Bw)-3.038 G .538
+(rites the log records to disk.)-3.038 F(In-)5.537 E 2.312
+(memory copies of the data pages already re\215ect the)79.2 679.8 R
+1.399(changes, and will be \215ushed as necessary during nor)79.2 691.8
+R(-)-.2 E 2.35(mal processing.)79.2 703.8 R 2.35
+(Since log writes are sequential, b)7.35 F(ut)-.2 E 8.732
+(data page writes are random, this impro)79.2 715.8 R -.15(ve)-.15 G(s)
+.15 E(performance.)323.2 84 Q F0 3(3.10.2. Crashes)323.2 114 R(and r)3 E
+(eco)-.216 E -.12(ve)-.12 G(ry).12 E F1(Berk)323.2 130.2 Q(ele)-.1 E
+3.592(yD)-.15 G(B')-3.592 E 3.592(sw)-.55 G 1.093
+(rite-ahead log is used by the transac-)-3.592 F .415
+(tion system to commit or roll back transactions.)323.2 142.2 R .414
+(It also)5.414 F(gi)323.2 154.2 Q -.15(ve)-.25 G 3.23(st).15 G .73
+(he reco)-3.23 F -.15(ve)-.15 G .73
+(ry system the information that it needs).15 F .824(to protect ag)323.2
+166.2 R .824(ainst data loss or corruption from crashes.)-.05 F(Berk)
+323.2 178.2 Q(ele)-.1 E 2.703(yD)-.15 G 2.703(Bi)-2.703 G 2.704(sa)
+-2.703 G .204(ble to survi)-2.704 F .504 -.15(ve a)-.25 H .204
+(pplication crashes, sys-).15 F .408(tem crashes, and e)323.2 190.2 R
+-.15(ve)-.25 G 2.908(nc).15 G .407(atastrophic f)-2.908 F .407
+(ailures lik)-.1 F 2.907(et)-.1 G .407(he loss)-2.907 F
+(of a hard disk, without losing an)323.2 202.2 Q 2.5(yd)-.15 G(ata.)-2.5
+E(Survi)323.2 218.4 Q .538(ving crashes requires data stored in se)-.25
+F -.15(ve)-.25 G .539(ral dif).15 F(fer)-.25 E(-)-.2 E 2.52(ent places.)
+323.2 230.4 R 2.52(During normal processing, Berk)7.52 F(ele)-.1 E 5.02
+(yD)-.15 G(B)-5.02 E .766(has copies of acti)323.2 242.4 R 1.066 -.15
+(ve l)-.25 H .766(og records and recently-used data).15 F 1.539
+(pages in memory)323.2 254.4 R 6.539(.L)-.65 G 1.539
+(og records are \215ushed to the log)-6.539 F .694
+(disk when transactions commit.)323.2 266.4 R .695
+(Data pages trickle out)5.694 F .008(to the data disk as pages mo)323.2
+278.4 R .308 -.15(ve t)-.15 H .008(hrough the b).15 F(uf)-.2 E .008
+(fer cache.)-.25 F(Periodically)323.2 290.4 Q 2.691(,t)-.65 G .191
+(he system administrator backs up the data)-2.691 F .278
+(disk, creating a safe cop)323.2 302.4 R 2.778(yo)-.1 G 2.778(ft)-2.778
+G .278(he database at a particular)-2.778 F 2.609(instant. When)323.2
+314.4 R .109(the database is back)2.609 F .109(ed up, the log can be)-.1
+F 3.838(truncated. F)323.2 326.4 R 1.337(or maximum rob)-.15 F 1.337
+(ustness, the log disk and)-.2 F(data disk should be separate de)323.2
+338.4 Q(vices.)-.25 E(Dif)323.2 354.6 Q 1.29(ferent system f)-.25 F 1.29
+(ailures can destro)-.1 F 3.79(ym)-.1 G(emory)-3.79 E 3.79(,t)-.65 G
+1.29(he log)-3.79 F 1.106(disk, or the data disk.)323.2 366.6 R(Berk)
+6.106 E(ele)-.1 E 3.606(yD)-.15 G 3.606(Bi)-3.606 G 3.606(sa)-3.606 G
+1.106(ble to survi)-3.606 F -.15(ve)-.25 G .679(the loss of an)323.2
+378.6 R 3.179(yo)-.15 G .679(ne of these repositories without losing)
+-3.179 F(an)323.2 390.6 Q 2.5(yc)-.15 G(ommitted transactions.)-2.5 E
+1.372(If the computer')323.2 406.8 R 3.871(sm)-.55 G 1.371
+(emory is lost, through an applica-)-3.871 F 1.619
+(tion or operating system crash, then the log holds all)323.2 418.8 R
+1.789(committed transactions.)323.2 430.8 R 1.788(On restart, the reco)
+6.789 F -.15(ve)-.15 G 1.788(ry sys-).15 F .49(tem rolls the log forw)
+323.2 442.8 R .49(ard ag)-.1 F .49(ainst the database, reapply-)-.05 F
+.682(ing an)323.2 454.8 R 3.181(yc)-.15 G .681
+(hanges to on-disk pages that were in memory)-3.181 F .14
+(at the time of the crash.)323.2 466.8 R .14
+(Since the log contains pre- and)5.14 F .957
+(post-change state for transactions, the reco)323.2 478.8 R -.15(ve)-.15
+G .956(ry system).15 F 1.14(also uses the log to restore an)323.2 490.8
+R 3.64(yp)-.15 G 1.14(ages to their original)-3.64 F 1.615(state if the)
+323.2 502.8 R 4.115(yw)-.15 G 1.615
+(ere modi\214ed by transactions that ne)-4.115 F -.15(ve)-.25 G(r).15 E
+(committed.)323.2 514.8 Q 2.051
+(If the data disk is lost, the system administrator can)323.2 531 R .887
+(restore the most recent cop)323.2 543 R 3.386(yf)-.1 G .886
+(rom backup.)-3.386 F .886(The reco)5.886 F(v-)-.15 E 1.298
+(ery system will roll the entire log forw)323.2 555 R 1.298(ard ag)-.1 F
+1.298(ainst the)-.05 F 2.64
+(original database, reapplying all committed changes.)323.2 567 R 4.363
+(When it \214nishes, the database will contain e)323.2 579 R -.15(ve)
+-.25 G(ry).15 E .535(change made by e)323.2 591 R -.15(ve)-.25 G .534
+(ry transaction that e).15 F -.15(ve)-.25 G 3.034(rc).15 G(ommitted.)
+-3.034 E .494(If the log disk is lost, then the reco)323.2 607.2 R -.15
+(ve)-.15 G .495(ry system can use).15 F 1.853
+(the in-memory copies of log entries to roll back an)323.2 619.2 R(y)
+-.15 E .026(uncommitted transactions, \215ush all in-memory database)
+323.2 631.2 R 1.659(pages to the data disk, and shut do)323.2 643.2 R
+1.659(wn gracefully)-.25 F 6.658(.A)-.65 G(t)-6.658 E 2.204
+(that point, the system administrator can back up the)323.2 655.2 R .039
+(database disk, install a ne)323.2 667.2 R 2.539(wl)-.25 G .039
+(og disk, and restart the sys-)-2.539 F(tem.)323.2 679.2 Q EP
+%%Page: 6 6
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 12/Times-Bold@0 SF 3(3.10.3. Checkpoints)79.2 84 R/F1 10
+/Times-Roman@0 SF(Berk)79.2 100.2 Q(ele)-.1 E 6.085(yD)-.15 G 6.085(Bi)
+-6.085 G 3.585(ncludes a checkpointing service that)-6.085 F .263
+(interacts with the reco)79.2 112.2 R -.15(ve)-.15 G .263(ry system.).15
+F .263(During normal pro-)5.263 F 2.415
+(cessing, both the log and the database are changing)79.2 124.2 R
+(continually)79.2 136.2 Q 5.925(.A)-.65 G 3.425(ta)-5.925 G 1.224 -.15
+(ny g)-3.425 H -2.15 -.25(iv e).15 H 3.424(ni).25 G .924
+(nstant, the on-disk v)-3.424 F(ersions)-.15 E .414(of the tw)79.2 148.2
+R 2.914(oa)-.1 G .414(re not guaranteed to be consistent.)-2.914 F .414
+(The log)5.414 F 3.838
+(probably contains changes that are not yet in the)79.2 160.2 R
+(database.)79.2 172.2 Q .085(When an application mak)79.2 188.4 R .086
+(es a)-.1 F/F2 10/Times-Italic@0 SF -.15(ch)2.586 G(ec).15 E(kpoint)-.2
+E F1 2.586(,a)C .086(ll committed)-2.586 F .443
+(changes in the log up to that point are guaranteed to be)79.2 200.4 R
+.631(present on the data disk, too.)79.2 212.4 R .632
+(Checkpointing is moder)5.631 F(-)-.2 E .046(ately e)79.2 224.4 R
+(xpensi)-.15 E .346 -.15(ve d)-.25 H .046(uring normal processing, b).15
+F .045(ut limits the)-.2 F(time spent reco)79.2 236.4 Q -.15(ve)-.15 G
+(ring from crashes.).15 E 3.117
+(After an application or operating system crash, the)79.2 252.6 R(reco)
+79.2 264.6 Q -.15(ve)-.15 G 7.419(ry system only needs to go back tw).15
+F(o)-.1 E(checkpoints)79.2 278.6 Q/F3 7/Times-Roman@0 SF(1)-4 I F1 1.376
+(to start rolling the log forw)3.876 4 N 3.875(ard. W)-.1 F(ithout)-.4 E
+3.264(checkpoints, there is no w)79.2 290.6 R 3.265(ay to be sure ho)-.1
+F 5.765(wl)-.25 G(ong)-5.765 E .395(restarting after a crash will tak)
+79.2 302.6 R 2.895(e. W)-.1 F .395(ith checkpoints, the)-.4 F .088
+(restart interv)79.2 314.6 R .089(al can be \214x)-.25 F .089
+(ed by the programmer)-.15 F 5.089(.R)-.55 G(eco)-5.089 E(v-)-.15 E .668
+(ery processing can be guaranteed to complete in a sec-)79.2 326.6 R
+(ond or tw)79.2 338.6 Q(o.)-.1 E(Softw)79.2 354.8 Q 2.457
+(are crashes are much more common than disk)-.1 F -.1(fa)79.2 366.8 S
+3.385(ilures. Man).1 F 3.385(yd)-.15 G -2.15 -.25(ev e)-3.385 H .884
+(lopers w).25 F .884(ant to guarantee that soft-)-.1 F -.1(wa)79.2 378.8
+S .158(re b).1 F .158(ugs do not destro)-.2 F 2.658(yd)-.1 G .158
+(ata, b)-2.658 F .158(ut are willing to restore)-.2 F .631
+(from tape, and to tolerate a day or tw)79.2 390.8 R 3.131(oo)-.1 G
+3.131(fl)-3.131 G .63(ost w)-3.131 F .63(ork, in)-.1 F .89(the unlikle)
+79.2 402.8 R 3.39(ye)-.15 G -.15(ve)-3.64 G .89(nt of a disk crash.).15
+F -.4(Wi)5.89 G .89(th Berk).4 F(ele)-.1 E 3.39(yD)-.15 G(B,)-3.39 E
+1.093(programmers may truncate the log at checkpoints.)79.2 414.8 R(As)
+6.092 E .09(long as the tw)79.2 426.8 R 2.59(om)-.1 G .09
+(ost recent checkpoints are present, the)-2.59 F(reco)79.2 438.8 Q -.15
+(ve)-.15 G .106(ry system can guarantee that no committed trans-).15 F
+.611(actions are lost after a softw)79.2 450.8 R .611(are crash.)-.1 F
+.611(In this case, the)5.611 F(reco)79.2 462.8 Q -.15(ve)-.15 G 1.439
+(ry system does not require that the log and the).15 F 1.328
+(data be on separate de)79.2 474.8 R 1.329
+(vices, although separating them)-.25 F(can still impro)79.2 486.8 Q .3
+-.15(ve p)-.15 H(erformance by spreading out writes.).15 E F0 3
+(3.10.4. T)79.2 516.8 R -.12(wo)-.888 G(-phase locking).12 E F1(Berk)
+79.2 533 Q(ele)-.1 E 4.416(yD)-.15 G 4.416(Bp)-4.416 G(ro)-4.416 E 1.916
+(vides a service kno)-.15 F 1.915(wn as tw)-.25 F(o-phase)-.1 E 3.017
+(locking. In)79.2 545 R .517(order to reduce the lik)3.017 F .518
+(elihood of deadlocks)-.1 F 2.547(and to guarantee A)79.2 557 R 2.546
+(CID properties, database systems)-.4 F .063(manage locks in tw)79.2 569
+R 2.564(op)-.1 G 2.564(hases. First,)-2.564 F .064(during the operation)
+2.564 F 1.574(of a transaction, the)79.2 581 R 4.074(ya)-.15 G 1.574
+(cquire locks, b)-4.074 F 1.573(ut ne)-.2 F -.15(ve)-.25 G 4.073(rr).15
+G(elease)-4.073 E 6.147(them. Second,)79.2 593 R 3.648
+(at the end of the transaction, the)6.147 F(y)-.15 E .235
+(release locks, b)79.2 605 R .235(ut ne)-.2 F -.15(ve)-.25 G 2.735(ra)
+.15 G .235(cquire them.)-2.735 F .235(In practice, most)5.235 F 4.69
+(database systems, including Berk)79.2 617 R(ele)-.1 E 7.19(yD)-.15 G
+4.69(B, acquire)-7.19 F 2.314(locks on demand o)79.2 629 R -.15(ve)-.15
+G 4.814(rt).15 G 2.314(he course of the transaction,)-4.814 F
+(then \215ush the log, then release all locks.)79.2 641 Q .32 LW 83.2
+650.6 79.2 650.6 DL 87.2 650.6 83.2 650.6 DL 91.2 650.6 87.2 650.6 DL
+95.2 650.6 91.2 650.6 DL 99.2 650.6 95.2 650.6 DL 103.2 650.6 99.2 650.6
+DL 107.2 650.6 103.2 650.6 DL 111.2 650.6 107.2 650.6 DL 115.2 650.6
+111.2 650.6 DL 119.2 650.6 115.2 650.6 DL 123.2 650.6 119.2 650.6 DL
+127.2 650.6 123.2 650.6 DL 131.2 650.6 127.2 650.6 DL 135.2 650.6 131.2
+650.6 DL 139.2 650.6 135.2 650.6 DL 143.2 650.6 139.2 650.6 DL 147.2
+650.6 143.2 650.6 DL 151.2 650.6 147.2 650.6 DL 155.2 650.6 151.2 650.6
+DL 159.2 650.6 155.2 650.6 DL 163.2 650.6 159.2 650.6 DL 167.2 650.6
+163.2 650.6 DL 171.2 650.6 167.2 650.6 DL 175.2 650.6 171.2 650.6 DL
+179.2 650.6 175.2 650.6 DL 183.2 650.6 179.2 650.6 DL 187.2 650.6 183.2
+650.6 DL 191.2 650.6 187.2 650.6 DL 195.2 650.6 191.2 650.6 DL 199.2
+650.6 195.2 650.6 DL 203.2 650.6 199.2 650.6 DL 207.2 650.6 203.2 650.6
+DL 211.2 650.6 207.2 650.6 DL 215.2 650.6 211.2 650.6 DL 219.2 650.6
+215.2 650.6 DL 223.2 650.6 219.2 650.6 DL/F4 5/Times-Roman@0 SF(1)100.8
+661 Q/F5 8/Times-Roman@0 SF .338(One checkpoint is not f)2.338 3.2 N
+.338(ar enough.)-.08 F .338(The reco)4.338 F -.12(ve)-.12 G .338
+(ry system can-).12 F .211
+(not be sure that the most recent checkpoint completed \212 it may ha)
+79.2 673.8 R -.12(ve)-.16 G .734
+(been interrupted by the crash that forced the reco)79.2 683.4 R -.12
+(ve)-.12 G .734(ry system to run).12 F(in the \214rst place.)79.2 693 Q
+F1(Berk)323.2 84 Q(ele)-.1 E 3.306(yD)-.15 G 3.306(Bc)-3.306 G .806
+(an lock entire database \214les, which cor)-3.306 F(-)-.2 E .845
+(respond to tables, or indi)323.2 96 R .844(vidual pages in them.)-.25 F
+.844(It does)5.844 F 2.141(no record-le)323.2 108 R -.15(ve)-.25 G 4.641
+(ll).15 G 4.641(ocking. By)-4.641 F 2.142(shrinking the page size,)4.641
+F(ho)323.2 120 Q(we)-.25 E -.15(ve)-.25 G 4.427 -.4(r, d).15 H -2.15
+-.25(ev e).4 H 3.627(lopers can guarantee that e).25 F -.15(ve)-.25 G
+3.626(ry page).15 F 2.101(holds only a small number of records.)323.2
+132 R 2.102(This reduces)7.102 F(contention.)323.2 144 Q .388
+(If locking is enabled, then read and write operations on)323.2 160.2 R
+5.317(ad)323.2 172.2 S 2.817(atabase acquire tw)-5.317 F 2.817
+(o-phase locks, which are held)-.1 F 3.635
+(until the transaction completes.)323.2 184.2 R 3.635(Which objects are)
+8.635 F(lock)323.2 196.2 Q .738
+(ed and the order of lock acquisition depend on the)-.1 F -.1(wo)323.2
+208.2 S .503(rkload for each transaction.).1 F .502
+(It is possible for tw)5.502 F 3.002(oo)-.1 G(r)-3.002 E 1.315
+(more transactions to deadlock, so that each is w)323.2 220.2 R(aiting)
+-.1 E(for a lock that is held by another)323.2 232.2 Q(.)-.55 E(Berk)
+323.2 248.4 Q(ele)-.1 E 3.307(yD)-.15 G 3.307(Bd)-3.307 G .807
+(etects deadlocks and automatically rolls)-3.307 F 1.825
+(back one of the transactions.)323.2 260.4 R 1.825
+(This releases the locks)6.825 F 1.926(that it held and allo)323.2 272.4
+R 1.925(ws the other transactions to con-)-.25 F 3.346(tinue. The)323.2
+284.4 R .847(caller is noti\214ed that its transaction did not)3.346 F
+1.747(complete, and may restart it.)323.2 296.4 R(De)6.747 E -.15(ve)
+-.25 G 1.747(lopers can specify).15 F .646
+(the deadlock detection interv)323.2 308.4 R .647(al and the polic)-.25
+F 3.147(yt)-.15 G 3.147(ou)-3.147 G .647(se in)-3.147 F
+(choosing a transaction to roll back.)323.2 320.4 Q 6.686(The tw)323.2
+336.6 R 6.686(o-phase locking interf)-.1 F 6.686(aces are separately)-.1
+F .927(callable by applications that link Berk)323.2 348.6 R(ele)-.1 E
+3.427(yD)-.15 G .928(B, though)-3.427 F(fe)323.2 360.6 Q 5.64(wu)-.25 G
+3.14(sers ha)-5.64 F 3.44 -.15(ve n)-.2 H 3.14(eeded to use that f).15 F
+3.14(acility directly)-.1 F(.)-.65 E 2.211(Using these interf)323.2
+372.6 R 2.211(aces, Berk)-.1 F(ele)-.1 E 4.711(yD)-.15 G 4.712(Bp)-4.711
+G(ro)-4.712 E 2.212(vides a f)-.15 F(ast,)-.1 E 2.4
+(platform-portable locking system for general-purpose)323.2 384.6 R
+2.917(use. It)323.2 396.6 R .418
+(also lets users include non-database objects in a)2.917 F 3.497
+(database transaction, by controlling access to them)323.2 408.6 R -.15
+(ex)323.2 420.6 S(actly as if the).15 E 2.5(yw)-.15 G
+(ere inside the database.)-2.5 E .583(The Berk)323.2 436.8 R(ele)-.1 E
+3.083(yD)-.15 G 3.084(Bt)-3.083 G -.1(wo)-3.084 G .584(-phase locking f)
+.1 F .584(acility is b)-.1 F .584(uilt on)-.2 F .609(the f)323.2 448.8 R
+.609(astest correct locking primiti)-.1 F -.15(ve)-.25 G 3.108(st).15 G
+.608(hat are supported)-3.108 F 1.967(by the underlying architecture.)
+323.2 460.8 R 1.967(In the current imple-)6.967 F .593
+(mentation, this means that the locking system is dif)323.2 472.8 R(fer)
+-.25 E(-)-.2 E 1.709(ent on the v)323.2 484.8 R 1.709
+(arious UNIX platforms, and is still more)-.25 F(dif)323.2 496.8 Q .695
+(ferent on W)-.25 F(indo)-.4 E .695(ws NT)-.25 F 5.695(.I)-.74 G 3.195
+(no)-5.695 G .695(ur e)-3.195 F .695(xperience, the most)-.15 F(dif)
+323.2 508.8 Q 2.634
+(\214cult aspect of performance tuning is \214nding the)-.25 F -.1(fa)
+323.2 520.8 S .883(stest locking primiti).1 F -.15(ve)-.25 G 3.383(st)
+.15 G .883(hat w)-3.383 F .882(ork correctly on a par)-.1 F(-)-.2 E 1.26
+(ticular architecture and then inte)323.2 532.8 R 1.26(grating the ne)
+-.15 F 3.76(wi)-.25 G(nter)-3.76 E(-)-.2 E -.1(fa)323.2 544.8 S
+(ce with the se).1 E -.15(ve)-.25 G(ral that we already support.).15 E
+.536(The w)323.2 561 R .536(orld w)-.1 F .536
+(ould be a better place if the operating sys-)-.1 F 2.096
+(tems community w)323.2 573 R 2.096(ould uniformly implement POSIX)-.1 F
+1.31(locking primiti)323.2 585 R -.15(ve)-.25 G 3.81(sa).15 G 1.31(nd w)
+-3.81 F 1.31(ould guarantee that acquiring)-.1 F 1.085
+(an uncontested lock w)323.2 597 R 1.085(as a f)-.1 F 1.085
+(ast operation.)-.1 F 1.085(Locks must)6.085 F -.1(wo)323.2 609 S 3.641
+(rk both among threads in a single process and).1 F(among processes.)
+323.2 621 Q F0 3(3.11. Concurr)323.2 651 R(ency)-.216 E F1 .383
+(Good performance under concurrent operation is a crit-)323.2 667.2 R
+.766(ical design point for Berk)323.2 679.2 R(ele)-.1 E 3.266(yD)-.15 G
+3.265(B. Although)-3.266 F(Berk)3.265 E(ele)-.1 E(y)-.15 E 1.961
+(DB is itself not multi-threaded, it is thread-safe, and)323.2 691.2 R
+.547(runs well in threaded applications.)323.2 703.2 R(Philosophically)
+5.546 E 3.046(,w)-.65 G(e)-3.046 E(vie)323.2 715.2 Q 4.764(wt)-.25 G
+2.264(he use of threads and the choice of a threads)-4.764 F EP
+%%Page: 7 7
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF .066(package as a polic)79.2 84 R 2.566(yd)-.15
+G .065(ecision, and prefer to of)-2.566 F .065(fer mecha-)-.25 F .042
+(nism \(the ability to run threaded or not\), allo)79.2 96 R .043
+(wing appli-)-.25 F(cations to choose their o)79.2 108 Q(wn policies.)
+-.25 E 1.947(The locking, logging, and b)79.2 124.2 R(uf)-.2 E 1.947
+(fer pool subsystems all)-.25 F .711
+(use shared memory or other OS-speci\214c sharing f)79.2 136.2 R(acili-)
+-.1 E 1.713(ties to communicate.)79.2 148.2 R 1.713(Locks, b)6.713 F(uf)
+-.2 E 1.713(fer pool fetches, and)-.25 F 1.061(log writes beha)79.2
+160.2 R 1.361 -.15(ve i)-.2 H 3.561(nt).15 G 1.061(he same w)-3.561 F
+1.061(ay across threads in a)-.1 F .033(single process as the)79.2 172.2
+R 2.532(yd)-.15 G 2.532(oa)-2.532 G .032(cross dif)-2.532 F .032
+(ferent processes on a)-.25 F(single machine.)79.2 184.2 Q .896
+(As a result, concurrent database applications may start)79.2 200.4 R
+1.651(up a ne)79.2 212.4 R 4.151(wp)-.25 G 1.651(rocess for e)-4.151 F
+-.15(ve)-.25 G 1.651(ry single user).15 F 4.151(,m)-.4 G 1.651
+(ay create a)-4.151 F 2.848(single serv)79.2 224.4 R 2.848(er which spa)
+-.15 F 2.849(wns a ne)-.15 F 5.349(wt)-.25 G 2.849(hread for e)-5.349 F
+-.15(ve)-.25 G(ry).15 E(client request, or may choose an)79.2 236.4 Q
+2.5(yp)-.15 G(olic)-2.5 E 2.5(yi)-.15 G 2.5(nb)-2.5 G(etween.)-2.5 E
+(Berk)79.2 252.6 Q(ele)-.1 E 3.629(yD)-.15 G 3.629(Bh)-3.629 G 1.128
+(as been carefully designed to minimize)-3.629 F .07
+(contention and maximize concurrenc)79.2 264.6 R 3.87 -.65(y. T)-.15 H
+.07(he cache man-).65 F .57(ager allo)79.2 276.6 R .57
+(ws all threads or processes to bene\214t from I/O)-.25 F 2.917
+(done by one.)79.2 288.6 R 2.917(Shared resources must sometimes be)
+7.917 F(lock)79.2 300.6 Q 1.804(ed for e)-.1 F(xclusi)-.15 E 2.104 -.15
+(ve a)-.25 H 1.804(ccess by one thread of control.).15 F 1.757 -.8(We h)
+79.2 312.6 T -2.25 -.2(av e).8 H -.1(ke)2.857 G .158
+(pt critical sections small, and are careful not).1 F 1.199
+(to hold critical resource locks across system calls that)79.2 324.6 R
+.538(could deschedule the locking thread or process.)79.2 336.6 R
+(Sleep-)5.539 E .979(ycat Softw)79.2 348.6 R .979
+(are has customers with hundreds of concur)-.1 F(-)-.2 E(rent users w)
+79.2 360.6 Q(orking on a single database in production.)-.1 E/F1 12
+/Times-Bold@0 SF 3(4. Engineering)79.2 390.6 R(Philosoph)3 E(y)-.18 E F0
+(Fundamentally)79.2 406.8 Q 3.998(,B)-.65 G(erk)-3.998 E(ele)-.1 E 3.998
+(yD)-.15 G 3.998(Bi)-3.998 G 3.999(sac)-3.998 G 1.499
+(ollection of access)-3.999 F .19(methods with important f)79.2 418.8 R
+.19(acilities, lik)-.1 F 2.69(el)-.1 G .19(ogging, locking,)-2.69 F
+1.251(and transactional access underlying them.)79.2 430.8 R 1.252
+(In both the)6.252 F .992(research and the commercial w)79.2 442.8 R
+.991(orld, the techniques for)-.1 F -.2(bu)79.2 454.8 S 2.727
+(ilding systems lik).2 F 5.227(eB)-.1 G(erk)-5.227 E(ele)-.1 E 5.227(yD)
+-.15 G 5.227(Bh)-5.227 G -2.25 -.2(av e)-5.227 H 2.728(been well-)5.427
+F(kno)79.2 466.8 Q(wn for a long time.)-.25 E .443(The k)79.2 483 R .743
+-.15(ey a)-.1 H(dv).15 E .442(antage of Berk)-.25 F(ele)-.1 E 2.942(yD)
+-.15 G 2.942(Bi)-2.942 G 2.942(st)-2.942 G .442(he careful atten-)-2.942
+F 1.059(tion that has been paid to engineering details through-)79.2 495
+R 1.039(out its life.)79.2 507 R 2.639 -.8(We h)6.039 H -2.25 -.2(av e)
+.8 H 1.039(carefully designed the system so)3.739 F .452
+(that the core f)79.2 519 R .452(acilities, lik)-.1 F 2.952(el)-.1 G
+.452(ocking and I/O, surf)-2.952 F .453(ace the)-.1 F .972(right interf)
+79.2 531 R .971(aces and are otherwise opaque to the caller)-.1 F(.)-.55
+E .294(As programmers, we understand the v)79.2 543 R .295
+(alue of simplicity)-.25 F .206(and ha)79.2 555 R .506 -.15(ve w)-.2 H
+(ork).05 E .206(ed hard to simplify the interf)-.1 F .205(aces we sur)
+-.1 F(-)-.2 E -.1(fa)79.2 567 S(ce to users of the database system.).1 E
+(Berk)79.2 583.2 Q(ele)-.1 E 4.531(yD)-.15 G 4.531(Ba)-4.531 G -.2(vo)
+-4.731 G 2.031(ids limits in the code.).2 F 2.031(It places no)7.031 F
+.474(practical limit on the size of k)79.2 595.2 R -.15(ey)-.1 G .473
+(s, v).15 F .473(alues, or databases;)-.25 F(the)79.2 607.2 Q 2.5(ym)
+-.15 G(ay gro)-2.5 E 2.5(wt)-.25 G 2.5(oo)-2.5 G(ccup)-2.5 E 2.5(yt)-.1
+G(he a)-2.5 E -.25(va)-.2 G(ilable storage space.).25 E 1.857
+(The locking and logging subsystems ha)79.2 623.4 R 2.157 -.15(ve b)-.2
+H 1.858(een care-).15 F .184
+(fully crafted to reduce contention and impro)79.2 635.4 R .484 -.15
+(ve t)-.15 H(hrough-).15 E 2.16
+(put by shrinking or eliminating critical sections, and)79.2 647.4 R
+(reducing the sizes of lock)79.2 659.4 Q(ed re)-.1 E
+(gions and log entries.)-.15 E 2.238
+(There is nothing in the design or implementation of)79.2 675.6 R(Berk)
+79.2 687.6 Q(ele)-.1 E 2.818(yD)-.15 G 2.818(Bt)-2.818 G .318
+(hat pushes the state of the art in database)-2.818 F 3.545
+(systems. Rather)79.2 699.6 R 3.545(,w)-.4 G 3.545(eh)-3.545 G -2.25 -.2
+(av e)-3.545 H 1.044(been v)3.745 F 1.044(ery careful to get the)-.15 F
+4.321(engineering right.)79.2 711.6 R 4.321
+(The result is a system that is)9.321 F(superior)323.2 84 Q 2.867(,a)-.4
+G 2.867(sa)-2.867 G 2.866(ne)-2.867 G .366
+(mbedded database system, to an)-2.866 F 2.866(yo)-.15 G(ther)-2.866 E
+(solution a)323.2 96 Q -.25(va)-.2 G(ilable.).25 E .811
+(Most database systems trade of)323.2 112.2 R 3.312(fs)-.25 G .812
+(implicity for correct-)-3.312 F 4.151(ness. Either)323.2 124.2 R 1.651
+(the system is easy to use, or it supports)4.151 F 1.17
+(concurrent use and survi)323.2 136.2 R -.15(ve)-.25 G 3.67(ss).15 G
+1.17(ystem f)-3.67 F 3.67(ailures. Berk)-.1 F(ele)-.1 E(y)-.15 E 1.013
+(DB, because of its careful design and implementation,)323.2 148.2 R(of)
+323.2 160.2 Q(fers both simplicity and correctness.)-.25 E .759
+(The system has a small footprint, mak)323.2 176.4 R .759
+(es simple opera-)-.1 F 1.012
+(tions simple to carry out \(inserting a ne)323.2 188.4 R 3.512(wr)-.25
+G 1.012(ecord tak)-3.512 F(es)-.1 E 1.16(just a fe)323.2 200.4 R 3.66
+(wl)-.25 G 1.16(ines of code\), and beha)-3.66 F -.15(ve)-.2 G 3.66(sc)
+.15 G 1.16(orrectly in the)-3.66 F -.1(fa)323.2 212.4 S .528(ce of hea)
+.1 F .527(vy concurrent use, system crashes, and e)-.2 F -.15(ve)-.25 G
+(n).15 E(catastrophic f)323.2 224.4 Q(ailures lik)-.1 E 2.5(el)-.1 G
+(oss of a hard disk.)-2.5 E F1 3(5. The)323.2 254.4 R(Berk)3 E
+(eley DB 2.x Distrib)-.12 E(ution)-.24 E F0(Berk)323.2 270.6 Q(ele)-.1 E
+4.171(yD)-.15 G 4.171(Bi)-4.171 G 4.171(sd)-4.171 G(istrib)-4.171 E
+1.671(uted in source code form from)-.2 F/F2 10/Times-Italic@0 SF(www)
+323.2 282.6 Q(.sleepycat.com)-.74 E F0 7.322(.U)C 2.322
+(sers are free to do)-7.322 F 2.321(wnload and)-.25 F -.2(bu)323.2 294.6
+S(ild the softw).2 E(are, and to use it in their applications.)-.1 E F1
+3(5.1. What)323.2 324.6 R(is in the distrib)3 E(ution)-.24 E F0 4.827
+(The distrib)323.2 340.8 R 4.827(ution is a compressed archi)-.2 F 5.127
+-.15(ve \214)-.25 H 7.328(le. It).15 F .057
+(includes the source code for the Berk)323.2 352.8 R(ele)-.1 E 2.556(yD)
+-.15 G 2.556(Bl)-2.556 G(ibrary)-2.556 E 2.556(,a)-.65 G(s)-2.556 E .453
+(well as documentation, test suites, and supporting utili-)323.2 364.8 R
+(ties.)323.2 376.8 Q 2.613(The source code includes b)323.2 393 R 2.612
+(uild support for all sup-)-.2 F .254(ported platforms.)323.2 405 R .254
+(On UNIX systems Berk)5.254 F(ele)-.1 E 2.755(yD)-.15 G 2.755(Bu)-2.755
+G(ses)-2.755 E 1.28(the GNU autocon\214guration tool,)323.2 417 R/F3 10
+/Courier@0 SF(autoconf)3.78 E F0 3.78(,t)C 3.78(oi)-3.78 G(den-)-3.78 E
+.992(tify the system and to b)323.2 429 R .992
+(uild the library and supporting)-.2 F 3.589(utilities. Berk)323.2 441 R
+(ele)-.1 E 3.589(yD)-.15 G 3.588(Bi)-3.589 G 1.088(ncludes speci\214c b)
+-3.588 F 1.088(uild en)-.2 F(viron-)-.4 E .515
+(ments for other platforms, such as VMS and W)323.2 453 R(indo)-.4 E
+(ws.)-.25 E F1 3(5.1.1. Documentation)323.2 483 R F0 5.008(The distrib)
+323.2 499.2 R 5.008(uted system includes documentation in)-.2 F 1.626
+(HTML format.)323.2 511.2 R 1.626(The documentation is in tw)6.626 F
+4.127(op)-.1 G 1.627(arts: a)-4.127 F .725
+(UNIX-style reference manual for use by programmers,)323.2 523.2 R
+(and a reference guide which is tutorial in nature.)323.2 535.2 Q F1 3
+(5.1.2. T)323.2 565.2 R(est suite)-1.104 E F0 1.107(The softw)323.2
+581.4 R 1.108(are also includes a complete test suite, writ-)-.1 F .155
+(ten in Tcl.)323.2 593.4 R 1.754 -.8(We b)5.154 H(elie).8 E .454 -.15
+(ve t)-.25 H .154(hat the test suite is a k).15 F .454 -.15(ey a)-.1 H
+(dv).15 E(an-)-.25 E(tage of Berk)323.2 605.4 Q(ele)-.1 E 2.5(yD)-.15 G
+2.5(Bo)-2.5 G -.15(ve)-2.65 G 2.5(rc).15 G(omparable systems.)-2.5 E
+2.612(First, the test suite allo)323.2 621.6 R 2.613(ws users who do)
+-.25 F 2.613(wnload and)-.25 F -.2(bu)323.2 633.6 S 1.731(ild the softw)
+.2 F 1.731(are to be sure that it is operating cor)-.1 F(-)-.2 E(rectly)
+323.2 645.6 Q(.)-.65 E .893(Second, the test suite allo)323.2 661.8 R
+.894(ws us, lik)-.25 F 3.394(eo)-.1 G .894(ther commercial)-3.394 F(de)
+323.2 673.8 Q -.15(ve)-.25 G .536(lopers of database softw).15 F .536
+(are, to e)-.1 F -.15(xe)-.15 G .535(rcise the system).15 F 2.256
+(thoroughly at e)323.2 685.8 R -.15(ve)-.25 G 2.256(ry release.).15 F
+2.256(When we learn of ne)7.256 F(w)-.25 E -.2(bu)323.2 697.8 S 1.719
+(gs, we add them to the test suite.).2 F 3.319 -.8(We r)6.719 H 1.719
+(un the test).8 F 5.692(suite continually during de)323.2 709.8 R -.15
+(ve)-.25 G 5.692(lopment c).15 F 5.692(ycles, and)-.15 F EP
+%%Page: 8 8
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF(al)79.2 84 Q -.1(wa)-.1 G .314
+(ys prior to release.).1 F .314(The result is a much more reli-)5.314 F
+(able system by the time it reaches beta release.)79.2 96 Q/F1 12
+/Times-Bold@0 SF 3(5.2. Binary)79.2 126 R(distrib)3 E(ution)-.24 E F0
+(Sleep)79.2 142.2 Q .893(ycat mak)-.1 F .893
+(es compiled libraries and general binary)-.1 F(distrib)79.2 154.2 Q
+(utions a)-.2 E -.25(va)-.2 G(ilable to customers for a fee.).25 E F1 3
+(5.3. Supported)79.2 184.2 R(platf)3 E(orms)-.3 E F0(Berk)79.2 200.4 Q
+(ele)-.1 E 5.623(yD)-.15 G 5.623(Br)-5.623 G 3.123(uns on an)-5.623 F
+5.622(yo)-.15 G 3.122(perating system with a)-5.622 F .816
+(POSIX 1003.1 interf)79.2 212.4 R .817(ace [IEEE96], which includes vir)
+-.1 F(-)-.2 E 1.998(tually e)79.2 224.4 R -.15(ve)-.25 G 1.997
+(ry UNIX system.).15 F 1.997(In addition, the softw)6.997 F(are)-.1 E
+2.85(runs on VMS, W)79.2 236.4 R(indo)-.4 E 2.85(ws/95, W)-.25 F(indo)
+-.4 E 2.85(ws/98, and W)-.25 F(in-)-.4 E(do)79.2 248.4 Q(ws/NT)-.25 E
+10.21(.S)-.74 G(leep)-10.21 E 5.21(ycat Softw)-.1 F 5.21
+(are no longer supports)-.1 F(deplo)79.2 260.4 Q(yment on sixteen-bit W)
+-.1 E(indo)-.4 E(ws systems.)-.25 E F1 3(6. Berk)79.2 290.4 R
+(eley DB 2.x Licensing)-.12 E F0(Berk)79.2 306.6 Q(ele)-.1 E 2.627(yD)
+-.15 G 2.627(B2)-2.627 G .128(.x is distrib)-2.627 F .128
+(uted as an Open Source prod-)-.2 F 4.709(uct. The)79.2 318.6 R(softw)
+4.709 E 2.209(are is freely a)-.1 F -.25(va)-.2 G 2.209
+(ilable from us at our).25 F -.8(We)79.2 330.6 S 3.372(bs).8 G .872
+(ite, and in other media.)-3.372 F .872(Users are free to do)5.872 F
+(wn-)-.25 E(load the softw)79.2 342.6 Q(are and b)-.1 E
+(uild applications with it.)-.2 E 1.023(The 1.x v)79.2 358.8 R 1.022
+(ersions of Berk)-.15 F(ele)-.1 E 3.522(yD)-.15 G 3.522(Bw)-3.522 G
+1.022(ere co)-3.522 F -.15(ve)-.15 G 1.022(red by the).15 F 3.763
+(UC Berk)79.2 370.8 R(ele)-.1 E 6.263(yc)-.15 G(op)-6.263 E 3.763
+(yright that co)-.1 F -.15(ve)-.15 G 3.764(rs softw).15 F 3.764
+(are freely)-.1 F(redistrib)79.2 382.8 Q 1.742(utable in source form.)
+-.2 F 1.741(When Sleep)6.742 F 1.741(ycat Soft-)-.1 F -.1(wa)79.2 394.8
+S .906(re w).1 F .907(as formed, we needed to draft a license consis-)
+-.1 F 2.319(tent with the cop)79.2 406.8 R 2.319(yright go)-.1 F -.15
+(ve)-.15 G 2.318(rning the e).15 F 2.318(xisting, older)-.15 F(softw)
+79.2 418.8 Q 5.328(are. Because)-.1 F 2.828(of important dif)5.328 F
+2.828(ferences between)-.25 F .497(the UC Berk)79.2 430.8 R(ele)-.1 E
+2.997(yc)-.15 G(op)-2.997 E .497(yright and the GPL, it w)-.1 F .496
+(as impos-)-.1 F .884(sible for us to use the GPL.)79.2 442.8 R 3.384
+(As)5.884 G .884(econd cop)-3.384 F .884(yright, with)-.1 F .87
+(terms contradictory to the \214rst, simply w)79.2 454.8 R .87
+(ould not ha)-.1 F -.15(ve)-.2 G -.1(wo)79.2 466.8 S(rk).1 E(ed.)-.1 E
+(Sleep)79.2 483 Q 2.533(ycat w)-.1 F 2.533
+(anted to continue Open Source de)-.1 F -.15(ve)-.25 G(lop-).15 E 2.079
+(ment of Berk)79.2 495 R(ele)-.1 E 4.579(yD)-.15 G 4.579(Bf)-4.579 G
+2.079(or se)-4.579 F -.15(ve)-.25 G 2.079(ral reasons.).15 F 3.678 -.8
+(We a)7.078 H(gree).8 E .853
+(with Raymond [Raym98] and others that Open Source)79.2 507 R(softw)79.2
+519 Q .763(are is typically of higher quality than proprietary)-.1 F(,)
+-.65 E 2.616(binary-only products.)79.2 531 R 2.617
+(Our customers bene\214t from a)7.616 F .983(community of de)79.2 543 R
+-.15(ve)-.25 G .983(lopers who kno).15 F 3.483(wa)-.25 G .983
+(nd use Berk)-3.483 F(ele)-.1 E(y)-.15 E 1.317
+(DB, and can help with application design, deb)79.2 555 R(ugging,)-.2 E
+1.65(and performance tuning.)79.2 567 R -.4(Wi)6.65 G 1.65
+(despread distrib).4 F 1.65(ution and)-.2 F 1.017
+(use of the source code tends to isolate b)79.2 579 R 1.017(ugs early)
+-.2 F 3.517(,a)-.65 G(nd)-3.517 E .032(to get \214x)79.2 591 R .031
+(es back into the distrib)-.15 F .031(uted system quickly)-.2 F 5.031
+(.A)-.65 G(s)-5.031 E 3.553(ar)79.2 603 S 1.053(esult, Berk)-3.553 F
+(ele)-.1 E 3.553(yD)-.15 G 3.553(Bi)-3.553 G 3.553(sm)-3.553 G 1.053
+(ore reliable.)-3.553 F 1.054(Just as impor)6.054 F(-)-.2 E(tantly)79.2
+615 Q 3.695(,i)-.65 G(ndi)-3.695 E 1.195
+(vidual users are able to contrib)-.25 F 1.195(ute ne)-.2 F 3.695(wf)
+-.25 G(ea-)-3.695 E 1.056
+(tures and performance enhancements, to the bene\214t of)79.2 627 R
+-2.15 -.25(ev e)79.2 639 T .359(ryone who uses Berk).25 F(ele)-.1 E
+2.859(yD)-.15 G 2.859(B. From)-2.859 F 2.858(ab)2.859 G .358
+(usiness per)-3.058 F(-)-.2 E(specti)79.2 651 Q -.15(ve)-.25 G 3.115(,O)
+.15 G .615(pen Source and free distrib)-3.115 F .615(ution of the soft-)
+-.2 F -.1(wa)79.2 663 S 1.605(re creates share for us, and gi).1 F -.15
+(ve)-.25 G 4.105(su).15 G 4.105(sam)-4.105 G(ark)-4.105 E 1.605(et into)
+-.1 F .412(which we can sell products and services.)79.2 675 R(Finally)
+5.413 E 2.913(,m)-.65 G(ak-)-2.913 E .148(ing the source code freely a)
+79.2 687 R -.25(va)-.2 G .147(ilable reduces our support).25 F 2.436
+(load, since customers can \214nd and \214x b)79.2 699 R 2.437
+(ugs without)-.2 F(recourse to us, in man)79.2 711 Q 2.5(yc)-.15 G
+(ases.)-2.5 E 4.727 -.8(To p)323.2 84 T(reserv).8 E 5.627(et)-.15 G
+3.126(he Open Source heritage of the older)-5.627 F(Berk)323.2 96 Q(ele)
+-.1 E 3.003(yD)-.15 G 3.003(Bc)-3.003 G .504(ode, we drafted a ne)-3.003
+F 3.004(wl)-.25 G .504(icense go)-3.004 F -.15(ve)-.15 G(rning).15 E
+.417(the distrib)323.2 108 R .417(ution of Berk)-.2 F(ele)-.1 E 2.916
+(yD)-.15 G 2.916(B2)-2.916 G 2.916(.x. W)-2.916 F 2.916(ea)-.8 G .416
+(dopted terms)-2.916 F .411(from the GPL that mak)323.2 120 R 2.911(ei)
+-.1 G 2.911(ti)-2.911 G .411(mpossible to turn our Open)-2.911 F 1.289
+(Source code into proprietary code o)323.2 132 R 1.288(wned by someone)
+-.25 F(else.)323.2 144 Q(Brie\215y)323.2 160.2 Q 3.18(,t)-.65 G .68
+(he terms go)-3.18 F -.15(ve)-.15 G .68(rning the use and distrib).15 F
+.68(ution of)-.2 F(Berk)323.2 172.2 Q(ele)-.1 E 2.5(yD)-.15 G 2.5(Ba)
+-2.5 G(re:)-2.5 E/F2 8/Times-Roman@0 SF<83>328.2 188.4 Q F0
+(your application must be internal to your site, or)17.2 E F2<83>328.2
+204.6 Q F0 .612(your application must be freely redistrib)17.2 F .611
+(utable in)-.2 F(source form, or)348.2 216.6 Q F2<83>328.2 232.8 Q F0
+(you must get a license from us.)17.2 E -.15(Fo)323.2 249 S 2.631(rc).15
+G .131(ustomers who prefer not to distrib)-2.631 F .132(ute Open Source)
+-.2 F 1.493(products, we sell licenses to use and e)323.2 261 R 1.492
+(xtend Berk)-.15 F(ele)-.1 E(y)-.15 E(DB at a reasonable cost.)323.2 273
+Q 2.675 -.8(We w)323.2 289.2 T 1.076
+(ork hard to accommodate the needs of the Open).7 F .606
+(Source community)323.2 301.2 R 5.606(.F)-.65 G .606(or e)-5.756 F .606
+(xample, we ha)-.15 F .905 -.15(ve c)-.2 H .605(rafted spe-).15 F 1.415
+(cial licensing arrangements with Gnome to encourage)323.2 313.2 R
+(its use and distrib)323.2 325.2 Q(ution of Berk)-.2 E(ele)-.1 E 2.5(yD)
+-.15 G(B.)-2.5 E(Berk)323.2 341.4 Q(ele)-.1 E 4.103(yD)-.15 G 4.103(Bc)
+-4.103 G 1.603(onforms to the Open Source de\214nition)-4.103 F 4.867
+([Open99]. The)323.2 353.4 R 2.367
+(license has been carefully crafted to)4.867 F -.1(ke)323.2 365.4 S .643
+(ep the product a).1 F -.25(va)-.2 G .642(ilable as an Open Source of)
+.25 F(fering,)-.25 E(while pro)323.2 377.4 Q
+(viding enough of a return on our in)-.15 E -.15(ve)-.4 G(stment to).15
+E 1.546(fund continued de)323.2 389.4 R -.15(ve)-.25 G 1.546
+(lopment and support of the prod-).15 F 3.033(uct. The)323.2 401.4 R
+.534(current license has created a b)3.033 F .534(usiness capable)-.2 F
+.916(of funding three years of de)323.2 413.4 R -.15(ve)-.25 G .916
+(lopment on the softw).15 F(are)-.1 E(that simply w)323.2 425.4 Q
+(ould not ha)-.1 E .3 -.15(ve h)-.2 H(appened otherwise.).15 E F1 3
+(7. Summary)323.2 455.4 R F0(Berk)323.2 471.6 Q(ele)-.1 E 2.991(yD)-.15
+G 2.991(Bo)-2.991 G -.25(ff)-2.991 G .491
+(ers a unique collection of features, tar).25 F(-)-.2 E .175
+(geted squarely at softw)323.2 483.6 R .174(are de)-.1 F -.15(ve)-.25 G
+.174(lopers who need simple,).15 F .492
+(reliable database management services in their applica-)323.2 495.6 R
+5.3(tions. Good)323.2 507.6 R 2.8(design and implementation and careful)
+5.3 F 1.633(engineering throughout mak)323.2 519.6 R 4.133(et)-.1 G
+1.633(he softw)-4.133 F 1.634(are better than)-.1 F(man)323.2 531.6 Q
+2.5(yo)-.15 G(ther systems.)-2.5 E(Berk)323.2 547.8 Q(ele)-.1 E 4.1(yD)
+-.15 G 4.1(Bi)-4.1 G 4.1(sa)-4.1 G 4.1(nO)-4.1 G 1.6
+(pen Source product, a)-4.1 F -.25(va)-.2 G 1.6(ilable at).25 F/F3 10
+/Times-Italic@0 SF(www)323.2 559.8 Q(.sleepycat.com)-.74 E F0 .654
+(for do)3.154 F 3.154(wnload. The)-.25 F(distrib)3.154 E .654(uted sys-)
+-.2 F .383(tem includes e)323.2 571.8 R -.15(ve)-.25 G .383
+(rything needed to b).15 F .382(uild and deplo)-.2 F 2.882(yt)-.1 G(he)
+-2.882 E(softw)323.2 583.8 Q(are or to port it to ne)-.1 E 2.5(ws)-.25 G
+(ystems.)-2.5 E(Sleep)323.2 600 Q 2.633(ycat Softw)-.1 F 2.633
+(are distrib)-.1 F 2.633(utes Berk)-.2 F(ele)-.1 E 5.133(yD)-.15 G 5.134
+(Bu)-5.133 G 2.634(nder a)-5.134 F .764(license agreement that dra)323.2
+612 R .764(ws on both the UC Berk)-.15 F(ele)-.1 E(y)-.15 E(cop)323.2
+624 Q 2.377(yright and the GPL.)-.1 F 2.377(The license guarantees that)
+7.377 F(Berk)323.2 636 Q(ele)-.1 E 3.384(yD)-.15 G 3.384(Bw)-3.384 G
+.884(ill remain an Open Source product and)-3.384 F(pro)323.2 648 Q
+1.493(vides Sleep)-.15 F 1.493(ycat with opportunities to mak)-.1 F
+3.994(em)-.1 G(one)-3.994 E(y)-.15 E(to fund continued de)323.2 660 Q
+-.15(ve)-.25 G(lopment on the softw).15 E(are.)-.1 E EP
+%%Page: 9 9
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 12/Times-Bold@0 SF 3(8. Refer)79.2 84 R(ences)-.216 E/F1 10
+/Times-Roman@0 SF([Come79])79.2 100.2 Q(Comer)104.2 112.2 Q 3.127(,D)-.4
+G .627(., \231The Ubiquitous B-tree,)-3.127 F<9a>-.7 E/F2 10
+/Times-Italic@0 SF -.3(AC)3.126 G 3.126(MC).3 G(om-)-3.126 E .404
+(puting Surve)104.2 124.2 R(ys)-.3 E F1 -1.29(Vo)2.904 G .404
+(lume 11, number 2, June 1979.)1.29 F([Gray93])79.2 140.4 Q(Gray)104.2
+152.4 Q 2.982(,J)-.65 G .482(., and Reuter)-2.982 F 2.982(,A)-.4 G(.,)
+-2.982 E F2 -1.55 -.55(Tr a)2.981 H .481(nsaction Pr).55 F(ocessing:)
+-.45 E 6.776(Concepts and T)104.2 164.4 R(ec)-.92 E(hniques)-.15 E F1
+9.277(,M)C(or)-9.277 E -.05(ga)-.18 G(n-Kaufman).05 E(Publishers, 1993.)
+104.2 176.4 Q([IEEE96])79.2 192.6 Q .364
+(Institute for Electrical and Electronics Engineers,)104.2 204.6 R F2
+(IEEE/ANSI Std 1003.1)104.2 216.6 Q F1 2.5(,1)C(996 Edition.)-2.5 E
+([Litw80])79.2 232.8 Q 2.365(Litwin, W)104.2 244.8 R 2.366
+(., \231Linear Hashing: A Ne)-.92 F 4.866(wT)-.25 G 2.366(ool for)-5.666
+F 1.784(File and T)104.2 256.8 R 1.783(able Addressing,)-.8 F<9a>-.7 E
+F2(Pr)4.283 E 1.783(oceedings of the)-.45 F 4.804
+(6th International Confer)104.2 268.8 R 4.804(ence on V)-.37 F 4.804
+(ery Lar)-1.11 F -.1(ge)-.37 G 1.983(Databases \(VLDB\))104.2 280.8 R F1
+4.483(,M)C 1.982(ontreal, Quebec, Canada,)-4.483 F(October 1980.)104.2
+292.8 Q([Open94])79.2 309 Q 4.068(The Open Group,)104.2 321 R F2
+(Distrib)6.568 E 4.069(uted TP: The XA+)-.2 F .78(Speci\214cation, V)
+104.2 333 R(er)-1.11 E .78(sion 2)-.1 F F1 3.28(,T)C .78
+(he Open Group, 1994.)-3.28 F([Open99])79.2 349.2 Q(Opensource.or)104.2
+361.2 Q 8.307(g, \231Open Source De\214nition,)-.18 F<9a>-.7 E F2(www)
+104.2 373.2 Q(.opensour)-.74 E(ce)-.37 E(.or)-.15 E(g/osd.html)-.37 E F1
+3.13(,v)C .63(ersion 1.4, 1999.)-3.28 F([Raym98])79.2 389.4 Q .718
+(Raymond, E.S., \231The Cathedral and the Bazaar)104.2 401.4 R -.7<2c9a>
+-.4 G F2(www)104.2 413.4 Q(.tuxedo.or)-.74 E(g/~esr/writings/cathedr)
+-.37 E(al-)-.15 E(bazaar/cathedr)104.2 425.4 Q(al-bazaar)-.15 E(.html)
+-1.11 E F1 2.5(,J)C(anuary 1998.)-2.5 E([Selt91])79.2 441.6 Q(Seltzer)
+104.2 453.6 Q 2.578(,M)-.4 G .078(., and Y)-2.578 F .079(igit, O., \231)
+-.55 F 2.579(AN)-.8 G .579 -.25(ew H)-2.579 H .079(ashing P).25 F(ack-)
+-.15 E 6.704(age for UNIX,)104.2 465.6 R<9a>-.7 E F2(Pr)9.204 E 6.704
+(oceedings 1991 W)-.45 F(inter)-.55 E(USENIX Confer)104.2 477.6 Q(ence)
+-.37 E F1 2.5(,D)C(allas, TX, January 1991.)-2.5 E([Selt92])79.2 493.8 Q
+(Seltzer)104.2 505.8 Q 5.365(,M)-.4 G 2.865
+(., and Olson, M., \231LIBTP: Portable)-5.365 F 2.845(Modular T)104.2
+517.8 R 2.845(ransactions for UNIX,)-.35 F<9a>-.7 E F2(Pr)5.345 E
+(oceedings)-.45 E 1.49(1992 W)104.2 529.8 R 1.49(inter Usenix Confer)
+-.55 F(ence)-.37 E F1 3.99(,S)C 1.49(an Francisco,)-3.99 F
+(CA, January 1992.)104.2 541.8 Q([Ston82])79.2 558 Q(Stonebrak)104.2 570
+Q(er)-.1 E 10.04(,M)-.4 G 7.54(., Stettner)-10.04 F 10.04(,H)-.4 G 7.54
+(., Kalash, J.,)-10.04 F .763(Guttman, A., and L)104.2 582 R .764
+(ynn, N., \231Document Process-)-.55 F .557
+(ing in a Relational Database System,)104.2 594 R 3.056<9a4d>-.7 G
+(emoran-)-3.056 E .825(dum No. UCB/ERL M82/32, Uni)104.2 606 R -.15(ve)
+-.25 G .825(rsity of Cali-).15 F(fornia at Berk)104.2 618 Q(ele)-.1 E
+1.3 -.65(y, B)-.15 H(erk).65 E(ele)-.1 E 1.3 -.65(y, C)-.15 H
+(A, May 1982.).65 E EP
+%%Trailer
+end
+%%EOF
diff --git a/bdb/docs/ref/refs/embedded.html b/bdb/docs/ref/refs/embedded.html
new file mode 100644
index 00000000000..b7641d931c1
--- /dev/null
+++ b/bdb/docs/ref/refs/embedded.html
@@ -0,0 +1,672 @@
+<html>
+<head>
+<title>Challenges in Embedded Database System Administration</title>
+</head>
+<body bgcolor=white>
+<center>
+<h1>Challenges in Embedded Database System Administration</h1>
+<h3>Margo Seltzer, Harvard University</h3>
+<h3>Michael Olson, Sleepycat Software, Inc.</h3>
+<em>{margo,mao}@sleepycat.com</em>
+</center>
+<p>
+Database configuration and maintenance have historically been complex tasks,
+often
+requiring expert knowledge of database design and application
+behavior.
+In an embedded environment, it is not feasible to require such
+expertise and ongoing database maintenance.
+This paper discusses the database administration
+challenges posed by embedded systems and describes how the
+Berkeley DB architecture addresses these challenges.
+
+<h2>1. Introduction</h2>
+
+Embedded systems provide a combination of opportunities and challenges
+in application and system configuration and management.
+As an embedded system is most often dedicated to a single application or
+small set of tasks, the operating conditions of the system are
+typically better understood than those of general purpose computing
+environments.
+Similarly, as embedded systems are dedicated to a small set of tasks,
+one would expect that the software to manage them should be small
+and simple.
+On the other hand, once an embedded system is deployed, it must
+continue to function without interruption and without administrator
+intervention.
+<p>
+Database administration consists of two components,
+initial configuration and ongoing maintenance.
+Initial configuration consists of database design, manifestation,
+and tuning.
+The instantiation of the design includes decomposing the design
+into tables, relations, or objects and designating proper indices
+and their implementations (e.g., Btrees, hash tables, etc.).
+Tuning a design requires selecting a location for the log and
+data files, selecting appropriate database page sizes, specifying
+the size of in-memory caches, and specifying the limits of
+multi-threading and concurrency.
+As embedded systems define a specific environment and set of tasks,
+requiring expertise during the initial system
+configuration process is acceptable, and we focus our efforts on
+the ongoing maintenance of the system.
+In this way, our emphasis differs from other projects such as
+Microsoft's AutoAdmin project <a href="#Chaud982">[3]</a> and the "no-knobs"
+administration that is identified as an area of important future
+research by the Asilomar authors <a href="#Bern98">[1]</a>.
+<p>
+In this paper, we focus on what the authors
+of the Asilomar report call "gizmo" databases <a href="#Bern98"> [1]</a>,
+databases
+that reside in devices such as smart cards, toasters, or telephones.
+The key characteristics of such databases are that their
+functionality is completely transparent to users; no one ever
+performs explicit database operations or
+database maintenance; the database may crash at any time and
+must recover instantly; the device may undergo a hard reset at
+any time, requiring that the database return to its initial
+state; and the semantic integrity of the database must be maintained
+at all times.
+In Section 2, we provide more detail on the sorts of tasks
+typically performed by database administrators (DBAs) that must
+be automated in an embedded system.
+<p>
+The rest of this paper is structured as follows.
+In Section 2, we outline the requirements for embedded database support.
+In Section 3, we discuss how Berkeley DB
+is conducive to the hands-off management
+required in embedded systems.
+In Section 4, we discuss novel features that
+enhance Berkeley
+DB's suitability for the embedded applications.
+In Section 5, we discuss issues of footprint size.
+In Section 6 we discuss related work, and we conclude
+in Section 7.
+
+<h2>2. Embedded Database Requirements</h2>
+Historically, much of the commercial database industry has been driven
+by the requirements of high performance online transaction
+processing (OLTP), complex query processing, and the industry
+standard benchmarks that have emerged (e.g., TPC-C <a href="#TPCC">[9]</a>,
+TPC-D <a href="#TPCD">[10]</a>) to
+allow for system comparisons.
+As embedded systems typically perform fairly simple queries,
+such metrics are not nearly as relevant for embedded database
+systems as are ease of maintenance, robustness, and small footprint.
+Of these three requirements, robustness and ease of maintenance
+are the key issues.
+Users must trust the data stored in their devices and must not need
+to manually perform anything resembling system administration in order
+to get their unit to work properly.
+Fortunately, ease of use and robustness are important side
+effects of simplicity and good design.
+These, in turn, lead to a small size, providing the third
+requirement of an embedded system.
+<h3>2.1 The User Perspective</h3>
+<p>
+In the embedded database arena, it is the ongoing maintenance tasks
+that must be automated, not necessarily the initial system configuration.
+There are five tasks
+that are traditionally performed by DBAs,
+but must be performed automatically
+in embedded database systems.
+These tasks are
+log archival and reclamation,
+backup,
+data compaction/reorganization,
+automatic and rapid recovery, and
+reinitialization from scratch.
+<P>
+Log archival and backup are tightly coupled.
+Database backups are part of any
+large database installation, and log archival is analogous to incremental
+backup.
+It is not clear what the implications of backup and archival are in
+an embedded system.
+Consumers do not back up their VCRs or refrigerators, yet they do
+(or should) back up their personal computers or personal digital
+assistants.
+For the remainder of this paper, we assume that backups, in some form,
+are required for gizmo databases (imagine having to reprogram, manually,
+the television viewing access pattern learned by some set-top television
+systems today).
+Furthermore, we require that those backups are nearly instantaneous or
+completely transparent,
+as users should not be aware that their gizmos are being backed up
+and should not have to explicitly initiate such backups.
+<p>
+Data compaction or reorganization has traditionally required periodic
+dumping and restoration of
+database tables and the recreation of indices.
+In an embedded system, such reorganization must happen automatically.
+<p>
+Recovery issues are similar in embedded and traditional environments
+with a few exceptions.
+While a few seconds or even a minute of recovery time is acceptable
+for a large server installation, no one is willing to wait
+for their telephone or television to reboot.
+As with archival, recovery must be nearly instantaneous in an embedded product.
+Secondly, it is often the case that a system will be completely
+reinitialized, rather than simply rebooted.
+In this case, the embedded database must be restored to its initial
+state, freeing all its resources.
+This is not typically a requirement of large server systems.
+<h3>2.2 The Developer Perspective</h3>
+<p>
+In addition to the maintenance-free operation required of the
+embedded systems, there are a number of requirements that fall
+out of the constrained resources typically found in the "gizmos"
+using gizmo databases. These requirements are:
+small footprint,
+short code-path,
+programmatic interface for tight application coupling and
+to avoid the overhead (in both time and size) of
+interfaces such as SQL and ODBC,
+application configurability and flexibility,
+support for complete memory-resident operation (e.g., these systems
+must run on gizmos without file systems), and
+support for multi-threading.
+<p>
+A small footprint and short code-path are self-explanatory; what is
+less obvious is that the programmatic interface requirement follows
+directly from them.
+Traditional interfaces such as ODBC and SQL add significant
+size overhead and frequently add multiple context/thread switches
+per operation, not to mention several IPC calls.
+An embedded product is less likely to require the complex
+query processing that SQL enables.
+Instead, in the embedded space, the ability for an application
+to configure the database for the specific tasks in question
+is more important than a general query interface.
+<p>
+As some systems do not provide storage other than RAM and ROM,
+it is essential that an embedded database work seamlessly
+in memory-only environments.
+Similarly, many of today's embedded operating systems provide a
+single address space architecture, so a simple multi-threading
+capability is essential for applications requiring any concurrency.
+<p>
+In general, embedded applications run on gizmos whose native
+operating system support varies tremendously.
+For example, the embedded OS may or may
+not support user-level processing or multi-threading.
+Even if it does, a particular embedded
+application may or may not need it.
+Not all applications need more than one thread of control.
+An embedded database must provide mechanisms to developers
+without deciding policy.
+For example, the threading model in an application is a matter of policy,
+and depends
+not on the database software, but on the hardware, operating
+system, and the application's feature set.
+Therefore, the data manager must provide for the use of multi-threading,
+but not require it.
+
+<h2>3. Berkeley DB: A Database for Embedded Systems</h2>
+Berkeley DB is the result of implementing database functionality
+using the UNIX tool-based philosophy.
+The current Berkeley DB package, as distributed by Sleepycat
+Software, is a descendant of the hash and btree access methods
+distributed with 4.4BSD and its descendants.
+The original package (referred to as DB-1.85),
+while intended as a public domain replacement for dbm and
+its followers (e.g., ndbm, gdbm, etc.), rapidly became widely
+used as an efficient, easy-to-use data store.
+It was incorporated into a number of Open Source packages including
+Perl, Sendmail, Kerberos, and the GNU C-library.
+<p>
+Versions 2.X and higher are distributed by Sleepycat Software and
+add functionality for concurrency, logging, transactions, and
+recovery.
+Each piece of additional functionality is implemented as an independent
+module, which means that the subsystems can be used outside the
+context of Berkeley DB. For example, the locking subsystem can
+easily be used to implement locking for a non-DB application and
+the shared memory buffer pool can be used for any application
+caching data in main memory.
+This subsystem design allows a designer to pick and choose
+the functionality necessary for the application, minimizing
+memory footprint and maximizing performance.
+This addresses the small footprint and short code-path criteria
+mentioned in the previous section.
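+<p>
+As a brief illustration of that standalone use, the sketch below acquires
+and releases a lock on an application-defined object using only the lock
+manager. It is written against the later, method-style C API; the
+environment home, handle names, and the object being locked are
+illustrative assumptions, and the 2.x-era calls differ in detail.
+<pre>
+#include &lt;string.h>
+#include &lt;db.h>
+
+/* Use only the locking subsystem to protect a non-database resource. */
+int
+lock_example(const char *home)
+{
+    DB_ENV *dbenv;
+    DB_LOCK lock;
+    DBT obj;
+    u_int32_t locker;
+    int ret;
+
+    if ((ret = db_env_create(&dbenv, 0)) != 0)
+        return (ret);
+    /* Initialize the lock manager only; no database files are involved. */
+    if ((ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK, 0)) != 0)
+        goto err;
+    if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
+        goto err;
+
+    /* The locked "object" is just an application-chosen byte string. */
+    memset(&obj, 0, sizeof(obj));
+    obj.data = "config-file";
+    obj.size = sizeof("config-file");
+
+    if ((ret = dbenv->lock_get(dbenv,
+        locker, 0, &obj, DB_LOCK_WRITE, &lock)) != 0)
+        goto err;
+    /* ... critical section protecting the non-database resource ... */
+    ret = dbenv->lock_put(dbenv, &lock);
+
+err:
+    (void)dbenv->close(dbenv, 0);
+    return (ret);
+}
+</pre>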
+<p>
+As Berkeley DB grew out of a replacement for dbm, its primary
+implementation language has always been C and its interface has
+been programmatic. The C interface is the native interface,
+unlike many database systems where the programmatic API is simply
+a layer on top of an already-costly query interface (e.g., embedded
+SQL).
+Berkeley DB's heritage is also apparent in its data model; it has
+none.
+The database stores unstructured key/data pairs, specified as
+variable length byte strings.
+This leaves schema design and representation issues the responsibility
+of the application, which is ideal for an embedded environment.
+Applications retain full control over specification of their data
+types, representation, index values, and index relationships.
+In other words, Berkeley DB provides a robust, high-performance,
+keyed storage system, not a particular database management system.
+We have designed for simplicity and performance, trading off
+complex, general purpose support that is better encapsulated in
+applications.
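+<p>
+To make the key/data model concrete, the following minimal sketch stores
+one pair and reads it back. It is written against the later
+db_create/DB->open form of the C interface (exact open() signatures vary
+across Berkeley DB releases); the file name and the values stored are
+illustrative assumptions.
+<pre>
+#include &lt;string.h>
+#include &lt;db.h>
+
+/* Store one key/data pair and read it back; both are opaque byte strings. */
+int
+kv_example(void)
+{
+    DB *dbp;
+    DBT key, data;
+    int ret;
+
+    if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+    if ((ret = dbp->open(dbp, NULL,
+        "settings.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+        goto err;
+
+    memset(&key, 0, sizeof(key));
+    memset(&data, 0, sizeof(data));
+    key.data = "volume";
+    key.size = sizeof("volume");
+    data.data = "11";
+    data.size = sizeof("11");
+
+    /* The library imposes no schema; interpretation is up to the caller. */
+    if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
+        goto err;
+
+    memset(&data, 0, sizeof(data));
+    ret = dbp->get(dbp, NULL, &key, &data, 0);
+
+err:
+    (void)dbp->close(dbp, 0);
+    return (ret);
+}
+</pre>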
+<p>
+Another element of Berkeley DB's programmatic interface is its
+customizability; applications can specify Btree comparison and
+prefix compression functions, hash functions, error routines,
+and recovery models.
+This means that embedded applications can tailor the underlying
+database to best suit their data demands.
+Similarly, the utilities traditionally bundled with a database
+manager (e.g., recovery, dump/restore, archive) are implemented
+as tiny wrapper programs around library routines. This means
+that it is not necessary to run separate applications for the
+utilities. Instead, independent threads can act as utility
+daemons, or regular query threads can perform utility functions.
+Many of the current products built on Berkeley DB are bundled as
+a single large server with independent threads that perform functions
+such as checkpoint, deadlock detection, and performance monitoring.
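+<p>
+As a sketch of that customizability, an application can install its own
+Btree comparison function before opening a database, so that keys sort by
+an application-defined rule rather than as raw byte strings. The key
+layout assumed below (a 4-byte record id in host byte order) and the
+later method-style calls are illustrative assumptions.
+<pre>
+#include &lt;string.h>
+#include &lt;db.h>
+
+/* Compare keys as 4-byte record ids rather than lexicographically. */
+static int
+id_compare(DB *dbp, const DBT *a, const DBT *b)
+{
+    u_int32_t ida, idb;
+
+    memcpy(&ida, a->data, sizeof(ida));
+    memcpy(&idb, b->data, sizeof(idb));
+    return (ida &lt; idb ? -1 : (ida > idb ? 1 : 0));
+}
+
+int
+open_with_custom_order(DB **dbpp)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+    /* The comparison function must be set before the database is opened. */
+    if ((ret = dbp->set_bt_compare(dbp, id_compare)) != 0 ||
+        (ret = dbp->open(dbp, NULL,
+        "byid.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+        (void)dbp->close(dbp, 0);
+        return (ret);
+    }
+    *dbpp = dbp;
+    return (0);
+}
+</pre>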
+<p>
+As mentioned earlier, living in an embedded environment requires
+flexible management of storage.
+Berkeley DB does not require any preallocation of disk space
+for log or data files.
+While many commercial database systems take complete control
+of a raw device, Berkeley DB uses a normal file system and can
+therefore safely and easily share a data space with other
+programs.
+All databases and log files are native files of the host environment,
+so whatever utilities are provided by the environment can be used
+to manage database files as well.
+<p>
+Berkeley DB provides three different memory models for its
+management of shared information.
+Applications can use the IEEE Std 1003.1b-1993 (POSIX) <tt>mmap</tt>
+interface to share
+data, they can use system shared memory, as frequently provided
+by the shmget family of interfaces, or they can use per-process
+heap memory (e.g., malloc).
+Applications that require no permanent storage and do not provide
+shared memory facilities can still use Berkeley DB by requesting
+strictly private memory and specifying that all databases be
+memory-resident.
+This provides pure-memory operation.
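+<p>
+As a sketch of pure-memory operation using the later C API, opening a
+database with no backing file name creates a strictly temporary database;
+with a cache large enough to hold the data it never touches a file system
+(the names and flags below are illustrative assumptions).
+<pre>
+#include &lt;db.h>
+
+/* Create a database that is not backed by any named file. */
+int
+open_in_memory(DB **dbpp)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        return (ret);
+    /* A NULL file name means the database is temporary and in-memory;
+     * if the cache overflows, the library may spill to temporary files. */
+    if ((ret = dbp->open(dbp, NULL,
+        NULL, NULL, DB_BTREE, DB_CREATE, 0)) != 0) {
+        (void)dbp->close(dbp, 0);
+        return (ret);
+    }
+    *dbpp = dbp;
+    return (0);
+}
+</pre>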
+<p>
+Lastly, Berkeley DB is designed for rapid startup -- recovery can
+happen automatically as part of system initialization.
+This means that Berkeley DB works correctly in environments where
+gizmos are suddenly shut down and restarted.
+
+<h2>4. Extensions for Embedded Environments </h2>
+While the Berkeley DB library has been designed for use in
+embedded systems, all the features described above are useful
+in more conventional systems as well.
+In this section, we discuss a number of features and "automatic
+knobs" that are specifically geared
+toward the more constrained environments found in gizmo databases.
+
+<h3>4.1 Automatic compression</h3>
+Following the programmatic interface design philosophy, we
+support application-specific (or default) compression routines.
+These can be geared toward the particular data types present
+in the application's dataset, thus providing better compression
+than a general purpose routine.
+Note that the application could instead specify an encryption
+function and create encrypted databases instead of compressed ones.
+Alternatively, the application might specify a function that performs
+both compression and encryption.
+<p>
+As applications are also permitted to specify comparison and hash
+functions, the application can choose to organize its data based
+either on uncompressed, clear-text data or on compressed, encrypted
+data.
+If the application indicates that data should be compared in its
+processed form (i.e., compressed and encrypted), then the compression
+and encryption are performed on individual data items and the in-memory
+representation retains these characteristics.
+However, if the application indicates that data should be compared in
+its original form, then entire pages are transformed upon being read
+into or written out of the main memory buffer cache.
+These two alternatives provide the flexibility to trade space
+and security for performance.
+
+<h3>4.2 In-memory logging & transactions</h3>
+One of the four key properties of transaction systems is durability.
+This means that transaction systems are designed for permanent storage
+(most commonly disk). However, as mentioned above, embedded systems
+do not necessarily contain any such storage.
+Nevertheless, transactions can be useful in this environment to
+preserve the semantic integrity of the underlying storage.
+Berkeley DB optionally provides logging functionality and
+transaction support regardless of whether the database and logs
+are on disk or in memory.
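+<p>
+As a sketch of a transaction-protected update (later method-style API;
+whether the log lives on disk or in memory is an environment configuration
+detail not shown here), two semantically linked writes can be committed or
+rolled back as a unit. The handle and parameter names are assumptions.
+<pre>
+#include &lt;db.h>
+
+/*
+ * Assumes dbenv was opened with DB_INIT_TXN | DB_INIT_LOG | DB_INIT_LOCK |
+ * DB_INIT_MPOOL, and that dbp belongs to that environment.
+ */
+int
+update_pair(DB_ENV *dbenv, DB *dbp, DBT *k1, DBT *d1, DBT *k2, DBT *d2)
+{
+    DB_TXN *txn;
+    int ret;
+
+    if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+        return (ret);
+
+    /* Either both records are updated, or neither is. */
+    if ((ret = dbp->put(dbp, txn, k1, d1, 0)) != 0 ||
+        (ret = dbp->put(dbp, txn, k2, d2, 0)) != 0) {
+        (void)txn->abort(txn);
+        return (ret);
+    }
+    return (txn->commit(txn, 0));
+}
+</pre>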
+
+<h3>4.3 Remote Logs</h3>
+While we do not expect users to backup their television sets and
+toasters, it is conceivable that a set-top box provided by a
+cable carrier should, in fact, be backed up by that cable carrier.
+The ability to store logs remotely can provide "information appliance"
+functionality, and can also be used in conjunction with local logs
+to enhance reliability.
+Furthermore, remote logs provide for catastrophic recovery, e.g., loss
+of the gizmo, destruction of the gizmo, etc.
+
+<h3>4.4 Application References to Database Buffers</h3>
+
+Typically, when data is returned to the user, it must be copied
+from the data manager's buffer cache (or data page) into the
+application's memory.
+However, in an embedded environment, the robustness of the
+total software package is of paramount importance, not the
+isolation between the application and the data manager.
+As a result, it is possible for the data manager to avoid
+copies by giving applications direct references to data items
+in a shared memory cache.
+This is a significant performance optimization that can be
+allowed when the application and data manager are tightly
+integrated.
+
+<h3>4.5 Recoverable database creation/deletion</h3>
+
+In a conventional database management system, the creation of
+database tables (relations) and indices are heavyweight operations
+that are not recoverable.
+This is not acceptable in a complex embedded environment where
+instantaneous recovery and robust operation in the face of
+all types of database operations is essential.
+While Berkeley DB files can be removed using normal file system
+utilities, we provide transaction protected utilities that
+allow us to recover both database creation and deletion.
+
+<h3>4.6 Adaptive concurrency control</h3>
+The Berkeley DB package uses page-level locking by default.
+This trades off fine grain concurrency control for simplicity
+during recovery. (Finer grain concurrency control can be
+obtained by reducing the page size in the database.)
+However, when multiple threads/processes perform page-locking
+in the presence of writing operations, there is the
+potential for deadlock.
+As some environments do not need or desire the overhead of
+logging and transactions, it is important to provide the
+ability for concurrent access without the potential for
+deadlock.
+<p>
+Berkeley DB provides an option to perform coarser grain,
+deadlock-free locking.
+Rather than locking on pages, locking is performed at the
+interface to the database.
+Multiple readers or a single writer are allowed to be
+active in the database at any instant in time, with
+conflicting requests queued automatically.
+The presence of cursors, through which applications can both
+read and write data, complicates this design.
+If a cursor is currently being used for reading, but will later
+be used to write, the system will be deadlock prone if no
+special precautions are taken.
+To handle this situation, we require that, when a cursor is
+created, the application specify any future intention to write.
+If there is an intention to write, the cursor is granted an
+intention-to-write lock which does not conflict with readers,
+but does conflict with other intention-to-write locks and write
+locks.
+The end result is that the application is limited to a single
+potentially writing cursor accessing the database at any point
+in time.
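+<p>
+In later releases this scheme is packaged as the Concurrent Data Store
+configuration: the environment is opened with the DB_INIT_CDB flag, and a
+cursor declares its intention to write by passing DB_WRITECURSOR when it
+is created. A minimal sketch, assuming such an environment and an already
+open handle dbp:
+<pre>
+#include &lt;string.h>
+#include &lt;db.h>
+
+/* Walk the database with a cursor that may later be used to write. */
+int
+scan_and_update(DB *dbp)
+{
+    DBC *dbc;
+    DBT key, data;
+    int ret;
+
+    /* DB_WRITECURSOR declares the intention to write up front. */
+    if ((ret = dbp->cursor(dbp, NULL, &dbc, DB_WRITECURSOR)) != 0)
+        return (ret);
+
+    memset(&key, 0, sizeof(key));
+    memset(&data, 0, sizeof(data));
+    while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+        /* ... examine the record; dbc->c_put() may rewrite it ... */
+    }
+    if (ret == DB_NOTFOUND)    /* Reaching the end is not an error. */
+        ret = 0;
+
+    (void)dbc->c_close(dbc);
+    return (ret);
+}
+</pre>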
+<p>
+Under periods of low contention (but potentially high throughput),
+the normal page-level locking provides the best overall throughput.
+However, as contention rises, so does the potential for deadlock.
+At some cross-over point, switching to the less concurrent but
+deadlock-free locking protocol will result in higher throughput,
+since operations never need to be retried.
+Given the operating conditions of an embedded database manager,
+it is useful to make this change automatically as the system
+itself detects high contention.
+
+<h3>4.7 Adaptive synchronization</h3>
+
+In addition to the logical locks that protect the integrity of the
+database pages, Berkeley DB must synchronize access to shared memory
+data structures, such as the lock table, in-memory buffer pool, and
+in-memory log buffer.
+Each independent module uses a single mutex to protect its shared
+data structures, under the assumption that operations that require
+the mutex are very short and the potential for conflict is
+low.
+Unfortunately, in highly concurrent environments with multiple processors
+present, this assumption is not always true.
+When this assumption becomes invalid (that is, we observe significant
+contention for the subsystem mutexes), we can switch over to a finer-grained
+concurrency model for the mutexes.
+Once again, there is a performance trade-off. Fine-grain mutexes
+impose a penalty of approximately 25% (due to the increased number
+of mutexes required for each operation), but allow for higher throughput.
+Using fine-grain mutexes under low contention would cause a decrease
+in performance, so it is important to monitor the system carefully
+and make the change only when it will increase system
+throughput without jeopardizing latency.
+
+<h2>5. Footprint of an Embedded System</h2>
+While traditional systems compete on price-performance, the
+embedded players will compete on price, features, and footprint.
+The earlier sections have focused on features; in this section
+we focus on footprint.
+<p>
+Oracle reports that Oracle Lite 3.0 requires 350 KB to 750 KB
+of memory and approximately 2.5 MB of hard disk space <a href="#Oracle">[7]</a>.
+This includes drivers for interfaces such as ODBC and JDBC.
+In contrast, Berkeley DB ranges in size from 75 KB to under 200 KB,
+forgoing heavyweight interfaces such as ODBC and JDBC and
+providing a variety of deployed sizes that can be used depending
+on application needs. At the low end, applications requiring
+a simple single-user access method can choose from extended
+linear hashing, B+ trees, or record-number based retrieval and
+pay only the 75 KB space requirement.
+Applications requiring all three access methods will observe the
+110 KB footprint.
+At the high end, a fully recoverable, high-performance system
+occupies less than a quarter megabyte of memory.
+This is a system you can easily incorporate in your toaster oven.
+Table 1 shows the per-module breakdown of the entire Berkeley DB
+library. Note that this does not include memory used to cache database
+pages.
+
+<table border>
+<tr><th colspan=4>Object sizes in bytes</th></tr>
+<tr><th align=left>Subsystem</th><th align=center>Text</th><th align=center>Data</th><th align=center>Bss</th></tr>
+<tr><td>Btree-specific routines</td><td align=right>28812</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td>Recno-specific routines</td><td align=right>7211</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td>Hash-specific routines</td><td align=right>23742</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><td>Memory Pool</td><td align=right>14535</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td>Access method common code</td><td align=right>23252</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td>OS compatibility library</td><td align=right>4980</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><td>Support utilities</td><td align=right>6165</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><th>All modules for Btree access method only</th><td align=right>77744</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><th>All modules for Recno access method only</th><td align=right>84955</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><th>All modules for Hash access method only</th><td align=right>72674</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><th align=left>All Access Methods</th><td align=right>108697</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><td colspan=4><br></td></tr>
+<tr><td>Locking</td><td align=right>12533</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><td>Recovery</td><td align=right>26948</td><td align=right>8</td><td align=right>4</td></tr>
+<tr><td>Logging</td><td align=right>37367</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><th align=left>Full Package</th><td align=right>185545</td><td align=right>60</td><td align=right>4</td></tr>
+<tr><td colspan=4><br></td></tr>
+</table>
+
+<h2>6. Related Work</h2>
+
+Every three to five years, leading researchers in the database
+community convene to identify future directions in database
+research.
+They produce a report of this meeting, named for the year and
+location of the meeting.
+The most recent of these reports, the 1998 Asilomar report,
+identifies the embedded database market as one of the
+high growth areas in database research <a href="#Bern98">[1]</a>.
+Not surprisingly, market analysts identify the embedded database
+market as a high-growth area in the commercial sector as well <a href="#Host98">
+[5]</a>.
+<p>
+The Asilomar report identifies a new class of database applications, which its
+authors term "gizmo" databases: small databases embedded in tiny mobile
+appliances, e.g., smart-cards, telephones, personal digital assistants.
+Such databases must be self-managing, secure and reliable.
+Thus, the idea is that gizmo databases require plug and play data
+management with no database administrator (DBA), no human settable
+parameters, and the ability to adapt to changing conditions.
+More specifically, the Asilomar authors claim that the goal is
+self-tuning, including defining the physical DB design, the
+logical DB design, and automatic reports and utilities <a href="#Bern98">[1]</a>.
+To date,
+few researchers have accepted this challenge, and there is a dearth
+of research literature on the subject.
+<p>
+Our approach to embedded database administration is fundamentally
+different from that described by the Asilomar authors.
+We adopt their terminology, but view the challenge in supporting
+gizmo databases to be that of self-sustenance <em>after</em> initial
+deployment. Therefore, we find it not only acceptable but
+desirable to assume that application developers control initial
+database design and configuration. To the best of our knowledge,
+none of the published work in this area addresses this approach.
+<p>
+As the research community has not provided guidance in this
+arena, most work in embedded database administration has fallen
+to the commercial vendors.
+These vendors fall into two camps, companies selling databases
+specifically designed for embedding or programmatic access
+and the major database vendors (e.g., Oracle, Informix, Sybase).
+<p>
+The embedded vendors all acknowledge the need for automatic
+administration, but fail to identify precisely how their
+products actually accomplish this.
+A notable exception is Interbase, whose white paper
+comparing its product with Sybase's and Microsoft's SQL servers
+explicitly addresses ease of maintenance.
+Interbase claims that as they use no log files, there is
+no need for log reclamation, checkpoint tuning, or other
+tasks associated with log management. However, Interbase
+uses Transaction Information Pages, and it is unclear
+how these are reused or reclaimed <a href="#Interbase">[6]</a>.
+Additionally, with a log-free system, they must use
+a FORCE policy (write all pages to disk at commit),
+as defined by Haerder and Reuter <a href="#Haerder">[4]</a>. This has
+serious performance consequences for disk-based systems.
+The approach described in this paper does use logs and
+therefore requires log reclamation,
+but provides hooks so the application may reclaim logs
+safely and programmatically.
+While Berkeley DB does require checkpoints, the goal of
+tuning the checkpoint interval is to bound recovery time.
+Since the checkpoint interval in Berkeley DB can be expressed
+by the amount of log data written, it requires no tuning.
+The application designer sets a target recovery time,
+determines the amount of log data that can be read in that interval,
+and specifies the checkpoint interval accordingly. Even as
+load changes, the time to recover does not.
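+<p>
+As a sketch of that policy (later method-style API; the 1MB threshold and
+the 30-second poll are illustrative values for a hypothetical
+recovery-time target), a checkpointing thread simply asks the library to
+checkpoint whenever a fixed amount of log has accumulated:
+<pre>
+#include &lt;unistd.h>
+#include &lt;db.h>
+
+/*
+ * Checkpoint whenever roughly 1MB of log has been written since the last
+ * checkpoint; the kbyte threshold, not the wall-clock poll interval, is
+ * what bounds how much log recovery must replay.
+ */
+void
+checkpoint_thread(DB_ENV *dbenv, volatile int *running)
+{
+    while (*running) {
+        (void)dbenv->txn_checkpoint(dbenv, 1024, 0, 0);
+        sleep(30);
+    }
+}
+</pre>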
+<p>
+The backup approaches taken by Interbase and Berkeley DB
+are similar in that they both allow online backup, but
+rather different in their effect on transactions running
+during backup. As Interbase performs backups as transactions
+<a href="#Interbase">[6]</a>, concurrent queries can suffer potentially long
+delays. Berkeley DB uses native operating system utilities
+and recovery for backups, so there is no interference with
+concurrent activity, other than potential contention on disk
+arms.
+<p>
+There are a number of database vendors selling in
+the embedded market (e.g., Raima,
+Centura, Pervasive, Faircom), but none highlight
+the special requirements of embedded database
+applications.
+On the other end of the spectrum, the major vendors,
+Oracle, Sybase, Microsoft, are all becoming convinced
+of the importance of the embedded market.
+As mentioned earlier, Oracle has announced its
+Oracle Lite server for embedded use.
+Sybase has announced its UltraLite platform, an "application-optimized,
+high-performance, SQL database engine for professional
+application developers building solutions for mobile and embedded platforms"
+<a href="#Sybase">[8]</a>.
+We believe that SQL is incompatible with the
+gizmo database environment and with the truly embedded systems for which
+Berkeley DB is most suitable.
+Microsoft research is taking a different approach, developing
+technology to assist in automating initial database design and
+index specification <a href="#Chaud98">[2]</a><a href="#Chaud982">[3]</a>.
+As mentioned earlier, we believe that such configuration is not only
+acceptable in the embedded market but desirable, so that applications
+can tune their database management for the target environment.
+<h2>7. Conclusions</h2>
+The coming wave of embedded systems poses a new set of challenges
+for data management.
+The traditional server-based, big footprint systems designed for
+high performance on big iron are not the right approach in this
+environment.
+Instead, application developers need small, fast, versatile systems
+that can be tailored to a specific environment.
+In this paper, we have identified several of the key issues in
+providing these systems and shown how Berkeley DB provides
+many of the characteristics necessary for such applications.
+
+<h2>8. References</h2>
+<p>
+[1] <a name="Bern98"> Bernstein, P., Brodie, M., Ceri, S., DeWitt, D., Franklin, M.,
+Garcia-Molina, H., Gray, J., Held, J., Hellerstein, J.,
+Jagadish, H., Lesk, M., Maier, D., Naughton, J.,
+Pirahesh, H., Stonebraker, M., Ullman, J.,
+"The Asilomar Report on Database Research,"
+SIGMOD Record 27(4): 74-80, 1998.
+</a>
+<p>
+[2] <a name="Chaud98"> Chaudhuri, S., Narasayya, V.,
+"AutoAdmin 'What-If' Index Analysis Utility,"
+<em>Proceedings of the ACM SIGMOD Conference</em>, Seattle, 1998.
+</a>
+<p>
+[3] <a name="Chaud982"> Chaudhuri, S., Narasayya, V.,
+"An Efficient, Cost-Driven Index Selection Tool for Microsoft SQL Server,"
+<em>Proceedings of the 23rd VLDB Conference</em>, Athens, Greece, 1997.
+</a>
+<p>
+[4] <a name="Haerder"> Haerder, T., Reuter, A.,
+"Principles of Transaction-Oriented Database Recovery,"
+<em>Computing Surveys</em> 15(4), 1983, 237-318.
+</a>
+<p>
+[5] <a name="Host98"> Hostetler, M., "Cover Is Off A New Type of Database,"
+Embedded DB News,
+http://www.theadvisors.com/embeddeddbnews.htm,
+5/6/98.
+</a>
+<p>
+[6] <a name="Interbase"> Interbase, "A Comparison of Borland InterBase 4.0,
+Sybase SQL Server, and Microsoft SQL Server,"
+http://web.interbase.com/products/doc_info_f.html.
+</a>
+<p>
+[7] <a name="Oracle"> Oracle, "Oracle Delivers New Server, Application Suite
+to Power the Web for Mission-Critical Business,"
+http://www.oracle.com.sg/partners/news/newserver.htm,
+May 1998.
+</a>
+<p>
+[8] <a name="Sybase"> Sybase, "Sybase UltraLite," http://www.sybase.com/products/ultralite/beta.
+</a>
+<p>
+[9] <a name="TPCC"> Transaction Processing Council, "TPC-C Benchmark Specification,
+Version 3.4," San Jose, CA, August 1998.
+</a>
+<p>
+[10] <a name="TPCD"> Transaction Processing Council, "TPC-D Benchmark Specification,
+Version 2.1," San Jose, CA, April 1999.
+</a>
+</body>
+</html>
+
+
diff --git a/bdb/docs/ref/refs/hash_usenix.ps b/bdb/docs/ref/refs/hash_usenix.ps
new file mode 100644
index 00000000000..c884778830d
--- /dev/null
+++ b/bdb/docs/ref/refs/hash_usenix.ps
@@ -0,0 +1,12209 @@
+%!PS-Adobe-1.0
+%%Creator: utopia:margo (& Seltzer,608-13E,8072,)
+%%Title: stdin (ditroff)
+%%CreationDate: Tue Dec 11 15:06:45 1990
+%%EndComments
+% @(#)psdit.pro 1.3 4/15/88
+% lib/psdit.pro -- prolog for psdit (ditroff) files
+% Copyright (c) 1984, 1985 Adobe Systems Incorporated. All Rights Reserved.
+% last edit: shore Sat Nov 23 20:28:03 1985
+% RCSID: $Header: psdit.pro,v 2.1 85/11/24 12:19:43 shore Rel $
+
+% Changed by Edward Wang (edward@ucbarpa.berkeley.edu) to handle graphics,
+% 17 Feb, 87.
+
+/$DITroff 140 dict def $DITroff begin
+/fontnum 1 def /fontsize 10 def /fontheight 10 def /fontslant 0 def
+/xi{0 72 11 mul translate 72 resolution div dup neg scale 0 0 moveto
+ /fontnum 1 def /fontsize 10 def /fontheight 10 def /fontslant 0 def F
+ /pagesave save def}def
+/PB{save /psv exch def currentpoint translate
+ resolution 72 div dup neg scale 0 0 moveto}def
+/PE{psv restore}def
+/arctoobig 90 def /arctoosmall .05 def
+/m1 matrix def /m2 matrix def /m3 matrix def /oldmat matrix def
+/tan{dup sin exch cos div}def
+/point{resolution 72 div mul}def
+/dround {transform round exch round exch itransform}def
+/xT{/devname exch def}def
+/xr{/mh exch def /my exch def /resolution exch def}def
+/xp{}def
+/xs{docsave restore end}def
+/xt{}def
+/xf{/fontname exch def /slotno exch def fontnames slotno get fontname eq not
+ {fonts slotno fontname findfont put fontnames slotno fontname put}if}def
+/xH{/fontheight exch def F}def
+/xS{/fontslant exch def F}def
+/s{/fontsize exch def /fontheight fontsize def F}def
+/f{/fontnum exch def F}def
+/F{fontheight 0 le{/fontheight fontsize def}if
+ fonts fontnum get fontsize point 0 0 fontheight point neg 0 0 m1 astore
+ fontslant 0 ne{1 0 fontslant tan 1 0 0 m2 astore m3 concatmatrix}if
+ makefont setfont .04 fontsize point mul 0 dround pop setlinewidth}def
+/X{exch currentpoint exch pop moveto show}def
+/N{3 1 roll moveto show}def
+/Y{exch currentpoint pop exch moveto show}def
+/S{show}def
+/ditpush{}def/ditpop{}def
+/AX{3 -1 roll currentpoint exch pop moveto 0 exch ashow}def
+/AN{4 2 roll moveto 0 exch ashow}def
+/AY{3 -1 roll currentpoint pop exch moveto 0 exch ashow}def
+/AS{0 exch ashow}def
+/MX{currentpoint exch pop moveto}def
+/MY{currentpoint pop exch moveto}def
+/MXY{moveto}def
+/cb{pop}def % action on unknown char -- nothing for now
+/n{}def/w{}def
+/p{pop showpage pagesave restore /pagesave save def}def
+/Dt{/Dlinewidth exch def}def 1 Dt
+/Ds{/Ddash exch def}def -1 Ds
+/Di{/Dstipple exch def}def 1 Di
+/Dsetlinewidth{2 Dlinewidth mul setlinewidth}def
+/Dsetdash{Ddash 4 eq{[8 12]}{Ddash 16 eq{[32 36]}
+ {Ddash 20 eq{[32 12 8 12]}{[]}ifelse}ifelse}ifelse 0 setdash}def
+/Dstroke{gsave Dsetlinewidth Dsetdash 1 setlinecap stroke grestore
+ currentpoint newpath moveto}def
+/Dl{rlineto Dstroke}def
+/arcellipse{/diamv exch def /diamh exch def oldmat currentmatrix pop
+ currentpoint translate 1 diamv diamh div scale /rad diamh 2 div def
+ currentpoint exch rad add exch rad -180 180 arc oldmat setmatrix}def
+/Dc{dup arcellipse Dstroke}def
+/De{arcellipse Dstroke}def
+/Da{/endv exch def /endh exch def /centerv exch def /centerh exch def
+ /cradius centerv centerv mul centerh centerh mul add sqrt def
+ /eradius endv endv mul endh endh mul add sqrt def
+ /endang endv endh atan def
+ /startang centerv neg centerh neg atan def
+ /sweep startang endang sub dup 0 lt{360 add}if def
+ sweep arctoobig gt
+ {/midang startang sweep 2 div sub def /midrad cradius eradius add 2 div def
+ /midh midang cos midrad mul def /midv midang sin midrad mul def
+ midh neg midv neg endh endv centerh centerv midh midv Da
+ Da}
+ {sweep arctoosmall ge
+ {/controldelt 1 sweep 2 div cos sub 3 sweep 2 div sin mul div 4 mul def
+ centerv neg controldelt mul centerh controldelt mul
+ endv neg controldelt mul centerh add endh add
+ endh controldelt mul centerv add endv add
+ centerh endh add centerv endv add rcurveto Dstroke}
+ {centerh endh add centerv endv add rlineto Dstroke}
+ ifelse}
+ ifelse}def
+/Dpatterns[
+[%cf[widthbits]
+[8<0000000000000010>]
+[8<0411040040114000>]
+[8<0204081020408001>]
+[8<0000103810000000>]
+[8<6699996666999966>]
+[8<0000800100001008>]
+[8<81c36666c3810000>]
+[8<0f0e0c0800000000>]
+[8<0000000000000010>]
+[8<0411040040114000>]
+[8<0204081020408001>]
+[8<0000001038100000>]
+[8<6699996666999966>]
+[8<0000800100001008>]
+[8<81c36666c3810000>]
+[8<0f0e0c0800000000>]
+[8<0042660000246600>]
+[8<0000990000990000>]
+[8<0804020180402010>]
+[8<2418814242811824>]
+[8<6699996666999966>]
+[8<8000000008000000>]
+[8<00001c3e363e1c00>]
+[8<0000000000000000>]
+[32<00000040000000c00000004000000040000000e0000000000000000000000000>]
+[32<00000000000060000000900000002000000040000000f0000000000000000000>]
+[32<000000000000000000e0000000100000006000000010000000e0000000000000>]
+[32<00000000000000002000000060000000a0000000f00000002000000000000000>]
+[32<0000000e0000000000000000000000000000000f000000080000000e00000001>]
+[32<0000090000000600000000000000000000000000000007000000080000000e00>]
+[32<00010000000200000004000000040000000000000000000000000000000f0000>]
+[32<0900000006000000090000000600000000000000000000000000000006000000>]]
+[%ug
+[8<0000020000000000>]
+[8<0000020000002000>]
+[8<0004020000002000>]
+[8<0004020000402000>]
+[8<0004060000402000>]
+[8<0004060000406000>]
+[8<0006060000406000>]
+[8<0006060000606000>]
+[8<00060e0000606000>]
+[8<00060e000060e000>]
+[8<00070e000060e000>]
+[8<00070e000070e000>]
+[8<00070e020070e000>]
+[8<00070e020070e020>]
+[8<04070e020070e020>]
+[8<04070e024070e020>]
+[8<04070e064070e020>]
+[8<04070e064070e060>]
+[8<06070e064070e060>]
+[8<06070e066070e060>]
+[8<06070f066070e060>]
+[8<06070f066070f060>]
+[8<060f0f066070f060>]
+[8<060f0f0660f0f060>]
+[8<060f0f0760f0f060>]
+[8<060f0f0760f0f070>]
+[8<0e0f0f0760f0f070>]
+[8<0e0f0f07e0f0f070>]
+[8<0e0f0f0fe0f0f070>]
+[8<0e0f0f0fe0f0f0f0>]
+[8<0f0f0f0fe0f0f0f0>]
+[8<0f0f0f0ff0f0f0f0>]
+[8<1f0f0f0ff0f0f0f0>]
+[8<1f0f0f0ff1f0f0f0>]
+[8<1f0f0f8ff1f0f0f0>]
+[8<1f0f0f8ff1f0f0f8>]
+[8<9f0f0f8ff1f0f0f8>]
+[8<9f0f0f8ff9f0f0f8>]
+[8<9f0f0f9ff9f0f0f8>]
+[8<9f0f0f9ff9f0f0f9>]
+[8<9f8f0f9ff9f0f0f9>]
+[8<9f8f0f9ff9f8f0f9>]
+[8<9f8f1f9ff9f8f0f9>]
+[8<9f8f1f9ff9f8f1f9>]
+[8<bf8f1f9ff9f8f1f9>]
+[8<bf8f1f9ffbf8f1f9>]
+[8<bf8f1fdffbf8f1f9>]
+[8<bf8f1fdffbf8f1fd>]
+[8<ff8f1fdffbf8f1fd>]
+[8<ff8f1fdffff8f1fd>]
+[8<ff8f1ffffff8f1fd>]
+[8<ff8f1ffffff8f1ff>]
+[8<ff9f1ffffff8f1ff>]
+[8<ff9f1ffffff9f1ff>]
+[8<ff9f9ffffff9f1ff>]
+[8<ff9f9ffffff9f9ff>]
+[8<ffbf9ffffff9f9ff>]
+[8<ffbf9ffffffbf9ff>]
+[8<ffbfdffffffbf9ff>]
+[8<ffbfdffffffbfdff>]
+[8<ffffdffffffbfdff>]
+[8<ffffdffffffffdff>]
+[8<fffffffffffffdff>]
+[8<ffffffffffffffff>]]
+[%mg
+[8<8000000000000000>]
+[8<0822080080228000>]
+[8<0204081020408001>]
+[8<40e0400000000000>]
+[8<66999966>]
+[8<8001000010080000>]
+[8<81c36666c3810000>]
+[8<f0e0c08000000000>]
+[16<07c00f801f003e007c00f800f001e003c007800f001f003e007c00f801f003e0>]
+[16<1f000f8007c003e001f000f8007c003e001f800fc007e003f001f8007c003e00>]
+[8<c3c300000000c3c3>]
+[16<0040008001000200040008001000200040008000000100020004000800100020>]
+[16<0040002000100008000400020001800040002000100008000400020001000080>]
+[16<1fc03fe07df0f8f8f07de03fc01f800fc01fe03ff07df8f87df03fe01fc00f80>]
+[8<80>]
+[8<8040201000000000>]
+[8<84cc000048cc0000>]
+[8<9900009900000000>]
+[8<08040201804020100800020180002010>]
+[8<2418814242811824>]
+[8<66999966>]
+[8<8000000008000000>]
+[8<70f8d8f870000000>]
+[8<0814224180402010>]
+[8<aa00440a11a04400>]
+[8<018245aa45820100>]
+[8<221c224180808041>]
+[8<88000000>]
+[8<0855800080550800>]
+[8<2844004482440044>]
+[8<0810204080412214>]
+[8<00>]]]def
+/Dfill{
+ transform /maxy exch def /maxx exch def
+ transform /miny exch def /minx exch def
+ minx maxx gt{/minx maxx /maxx minx def def}if
+ miny maxy gt{/miny maxy /maxy miny def def}if
+ Dpatterns Dstipple 1 sub get exch 1 sub get
+ aload pop /stip exch def /stipw exch def /stiph 128 def
+ /imatrix[stipw 0 0 stiph 0 0]def
+ /tmatrix[stipw 0 0 stiph 0 0]def
+ /minx minx cvi stiph idiv stiph mul def
+ /miny miny cvi stipw idiv stipw mul def
+ gsave eoclip 0 setgray
+ miny stiph maxy{
+ tmatrix exch 5 exch put
+ minx stipw maxx{
+ tmatrix exch 4 exch put tmatrix setmatrix
+ stipw stiph true imatrix {stip} imagemask
+ }for
+ }for
+ grestore
+}def
+/Dp{Dfill Dstroke}def
+/DP{Dfill currentpoint newpath moveto}def
+end
+
+/ditstart{$DITroff begin
+ /nfonts 60 def % NFONTS makedev/ditroff dependent!
+ /fonts[nfonts{0}repeat]def
+ /fontnames[nfonts{()}repeat]def
+/docsave save def
+}def
+
+% character outcalls
+/oc{
+ /pswid exch def /cc exch def /name exch def
+ /ditwid pswid fontsize mul resolution mul 72000 div def
+ /ditsiz fontsize resolution mul 72 div def
+ ocprocs name known{ocprocs name get exec}{name cb}ifelse
+}def
+/fractm [.65 0 0 .6 0 0] def
+/fraction{
+ /fden exch def /fnum exch def gsave /cf currentfont def
+ cf fractm makefont setfont 0 .3 dm 2 copy neg rmoveto
+ fnum show rmoveto currentfont cf setfont(\244)show setfont fden show
+ grestore ditwid 0 rmoveto
+}def
+/oce{grestore ditwid 0 rmoveto}def
+/dm{ditsiz mul}def
+/ocprocs 50 dict def ocprocs begin
+(14){(1)(4)fraction}def
+(12){(1)(2)fraction}def
+(34){(3)(4)fraction}def
+(13){(1)(3)fraction}def
+(23){(2)(3)fraction}def
+(18){(1)(8)fraction}def
+(38){(3)(8)fraction}def
+(58){(5)(8)fraction}def
+(78){(7)(8)fraction}def
+(sr){gsave 0 .06 dm rmoveto(\326)show oce}def
+(is){gsave 0 .15 dm rmoveto(\362)show oce}def
+(->){gsave 0 .02 dm rmoveto(\256)show oce}def
+(<-){gsave 0 .02 dm rmoveto(\254)show oce}def
+(==){gsave 0 .05 dm rmoveto(\272)show oce}def
+(uc){gsave currentpoint 400 .009 dm mul add translate
+ 8 -8 scale ucseal oce}def
+end
+
+% an attempt at a PostScript FONT to implement ditroff special chars
+% this will enable us to
+% cache the little buggers
+% generate faster, more compact PS out of psdit
+% confuse everyone (including myself)!
+50 dict dup begin
+/FontType 3 def
+/FontName /DIThacks def
+/FontMatrix [.001 0 0 .001 0 0] def
+/FontBBox [-260 -260 900 900] def% a lie but ...
+/Encoding 256 array def
+0 1 255{Encoding exch /.notdef put}for
+Encoding
+ dup 8#040/space put %space
+ dup 8#110/rc put %right ceil
+ dup 8#111/lt put %left top curl
+ dup 8#112/bv put %bold vert
+ dup 8#113/lk put %left mid curl
+ dup 8#114/lb put %left bot curl
+ dup 8#115/rt put %right top curl
+ dup 8#116/rk put %right mid curl
+ dup 8#117/rb put %right bot curl
+ dup 8#120/rf put %right floor
+ dup 8#121/lf put %left floor
+ dup 8#122/lc put %left ceil
+ dup 8#140/sq put %square
+ dup 8#141/bx put %box
+ dup 8#142/ci put %circle
+ dup 8#143/br put %box rule
+ dup 8#144/rn put %root extender
+ dup 8#145/vr put %vertical rule
+ dup 8#146/ob put %outline bullet
+ dup 8#147/bu put %bullet
+ dup 8#150/ru put %rule
+ dup 8#151/ul put %underline
+ pop
+/DITfd 100 dict def
+/BuildChar{0 begin
+ /cc exch def /fd exch def
+ /charname fd /Encoding get cc get def
+ /charwid fd /Metrics get charname get def
+ /charproc fd /CharProcs get charname get def
+ charwid 0 fd /FontBBox get aload pop setcachedevice
+ 2 setlinejoin 40 setlinewidth
+ newpath 0 0 moveto gsave charproc grestore
+ end}def
+/BuildChar load 0 DITfd put
+/CharProcs 50 dict def
+CharProcs begin
+/space{}def
+/.notdef{}def
+/ru{500 0 rls}def
+/rn{0 840 moveto 500 0 rls}def
+/vr{0 800 moveto 0 -770 rls}def
+/bv{0 800 moveto 0 -1000 rls}def
+/br{0 840 moveto 0 -1000 rls}def
+/ul{0 -140 moveto 500 0 rls}def
+/ob{200 250 rmoveto currentpoint newpath 200 0 360 arc closepath stroke}def
+/bu{200 250 rmoveto currentpoint newpath 200 0 360 arc closepath fill}def
+/sq{80 0 rmoveto currentpoint dround newpath moveto
+ 640 0 rlineto 0 640 rlineto -640 0 rlineto closepath stroke}def
+/bx{80 0 rmoveto currentpoint dround newpath moveto
+ 640 0 rlineto 0 640 rlineto -640 0 rlineto closepath fill}def
+/ci{500 360 rmoveto currentpoint newpath 333 0 360 arc
+ 50 setlinewidth stroke}def
+
+/lt{0 -200 moveto 0 550 rlineto currx 800 2cx s4 add exch s4 a4p stroke}def
+/lb{0 800 moveto 0 -550 rlineto currx -200 2cx s4 add exch s4 a4p stroke}def
+/rt{0 -200 moveto 0 550 rlineto currx 800 2cx s4 sub exch s4 a4p stroke}def
+/rb{0 800 moveto 0 -500 rlineto currx -200 2cx s4 sub exch s4 a4p stroke}def
+/lk{0 800 moveto 0 300 -300 300 s4 arcto pop pop 1000 sub
+ 0 300 4 2 roll s4 a4p 0 -200 lineto stroke}def
+/rk{0 800 moveto 0 300 s2 300 s4 arcto pop pop 1000 sub
+ 0 300 4 2 roll s4 a4p 0 -200 lineto stroke}def
+/lf{0 800 moveto 0 -1000 rlineto s4 0 rls}def
+/rf{0 800 moveto 0 -1000 rlineto s4 neg 0 rls}def
+/lc{0 -200 moveto 0 1000 rlineto s4 0 rls}def
+/rc{0 -200 moveto 0 1000 rlineto s4 neg 0 rls}def
+end
+
+/Metrics 50 dict def Metrics begin
+/.notdef 0 def
+/space 500 def
+/ru 500 def
+/br 0 def
+/lt 416 def
+/lb 416 def
+/rt 416 def
+/rb 416 def
+/lk 416 def
+/rk 416 def
+/rc 416 def
+/lc 416 def
+/rf 416 def
+/lf 416 def
+/bv 416 def
+/ob 350 def
+/bu 350 def
+/ci 750 def
+/bx 750 def
+/sq 750 def
+/rn 500 def
+/ul 500 def
+/vr 0 def
+end
+
+DITfd begin
+/s2 500 def /s4 250 def /s3 333 def
+/a4p{arcto pop pop pop pop}def
+/2cx{2 copy exch}def
+/rls{rlineto stroke}def
+/currx{currentpoint pop}def
+/dround{transform round exch round exch itransform} def
+end
+end
+/DIThacks exch definefont pop
+ditstart
+(psc)xT
+576 1 1 xr
+1(Times-Roman)xf 1 f
+2(Times-Italic)xf 2 f
+3(Times-Bold)xf 3 f
+4(Times-BoldItalic)xf 4 f
+5(Helvetica)xf 5 f
+6(Helvetica-Bold)xf 6 f
+7(Courier)xf 7 f
+8(Courier-Bold)xf 8 f
+9(Symbol)xf 9 f
+10(DIThacks)xf 10 f
+10 s
+1 f
+xi
+%%EndProlog
+
+%%Page: 1 1
+10 s 10 xH 0 xS 1 f
+3 f
+22 s
+1249 626(A)N
+1420(N)X
+1547(ew)X
+1796(H)X
+1933(ashing)X
+2467(P)X
+2574(ackage)X
+3136(for)X
+3405(U)X
+3532(N)X
+3659(IX)X
+2 f
+20 s
+3855 562(1)N
+1 f
+12 s
+1607 779(Margo)N
+1887(Seltzer)X
+9 f
+2179(-)X
+1 f
+2256(University)X
+2686(of)X
+2790(California,)X
+3229(Berkeley)X
+2015 875(Ozan)N
+2242(Yigit)X
+9 f
+2464(-)X
+1 f
+2541(York)X
+2762(University)X
+3 f
+2331 1086(ABSTRACT)N
+1 f
+10 s
+1152 1222(UNIX)N
+1385(support)X
+1657(of)X
+1756(disk)X
+1921(oriented)X
+2216(hashing)X
+2497(was)X
+2654(originally)X
+2997(provided)X
+3314(by)X
+2 f
+3426(dbm)X
+1 f
+3595([ATT79])X
+3916(and)X
+1152 1310(subsequently)N
+1595(improved)X
+1927(upon)X
+2112(in)X
+2 f
+2199(ndbm)X
+1 f
+2402([BSD86].)X
+2735(In)X
+2826(AT&T)X
+3068(System)X
+3327(V,)X
+3429(in-memory)X
+3809(hashed)X
+1152 1398(storage)N
+1420(and)X
+1572(access)X
+1814(support)X
+2090(was)X
+2251(added)X
+2479(in)X
+2577(the)X
+2 f
+2711(hsearch)X
+1 f
+3000(library)X
+3249(routines)X
+3542([ATT85].)X
+3907(The)X
+1152 1486(result)N
+1367(is)X
+1457(a)X
+1530(system)X
+1789(with)X
+1968(two)X
+2125(incompatible)X
+2580(hashing)X
+2865(schemes,)X
+3193(each)X
+3377(with)X
+3555(its)X
+3666(own)X
+3840(set)X
+3965(of)X
+1152 1574(shortcomings.)N
+1152 1688(This)N
+1316(paper)X
+1517(presents)X
+1802(the)X
+1922(design)X
+2152(and)X
+2289(performance)X
+2717(characteristics)X
+3198(of)X
+3286(a)X
+3343(new)X
+3498(hashing)X
+3768(package)X
+1152 1776(providing)N
+1483(a)X
+1539(superset)X
+1822(of)X
+1909(the)X
+2027(functionality)X
+2456(provided)X
+2761(by)X
+2 f
+2861(dbm)X
+1 f
+3019(and)X
+2 f
+3155(hsearch)X
+1 f
+3409(.)X
+3469(The)X
+3614(new)X
+3768(package)X
+1152 1864(uses)N
+1322(linear)X
+1537(hashing)X
+1818(to)X
+1912(provide)X
+2189(ef\256cient)X
+2484(support)X
+2755(of)X
+2853(both)X
+3026(memory)X
+3324(based)X
+3538(and)X
+3685(disk)X
+3849(based)X
+1152 1952(hash)N
+1319(tables)X
+1526(with)X
+1688(performance)X
+2115(superior)X
+2398(to)X
+2480(both)X
+2 f
+2642(dbm)X
+1 f
+2800(and)X
+2 f
+2936(hsearch)X
+1 f
+3210(under)X
+3413(most)X
+3588(conditions.)X
+3 f
+1380 2128(Introduction)N
+1 f
+892 2260(Current)N
+1196(UNIX)X
+1456(systems)X
+1768(offer)X
+1984(two)X
+2163(forms)X
+2409(of)X
+720 2348(hashed)N
+973(data)X
+1137(access.)X
+2 f
+1413(Dbm)X
+1 f
+1599(and)X
+1745(its)X
+1850(derivatives)X
+2231(provide)X
+720 2436(keyed)N
+939(access)X
+1171(to)X
+1259(disk)X
+1418(resident)X
+1698(data)X
+1858(while)X
+2 f
+2062(hsearch)X
+1 f
+2342(pro-)X
+720 2524(vides)N
+929(access)X
+1175(for)X
+1309(memory)X
+1616(resident)X
+1910(data.)X
+2124(These)X
+2356(two)X
+720 2612(access)N
+979(methods)X
+1302(are)X
+1453(incompatible)X
+1923(in)X
+2037(that)X
+2209(memory)X
+720 2700(resident)N
+1011(hash)X
+1195(tables)X
+1419(may)X
+1593(not)X
+1731(be)X
+1843(stored)X
+2075(on)X
+2191(disk)X
+2360(and)X
+720 2788(disk)N
+884(resident)X
+1169(tables)X
+1387(cannot)X
+1632(be)X
+1739(read)X
+1909(into)X
+2063(memory)X
+2360(and)X
+720 2876(accessed)N
+1022(using)X
+1215(the)X
+1333(in-memory)X
+1709(routines.)X
+2 f
+892 2990(Dbm)N
+1 f
+1091(has)X
+1241(several)X
+1512(shortcomings.)X
+2026(Since)X
+2247(data)X
+2423(is)X
+720 3078(assumed)N
+1032(to)X
+1130(be)X
+1242(disk)X
+1411(resident,)X
+1721(each)X
+1905(access)X
+2146(requires)X
+2440(a)X
+720 3166(system)N
+963(call,)X
+1120(and)X
+1257(almost)X
+1491(certainly,)X
+1813(a)X
+1869(disk)X
+2022(operation.)X
+2365(For)X
+720 3254(extremely)N
+1072(large)X
+1264(databases,)X
+1623(where)X
+1851(caching)X
+2131(is)X
+2214(unlikely)X
+720 3342(to)N
+810(be)X
+914(effective,)X
+1244(this)X
+1386(is)X
+1466(acceptable,)X
+1853(however,)X
+2177(when)X
+2378(the)X
+720 3430(database)N
+1022(is)X
+1100(small)X
+1298(\(i.e.)X
+1447(the)X
+1569(password)X
+1896(\256le\),)X
+2069(performance)X
+720 3518(improvements)N
+1204(can)X
+1342(be)X
+1443(obtained)X
+1744(through)X
+2018(caching)X
+2293(pages)X
+720 3606(of)N
+818(the)X
+947(database)X
+1255(in)X
+1348(memory.)X
+1685(In)X
+1782(addition,)X
+2 f
+2094(dbm)X
+1 f
+2262(cannot)X
+720 3694(store)N
+902(data)X
+1062(items)X
+1261(whose)X
+1492(total)X
+1660(key)X
+1802(and)X
+1943(data)X
+2102(size)X
+2252(exceed)X
+720 3782(the)N
+850(page)X
+1034(size)X
+1191(of)X
+1290(the)X
+1420(hash)X
+1599(table.)X
+1827(Similarly,)X
+2176(if)X
+2257(two)X
+2409(or)X
+720 3870(more)N
+907(keys)X
+1076(produce)X
+1357(the)X
+1477(same)X
+1664(hash)X
+1833(value)X
+2029(and)X
+2166(their)X
+2334(total)X
+720 3958(size)N
+876(exceeds)X
+1162(the)X
+1291(page)X
+1474(size,)X
+1650(the)X
+1779(table)X
+1966(cannot)X
+2210(store)X
+2396(all)X
+720 4046(the)N
+838(colliding)X
+1142(keys.)X
+892 4160(The)N
+1050(in-memory)X
+2 f
+1439(hsearch)X
+1 f
+1725(routines)X
+2015(have)X
+2199(different)X
+720 4248(shortcomings.)N
+1219(First,)X
+1413(the)X
+1539(notion)X
+1771(of)X
+1865(a)X
+1928(single)X
+2146(hash)X
+2320(table)X
+720 4336(is)N
+807(embedded)X
+1171(in)X
+1266(the)X
+1397(interface,)X
+1732(preventing)X
+2108(an)X
+2217(applica-)X
+720 4424(tion)N
+902(from)X
+1116(accessing)X
+1482(multiple)X
+1806(tables)X
+2050(concurrently.)X
+720 4512(Secondly,)N
+1063(the)X
+1186(routine)X
+1438(to)X
+1525(create)X
+1743(a)X
+1804(hash)X
+1976(table)X
+2157(requires)X
+2440(a)X
+720 4600(parameter)N
+1066(which)X
+1286(declares)X
+1573(the)X
+1694(size)X
+1842(of)X
+1932(the)X
+2053(hash)X
+2223(table.)X
+2422(If)X
+720 4688(this)N
+856(size)X
+1001(is)X
+1074(set)X
+1183(too)X
+1305(low,)X
+1465(performance)X
+1892(degradation)X
+2291(or)X
+2378(the)X
+720 4776(inability)N
+1008(to)X
+1092(add)X
+1230(items)X
+1425(to)X
+1509(the)X
+1628(table)X
+1805(may)X
+1964(result.)X
+2223(In)X
+2311(addi-)X
+720 4864(tion,)N
+2 f
+910(hsearch)X
+1 f
+1210(requires)X
+1515(that)X
+1681(the)X
+1825(application)X
+2226(allocate)X
+720 4952(memory)N
+1037(for)X
+1181(the)X
+1329(key)X
+1495(and)X
+1661(data)X
+1845(items.)X
+2108(Lastly,)X
+2378(the)X
+2 f
+720 5040(hsearch)N
+1 f
+1013(routines)X
+1310(provide)X
+1594(no)X
+1713(interface)X
+2034(to)X
+2135(store)X
+2329(hash)X
+720 5128(tables)N
+927(on)X
+1027(disk.)X
+16 s
+720 5593 MXY
+864 0 Dl
+2 f
+8 s
+760 5648(1)N
+1 f
+9 s
+5673(UNIX)Y
+990(is)X
+1056(a)X
+1106(registered)X
+1408(trademark)X
+1718(of)X
+1796(AT&T.)X
+10 s
+2878 2128(The)N
+3032(goal)X
+3199(of)X
+3295(our)X
+3431(work)X
+3625(was)X
+3779(to)X
+3870(design)X
+4108(and)X
+4253(imple-)X
+2706 2216(ment)N
+2900(a)X
+2970(new)X
+3138(package)X
+3436(that)X
+3590(provides)X
+3899(a)X
+3968(superset)X
+4264(of)X
+4364(the)X
+2706 2304(functionality)N
+3144(of)X
+3240(both)X
+2 f
+3411(dbm)X
+1 f
+3578(and)X
+2 f
+3723(hsearch)X
+1 f
+3977(.)X
+4045(The)X
+4198(package)X
+2706 2392(had)N
+2871(to)X
+2982(overcome)X
+3348(the)X
+3495(interface)X
+3826(shortcomings)X
+4306(cited)X
+2706 2480(above)N
+2930(and)X
+3078(its)X
+3185(implementation)X
+3719(had)X
+3867(to)X
+3961(provide)X
+4238(perfor-)X
+2706 2568(mance)N
+2942(equal)X
+3142(or)X
+3235(superior)X
+3524(to)X
+3612(that)X
+3758(of)X
+3851(the)X
+3975(existing)X
+4253(imple-)X
+2706 2656(mentations.)N
+3152(In)X
+3274(order)X
+3498(to)X
+3614(provide)X
+3913(a)X
+4003(compact)X
+4329(disk)X
+2706 2744(representation,)N
+3224(graceful)X
+3531(table)X
+3729(growth,)X
+4018(and)X
+4176(expected)X
+2706 2832(constant)N
+3033(time)X
+3234(performance,)X
+3720(we)X
+3873(selected)X
+4191(Litwin's)X
+2706 2920(linear)N
+2923(hashing)X
+3206(algorithm)X
+3551([LAR88,)X
+3872(LIT80].)X
+4178(We)X
+4324(then)X
+2706 3008(enhanced)N
+3037(the)X
+3161(algorithm)X
+3498(to)X
+3586(handle)X
+3826(page)X
+4004(over\257ows)X
+4346(and)X
+2706 3096(large)N
+2900(key)X
+3049(handling)X
+3362(with)X
+3537(a)X
+3606(single)X
+3830(mechanism,)X
+4248(named)X
+2706 3184(buddy-in-waiting.)N
+3 f
+2975 3338(Existing)N
+3274(UNIX)X
+3499(Hashing)X
+3802(Techniques)X
+1 f
+2878 3470(Over)N
+3076(the)X
+3210(last)X
+3357(decade,)X
+3637(several)X
+3901(dynamic)X
+4213(hashing)X
+2706 3558(schemes)N
+3000(have)X
+3174(been)X
+3348(developed)X
+3700(for)X
+3816(the)X
+3936(UNIX)X
+4159(timeshar-)X
+2706 3646(ing)N
+2856(system,)X
+3146(starting)X
+3433(with)X
+3622(the)X
+3767(inclusion)X
+4107(of)X
+2 f
+4221(dbm)X
+1 f
+4359(,)X
+4426(a)X
+2706 3734(minimal)N
+3008(database)X
+3321(library)X
+3571(written)X
+3834(by)X
+3950(Ken)X
+4120(Thompson)X
+2706 3822([THOM90],)N
+3141(in)X
+3248(the)X
+3391(Seventh)X
+3694(Edition)X
+3974(UNIX)X
+4220(system.)X
+2706 3910(Since)N
+2916(then,)X
+3106(an)X
+3214(extended)X
+3536(version)X
+3804(of)X
+3903(the)X
+4032(same)X
+4228(library,)X
+2 f
+2706 3998(ndbm)N
+1 f
+2884(,)X
+2933(and)X
+3078(a)X
+3142(public-domain)X
+3637(clone)X
+3839(of)X
+3934(the)X
+4060(latter,)X
+2 f
+4273(sdbm)X
+1 f
+4442(,)X
+2706 4086(have)N
+2902(been)X
+3098(developed.)X
+3491(Another)X
+3797 0.1645(interface-compatible)AX
+2706 4174(library)N
+2 f
+2950(gdbm)X
+1 f
+3128(,)X
+3178(was)X
+3333(recently)X
+3622(made)X
+3826(available)X
+4145(as)X
+4241(part)X
+4395(of)X
+2706 4262(the)N
+2829(Free)X
+2997(Software)X
+3312(Foundation's)X
+3759(\(FSF\))X
+3970(software)X
+4271(distri-)X
+2706 4350(bution.)N
+2878 4464(All)N
+3017(of)X
+3121(these)X
+3323(implementations)X
+3893(are)X
+4029(based)X
+4248(on)X
+4364(the)X
+2706 4552(idea)N
+2871(of)X
+2969(revealing)X
+3299(just)X
+3445(enough)X
+3711(bits)X
+3856(of)X
+3953(a)X
+4019(hash)X
+4196(value)X
+4400(to)X
+2706 4640(locate)N
+2920(a)X
+2978(page)X
+3151(in)X
+3234(a)X
+3291(single)X
+3503(access.)X
+3770(While)X
+2 f
+3987(dbm/ndbm)X
+1 f
+4346(and)X
+2 f
+2706 4728(sdbm)N
+1 f
+2908(map)X
+3079(the)X
+3210(hash)X
+3390(value)X
+3597(directly)X
+3874(to)X
+3968(a)X
+4036(disk)X
+4201(address,)X
+2 f
+2706 4816(gdbm)N
+1 f
+2921(uses)X
+3096(the)X
+3231(hash)X
+3414(value)X
+3624(to)X
+3722(index)X
+3936(into)X
+4096(a)X
+2 f
+4168(directory)X
+1 f
+2706 4904([ENB88])N
+3020(containing)X
+3378(disk)X
+3531(addresses.)X
+2878 5018(The)N
+2 f
+3033(hsearch)X
+1 f
+3317(routines)X
+3605(in)X
+3697(System)X
+3962(V)X
+4049(are)X
+4177(designed)X
+2706 5106(to)N
+2804(provide)X
+3085(memory-resident)X
+3669(hash)X
+3852(tables.)X
+4115(Since)X
+4328(data)X
+2706 5194(access)N
+2948(does)X
+3131(not)X
+3269(require)X
+3533(disk)X
+3702(access,)X
+3964(simple)X
+4213(hashing)X
+2706 5282(schemes)N
+3010(which)X
+3238(may)X
+3408(require)X
+3667(multiple)X
+3964(probes)X
+4209(into)X
+4364(the)X
+2706 5370(table)N
+2889(are)X
+3015(used.)X
+3209(A)X
+3294(more)X
+3486(interesting)X
+3851(version)X
+4114(of)X
+2 f
+4208(hsearch)X
+1 f
+2706 5458(is)N
+2784(a)X
+2845(public)X
+3070(domain)X
+3335(library,)X
+2 f
+3594(dynahash)X
+1 f
+3901(,)X
+3945(that)X
+4089(implements)X
+2706 5546(Larson's)N
+3036(in-memory)X
+3440(adaptation)X
+3822([LAR88])X
+4164(of)X
+4279(linear)X
+2706 5634(hashing)N
+2975([LIT80].)X
+3 f
+720 5960(USENIX)N
+9 f
+1042(-)X
+3 f
+1106(Winter)X
+1371('91)X
+9 f
+1498(-)X
+3 f
+1562(Dallas,)X
+1815(TX)X
+1 f
+4424(1)X
+
+2 p
+%%Page: 2 2
+10 s 10 xH 0 xS 1 f
+3 f
+432 258(A)N
+510(New)X
+682(Hashing)X
+985(Package)X
+1290(for)X
+1413(UNIX)X
+3663(Seltzer)X
+3920(&)X
+4007(Yigit)X
+2 f
+1074 538(dbm)N
+1 f
+1232(and)X
+2 f
+1368(ndbm)X
+1 f
+604 670(The)N
+2 f
+760(dbm)X
+1 f
+928(and)X
+2 f
+1074(ndbm)X
+1 f
+1282(library)X
+1526(implementations)X
+2089(are)X
+432 758(based)N
+667(on)X
+799(the)X
+949(same)X
+1166(algorithm)X
+1529(by)X
+1661(Ken)X
+1846(Thompson)X
+432 846([THOM90,)N
+824(TOR88,)X
+1113(WAL84],)X
+1452(but)X
+1582(differ)X
+1789(in)X
+1879(their)X
+2054(pro-)X
+432 934(grammatic)N
+801(interfaces.)X
+1160(The)X
+1311(latter)X
+1502(is)X
+1581(a)X
+1643(modi\256ed)X
+1952(version)X
+432 1022(of)N
+533(the)X
+665(former)X
+918(which)X
+1148(adds)X
+1328(support)X
+1601(for)X
+1728(multiple)X
+2027(data-)X
+432 1110(bases)N
+634(to)X
+724(be)X
+828(open)X
+1011(concurrently.)X
+1484(The)X
+1636(discussion)X
+1996(of)X
+2090(the)X
+432 1198(algorithm)N
+774(that)X
+925(follows)X
+1196(is)X
+1280(applicable)X
+1640(to)X
+1732(both)X
+2 f
+1904(dbm)X
+1 f
+2072(and)X
+2 f
+432 1286(ndbm)N
+1 f
+610(.)X
+604 1400(The)N
+760(basic)X
+956(structure)X
+1268(of)X
+2 f
+1366(dbm)X
+1 f
+1535(calls)X
+1712(for)X
+1836(\256xed-sized)X
+432 1488(disk)N
+612(blocks)X
+868(\(buckets\))X
+1214(and)X
+1377(an)X
+2 f
+1499(access)X
+1 f
+1755(function)X
+2068(that)X
+432 1576(maps)N
+623(a)X
+681(key)X
+819(to)X
+902(a)X
+959(bucket.)X
+1234(The)X
+1380(interface)X
+1683(routines)X
+1962(use)X
+2090(the)X
+2 f
+432 1664(access)N
+1 f
+673(function)X
+970(to)X
+1062(obtain)X
+1292(the)X
+1420(appropriate)X
+1816(bucket)X
+2060(in)X
+2152(a)X
+432 1752(single)N
+643(disk)X
+796(access.)X
+604 1866(Within)N
+869(the)X
+2 f
+1010(access)X
+1 f
+1263(function,)X
+1593(a)X
+1672(bit-randomizing)X
+432 1954(hash)N
+610(function)X
+2 f
+8 s
+877 1929(2)N
+1 f
+10 s
+940 1954(is)N
+1024(used)X
+1202(to)X
+1294(convert)X
+1565(a)X
+1631(key)X
+1777(into)X
+1931(a)X
+1997(32-bit)X
+432 2042(hash)N
+605(value.)X
+825(Out)X
+971(of)X
+1064(these)X
+1254(32)X
+1359(bits,)X
+1519(only)X
+1686(as)X
+1778(many)X
+1981(bits)X
+2121(as)X
+432 2130(necessary)N
+773(are)X
+900(used)X
+1075(to)X
+1165(determine)X
+1514(the)X
+1639(particular)X
+1974(bucket)X
+432 2218(on)N
+533(which)X
+750(a)X
+807(key)X
+944(resides.)X
+1228(An)X
+1347(in-memory)X
+1724(bitmap)X
+1967(is)X
+2041(used)X
+432 2306(to)N
+533(determine)X
+893(how)X
+1070(many)X
+1287(bits)X
+1441(are)X
+1579(required.)X
+1905(Each)X
+2104(bit)X
+432 2394(indicates)N
+746(whether)X
+1033(its)X
+1136(associated)X
+1494(bucket)X
+1736(has)X
+1871(been)X
+2051(split)X
+432 2482(yet)N
+562(\(a)X
+657(0)X
+728(indicating)X
+1079(that)X
+1230(the)X
+1359(bucket)X
+1604(has)X
+1742(not)X
+1875(yet)X
+2004(split\).)X
+432 2570(The)N
+590(use)X
+730(of)X
+830(the)X
+961(hash)X
+1141(function)X
+1441(and)X
+1590(the)X
+1720(bitmap)X
+1974(is)X
+2059(best)X
+432 2658(described)N
+769(by)X
+878(stepping)X
+1177(through)X
+1454(database)X
+1759(creation)X
+2046(with)X
+432 2746(multiple)N
+718(invocations)X
+1107(of)X
+1194(a)X
+2 f
+1250(store)X
+1 f
+1430(operation.)X
+604 2860(Initially,)N
+906(the)X
+1033(hash)X
+1209(table)X
+1394(contains)X
+1690(a)X
+1755(single)X
+1974(bucket)X
+432 2948(\(bucket)N
+711(0\),)X
+836(the)X
+972(bit)X
+1094(map)X
+1270(contains)X
+1575(a)X
+1649(single)X
+1878(bit)X
+2000(\(bit)X
+2148(0)X
+432 3036(corresponding)N
+913(to)X
+997(bucket)X
+1233(0\),)X
+1342(and)X
+1480(0)X
+1542(bits)X
+1699(of)X
+1788(a)X
+1846(hash)X
+2014(value)X
+432 3124(are)N
+560(examined)X
+901(to)X
+992(determine)X
+1342(where)X
+1568(a)X
+1633(key)X
+1778(is)X
+1860(placed)X
+2099(\(in)X
+432 3212(bucket)N
+670(0\).)X
+801(When)X
+1017(bucket)X
+1255(0)X
+1319(is)X
+1396(full,)X
+1551(its)X
+1650(bit)X
+1758(in)X
+1844(the)X
+1966(bitmap)X
+432 3300(\(bit)N
+564(0\))X
+652(is)X
+726(set,)X
+856(and)X
+993(its)X
+1089(contents)X
+1377(are)X
+1497(split)X
+1655(between)X
+1943(buckets)X
+432 3388(0)N
+499(and)X
+641(1,)X
+727(by)X
+833(considering)X
+1233(the)X
+1357(0)X
+2 f
+7 s
+3356(th)Y
+10 s
+1 f
+1480 3388(bit)N
+1590(\(the)X
+1741(lowest)X
+1976(bit)X
+2086(not)X
+432 3476(previously)N
+800(examined\))X
+1169(of)X
+1266(the)X
+1393(hash)X
+1569(value)X
+1772(for)X
+1895(each)X
+2072(key)X
+432 3564(within)N
+668(the)X
+798(bucket.)X
+1064(Given)X
+1292(a)X
+1359(well-designed)X
+1840(hash)X
+2018(func-)X
+432 3652(tion,)N
+613(approximately)X
+1112(half)X
+1273(of)X
+1376(the)X
+1510(keys)X
+1693(will)X
+1853(have)X
+2041(hash)X
+432 3740(values)N
+666(with)X
+837(the)X
+964(0)X
+2 f
+7 s
+3708(th)Y
+10 s
+1 f
+1090 3740(bit)N
+1203(set.)X
+1341(All)X
+1471(such)X
+1646(keys)X
+1821(and)X
+1965(associ-)X
+432 3828(ated)N
+586(data)X
+740(are)X
+859(moved)X
+1097(to)X
+1179(bucket)X
+1413(1,)X
+1493(and)X
+1629(the)X
+1747(rest)X
+1883(remain)X
+2126(in)X
+432 3916(bucket)N
+666(0.)X
+604 4030(After)N
+804(this)X
+949(split,)X
+1135(the)X
+1262(\256le)X
+1393(now)X
+1560(contains)X
+1856(two)X
+2005(buck-)X
+432 4118(ets,)N
+562(and)X
+699(the)X
+818(bitmap)X
+1061(contains)X
+1349(three)X
+1530(bits:)X
+1687(the)X
+1805(0)X
+2 f
+7 s
+4086(th)Y
+10 s
+1 f
+1922 4118(bit)N
+2026(is)X
+2099(set)X
+432 4206(to)N
+525(indicate)X
+810(a)X
+876(bucket)X
+1120(0)X
+1190(split)X
+1357(when)X
+1561(no)X
+1671(bits)X
+1816(of)X
+1913(the)X
+2041(hash)X
+432 4294(value)N
+648(are)X
+789(considered,)X
+1199(and)X
+1357(two)X
+1519(more)X
+1726(unset)X
+1937(bits)X
+2094(for)X
+432 4382(buckets)N
+706(0)X
+775(and)X
+920(1.)X
+1029(The)X
+1183(placement)X
+1542(of)X
+1638(an)X
+1742(incoming)X
+2072(key)X
+432 4470(now)N
+604(requires)X
+897(examination)X
+1327(of)X
+1428(the)X
+1560(0)X
+2 f
+7 s
+4438(th)Y
+10 s
+1 f
+1691 4470(bit)N
+1809(of)X
+1910(the)X
+2041(hash)X
+432 4558(value,)N
+667(and)X
+824(the)X
+963(key)X
+1119(is)X
+1212(placed)X
+1462(either)X
+1685(in)X
+1787(bucket)X
+2041(0)X
+2121(or)X
+432 4646(bucket)N
+674(1.)X
+782(If)X
+864(either)X
+1075(bucket)X
+1317(0)X
+1385(or)X
+1480(bucket)X
+1722(1)X
+1790(\256lls)X
+1937(up,)X
+2064(it)X
+2135(is)X
+432 4734(split)N
+598(as)X
+693(before,)X
+947(its)X
+1050(bit)X
+1162(is)X
+1243(set)X
+1360(in)X
+1450(the)X
+1576(bitmap,)X
+1846(and)X
+1990(a)X
+2054(new)X
+432 4822(set)N
+541(of)X
+628(unset)X
+817(bits)X
+952(are)X
+1071(added)X
+1283(to)X
+1365(the)X
+1483(bitmap.)X
+604 4936(Each)N
+791(time)X
+959(we)X
+1079(consider)X
+1376(a)X
+1437(new)X
+1596(bit)X
+1705(\(bit)X
+1841(n\),)X
+1953(we)X
+2072(add)X
+432 5024(2)N
+2 f
+7 s
+4992(n)Y
+9 f
+509(+)X
+1 f
+540(1)X
+10 s
+595 5024(bits)N
+737(to)X
+826(the)X
+951(bitmap)X
+1199(and)X
+1341(obtain)X
+1567(2)X
+2 f
+7 s
+4992(n)Y
+9 f
+1644(+)X
+1 f
+1675(1)X
+10 s
+1729 5024(more)N
+1920(address-)X
+432 5112(able)N
+595(buckets)X
+869(in)X
+960(the)X
+1087(\256le.)X
+1258(As)X
+1376(a)X
+1441(result,)X
+1668(the)X
+1795(bitmap)X
+2045(con-)X
+432 5200(tains)N
+618(the)X
+751(previous)X
+1062(2)X
+2 f
+7 s
+5168(n)Y
+9 f
+1139(+)X
+1 f
+1170(1)X
+2 f
+10 s
+9 f
+5200(-)Y
+1 f
+1242(1)X
+1317(bits)X
+1467(\(1)X
+2 f
+9 f
+1534(+)X
+1 f
+1578(2)X
+2 f
+9 f
+(+)S
+1 f
+1662(4)X
+2 f
+9 f
+(+)S
+1 f
+1746(...)X
+2 f
+9 f
+(+)S
+1 f
+1850(2)X
+2 f
+7 s
+5168(n)Y
+10 s
+1 f
+1931 5200(\))N
+1992(which)X
+432 5288(trace)N
+649(the)X
+807(entire)X
+2 f
+1050(split)X
+1247(history)X
+1 f
+1529(of)X
+1656(the)X
+1813(addressable)X
+16 s
+432 5433 MXY
+864 0 Dl
+2 f
+8 s
+472 5488(2)N
+1 f
+9 s
+523 5513(This)N
+670(bit-randomizing)X
+1153(property)X
+1416(is)X
+1482(important)X
+1780(to)X
+1854(obtain)X
+2052(radi-)X
+432 5593(cally)N
+599(different)X
+874(hash)X
+1033(values)X
+1244(for)X
+1355(nearly)X
+1562(identical)X
+1836(keys,)X
+2012(which)X
+432 5673(in)N
+506(turn)X
+640(avoids)X
+846(clustering)X
+1148(of)X
+1226(such)X
+1376(keys)X
+1526(in)X
+1600(a)X
+1650(single)X
+1840(bucket.)X
+10 s
+2418 538(buckets.)N
+2590 652(Given)N
+2809(a)X
+2868(key)X
+3007(and)X
+3146(the)X
+3267(bitmap)X
+3512(created)X
+3768(by)X
+3871(this)X
+4009(algo-)X
+2418 740(rithm,)N
+2638(we)X
+2759(\256rst)X
+2910(examine)X
+3209(bit)X
+3320(0)X
+3386(of)X
+3479(the)X
+3603(bitmap)X
+3851(\(the)X
+4002(bit)X
+4112(to)X
+2418 828(consult)N
+2673(when)X
+2871(0)X
+2934(bits)X
+3072(of)X
+3162(the)X
+3283(hash)X
+3453(value)X
+3650(are)X
+3772(being)X
+3973(exam-)X
+2418 916(ined\).)N
+2631(If)X
+2713(it)X
+2785(is)X
+2866(set)X
+2982(\(indicating)X
+3356(that)X
+3503(the)X
+3628(bucket)X
+3869(split\),)X
+4080(we)X
+2418 1004(begin)N
+2617(considering)X
+3012(the)X
+3131(bits)X
+3267(of)X
+3355(the)X
+3473(32-bit)X
+3684(hash)X
+3851(value.)X
+4085(As)X
+2418 1092(bit)N
+2525(n)X
+2587(is)X
+2662(revealed,)X
+2977(a)X
+3035(mask)X
+3226(equal)X
+3422(to)X
+3506(2)X
+2 f
+7 s
+1060(n)Y
+9 f
+3583(+)X
+1 f
+3614(1)X
+2 f
+10 s
+9 f
+1092(-)Y
+1 f
+3686(1)X
+3748(will)X
+3894(yield)X
+4076(the)X
+2418 1180(current)N
+2675(bucket)X
+2918(address.)X
+3228(Adding)X
+3496(2)X
+2 f
+7 s
+1148(n)Y
+9 f
+3573(+)X
+1 f
+3604(1)X
+2 f
+10 s
+9 f
+1180(-)Y
+1 f
+3676(1)X
+3744(to)X
+3834(the)X
+3960(bucket)X
+2418 1268(address)N
+2701(identi\256es)X
+3035(which)X
+3272(bit)X
+3397(in)X
+3500(the)X
+3639(bitmap)X
+3902(must)X
+4098(be)X
+2418 1356(checked.)N
+2743(We)X
+2876(continue)X
+3173(revealing)X
+3493(bits)X
+3628(of)X
+3715(the)X
+3833(hash)X
+4000(value)X
+2418 1444(until)N
+2591(all)X
+2698(set)X
+2814(bits)X
+2955(in)X
+3043(the)X
+3167(bitmap)X
+3415(are)X
+3540(exhausted.)X
+3907(The)X
+4058(fol-)X
+2418 1532(lowing)N
+2682(algorithm,)X
+3055(a)X
+3133(simpli\256cation)X
+3614(of)X
+3723(the)X
+3863(algorithm)X
+2418 1620(due)N
+2565(to)X
+2658(Ken)X
+2823(Thompson)X
+3196([THOM90,)X
+3590(TOR88],)X
+3908(uses)X
+4076(the)X
+2418 1708(hash)N
+2625(value)X
+2839(and)X
+2995(the)X
+3133(bitmap)X
+3395(to)X
+3497(calculate)X
+3823(the)X
+3960(bucket)X
+2418 1796(address)N
+2679(as)X
+2766(discussed)X
+3093(above.)X
+0(Courier)xf 0 f
+1 f
+0 f
+8 s
+2418 2095(hash)N
+2608(=)X
+2684 -0.4038(calchash\(key\);)AX
+2418 2183(mask)N
+2608(=)X
+2684(0;)X
+2418 2271(while)N
+2646 -0.4018(\(isbitset\(\(hash)AX
+3254(&)X
+3330(mask\))X
+3558(+)X
+3634(mask\)\))X
+2706 2359(mask)N
+2896(=)X
+2972(\(mask)X
+3200(<<)X
+3314(1\))X
+3428(+)X
+3504(1;)X
+2418 2447(bucket)N
+2684(=)X
+2760(hash)X
+2950(&)X
+3026(mask;)X
+2 f
+10 s
+3211 2812(sdbm)N
+1 f
+2590 2944(The)N
+2 f
+2738(sdbm)X
+1 f
+2930(library)X
+3167(is)X
+3243(a)X
+3302(public-domain)X
+3791(clone)X
+3987(of)X
+4076(the)X
+2 f
+2418 3032(ndbm)N
+1 f
+2638(library,)X
+2914(developed)X
+3286(by)X
+3408(Ozan)X
+3620(Yigit)X
+3826(to)X
+3929(provide)X
+2 f
+2418 3120(ndbm)N
+1 f
+2596('s)X
+2692(functionality)X
+3139(under)X
+3359(some)X
+3565(versions)X
+3869(of)X
+3973(UNIX)X
+2418 3208(that)N
+2559(exclude)X
+2830(it)X
+2894(for)X
+3008(licensing)X
+3317(reasons)X
+3578([YIG89].)X
+3895(The)X
+4040(pro-)X
+2418 3296(grammer)N
+2735(interface,)X
+3064(and)X
+3207(the)X
+3332(basic)X
+3524(structure)X
+3832(of)X
+2 f
+3926(sdbm)X
+1 f
+4121(is)X
+2418 3384(identical)N
+2733(to)X
+2 f
+2834(ndbm)X
+1 f
+3051(but)X
+3192(internal)X
+3476(details)X
+3723(of)X
+3828(the)X
+2 f
+3964(access)X
+1 f
+2418 3472(function,)N
+2726(such)X
+2894(as)X
+2982(the)X
+3101(calculation)X
+3474(of)X
+3561(the)X
+3679(bucket)X
+3913(address,)X
+2418 3560(and)N
+2563(the)X
+2690(use)X
+2825(of)X
+2920(different)X
+3225(hash)X
+3400(functions)X
+3726(make)X
+3928(the)X
+4054(two)X
+2418 3648(incompatible)N
+2856(at)X
+2934(the)X
+3052(database)X
+3349(level.)X
+2590 3762(The)N
+2 f
+2740(sdbm)X
+1 f
+2934(library)X
+3173(is)X
+3251(based)X
+3458(on)X
+3562(a)X
+3622(simpli\256ed)X
+3965(imple-)X
+2418 3850(mentation)N
+2778(of)X
+2885(Larson's)X
+3206(1978)X
+2 f
+3406(dynamic)X
+3717(hashing)X
+1 f
+4009(algo-)X
+2418 3938(rithm)N
+2616(including)X
+2943(the)X
+2 f
+3066(re\256nements)X
+3461(and)X
+3605(variations)X
+1 f
+3953(of)X
+4044(sec-)X
+2418 4026(tion)N
+2562(5)X
+2622([LAR78].)X
+2956(Larson's)X
+3257(original)X
+3526(algorithm)X
+3857(calls)X
+4024(for)X
+4138(a)X
+2418 4114(forest)N
+2635(of)X
+2736(binary)X
+2975(hash)X
+3156(trees)X
+3341(that)X
+3494(are)X
+3626(accessed)X
+3941(by)X
+4054(two)X
+2418 4202(hash)N
+2586(functions.)X
+2925(The)X
+3071(\256rst)X
+3216(hash)X
+3384(function)X
+3672(selects)X
+3907(a)X
+3964(partic-)X
+2418 4290(ular)N
+2571(tree)X
+2720(within)X
+2952(the)X
+3078(forest.)X
+3309(The)X
+3462(second)X
+3713(hash)X
+3887(function,)X
+2418 4378(which)N
+2659(is)X
+2757(required)X
+3070(to)X
+3177(be)X
+3297(a)X
+3377(boolean)X
+3675(pseudo-random)X
+2418 4466(number)N
+2687(generator)X
+3015(that)X
+3159(is)X
+3236(seeded)X
+3479(by)X
+3583(the)X
+3705(key,)X
+3865(is)X
+3942(used)X
+4112(to)X
+2418 4554(traverse)N
+2733(the)X
+2890(tree)X
+3070(until)X
+3275(internal)X
+3579(\(split\))X
+3829(nodes)X
+4075(are)X
+2418 4642(exhausted)N
+2763(and)X
+2903(an)X
+3003(external)X
+3286(\(non-split\))X
+3648(node)X
+3827(is)X
+3903(reached.)X
+2418 4730(The)N
+2571(bucket)X
+2813(addresses)X
+3149(are)X
+3276(stored)X
+3500(directly)X
+3772(in)X
+3861(the)X
+3986(exter-)X
+2418 4818(nal)N
+2536(nodes.)X
+2590 4932(Larson's)N
+2903(re\256nements)X
+3309(are)X
+3440(based)X
+3655(on)X
+3767(the)X
+3897(observa-)X
+2418 5020(tion)N
+2570(that)X
+2718(the)X
+2844(nodes)X
+3059(can)X
+3199(be)X
+3303(represented)X
+3702(by)X
+3809(a)X
+3872(single)X
+4090(bit)X
+2418 5108(that)N
+2569(is)X
+2653(set)X
+2773(for)X
+2898(internal)X
+3174(nodes)X
+3392(and)X
+3539(not)X
+3672(set)X
+3791(for)X
+3915(external)X
+2418 5196(nodes,)N
+2652(resulting)X
+2959(in)X
+3048(a)X
+3111(radix)X
+3303(search)X
+3536(trie.)X
+3709(Figure)X
+3944(1)X
+4010(illus-)X
+2418 5284(trates)N
+2621(this.)X
+2804(Nodes)X
+3037(A)X
+3123(and)X
+3267(B)X
+3348(are)X
+3475(internal)X
+3748(\(split\))X
+3967(nodes,)X
+2418 5372(thus)N
+2573(having)X
+2813(no)X
+2915(bucket)X
+3151(addresses)X
+3480(associated)X
+3831(with)X
+3994(them.)X
+2418 5460(Instead,)N
+2693(the)X
+2814(external)X
+3096(nodes)X
+3306(\(C,)X
+3429(D,)X
+3530(and)X
+3669(E\))X
+3768(each)X
+3938(need)X
+4112(to)X
+2418 5548(refer)N
+2594(to)X
+2679(a)X
+2738(bucket)X
+2975(address.)X
+3279(These)X
+3494(bucket)X
+3731(addresses)X
+4062(can)X
+2418 5636(be)N
+2529(stored)X
+2760(in)X
+2857(the)X
+2990(trie)X
+3132(itself)X
+3327(where)X
+3559(the)X
+3691(subtries)X
+3974(would)X
+3 f
+432 5960(2)N
+2970(USENIX)X
+9 f
+3292(-)X
+3 f
+3356(Winter)X
+3621('91)X
+9 f
+3748(-)X
+3 f
+3812(Dallas,)X
+4065(TX)X
+
+3 p
+%%Page: 3 3
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+720 258(Seltzer)N
+977(&)X
+1064(Yigit)X
+3278(A)X
+3356(New)X
+3528(Hashing)X
+3831(Package)X
+4136(for)X
+4259(UNIX)X
+1 f
+720 538(live)N
+862(if)X
+933(they)X
+1092(existed)X
+1340([KNU68].)X
+1709(For)X
+1841(example,)X
+2154(if)X
+2224(nodes)X
+2432(F)X
+720 626(and)N
+858(G)X
+938(were)X
+1117(the)X
+1237(children)X
+1522(of)X
+1610(node)X
+1787(C,)X
+1881(the)X
+2000(bucket)X
+2235(address)X
+720 714(L00)N
+886(could)X
+1101(reside)X
+1330(in)X
+1429(the)X
+1563(bits)X
+1714(that)X
+1870(will)X
+2030(eventually)X
+2400(be)X
+720 802(used)N
+887(to)X
+969(store)X
+1145(nodes)X
+1352(F)X
+1416(and)X
+1552(G)X
+1630(and)X
+1766(all)X
+1866(their)X
+2033(children.)X
+10 f
+720 890 -0.0930(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)AN
+3 f
+1894 2247(L1)N
+784 1925(A)N
+1431(E)X
+1106 2247(D)N
+1428 1281(C)N
+1109 1603(B)N
+1884 1930(L01)N
+1879 1286(L00)N
+1221 1814(1)N
+903 2131(1)N
+1221 1402(0)N
+903 1714(0)N
+1 Dt
+1397 1821 MXY
+-8 -32 Dl
+-5 19 Dl
+-20 6 Dl
+33 7 Dl
+-187 -182 Dl
+1397 1322 MXY
+-33 7 Dl
+20 6 Dl
+5 19 Dl
+8 -32 Dl
+-187 182 Dl
+1069 1639 MXY
+-32 7 Dl
+20 6 Dl
+5 19 Dl
+7 -32 Dl
+-186 182 Dl
+1374 1891 MXY
+185 Dc
+1779 2133 MXY
+0 161 Dl
+322 0 Dl
+0 -161 Dl
+-322 0 Dl
+1811 MY
+0 161 Dl
+322 0 Dl
+0 -161 Dl
+-322 0 Dl
+1166 MY
+0 161 Dl
+322 0 Dl
+0 -161 Dl
+-322 0 Dl
+1052 2213 MXY
+185 Dc
+1569 MY
+185 Dc
+720 1881 MXY
+185 Dc
+1779 2213 MXY
+-28 -17 Dl
+10 17 Dl
+-10 18 Dl
+28 -18 Dl
+-543 0 Dl
+1769 1891 MXY
+-28 -18 Dl
+10 18 Dl
+-10 18 Dl
+28 -18 Dl
+-201 0 Dl
+1364 1247 MXY
+185 Dc
+1769 MX
+-28 -18 Dl
+10 18 Dl
+-10 18 Dl
+28 -18 Dl
+-201 0 Dl
+1064 2143 MXY
+-7 -32 Dl
+-5 19 Dl
+-20 6 Dl
+32 7 Dl
+-181 -181 Dl
+3 Dt
+-1 Ds
+8 s
+720 2482(Figure)N
+925(1:)X
+1 f
+1002(Radix)X
+1179(search)X
+1365(trie)X
+1474(with)X
+1612(internal)X
+1831(nodes)X
+2004(A)X
+2074(and)X
+2189(B,)X
+2271(external)X
+720 2570(nodes)N
+891(C,)X
+972(D,)X
+1056(and)X
+1170(E,)X
+1247(and)X
+1361(bucket)X
+1553(addresses)X
+1819(stored)X
+1997(in)X
+2069(the)X
+2168(unused)X
+2370(por-)X
+720 2658(tion)N
+836(of)X
+905(the)X
+999(trie.)X
+10 s
+10 f
+720 2922 -0.0930(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)AN
+1 f
+892 3124(Further)N
+1153(simpli\256cations)X
+1647(of)X
+1738(the)X
+1860(above)X
+2076([YIG89])X
+2377(are)X
+720 3212(possible.)N
+1038(Using)X
+1265(a)X
+1337(single)X
+1564(radix)X
+1765(trie)X
+1908(to)X
+2006(avoid)X
+2219(the)X
+2352(\256rst)X
+720 3300(hash)N
+904(function,)X
+1227(replacing)X
+1562(the)X
+1696(pseudo-random)X
+2231(number)X
+720 3388(generator)N
+1052(with)X
+1222(a)X
+1286(well)X
+1452(designed,)X
+1785(bit-randomizing)X
+2329(hash)X
+720 3476(function,)N
+1053(and)X
+1215(using)X
+1434(the)X
+1578(portion)X
+1855(of)X
+1967(the)X
+2110(hash)X
+2302(value)X
+720 3564(exposed)N
+1021(during)X
+1268(the)X
+1404(trie)X
+1549(traversal)X
+1864(as)X
+1969(a)X
+2042(direct)X
+2262(bucket)X
+720 3652(address)N
+990(results)X
+1228(in)X
+1319(an)X
+2 f
+1424(access)X
+1 f
+1663(function)X
+1959(that)X
+2108(works)X
+2333(very)X
+720 3740(similar)N
+974(to)X
+1068(Thompson's)X
+1499(algorithm)X
+1841(above.)X
+2084(The)X
+2240(follow-)X
+720 3828(ing)N
+847(algorithm)X
+1183(uses)X
+1346(the)X
+1469(hash)X
+1641(value)X
+1840(to)X
+1927(traverse)X
+2206(a)X
+2266(linear-)X
+720 3916(ized)N
+874(radix)X
+1059(trie)X
+2 f
+8 s
+1166 3891(3)N
+1 f
+10 s
+1218 3916(starting)N
+1478(at)X
+1556(the)X
+1674(0)X
+2 f
+7 s
+3884(th)Y
+10 s
+1 f
+1791 3916(bit.)N
+0 f
+8 s
+720 4215(tbit)N
+910(=)X
+986(0;)X
+1296(/*)X
+1410(radix)X
+1638(trie)X
+1828(index)X
+2056(*/)X
+720 4303(hbit)N
+910(=)X
+986(0;)X
+1296(/*)X
+1410(hash)X
+1600(bit)X
+1752(index)X
+2056(*/)X
+720 4391(mask)N
+910(=)X
+986(0;)X
+720 4479(hash)N
+910(=)X
+986 -0.4038(calchash\(key\);)AX
+720 4655(for)N
+872(\(mask)X
+1100(=)X
+1176(0;)X
+910 4743 -0.4018(isbitset\(tbit\);)AN
+910 4831(mask)N
+1100(=)X
+1176(\(mask)X
+1404(<<)X
+1518(1\))X
+1632(+)X
+1708(1\))X
+1008 4919(if)N
+1122(\(hash)X
+1350(&)X
+1426(\(1)X
+1540(<<)X
+1654 -0.4219(hbit++\)\)\))AX
+1160 5007(/*)N
+1274(right)X
+1502(son)X
+1692(*/)X
+1160 5095(tbit)N
+1350(=)X
+1426(2)X
+1502(*)X
+1578(tbit)X
+1768(+)X
+1844(2;)X
+1008 5183(else)N
+1 f
+16 s
+720 5353 MXY
+864 0 Dl
+2 f
+8 s
+760 5408(3)N
+1 f
+9 s
+818 5433(A)N
+896(linearized)X
+1206(radix)X
+1380(trie)X
+1502(is)X
+1576(merely)X
+1802(an)X
+1895(array)X
+2068(representation)X
+720 5513(of)N
+800(the)X
+908(radix)X
+1076(search)X
+1280(trie)X
+1396(described)X
+1692(above.)X
+1920(The)X
+2052(children)X
+2308(of)X
+2388(the)X
+720 5593(node)N
+885(with)X
+1038(index)X
+1223(i)X
+1267(can)X
+1391(be)X
+1483(found)X
+1675(at)X
+1751(the)X
+1863(nodes)X
+2055(indexed)X
+2307(2*i+1)X
+720 5673(and)N
+842(2*i+2.)X
+0 f
+8 s
+3146 538(/*)N
+3260(left)X
+3450(son)X
+3678(*/)X
+3146 626(tbit)N
+3336(=)X
+3412(2)X
+3488(*)X
+3564(tbit)X
+3754(+)X
+3830(1;)X
+2706 802(bucket)N
+2972(=)X
+3048(hash)X
+3238(&)X
+3314(mask;)X
+2 f
+10 s
+3495 1167(gdbm)N
+1 f
+2878 1299(The)N
+3027(gdbm)X
+3233(\(GNU)X
+3458(data)X
+3616(base)X
+3783(manager\))X
+4111(library)X
+4349(is)X
+4426(a)X
+2706 1387(UNIX)N
+2933(database)X
+3236(manager)X
+3539(written)X
+3792(by)X
+3897(Philip)X
+4112(A.)X
+4215(Nelson,)X
+2706 1475(and)N
+2848(made)X
+3048(available)X
+3364(as)X
+3457(a)X
+3518(part)X
+3668(of)X
+3760(the)X
+3883(FSF)X
+4040(software)X
+4342(dis-)X
+2706 1563(tribution.)N
+3052(The)X
+3207(gdbm)X
+3419(library)X
+3663(provides)X
+3969(the)X
+4097(same)X
+4292(func-)X
+2706 1651(tionality)N
+3028(of)X
+3151(the)X
+2 f
+3304(dbm)X
+1 f
+3442(/)X
+2 f
+3464(ndbm)X
+1 f
+3697(libraries)X
+4015([NEL90])X
+4360(but)X
+2706 1739(attempts)N
+3018(to)X
+3121(avoid)X
+3340(some)X
+3550(of)X
+3658(their)X
+3846(shortcomings.)X
+4337(The)X
+2706 1827(gdbm)N
+2918(library)X
+3162(allows)X
+3401(for)X
+3525(arbitrary-length)X
+4059(data,)X
+4242(and)X
+4387(its)X
+2706 1915(database)N
+3027(is)X
+3124(a)X
+3203(singular,)X
+3524(non-sparse)X
+2 f
+8 s
+3872 1890(4)N
+1 f
+10 s
+3947 1915(\256le.)N
+4112(The)X
+4280(gdbm)X
+2706 2003(library)N
+2947(also)X
+3103(includes)X
+2 f
+3396(dbm)X
+1 f
+3560(and)X
+2 f
+3702(ndbm)X
+1 f
+3906(compatible)X
+4288(inter-)X
+2706 2091(faces.)N
+2878 2205(The)N
+3025(gdbm)X
+3229(library)X
+3465(is)X
+3540(based)X
+3745(on)X
+2 f
+3847(extensible)X
+4189(hashing)X
+1 f
+4442(,)X
+2706 2293(a)N
+2766(dynamic)X
+3066(hashing)X
+3339(algorithm)X
+3674(by)X
+3778(Fagin)X
+3984(et)X
+4066(al)X
+4148([FAG79].)X
+2706 2381(This)N
+2881(algorithm)X
+3225(differs)X
+3467(from)X
+3655(the)X
+3785(previously)X
+4155(discussed)X
+2706 2469(algorithms)N
+3069(in)X
+3152(that)X
+3293(it)X
+3358(uses)X
+3517(a)X
+2 f
+3574(directory)X
+1 f
+3889(that)X
+4030(is)X
+4103(a)X
+4159(collapsed)X
+2706 2557(representation)N
+3192([ENB88])X
+3517(of)X
+3615(the)X
+3744(radix)X
+3940(search)X
+4177(trie)X
+4315(used)X
+2706 2645(by)N
+2 f
+2806(sdbm)X
+1 f
+2975(.)X
+10 f
+2706 2733 -0.0930(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)AN
+3 f
+7 s
+3572 3761(L1)N
+1 Dt
+3485 3738 MXY
+-20 -13 Dl
+7 13 Dl
+-7 13 Dl
+20 -13 Dl
+-400 0 Dl
+3180 3027 MXY
+136 Dc
+2706 3494 MXY
+136 Dc
+2950 3264 MXY
+136 Dc
+3738 MY
+136 Dc
+3485 2968 MXY
+0 118 Dl
+238 0 Dl
+0 -118 Dl
+-238 0 Dl
+3442 MY
+0 119 Dl
+238 0 Dl
+0 -119 Dl
+-238 0 Dl
+3679 MY
+0 119 Dl
+238 0 Dl
+0 -119 Dl
+-238 0 Dl
+3187 3501 MXY
+136 Dc
+2963 3316 MXY
+-24 5 Dl
+15 4 Dl
+4 15 Dl
+5 -24 Dl
+-137 134 Dl
+3204 3083 MXY
+-24 5 Dl
+15 4 Dl
+3 14 Dl
+6 -23 Dl
+-137 133 Dl
+3204 3450 MXY
+-6 -24 Dl
+-3 14 Dl
+-15 5 Dl
+24 5 Dl
+-137 -134 Dl
+2842 3369(0)N
+3075 3139(0)N
+2842 3676(1)N
+3075 3443(1)N
+3562 3054(L00)N
+3565 3528(L01)N
+4197 2968 MXY
+0 118 Dl
+237 0 Dl
+0 -118 Dl
+-237 0 Dl
+3205 MY
+0 119 Dl
+237 0 Dl
+0 -119 Dl
+-237 0 Dl
+3561 MY
+0 118 Dl
+237 0 Dl
+0 -118 Dl
+-237 0 Dl
+3960 2909 MXY
+0 237 Dl
+118 0 Dl
+0 -237 Dl
+-118 0 Dl
+3146 MY
+0 237 Dl
+118 0 Dl
+0 -237 Dl
+-118 0 Dl
+3383 MY
+0 237 Dl
+118 0 Dl
+0 -237 Dl
+-118 0 Dl
+3620 MY
+0 237 Dl
+118 0 Dl
+0 -237 Dl
+-118 0 Dl
+4197 3027 MXY
+-21 -13 Dl
+8 13 Dl
+-8 13 Dl
+21 -13 Dl
+-119 0 Dl
+4197 3264 MXY
+-21 -13 Dl
+8 13 Dl
+-8 13 Dl
+21 -13 Dl
+-119 0 Dl
+3501 MY
+59 0 Dl
+0 89 Dl
+4078 3738 MXY
+59 0 Dl
+0 -88 Dl
+4197 3590 MXY
+-21 -13 Dl
+8 13 Dl
+-8 13 Dl
+21 -13 Dl
+-60 0 Dl
+4197 3650 MXY
+-21 -13 Dl
+8 13 Dl
+-8 13 Dl
+21 -13 Dl
+-60 0 Dl
+3991 3050(00)N
+3991 3287(01)N
+3991 3524(10)N
+3991 3761(11)N
+4269 3050(L00)N
+4269 3287(L01)N
+4283 3643(L1)N
+3485 3501 MXY
+-20 -13 Dl
+7 13 Dl
+-7 13 Dl
+20 -13 Dl
+-155 0 Dl
+3485 3027 MXY
+-20 -13 Dl
+7 13 Dl
+-7 13 Dl
+20 -13 Dl
+-163 0 Dl
+2967 3687 MXY
+-5 -24 Dl
+-4 14 Dl
+-15 4 Dl
+24 6 Dl
+-141 -141 Dl
+3 Dt
+-1 Ds
+8 s
+2706 4033(Figure)N
+2903(2:)X
+1 f
+2972(A)X
+3034(radix)X
+3181(search)X
+3359(trie)X
+3460(and)X
+3568(a)X
+3612(directory)X
+3858(representing)X
+4189(the)X
+4283(trie.)X
+10 s
+10 f
+2706 4209 -0.0930(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)AN
+1 f
+2878 4411(In)N
+2968(this)X
+3106(algorithm,)X
+3460(a)X
+3519(directory)X
+3832(consists)X
+4108(of)X
+4198(a)X
+4256(search)X
+2706 4499(trie)N
+2847(of)X
+2947(depth)X
+2 f
+3158(n)X
+1 f
+3211(,)X
+3264(containing)X
+3635(2)X
+2 f
+7 s
+4467(n)Y
+10 s
+1 f
+3749 4499(bucket)N
+3996(addresses)X
+4337(\(i.e.)X
+2706 4587(each)N
+2897(element)X
+3194(of)X
+3304(the)X
+3445(trie)X
+3594(is)X
+3689(a)X
+3767(bucket)X
+4023(address\).)X
+4373(To)X
+2706 4675(access)N
+2935(the)X
+3056(hash)X
+3226(table,)X
+3425(a)X
+3483(32-bit)X
+3696(hash)X
+3865(value)X
+4061(is)X
+4136(calculated)X
+2706 4763(and)N
+2 f
+2861(n)X
+1 f
+2953(bits)X
+3107(of)X
+3213(the)X
+3350(value)X
+3563(are)X
+3701(used)X
+3886(to)X
+3986(index)X
+4202(into)X
+4364(the)X
+2706 4851(directory)N
+3018(to)X
+3102(obtain)X
+3324(a)X
+3382(bucket)X
+3618(address.)X
+3921(It)X
+3992(is)X
+4067(important)X
+4400(to)X
+2706 4939(note)N
+2866(that)X
+3008(multiple)X
+3296(entries)X
+3532(of)X
+3620(this)X
+3756(directory)X
+4067(may)X
+4226(contain)X
+2706 5027(the)N
+2833(same)X
+3026(bucket)X
+3268(address)X
+3537(as)X
+3632(a)X
+3696(result)X
+3902(of)X
+3997(directory)X
+4315(dou-)X
+2706 5115(bling)N
+2903(during)X
+3145(bucket)X
+3392(splitting.)X
+3706(Figure)X
+3948(2)X
+4021(illustrates)X
+4364(the)X
+2706 5203(relationship)N
+3126(between)X
+3436(a)X
+3513(typical)X
+3772(\(skewed\))X
+4108(search)X
+4355(trie)X
+2706 5291(and)N
+2850(its)X
+2953(directory)X
+3271(representation.)X
+3774(The)X
+3927(formation)X
+4270(of)X
+4364(the)X
+2706 5379(directory)N
+3016(shown)X
+3245(in)X
+3327(the)X
+3445(\256gure)X
+3652(is)X
+3725(as)X
+3812(follows.)X
+16 s
+2706 5593 MXY
+864 0 Dl
+2 f
+8 s
+2746 5648(4)N
+1 f
+9 s
+2796 5673(It)N
+2858(does)X
+3008(not)X
+3118(contain)X
+3348(holes.)X
+3 f
+10 s
+720 5960(USENIX)N
+9 f
+1042(-)X
+3 f
+1106(Winter)X
+1371('91)X
+9 f
+1498(-)X
+3 f
+1562(Dallas,)X
+1815(TX)X
+4424(3)X
+
+4 p
+%%Page: 4 4
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+432 258(A)N
+510(New)X
+682(Hashing)X
+985(Package)X
+1290(for)X
+1413(UNIX)X
+3663(Seltzer)X
+3920(&)X
+4007(Yigit)X
+1 f
+604 538(Initially,)N
+937(there)X
+1158(is)X
+1271(one)X
+1446(slot)X
+1620(in)X
+1741(the)X
+1898(directory)X
+432 626(addressing)N
+802(a)X
+865(single)X
+1083(bucket.)X
+1364(The)X
+1515(depth)X
+1719(of)X
+1812(the)X
+1936(trie)X
+2069(is)X
+2148(0)X
+432 714(and)N
+577(0)X
+646(bits)X
+790(of)X
+886(each)X
+1063(hash)X
+1239(value)X
+1442(are)X
+1570(examined)X
+1910(to)X
+2000(deter-)X
+432 802(mine)N
+624(in)X
+718(which)X
+946(bucket)X
+1192(to)X
+1286(place)X
+1488(a)X
+1556(key;)X
+1726(all)X
+1837(keys)X
+2015(go)X
+2126(in)X
+432 890(bucket)N
+682(0.)X
+797(When)X
+1024(this)X
+1174(bucket)X
+1423(is)X
+1511(full,)X
+1677(its)X
+1787(contents)X
+2089(are)X
+432 978(divided)N
+698(between)X
+992(L0)X
+1107(and)X
+1249(L1)X
+1363(as)X
+1455(was)X
+1605(done)X
+1786(in)X
+1873(the)X
+1996(previ-)X
+432 1066(ously)N
+664(discussed)X
+1030(algorithms.)X
+1471(After)X
+1700(this)X
+1874(split,)X
+2090(the)X
+432 1154(address)N
+710(of)X
+814(the)X
+948(second)X
+1207(bucket)X
+1457(must)X
+1648(be)X
+1760(stored)X
+1992(in)X
+2090(the)X
+432 1242(directory.)N
+796(To)X
+939(accommodate)X
+1438(the)X
+1589(new)X
+1776(address,)X
+2090(the)X
+432 1330(directory)N
+752(is)X
+835(split)X
+2 f
+8 s
+972 1305(5)N
+1 f
+10 s
+1330(,)Y
+1054(by)X
+1163(doubling)X
+1476(it,)X
+1569(thus)X
+1731(increasing)X
+2090(the)X
+432 1418(depth)N
+630(of)X
+717(the)X
+835(directory)X
+1145(by)X
+1245(one.)X
+604 1532(After)N
+813(this)X
+967(split,)X
+1163(a)X
+1237(single)X
+1466(bit)X
+1588(of)X
+1693(the)X
+1829(hash)X
+2014(value)X
+432 1620(needs)N
+663(to)X
+773(be)X
+896(examined)X
+1255(to)X
+1364(decide)X
+1621(whether)X
+1927(the)X
+2072(key)X
+432 1708(belongs)N
+711(to)X
+803(L0)X
+922(or)X
+1019(L1.)X
+1158(Once)X
+1358(one)X
+1504(of)X
+1601(these)X
+1795(buckets)X
+2069(\256lls)X
+432 1796(\(L0)N
+578(for)X
+702(example\),)X
+1051(it)X
+1125(is)X
+1208(split)X
+1375(as)X
+1472(before,)X
+1728(and)X
+1873(the)X
+2000(direc-)X
+432 1884(tory)N
+585(is)X
+662(split)X
+823(again)X
+1021(to)X
+1107(make)X
+1305(room)X
+1498(for)X
+1615(the)X
+1736(address)X
+2000(of)X
+2090(the)X
+432 1972(third)N
+618(bucket.)X
+927(This)X
+1104(splitting)X
+1400(causes)X
+1645(the)X
+1778(addresses)X
+2121(of)X
+432 2060(the)N
+567(non-splitting)X
+1012(bucket)X
+1263(\(L1\))X
+1443(to)X
+1541(be)X
+1653(duplicated.)X
+2063(The)X
+432 2148(directory)N
+766(now)X
+948(has)X
+1099(four)X
+1277(entries,)X
+1555(a)X
+1635(depth)X
+1857(of)X
+1968(2,)X
+2072(and)X
+432 2236(indexes)N
+700(the)X
+821(buckets)X
+1089(L00,)X
+1261(L01)X
+1413(and)X
+1552(L1,)X
+1684(as)X
+1774(shown)X
+2006(in)X
+2090(the)X
+432 2324(Figure)N
+661(2.)X
+604 2438(The)N
+756(crucial)X
+1002(part)X
+1154(of)X
+1247(the)X
+1371(algorithm)X
+1708(is)X
+1787(the)X
+1911(observa-)X
+432 2526(tion)N
+580(that)X
+724(L1)X
+837(is)X
+914(addressed)X
+1255(twice)X
+1453(in)X
+1539(the)X
+1661(directory.)X
+1995(If)X
+2073(this)X
+432 2614(bucket)N
+679(were)X
+869(to)X
+964(split)X
+1134(now,)X
+1324(the)X
+1454(directory)X
+1776(already)X
+2045(con-)X
+432 2702(tains)N
+611(room)X
+808(to)X
+898(hold)X
+1067(the)X
+1192(address)X
+1460(of)X
+1554(the)X
+1679(new)X
+1840(bucket.)X
+2121(In)X
+432 2790(general,)N
+711(the)X
+831(relationship)X
+1231(between)X
+1521(the)X
+1641(directory)X
+1953(and)X
+2090(the)X
+432 2878(number)N
+704(of)X
+798(bucket)X
+1039(addresses)X
+1374(contained)X
+1713(therein)X
+1962(is)X
+2041(used)X
+432 2966(to)N
+517(decide)X
+750(when)X
+947(to)X
+1031(split)X
+1190(the)X
+1310(directory.)X
+1662(Each)X
+1845(bucket)X
+2081(has)X
+432 3054(a)N
+505(depth,)X
+740(\()X
+2 f
+767(n)X
+7 s
+3070(b)Y
+10 s
+1 f
+848 3054(\),)N
+932(associated)X
+1299(with)X
+1478(it)X
+1558(and)X
+1710(appears)X
+1992(in)X
+2090(the)X
+432 3142(directory)N
+744(exactly)X
+998(2)X
+2 f
+7 s
+3106(n)Y
+9 f
+1075(-)X
+2 f
+1106(n)X
+4 s
+3110(b)Y
+7 s
+1 f
+10 s
+1181 3142(times.)N
+1396(When)X
+1610(a)X
+1668(bucket)X
+1904(splits,)X
+2113(its)X
+432 3230(depth)N
+638(increases)X
+961(by)X
+1069(one.)X
+1253(The)X
+1406(directory)X
+1724(must)X
+1907(split)X
+2072(any)X
+432 3318(time)N
+602(a)X
+665(bucket's)X
+964(depth)X
+1169(exceeds)X
+1451(the)X
+1576(depth)X
+1781(of)X
+1875(the)X
+2000(direc-)X
+432 3406(tory.)N
+630(The)X
+784(following)X
+1123(code)X
+1303(fragment)X
+1621(helps)X
+1818(to)X
+1908(illustrate)X
+432 3494(the)N
+554(extendible)X
+912(hashing)X
+1185(algorithm)X
+1520([FAG79])X
+1838(for)X
+1955(access-)X
+432 3582(ing)N
+554(individual)X
+898(buckets)X
+1163(and)X
+1299(maintaining)X
+1701(the)X
+1819(directory.)X
    hash = calchash(key);
    mask = maskvec[depth];

    bucket = directory[hash & mask];

    /* Key Insertion */
    if (store(bucket, key, data) == FAIL) {
            newbl = getpage();
            bucket->depth++;
            newbl->depth = bucket->depth;
            if (bucket->depth > depth) {
                    /* double directory */
                    depth++;
                    directory = double(directory);
            }
            splitbucket(bucket, newbl)
            ...
    }

[5] This decision to split the directory is based on a comparison of the depth of the page being split and the depth of the trie. In Figure 2, the depths of both L00 and L01 are 2, whereas the depth of L1 is 1. Therefore, if L1 were to split, the directory would not need to split. In reality, a bucket is allocated for the directory at the time of file creation so although the directory splits logically, physical splits do not occur until the file becomes quite large.
hsearch
Since hsearch does not have to translate hash values into disk addresses, it can use much simpler algorithms than those defined above. System V's hsearch constructs a fixed-size hash table (specified by the user at table creation). By default, a multiplicative hash function based on that described in Knuth, Volume 3, section 6.4 [KNU68] is used to obtain a primary bucket address. If this bucket is full, a secondary multiplicative hash value is computed to define the probe interval. The probe interval is added to the original bucket address (modulo the table size) to obtain a new bucket address. This process repeats until an empty bucket is found. If no bucket is found, an insertion fails with a "table full" condition.
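The probe sequence described above can be sketched roughly as follows. This is not System V code; the hash functions, table layout, and names (calchash1, calchash2, TABLE_SIZE) are invented for illustration, and the sketch assumes a power-of-two table with an odd probe interval so that every slot is eventually visited.

    /* Sketch of an hsearch-style double-hashing probe loop (illustrative only). */
    #include <stddef.h>
    #include <string.h>

    #define TABLE_SIZE 1024                 /* fixed size, chosen at creation */

    struct entry { char *key; void *data; };
    static struct entry table[TABLE_SIZE];  /* empty slots have key == NULL */

    static unsigned calchash1(const char *key)      /* primary hash */
    {
            unsigned h = 0;
            while (*key)
                    h = h * 31 + (unsigned char)*key++;
            return h;
    }

    static unsigned calchash2(const char *key)      /* probe interval */
    {
            unsigned h = 0;
            while (*key)
                    h = h * 37 + (unsigned char)*key++;
            return h | 1;           /* odd, so every slot is reachable */
    }

    /* Return the slot where key lives or should be stored; NULL means "table full". */
    static struct entry *probe(const char *key)
    {
            unsigned bucket = calchash1(key) % TABLE_SIZE;
            unsigned interval = calchash2(key) % TABLE_SIZE;
            unsigned i;

            for (i = 0; i < TABLE_SIZE; i++) {
                    struct entry *e = &table[bucket];
                    if (e->key == NULL || strcmp(e->key, key) == 0)
                            return e;
                    bucket = (bucket + interval) % TABLE_SIZE;  /* next probe */
            }
            return NULL;
    }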
The basic algorithm may be modified by a number of compile time options available to those users with AT&T source code. First, the package provides two options for hash functions. Users may specify their own hash function by compiling with "USCR" defined and declaring and defining the variable hcompar, a function taking two string arguments and returning an integer. Users may also request that hash values be computed simply by taking the modulo of the key (using division rather than multiplication for hash value calculation). If this technique is used, collisions are resolved by scanning sequentially from the selected bucket (linear probing). This option is available by defining the variable "DIV" at compile time.
A second option, based on an algorithm discovered by Richard P. Brent, rearranges the table at the time of insertion in order to speed up retrievals. The basic idea is to shorten long probe sequences by lengthening short probe sequences. Once the probe chain has exceeded some threshold (Brent suggests 2), we attempt to shuffle any colliding keys (keys which appeared in the probe sequence of the new key). The details of this key shuffling can be found in [KNU68] and [BRE73]. This algorithm may be obtained by defining the variable "BRENT" at compile time.
A third set of options, obtained by defining "CHAINED", uses linked lists to resolve collisions. Either of the primary hash functions described above may be used, but all collisions are resolved by building a linked list of entries from the primary bucket. By default, new entries will be added to a bucket at the beginning of the bucket chain. However, compile options "SORTUP" or "SORTDOWN" may be specified to order the hash chains within each bucket.
dynahash
The dynahash library, written by Esmond Pitt, implements Larson's linear hashing algorithm [LAR88] with an hsearch compatible interface. Intuitively, a hash table begins as a single bucket and grows in generations, where a generation corresponds to a doubling in the size of the hash table. The 0th generation occurs as the table grows from one bucket to two. In the next generation the table grows from two to four. During each generation, every bucket that existed at the beginning of the generation is split.
The table starts as a single bucket (numbered 0), the current split bucket is set to bucket 0, and the maximum split point is set to twice the current split point (0). When it is time for a bucket to split, the keys in the current split bucket are divided between the current split bucket and a new bucket whose bucket number is equal to 1 + current split bucket + maximum split point. We can determine which keys move to the new bucket by examining the nth bit of a key's hash value where n is the generation number. After the bucket at the maximum split point has been split, the generation number is incremented, the current split point is set back to zero, and the maximum split point is set to the number of the last bucket in the file (which is equal to twice the old maximum split point plus 1).
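A minimal sketch of this bookkeeping (invented names, not dynahash's source; it assumes both generations and hash bits are numbered from zero, starting at the low-order end) might look like:

    /* Illustrative sketch of linear-hashing split bookkeeping. */
    static unsigned generation = 0;      /* generation number */
    static unsigned current_split = 0;   /* current split bucket */
    static unsigned max_split = 0;       /* maximum split point */

    /* Number of the new bucket created when current_split is split. */
    static unsigned new_bucket(void)
    {
            return 1 + current_split + max_split;
    }

    /* Does a key with this hash value move to the new bucket? */
    static int moves_to_new_bucket(unsigned hash)
    {
            return (hash >> generation) & 1;   /* nth bit, n = generation */
    }

    /* Advance the split state after current_split has been split. */
    static void advance_split(void)
    {
            if (current_split == max_split) {       /* finished this generation */
                    generation++;
                    current_split = 0;
                    max_split = 2 * max_split + 1;  /* last bucket now in the file */
            } else
                    current_split++;
    }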
To facilitate locating keys, we maintain two masks. The low mask is equal to the maximum split bucket and the high mask is equal to the next maximum split bucket. To locate a specific key, we compute a 32-bit hash value using a bit-randomizing algorithm such as the one described in [LAR88]. This hash value is then masked with the high mask. If the resulting number is greater than the maximum bucket in the table (current split bucket + maximum split point), the hash value is masked with the low mask. In either case, the result of the mask is the bucket number for the given key. The algorithm below illustrates this process.
    h = calchash(key);
    bucket = h & high_mask;
    if ( bucket > max_bucket )
            bucket = h & low_mask;
    return(bucket);
In order to decide when to split a bucket, dynahash uses controlled splitting. A hash table has a fill factor which is expressed in terms of the average number of keys in each bucket. Each time the table's total number of keys divided by its number of buckets exceeds this fill factor, a bucket is split.
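For illustration only (the names are invented), this controlled-splitting test amounts to a single comparison performed after each insertion:

    /* Illustrative sketch: has the table's average bucket occupancy exceeded
     * the fill factor?  nkeys and nbuckets are maintained by the insert path,
     * and the table always contains at least one bucket. */
    static int needs_controlled_split(unsigned long nkeys,
        unsigned long nbuckets, unsigned long ffactor)
    {
            return nkeys / nbuckets > ffactor;
    }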
Since the hsearch create interface (hcreate) calls for an estimate of the final size of the hash table (nelem), dynahash uses this information to initialize the table. The initial number of buckets is set to nelem rounded to the next higher power of two. The current split point is set to 0 and the maximum bucket and maximum split point are set to this rounded value.
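A sketch of that initialization (invented names; the real code differs in detail, for instance in how an nelem that is already a power of two is treated):

    /* Illustrative sketch of sizing the table from the nelem estimate. */
    static unsigned current_split, max_bucket, max_split;

    static unsigned init_from_estimate(unsigned nelem)
    {
            unsigned nbuckets = 1;

            while (nbuckets < nelem)        /* round up to a power of two */
                    nbuckets <<= 1;

            current_split = 0;
            max_bucket = max_split = nbuckets;
            return nbuckets;
    }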
The New Implementation
Our implementation is also based on Larson's linear hashing [LAR88] algorithm as well as the dynahash implementation. The dbm family of algorithms decides dynamically which bucket to split and when to split it (when it overflows), while dynahash splits in a predefined order (linearly) and at a predefined time (when the table fill factor is exceeded). We use a hybrid of these techniques. Splits occur in the predefined order of linear hashing, but the time at which pages are split is determined both by page overflows (uncontrolled splitting) and by exceeding the fill factor (controlled splitting).
A hash table is parameterized by both its bucket size (bsize) and fill factor (ffactor). Whereas dynahash's buckets can be represented as a linked list of elements in memory, our package needs to support disk access, and must represent buckets in terms of pages. The bsize is the size (in bytes) of these pages. As in linear hashing, the number of buckets in the table is equal to the number of keys in the table divided by ffactor. [6] The controlled splitting occurs each time the number of keys in the table exceeds the fill factor multiplied by the number of buckets.
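Combining this fill-factor test with the page-overflow trigger mentioned above, the splitting policy can be sketched roughly as follows (invented names and structure; the package's actual control flow differs):

    /* Illustrative sketch of the hybrid splitting policy. */
    struct htab {
            unsigned long nkeys;      /* keys currently in the table */
            unsigned long nbuckets;   /* primary buckets in the table */
            unsigned long ffactor;    /* desired keys per bucket */
    };

    /* Split the current split bucket, in linear-hashing order. */
    static void split_current_bucket(struct htab *t)
    {
            t->nbuckets++;            /* ... move keys, update split state ... */
    }

    /* Called after a key has been added. */
    static void maybe_split(struct htab *t, int page_overflowed)
    {
            if (page_overflowed)                            /* uncontrolled splitting */
                    split_current_bucket(t);
            else if (t->nkeys > t->ffactor * t->nbuckets)   /* controlled splitting */
                    split_current_bucket(t);
    }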
Inserting keys and splitting buckets is performed precisely as described previously for dynahash. However, since buckets are now comprised of pages, we must be prepared to handle cases where the size of the keys and data in a bucket exceeds the bucket size.
Overflow Pages
There are two cases where a key may not fit in its designated bucket. In the first case, the total size of the key and data may exceed the bucket size. In the second, addition of a new key could cause an overflow, but the bucket in question is not yet scheduled to be split. In existing implementations, the second case never arises (since buckets are split when they overflow) and the first case is not handled at all. Although large key/data pair handling is difficult and expensive, it is essential. In a linear hashed implementation, overflow pages are required for buckets which overflow before they are split, so we can use the same mechanism for large key/data pairs that we use for overflow pages. Logically, we chain overflow pages to the buckets (also called primary pages).
[6] This is not strictly true. The file does not contract when keys are deleted, so the number of buckets is actually equal to the maximum number of keys ever present in the table divided by the fill factor.
In a memory based representation, overflow pages do not pose any special problems because we can chain overflow pages to primary pages using memory pointers. However, mapping these overflow pages into a disk file is more of a challenge, since we need to be able to address both bucket pages, whose numbers are growing linearly, and some indeterminate number of overflow pages without reorganizing the file.
One simple solution would be to allocate a separate file for overflow pages. The disadvantage with such a technique is that it requires an extra file descriptor, an extra system call on open and close, and logically associating two independent files. For these reasons, we wanted to map both primary pages and overflow pages into the same file space.
The buddy-in-waiting algorithm provides a mechanism to support multiple pages per logical bucket while retaining the simple split sequence of linear hashing. Overflow pages are preallocated between generations of primary pages. These overflow pages are used by any bucket containing more keys than fit on the primary page and are reclaimed, if possible, when the bucket later splits. Figure 3 depicts the layout of primary pages and overflow pages within the same file. Overflow page use information is recorded in bitmaps which are themselves stored on overflow pages. The addresses of the bitmap pages and the number of pages allocated at each split point are stored in the file header. Using this information, both overflow addresses and bucket addresses can be mapped to disk addresses by the following calculation:
    int     bucket;         /* bucket address */
    u_short oaddr;          /* OVERFLOW address */
    int     nhdr_pages;     /* npages in file header */
    int     spares[32];     /* npages at each split */
    int     log2();         /* ceil(log base 2) */

    #define BUCKET_TO_PAGE(bucket) \
            bucket + nhdr_pages + \
            (bucket ? spares[log2(bucket + 1) - 1] : 0)

    #define OADDR_TO_PAGE(oaddr) \
            BUCKET_TO_PAGE((1 << (oaddr >> 11)) - 1) + \
            (oaddr & 0x7ff)
An overflow page is addressed by its split point, identifying the generations between which the overflow page is allocated, and its page number, identifying the particular page within the split point. In this implementation, offsets within pages are 16 bits long (limiting the maximum page size to 32K), so we select an overflow page addressing algorithm that can be expressed in 16 bits and which allows quick retrieval. The top five bits indicate the split point and the lower eleven indicate the page number within the split point. Since five bits are reserved for the split point, files may split 32 times, yielding a maximum file size of 2^32 buckets and 32*2^11 overflow pages. The maximum page size is 2^15, yielding a maximum file size greater than 131,000 GB (on file systems supporting files larger than 4GB).
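As a small illustration of this 16-bit encoding (independent of the macros above; the helper names are invented), the split point and page number can be packed and unpacked like this:

    /* Illustrative sketch of the overflow-address layout:
     * top 5 bits = split point, low 11 bits = page number within it. */
    #include <assert.h>

    typedef unsigned short oaddr_t;         /* 16-bit overflow address */

    static oaddr_t oaddr_pack(unsigned splitpoint, unsigned pagenum)
    {
            assert(splitpoint < 32 && pagenum < 2048);
            return (oaddr_t)((splitpoint << 11) | pagenum);
    }

    static unsigned oaddr_splitpoint(oaddr_t oaddr)
    {
            return oaddr >> 11;
    }

    static unsigned oaddr_pagenum(oaddr_t oaddr)
    {
            return oaddr & 0x7ff;
    }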
[Figure 3: a diagram of the file layout, showing buckets and preallocated overflow pages interleaved, with split points marked between generations and overflow addresses of the form 1/1, 1/2, 2/1, 2/2, 2/3. Caption: Split points occur between generations and are numbered from 0. In this figure there are two overflow pages allocated at split point 1 and three allocated at split point 2.]
Buffer Management
The hash table is stored in memory as a logical array of bucket pointers. Physically, the array is arranged in segments of 256 pointers. Initially, there is space to allocate 256 segments. Reallocation occurs when the number of buckets exceeds 32K (256 * 256). Primary pages may be accessed directly through the array by bucket number and overflow pages are referenced logically by their overflow page address. For small hash tables, it is desirable to keep all pages in main memory while on larger tables, this is probably impossible. To satisfy both of these requirements, the package includes buffer management with LRU (least recently used) replacement.
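A sketch of that two-level lookup (invented names; it assumes the segment in question has already been allocated), where a bucket number selects a segment of 256 pointers and then a slot within it:

    /* Illustrative sketch of the segmented in-memory bucket array. */
    #define SEGMENT_SIZE 256                /* bucket pointers per segment */
    #define DIR_SIZE     256                /* initial number of segment slots */

    struct bufhead;                          /* buffer header for a page */

    static struct bufhead **segments[DIR_SIZE];     /* array of segments */

    /* Map a bucket number to the slot holding its buffer-header pointer. */
    static struct bufhead **bucket_slot(unsigned bucket)
    {
            unsigned seg = bucket / SEGMENT_SIZE;
            unsigned off = bucket % SEGMENT_SIZE;

            return &segments[seg][off];
    }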
By default, the package allocates up to 64K bytes of buffered pages. All pages in the buffer pool are linked in LRU order to facilitate fast replacement. Whereas efficient access to primary pages is provided by the bucket array, efficient access to overflow pages is provided by linking overflow page buffers to their predecessor page (either the primary page or another overflow page).
This means that an overflow page cannot be present in the buffer pool if its primary page is not present. This does not impact performance or functionality, because an overflow page will be accessed only after its predecessor page has been accessed. Figure 4 depicts the data structures used to manage the buffer pool.
The in-memory bucket array contains pointers to buffer header structures which represent primary pages. Buffer headers contain modified bits, the page address of the buffer, a pointer to the actual buffer, and a pointer to the buffer header for an overflow page if it exists, in addition to the LRU links. If the buffer corresponding to a particular bucket is not in memory, its pointer is NULL. In effect, pages are linked in three ways. Using the buffer headers, they are linked physically through the LRU links and the overflow links. Using the pages themselves, they are linked logically through the overflow addresses on the page. Since overflow pages are accessed only after their predecessor pages, they are removed from the buffer pool when their primary is removed.
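The fields listed above suggest a buffer header along the following lines (a sketch with invented names, not the package's actual declaration):

    /* Illustrative sketch of a buffer header for one page in the pool. */
    struct bufhead {
            char           *page;       /* the buffered page itself */
            unsigned short  addr;       /* bucket or overflow address of the page */
            int             modified;   /* must be written back before eviction */
            struct bufhead *ovfl;       /* header of the chained overflow page,
                                         * or NULL if there is none */
            struct bufhead *lru_prev;   /* doubly linked LRU list */
            struct bufhead *lru_next;
    };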
[Figure 4: a diagram of the buffer pool, showing the in-memory bucket array pointing to buffer headers for primary buffers, an overflow buffer chained from its primary buffer's header, an overflow address stored on the page itself, and the LRU chain linking the buffer headers. Caption: Three primary pages (B0, B5, B10) are accessed directly from the bucket array. The one overflow page (O1/1) is linked physically from its primary page's buffer header as well as logically from its predecessor page buffer (B5).]
Table Parameterization
When a hash table is created, the bucket size, fill factor, initial number of elements, number of bytes of main memory used for caching, and a user-defined hash function may be specified. The bucket size (and page size for overflow pages) defaults to 256 bytes. For tables with large data items, it may be preferable to increase the page size, and, conversely, applications storing small items exclusively in memory may benefit from a smaller bucket size. A bucket size smaller than 64 bytes is not recommended.
The fill factor indicates a desired density within the hash table. It is an approximation of the number of keys allowed to accumulate in any one bucket, determining when the hash table grows. Its default is eight. If the user knows the average size of the key/data pairs being stored in the table, near optimal bucket sizes and fill factors may be selected by applying the equation:
    (1)  ((average_pair_length + 4) * ffactor) >= bsize
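For example (illustrative numbers, not taken from the paper's experiments): with the default 256-byte bucket, key/data pairs averaging 28 bytes satisfy equation 1 at the default fill factor of 8, since (28 + 4) * 8 = 256; pairs averaging 60 bytes would need a fill factor of only 4, since (60 + 4) * 4 = 256.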
For highly time critical applications, experimenting with different bucket sizes and fill factors is encouraged.
Figures 5a, b, and c illustrate the effects of varying page sizes and fill factors for the same data set. The data set consisted of 24474 keys taken from an online dictionary. The data value for each key was an ASCII string for an integer from 1 to 24474 inclusive. The test run consisted of creating a new hash table (where the ultimate size of the table was known in advance), entering each key/data pair into the table and then retrieving each key/data pair from the table. Each of the graphs shows the timings resulting from varying the pagesize from 128 bytes to 1M and the fill factor from 1 to 128. For each run, the buffer size was set at 1M. The tests were all run on an HP 9000/370 (33.3 Mhz MC68030), with 16M of memory, 64K physically addressed cache, and an HP7959S disk drive, running 4.3BSD-Reno single-user.
Both system time (Figure 5a) and elapsed time (Figure 5b) show that for all bucket sizes, the greatest performance gains are made by increasing the fill factor until equation 1 is satisfied. The user time shown in Figure 5c gives a more detailed picture of how performance varies. The smaller bucket sizes require fewer keys per page to satisfy equation 1 and therefore incur fewer collisions. However, when the buffer pool size is fixed, smaller pages imply more pages. An increased number of pages means more malloc(3) calls and more overhead in the hash package's buffer manager to manage the additional pages.
The tradeoff works out most favorably when the page size is 256 and the fill factor is 8. Similar conclusions were obtained if the test was run without knowing the final table size in advance. If the file was closed and written to disk, the conclusions were still the same. However, rereading the file from disk was slightly faster if a larger bucket size and fill factor were used (1K bucket size and 32 fill factor). This follows intuitively from the improved efficiency of performing 1K reads from the disk rather than 256 byte reads.
In general, performance for disk based tables is best when the page size is approximately 1K.
[Figure 5a: a graph of Seconds (0-300) versus Fill Factor (0-128), one line per bucket size (128, 256, 512, 1024, 4096, 8192). Caption: System Time for dictionary data set with 1M of buffer space and varying bucket sizes and fill factors. Each line is labeled with its bucket size.]

[Figure 5b: a graph of Seconds (0-3200) versus Fill Factor (0-128), one line per bucket size (128, 256, 512, 1024, 4096, 8192). Caption: Elapsed Time for dictionary data set with 1M of buffer space and varying bucket sizes and fill factors. Each line is labeled with its bucket size.]
If an approximation of the number of elements ultimately to be stored in the hash table is known at the time of creation, the hash package takes this number as a parameter and uses it to hash entries into the full sized table rather than growing the table from a single bucket. If this number is not known, the hash table starts with a single bucket and gracefully expands as elements are added, although a slight performance degradation may be noticed. Figure 6 illustrates the difference in performance between storing keys in a file when the ultimate size is known (the left bars in each set), compared to building the file when the ultimate size is unknown (the right bars in each set). Once the fill factor is sufficiently high for the page size (8), growing the table dynamically does little to degrade performance.
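A minimal sketch of passing that element-count hint at creation time, assuming the 4.4BSD db(3)/dbopen() interface this package shipped with; the HASHINFO field names are taken from <db.h> and may differ from the paper's original entry points:

    #include <db.h>
    #include <fcntl.h>
    #include <stdio.h>

    int
    main(void)
    {
        HASHINFO info = {
            0,      /* bsize: 0 selects the default bucket size */
            0,      /* ffactor: default fill factor */
            25000,  /* nelem: expected number of elements (the hint) */
            0,      /* cachesize: default buffer pool */
            NULL,   /* hash: built-in hash function */
            0       /* lorder: host byte order */
        };
        DB *db = dbopen("words.db", O_CREAT | O_RDWR, 0644, DB_HASH, &info);

        if (db == NULL) {
            perror("dbopen");
            return 1;
        }
        /* ... insert each key/data pair with db->put(), then ... */
        db->close(db);
        return 0;
    }

With nelem set, the package can lay out a full-sized table up front instead of splitting buckets as the data arrives.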
[Figure 5c: line plot of Seconds (0-15) vs. Fill Factor (0-128); one curve per bucket size, labeled 128 through 8192.]
Figure 5c: User Time for dictionary data set with 1M of buffer space and varying bucket sizes and fill factors. Each line is labeled with its bucket size.
Since no known hash function performs equally well on all possible data, the user may find that the built-in hash function does poorly on a particular data set. In this case, a hash function, taking two arguments (a pointer to a byte string and a length) and returning an unsigned long to be used as the hash value, may be specified at hash table creation time. When an existing hash table is opened and a hash function is specified, the hash package will try to determine that the hash function supplied is the one with which the table was created.
There are a variety of hash functions provided with the package. The default function for the package is the one which offered the best performance in terms of cycles executed per call (it did not produce the fewest collisions although it was within a small percentage of the function that produced the fewest collisions). Again, in time critical applications, users are encouraged to experiment with a variety of hash functions to achieve optimal performance.
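A sketch of supplying a user-specified hash function at creation time, again assuming the <db.h> HASHINFO layout; the FNV-style function below is only an illustration, not one of the hash functions shipped with the package:

    #include <db.h>
    #include <fcntl.h>
    #include <stddef.h>

    /* Illustrative only: a simple byte-string hash (FNV-1a style). */
    static u_int32_t
    my_hash(const void *key, size_t len)
    {
        const unsigned char *p = key;
        u_int32_t h = 2166136261U;

        while (len-- > 0) {
            h ^= *p++;
            h *= 16777619U;
        }
        return h;
    }

    DB *
    open_with_custom_hash(const char *path)
    {
        HASHINFO info = { 0 };

        info.hash = my_hash;  /* must also be supplied on every later open */
        return dbopen(path, O_CREAT | O_RDWR, 0644, DB_HASH, &info);
    }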
[Figure 6: stacked bar chart of Seconds (0-30) vs. Fill Factor (4, 8, 16, 32, 64); left bar of each pair is the full size table, right bar the dynamically grown table; regions show User, System, and Elapsed time.]
Figure 6: The total regions indicate the difference between the elapsed time and the sum of the system and user time. The left bar of each set depicts the timing of the test run when the number of entries is known in advance. The right bars depict the timing when the file is grown from a single bucket.
Since this hashing package provides buffer management, the amount of space allocated for the buffer pool may be specified by the user. Using the same data set and test procedure as used to derive the graphs in Figures 5a-c, Figure 7 shows the impact of varying the size of the buffer pool. The bucket size was set to 256 bytes and the fill factor was set to 16. The buffer pool size was varied from 0 (the minimum number of pages required to be buffered) to 1M. With 1M of buffer space, the package performed no I/O for this data set. As Figure 7 illustrates, increasing the buffer pool size can have a dramatic effect on resulting performance.[7]
[7] Some allocators are extremely inefficient at allocating memory. If you find that applications are running out of memory before you think they should, try varying the pagesize to get better utilization from the memory allocator.
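A sketch of sizing the buffer pool at open time with the parameters used for the Figure 7 experiment; treating the cachesize field of the <db.h> HASHINFO (in bytes) as the buffer-pool knob is an assumption about how the paper's interface exposes it:

    #include <db.h>
    #include <fcntl.h>

    DB *
    open_with_1mb_pool(const char *path)
    {
        HASHINFO info = { 0 };

        info.bsize = 256;              /* bucket size used in Figure 7 */
        info.ffactor = 16;             /* fill factor used in Figure 7 */
        info.cachesize = 1024 * 1024;  /* 1M pool: no I/O for this data set */
        return dbopen(path, O_CREAT | O_RDWR, 0644, DB_HASH, &info);
    }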
[Figure 7: line plot of Seconds (0-60) vs. Buffer Pool Size in K (0-1024); curves for User, System, and Total time.]
Figure 7: User time is virtually insensitive to the amount of buffer pool available; however, both system time and elapsed time are inversely proportional to the size of the buffer pool. Even for large data sets where one expects few collisions, specifying a large buffer pool dramatically improves performance.
Enhanced Functionality

This hashing package provides a set of compatibility routines to implement the ndbm interface. However, when the native interface is used, the following additional functionality is provided:

- Inserts never fail because too many keys hash to the same value.
- Inserts never fail because key and/or associated data is too large.
- Hash functions may be user-specified.
- Multiple pages may be cached in main memory.

It also provides a set of compatibility routines to implement the hsearch interface. Again, the native interface offers enhanced functionality (a usage sketch follows these lists):

- Files may grow beyond nelem elements.
- Multiple hash tables may be accessed concurrently.
- Hash tables may be stored and accessed on disk.
- Hash functions may be user-specified at runtime.
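A sketch of the native key/data interface exercising two of the points above, long data items and concurrently open tables; the dbopen()/DBT calls are the 4.4BSD db(3) form and are assumed rather than quoted from the paper:

    #include <db.h>
    #include <fcntl.h>
    #include <string.h>

    static int
    store_pair(DB *db, const char *key, const char *val)
    {
        DBT k, v;

        memset(&k, 0, sizeof(k));
        memset(&v, 0, sizeof(v));
        k.data = (void *)key;
        k.size = strlen(key) + 1;
        v.data = (void *)val;
        v.size = strlen(val) + 1;   /* may exceed a page; the insert still succeeds */
        return db->put(db, &k, &v, 0);
    }

    int
    main(void)
    {
        /* Two tables open at once -- not possible through hsearch(3). */
        DB *users = dbopen("users.db", O_CREAT | O_RDWR, 0644, DB_HASH, NULL);
        DB *hosts = dbopen("hosts.db", O_CREAT | O_RDWR, 0644, DB_HASH, NULL);

        if (users == NULL || hosts == NULL)
            return 1;
        store_pair(users, "alice", "first record");
        store_pair(hosts, "gateway", "second record");
        users->close(users);
        hosts->close(hosts);
        return 0;
    }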
Relative Performance of the New Implementation

The performance testing of the new package is divided into two test suites. The first suite of tests requires that the tables be read from and written to disk. In these tests, the basis for comparison is the 4.3BSD-Reno version of ndbm. Based on the designs of sdbm and gdbm, they are expected to perform similarly to ndbm, and we do not show their performance numbers. The second suite contains the memory resident test which does not require that the files ever be written to disk, only that hash tables may be manipulated in main memory. In this test, we compare the performance to that of the hsearch routines.

For both suites, two different databases were used. The first is the dictionary database described previously. The second was constructed from a password file with approximately 300 accounts. Two records were constructed for each account. The first used the logname as the key and the remainder of the password entry for the data. The second was keyed by uid and contained the entire password entry as its data field. The tests were all run on the HP 9000 with the same configuration previously described. Each test was run five times and the timing results of the runs were averaged. The variance across the 5 runs was approximately 1% of the average yielding 95% confidence intervals of approximately 2%.
Disk Based Tests

In these tests, we use a bucket size of 1024 and a fill factor of 32.

create test
    The keys are entered into the hash table, and the file is flushed to disk.

read test
    A lookup is performed for each key in the hash table.

verify test
    A lookup is performed for each key in the hash table, and the data returned is compared against that originally stored in the hash table.

sequential retrieve
    All keys are retrieved in sequential order from the hash table. The ndbm interface allows sequential retrieval of the keys from the database, but does not return the data associated with each key. Therefore, we compare the performance of the new package to two different runs of ndbm. In the first case, ndbm returns only the keys while in the second, ndbm returns both the keys and the data (requiring a second call to the library). There is a single run for the new library since it returns both the key and the data (the loops are sketched below).
In-Memory Test

This test uses a bucket size of 256 and a fill factor of 8.

create/read test
    In this test, a hash table is created by inserting all the key/data pairs. Then a keyed retrieval is performed for each pair, and the hash table is destroyed (sketched below).
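A sketch of that create/read sequence with these parameters; keeping the table purely in memory by passing a NULL file name to dbopen() is an assumption about how the test avoids file I/O:

    #include <db.h>
    #include <fcntl.h>
    #include <stddef.h>

    static int
    create_read_test(DBT *keys, DBT *vals, size_t n)
    {
        HASHINFO info = { 0 };
        DBT got;
        size_t i;
        DB *db;

        info.bsize = 256;   /* bucket size for the in-memory test */
        info.ffactor = 8;   /* fill factor for the in-memory test */
        db = dbopen(NULL, O_CREAT | O_RDWR, 0600, DB_HASH, &info);
        if (db == NULL)
            return -1;
        for (i = 0; i < n; i++)          /* create: insert every pair */
            (void)db->put(db, &keys[i], &vals[i], 0);
        for (i = 0; i < n; i++)          /* read: keyed retrieval of every pair */
            (void)db->get(db, &keys[i], &got, 0);
        return db->close(db);            /* destroy the table */
    }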
Performance Results

Figures 8a and 8b show the user time, system time, and elapsed time for each test for both the new implementation and the old implementation (hsearch or ndbm, whichever is appropriate) as well as the improvement. The improvement is expressed as a percentage of the old running time:

    % = 100 * (old_time - new_time) / old_time
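The same metric as a one-line helper, with a worked value taken from the dictionary read test in Figure 8a; the helper name is ours, not part of the package:

    #include <stdio.h>

    static double
    improvement(double old_time, double new_time)
    {
        return 100.0 * (old_time - new_time) / old_time;
    }

    int
    main(void)
    {
        /* Figure 8a, read test, elapsed time: ndbm 21.2 vs. new package 4.0. */
        printf("%.0f%% improvement\n", improvement(21.2, 4.0));  /* prints 81% */
        return 0;
    }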
In nearly all cases, the new routines perform better than the old routines (both hsearch and ndbm). Although the create tests exhibit superior user time performance, the test time is dominated by the cost of writing the actual file to disk. For the large database (the dictionary), this completely overwhelmed the system time. However, for the small database, we see that differences in both user and system time contribute to the superior performance of the new package.

The read, verify, and sequential results are deceptive for the small database since the entire test ran in under a second. However, on the larger database the read and verify tests benefit from the caching of buckets in the new package to improve performance by over 80%. Since the first sequential test does not require ndbm to return the data values, the user time is lower than for the new package. However, when we require both packages to return data, the new package excels in all three timings.

The small database runs so quickly in the memory-resident case that the results are uninteresting. However, for the larger database the new package pays a small penalty in system time because it limits its main memory utilization and swaps pages out to temporary storage in the file system while the hsearch package requires that the application allocate enough space for all key/data pairs. However, even with the system time penalty, the resulting elapsed time improves by over 50%.
                 hash     ndbm   %change
CREATE
  user            6.4     12.2      48
  sys            32.5     34.7       6
  elapsed        90.4     99.6       9
READ
  user            3.4      6.1      44
  sys             1.2     15.3      92
  elapsed         4.0     21.2      81
VERIFY
  user            3.5      6.3      44
  sys             1.2     15.3      92
  elapsed         4.0     21.2      81
SEQUENTIAL
  user            2.7      1.9     -42
  sys             0.7      3.9      82
  elapsed         3.0      5.0      40
SEQUENTIAL (with data retrieval)
  user            2.7      8.2      67
  sys             0.7      4.3      84
  elapsed         3.0     12.0      75

                 hash  hsearch   %change
CREATE/READ
  user            6.6     17.2      62
  sys             1.1      0.3    -266
  elapsed         7.8     17.0      54

Figure 8a: Timing results for the dictionary database.
Conclusion

This paper has presented the design, implementation and performance of a new hashing package for UNIX. The new package provides a superset of the functionality of existing hashing packages and incorporates additional features such as large key handling, user defined hash functions, multiple hash tables, variable sized pages, and linear hashing. In nearly all cases, the new package provides improved performance on the order of 50-80% for the workloads shown. Applications such as the loader, compiler, and mail, which currently implement their own hashing routines, should be modified to use the generic routines.

This hashing package is one access method which is part of a generic database access package being developed at the University of California, Berkeley. It will include a btree access method as well as fixed and variable length record access methods in addition to the hashed support presented here. All of the access methods are based on a key/data pair interface and appear identical to the application layer, allowing application implementations to be largely independent of the database type. The package is expected to be an integral part of the 4.4BSD system, with various standard applications such as more(1), sort(1) and vi(1) based on it. While the current design does not support multi-user access or transactions, they could be incorporated relatively easily.
                 hash     ndbm   %change
CREATE
  user            0.2      0.4      50
  sys             0.1      1.0      90
  elapsed         0        3.2     100
READ
  user            0.1      0.1       0
  sys             0.1      0.4      75
  elapsed         0.0      0.0       0
VERIFY
  user            0.1      0.2      50
  sys             0.1      0.3      67
  elapsed         0.0      0.0       0
SEQUENTIAL
  user            0.1      0.0    -100
  sys             0.1      0.1       0
  elapsed         0.0      0.0       0
SEQUENTIAL (with data retrieval)
  user            0.1      0.1       0
  sys             0.1      0.1       0
  elapsed         0.0      0.0       0

                 hash  hsearch   %change
CREATE/READ
  user            0.3      0.4      25
  sys             0.0      0.0       0
  elapsed         0.0      0.0       0

Figure 8b: Timing results for the password database.
References

[ATT79] AT&T, DBM(3X), Unix Programmer's Manual, Seventh Edition, Volume 1, January, 1979.
[ATT85] AT&T, HSEARCH(BA_LIB), Unix System User's Manual, System V.3, pp. 506-508, 1985.
[BRE73] Brent, Richard P., "Reducing the Retrieval Time of Scatter Storage Techniques", Communications of the ACM, Volume 16, No. 2, pp. 105-109, February, 1973.
[BSD86] NDBM(3), 4.3BSD Unix Programmer's Manual Reference Guide, University of California, Berkeley, 1986.
[ENB88] Enbody, R. J., Du, H. C., "Dynamic Hashing Schemes", ACM Computing Surveys, Vol. 20, No. 2, pp. 85-113, June 1988.
[FAG79] Fagin, Ronald, Nievergelt, Jurg, Pippenger, Nicholas, Strong, H. Raymond, "Extendible Hashing -- A Fast Access Method for Dynamic Files", ACM Transactions on Database Systems, Volume 4, No. 3, September 1979, pp. 315-344.
[KNU68] Knuth, D.E., The Art of Computer Programming, Vol. 3: Sorting and Searching, sections 6.3-6.4, pp. 481-550.
[LAR78] Larson, Per-Ake, "Dynamic Hashing", BIT, Vol. 18, 1978, pp. 184-201.
[LAR88] Larson, Per-Ake, "Dynamic Hash Tables", Communications of the ACM, Volume 31, No. 4, April 1988, pp. 446-457.
[LIT80] Litwin, Witold, "Linear Hashing: A New Tool for File and Table Addressing", Proceedings of the 6th International Conference on Very Large Databases, 1980.
[NEL90] Nelson, Philip A., Gdbm 1.4 source distribution and README, August 1990.
[THOM90] Ken Thompson, private communication, Nov. 1990.
[TOR87] Torek, C., "Re: dbm.a and ndbm.a archives", USENET newsgroup comp.unix, 1987.
[TOR88] Torek, C., "Re: questions regarding databases created with dbm and ndbm routines", USENET newsgroup comp.unix.questions, June 1988.
[WAL84] Wales, R., "Discussion of "dbm" data base system", USENET newsgroup unix.wizards, January, 1984.
[YIG89] Yigit, Ozan S., "How to Roll Your Own Dbm/Ndbm", unpublished manuscript, Toronto, July, 1989.
Margo I. Seltzer is a Ph.D. student in the Department of Electrical Engineering and Computer Sciences at the University of California, Berkeley. Her research interests include file systems, databases, and transaction processing systems. She spent several years working at startup companies designing and implementing file systems and transaction processing software and designing microprocessors. Ms. Seltzer received her AB in Applied Mathematics from Harvard/Radcliffe College in 1983.

In her spare time, Margo can usually be found preparing massive quantities of food for hungry hordes, studying Japanese, or playing soccer with an exciting Bay Area Women's Soccer team, the Berkeley Bruisers.

Ozan (Oz) Yigit is currently a software engineer with the Communications Research and Development group, Computing Services, York University. His formative years were also spent at York, where he held system programmer and administrator positions for various mixtures of UNIX systems starting with Berkeley 4.1 in 1982, while at the same time obtaining a degree in Computer Science.

In his copious free time, Oz enjoys working on whatever software looks interesting, which often includes language interpreters, preprocessors, and lately, program generators and expert systems.

Oz has authored several public-domain software tools, including an nroff-like text formatter proff that is apparently still used in some basement PCs. His latest obsessions include the incredible programming language Scheme, and Chinese Brush painting.
+3 f
+720 5960(USENIX)N
+9 f
+1042(-)X
+3 f
+1106(Winter)X
+1371('91)X
+9 f
+1498(-)X
+3 f
+1562(Dallas,)X
+1815(TX)X
+4384(13)X
+
+14 p
+%%Page: 14 14
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+432 5960(14)N
+2970(USENIX)X
+9 f
+3292(-)X
+3 f
+3356(Winter)X
+3621('91)X
+9 f
+3748(-)X
+3 f
+3812(Dallas,)X
+4065(TX)X
+
+14 p
+%%Trailer
+xt
+
+xs
diff --git a/bdb/docs/ref/refs/libtp_usenix.ps b/bdb/docs/ref/refs/libtp_usenix.ps
new file mode 100644
index 00000000000..ea821a9145e
--- /dev/null
+++ b/bdb/docs/ref/refs/libtp_usenix.ps
@@ -0,0 +1,12340 @@
+%!PS-Adobe-1.0
+%%Creator: utopia:margo (& Seltzer,608-13E,8072,)
+%%Title: stdin (ditroff)
+%%CreationDate: Thu Dec 12 15:32:11 1991
+%%EndComments
+% @(#)psdit.pro 1.3 4/15/88
+% lib/psdit.pro -- prolog for psdit (ditroff) files
+% Copyright (c) 1984, 1985 Adobe Systems Incorporated. All Rights Reserved.
+% last edit: shore Sat Nov 23 20:28:03 1985
+% RCSID: $Header: psdit.pro,v 2.1 85/11/24 12:19:43 shore Rel $
+
+% Changed by Edward Wang (edward@ucbarpa.berkeley.edu) to handle graphics,
+% 17 Feb, 87.
+
+/$DITroff 140 dict def $DITroff begin
+/fontnum 1 def /fontsize 10 def /fontheight 10 def /fontslant 0 def
+/xi{0 72 11 mul translate 72 resolution div dup neg scale 0 0 moveto
+ /fontnum 1 def /fontsize 10 def /fontheight 10 def /fontslant 0 def F
+ /pagesave save def}def
+/PB{save /psv exch def currentpoint translate
+ resolution 72 div dup neg scale 0 0 moveto}def
+/PE{psv restore}def
+/arctoobig 90 def /arctoosmall .05 def
+/m1 matrix def /m2 matrix def /m3 matrix def /oldmat matrix def
+/tan{dup sin exch cos div}def
+/point{resolution 72 div mul}def
+/dround {transform round exch round exch itransform}def
+/xT{/devname exch def}def
+/xr{/mh exch def /my exch def /resolution exch def}def
+/xp{}def
+/xs{docsave restore end}def
+/xt{}def
+/xf{/fontname exch def /slotno exch def fontnames slotno get fontname eq not
+ {fonts slotno fontname findfont put fontnames slotno fontname put}if}def
+/xH{/fontheight exch def F}def
+/xS{/fontslant exch def F}def
+/s{/fontsize exch def /fontheight fontsize def F}def
+/f{/fontnum exch def F}def
+/F{fontheight 0 le{/fontheight fontsize def}if
+ fonts fontnum get fontsize point 0 0 fontheight point neg 0 0 m1 astore
+ fontslant 0 ne{1 0 fontslant tan 1 0 0 m2 astore m3 concatmatrix}if
+ makefont setfont .04 fontsize point mul 0 dround pop setlinewidth}def
+/X{exch currentpoint exch pop moveto show}def
+/N{3 1 roll moveto show}def
+/Y{exch currentpoint pop exch moveto show}def
+/S{show}def
+/ditpush{}def/ditpop{}def
+/AX{3 -1 roll currentpoint exch pop moveto 0 exch ashow}def
+/AN{4 2 roll moveto 0 exch ashow}def
+/AY{3 -1 roll currentpoint pop exch moveto 0 exch ashow}def
+/AS{0 exch ashow}def
+/MX{currentpoint exch pop moveto}def
+/MY{currentpoint pop exch moveto}def
+/MXY{moveto}def
+/cb{pop}def % action on unknown char -- nothing for now
+/n{}def/w{}def
+/p{pop showpage pagesave restore /pagesave save def}def
+/Dt{/Dlinewidth exch def}def 1 Dt
+/Ds{/Ddash exch def}def -1 Ds
+/Di{/Dstipple exch def}def 1 Di
+/Dsetlinewidth{2 Dlinewidth mul setlinewidth}def
+/Dsetdash{Ddash 4 eq{[8 12]}{Ddash 16 eq{[32 36]}
+ {Ddash 20 eq{[32 12 8 12]}{[]}ifelse}ifelse}ifelse 0 setdash}def
+/Dstroke{gsave Dsetlinewidth Dsetdash 1 setlinecap stroke grestore
+ currentpoint newpath moveto}def
+/Dl{rlineto Dstroke}def
+/arcellipse{/diamv exch def /diamh exch def oldmat currentmatrix pop
+ currentpoint translate 1 diamv diamh div scale /rad diamh 2 div def
+ currentpoint exch rad add exch rad -180 180 arc oldmat setmatrix}def
+/Dc{dup arcellipse Dstroke}def
+/De{arcellipse Dstroke}def
+/Da{/endv exch def /endh exch def /centerv exch def /centerh exch def
+ /cradius centerv centerv mul centerh centerh mul add sqrt def
+ /eradius endv endv mul endh endh mul add sqrt def
+ /endang endv endh atan def
+ /startang centerv neg centerh neg atan def
+ /sweep startang endang sub dup 0 lt{360 add}if def
+ sweep arctoobig gt
+ {/midang startang sweep 2 div sub def /midrad cradius eradius add 2 div def
+ /midh midang cos midrad mul def /midv midang sin midrad mul def
+ midh neg midv neg endh endv centerh centerv midh midv Da
+ Da}
+ {sweep arctoosmall ge
+ {/controldelt 1 sweep 2 div cos sub 3 sweep 2 div sin mul div 4 mul def
+ centerv neg controldelt mul centerh controldelt mul
+ endv neg controldelt mul centerh add endh add
+ endh controldelt mul centerv add endv add
+ centerh endh add centerv endv add rcurveto Dstroke}
+ {centerh endh add centerv endv add rlineto Dstroke}
+ ifelse}
+ ifelse}def
+/Dpatterns[
+[%cf[widthbits]
+[8<0000000000000010>]
+[8<0411040040114000>]
+[8<0204081020408001>]
+[8<0000103810000000>]
+[8<6699996666999966>]
+[8<0000800100001008>]
+[8<81c36666c3810000>]
+[8<0f0e0c0800000000>]
+[8<0000000000000010>]
+[8<0411040040114000>]
+[8<0204081020408001>]
+[8<0000001038100000>]
+[8<6699996666999966>]
+[8<0000800100001008>]
+[8<81c36666c3810000>]
+[8<0f0e0c0800000000>]
+[8<0042660000246600>]
+[8<0000990000990000>]
+[8<0804020180402010>]
+[8<2418814242811824>]
+[8<6699996666999966>]
+[8<8000000008000000>]
+[8<00001c3e363e1c00>]
+[8<0000000000000000>]
+[32<00000040000000c00000004000000040000000e0000000000000000000000000>]
+[32<00000000000060000000900000002000000040000000f0000000000000000000>]
+[32<000000000000000000e0000000100000006000000010000000e0000000000000>]
+[32<00000000000000002000000060000000a0000000f00000002000000000000000>]
+[32<0000000e0000000000000000000000000000000f000000080000000e00000001>]
+[32<0000090000000600000000000000000000000000000007000000080000000e00>]
+[32<00010000000200000004000000040000000000000000000000000000000f0000>]
+[32<0900000006000000090000000600000000000000000000000000000006000000>]]
+[%ug
+[8<0000020000000000>]
+[8<0000020000002000>]
+[8<0004020000002000>]
+[8<0004020000402000>]
+[8<0004060000402000>]
+[8<0004060000406000>]
+[8<0006060000406000>]
+[8<0006060000606000>]
+[8<00060e0000606000>]
+[8<00060e000060e000>]
+[8<00070e000060e000>]
+[8<00070e000070e000>]
+[8<00070e020070e000>]
+[8<00070e020070e020>]
+[8<04070e020070e020>]
+[8<04070e024070e020>]
+[8<04070e064070e020>]
+[8<04070e064070e060>]
+[8<06070e064070e060>]
+[8<06070e066070e060>]
+[8<06070f066070e060>]
+[8<06070f066070f060>]
+[8<060f0f066070f060>]
+[8<060f0f0660f0f060>]
+[8<060f0f0760f0f060>]
+[8<060f0f0760f0f070>]
+[8<0e0f0f0760f0f070>]
+[8<0e0f0f07e0f0f070>]
+[8<0e0f0f0fe0f0f070>]
+[8<0e0f0f0fe0f0f0f0>]
+[8<0f0f0f0fe0f0f0f0>]
+[8<0f0f0f0ff0f0f0f0>]
+[8<1f0f0f0ff0f0f0f0>]
+[8<1f0f0f0ff1f0f0f0>]
+[8<1f0f0f8ff1f0f0f0>]
+[8<1f0f0f8ff1f0f0f8>]
+[8<9f0f0f8ff1f0f0f8>]
+[8<9f0f0f8ff9f0f0f8>]
+[8<9f0f0f9ff9f0f0f8>]
+[8<9f0f0f9ff9f0f0f9>]
+[8<9f8f0f9ff9f0f0f9>]
+[8<9f8f0f9ff9f8f0f9>]
+[8<9f8f1f9ff9f8f0f9>]
+[8<9f8f1f9ff9f8f1f9>]
+[8<bf8f1f9ff9f8f1f9>]
+[8<bf8f1f9ffbf8f1f9>]
+[8<bf8f1fdffbf8f1f9>]
+[8<bf8f1fdffbf8f1fd>]
+[8<ff8f1fdffbf8f1fd>]
+[8<ff8f1fdffff8f1fd>]
+[8<ff8f1ffffff8f1fd>]
+[8<ff8f1ffffff8f1ff>]
+[8<ff9f1ffffff8f1ff>]
+[8<ff9f1ffffff9f1ff>]
+[8<ff9f9ffffff9f1ff>]
+[8<ff9f9ffffff9f9ff>]
+[8<ffbf9ffffff9f9ff>]
+[8<ffbf9ffffffbf9ff>]
+[8<ffbfdffffffbf9ff>]
+[8<ffbfdffffffbfdff>]
+[8<ffffdffffffbfdff>]
+[8<ffffdffffffffdff>]
+[8<fffffffffffffdff>]
+[8<ffffffffffffffff>]]
+[%mg
+[8<8000000000000000>]
+[8<0822080080228000>]
+[8<0204081020408001>]
+[8<40e0400000000000>]
+[8<66999966>]
+[8<8001000010080000>]
+[8<81c36666c3810000>]
+[8<f0e0c08000000000>]
+[16<07c00f801f003e007c00f800f001e003c007800f001f003e007c00f801f003e0>]
+[16<1f000f8007c003e001f000f8007c003e001f800fc007e003f001f8007c003e00>]
+[8<c3c300000000c3c3>]
+[16<0040008001000200040008001000200040008000000100020004000800100020>]
+[16<0040002000100008000400020001800040002000100008000400020001000080>]
+[16<1fc03fe07df0f8f8f07de03fc01f800fc01fe03ff07df8f87df03fe01fc00f80>]
+[8<80>]
+[8<8040201000000000>]
+[8<84cc000048cc0000>]
+[8<9900009900000000>]
+[8<08040201804020100800020180002010>]
+[8<2418814242811824>]
+[8<66999966>]
+[8<8000000008000000>]
+[8<70f8d8f870000000>]
+[8<0814224180402010>]
+[8<aa00440a11a04400>]
+[8<018245aa45820100>]
+[8<221c224180808041>]
+[8<88000000>]
+[8<0855800080550800>]
+[8<2844004482440044>]
+[8<0810204080412214>]
+[8<00>]]]def
+/Dfill{
+ transform /maxy exch def /maxx exch def
+ transform /miny exch def /minx exch def
+ minx maxx gt{/minx maxx /maxx minx def def}if
+ miny maxy gt{/miny maxy /maxy miny def def}if
+ Dpatterns Dstipple 1 sub get exch 1 sub get
+ aload pop /stip exch def /stipw exch def /stiph 128 def
+ /imatrix[stipw 0 0 stiph 0 0]def
+ /tmatrix[stipw 0 0 stiph 0 0]def
+ /minx minx cvi stiph idiv stiph mul def
+ /miny miny cvi stipw idiv stipw mul def
+ gsave eoclip 0 setgray
+ miny stiph maxy{
+ tmatrix exch 5 exch put
+ minx stipw maxx{
+ tmatrix exch 4 exch put tmatrix setmatrix
+ stipw stiph true imatrix {stip} imagemask
+ }for
+ }for
+ grestore
+}def
+/Dp{Dfill Dstroke}def
+/DP{Dfill currentpoint newpath moveto}def
+end
+
+/ditstart{$DITroff begin
+ /nfonts 60 def % NFONTS makedev/ditroff dependent!
+ /fonts[nfonts{0}repeat]def
+ /fontnames[nfonts{()}repeat]def
+/docsave save def
+}def
+
+% character outcalls
+/oc{
+ /pswid exch def /cc exch def /name exch def
+ /ditwid pswid fontsize mul resolution mul 72000 div def
+ /ditsiz fontsize resolution mul 72 div def
+ ocprocs name known{ocprocs name get exec}{name cb}ifelse
+}def
+/fractm [.65 0 0 .6 0 0] def
+/fraction{
+ /fden exch def /fnum exch def gsave /cf currentfont def
+ cf fractm makefont setfont 0 .3 dm 2 copy neg rmoveto
+ fnum show rmoveto currentfont cf setfont(\244)show setfont fden show
+ grestore ditwid 0 rmoveto
+}def
+/oce{grestore ditwid 0 rmoveto}def
+/dm{ditsiz mul}def
+/ocprocs 50 dict def ocprocs begin
+(14){(1)(4)fraction}def
+(12){(1)(2)fraction}def
+(34){(3)(4)fraction}def
+(13){(1)(3)fraction}def
+(23){(2)(3)fraction}def
+(18){(1)(8)fraction}def
+(38){(3)(8)fraction}def
+(58){(5)(8)fraction}def
+(78){(7)(8)fraction}def
+(sr){gsave 0 .06 dm rmoveto(\326)show oce}def
+(is){gsave 0 .15 dm rmoveto(\362)show oce}def
+(->){gsave 0 .02 dm rmoveto(\256)show oce}def
+(<-){gsave 0 .02 dm rmoveto(\254)show oce}def
+(==){gsave 0 .05 dm rmoveto(\272)show oce}def
+(uc){gsave currentpoint 400 .009 dm mul add translate
+ 8 -8 scale ucseal oce}def
+end
+
+% an attempt at a PostScript FONT to implement ditroff special chars
+% this will enable us to
+% cache the little buggers
+% generate faster, more compact PS out of psdit
+% confuse everyone (including myself)!
+50 dict dup begin
+/FontType 3 def
+/FontName /DIThacks def
+/FontMatrix [.001 0 0 .001 0 0] def
+/FontBBox [-260 -260 900 900] def% a lie but ...
+/Encoding 256 array def
+0 1 255{Encoding exch /.notdef put}for
+Encoding
+ dup 8#040/space put %space
+ dup 8#110/rc put %right ceil
+ dup 8#111/lt put %left top curl
+ dup 8#112/bv put %bold vert
+ dup 8#113/lk put %left mid curl
+ dup 8#114/lb put %left bot curl
+ dup 8#115/rt put %right top curl
+ dup 8#116/rk put %right mid curl
+ dup 8#117/rb put %right bot curl
+ dup 8#120/rf put %right floor
+ dup 8#121/lf put %left floor
+ dup 8#122/lc put %left ceil
+ dup 8#140/sq put %square
+ dup 8#141/bx put %box
+ dup 8#142/ci put %circle
+ dup 8#143/br put %box rule
+ dup 8#144/rn put %root extender
+ dup 8#145/vr put %vertical rule
+ dup 8#146/ob put %outline bullet
+ dup 8#147/bu put %bullet
+ dup 8#150/ru put %rule
+ dup 8#151/ul put %underline
+ pop
+/DITfd 100 dict def
+/BuildChar{0 begin
+ /cc exch def /fd exch def
+ /charname fd /Encoding get cc get def
+ /charwid fd /Metrics get charname get def
+ /charproc fd /CharProcs get charname get def
+ charwid 0 fd /FontBBox get aload pop setcachedevice
+ 2 setlinejoin 40 setlinewidth
+ newpath 0 0 moveto gsave charproc grestore
+ end}def
+/BuildChar load 0 DITfd put
+/CharProcs 50 dict def
+CharProcs begin
+/space{}def
+/.notdef{}def
+/ru{500 0 rls}def
+/rn{0 840 moveto 500 0 rls}def
+/vr{0 800 moveto 0 -770 rls}def
+/bv{0 800 moveto 0 -1000 rls}def
+/br{0 840 moveto 0 -1000 rls}def
+/ul{0 -140 moveto 500 0 rls}def
+/ob{200 250 rmoveto currentpoint newpath 200 0 360 arc closepath stroke}def
+/bu{200 250 rmoveto currentpoint newpath 200 0 360 arc closepath fill}def
+/sq{80 0 rmoveto currentpoint dround newpath moveto
+ 640 0 rlineto 0 640 rlineto -640 0 rlineto closepath stroke}def
+/bx{80 0 rmoveto currentpoint dround newpath moveto
+ 640 0 rlineto 0 640 rlineto -640 0 rlineto closepath fill}def
+/ci{500 360 rmoveto currentpoint newpath 333 0 360 arc
+ 50 setlinewidth stroke}def
+
+/lt{0 -200 moveto 0 550 rlineto currx 800 2cx s4 add exch s4 a4p stroke}def
+/lb{0 800 moveto 0 -550 rlineto currx -200 2cx s4 add exch s4 a4p stroke}def
+/rt{0 -200 moveto 0 550 rlineto currx 800 2cx s4 sub exch s4 a4p stroke}def
+/rb{0 800 moveto 0 -500 rlineto currx -200 2cx s4 sub exch s4 a4p stroke}def
+/lk{0 800 moveto 0 300 -300 300 s4 arcto pop pop 1000 sub
+ 0 300 4 2 roll s4 a4p 0 -200 lineto stroke}def
+/rk{0 800 moveto 0 300 s2 300 s4 arcto pop pop 1000 sub
+ 0 300 4 2 roll s4 a4p 0 -200 lineto stroke}def
+/lf{0 800 moveto 0 -1000 rlineto s4 0 rls}def
+/rf{0 800 moveto 0 -1000 rlineto s4 neg 0 rls}def
+/lc{0 -200 moveto 0 1000 rlineto s4 0 rls}def
+/rc{0 -200 moveto 0 1000 rlineto s4 neg 0 rls}def
+end
+
+/Metrics 50 dict def Metrics begin
+/.notdef 0 def
+/space 500 def
+/ru 500 def
+/br 0 def
+/lt 416 def
+/lb 416 def
+/rt 416 def
+/rb 416 def
+/lk 416 def
+/rk 416 def
+/rc 416 def
+/lc 416 def
+/rf 416 def
+/lf 416 def
+/bv 416 def
+/ob 350 def
+/bu 350 def
+/ci 750 def
+/bx 750 def
+/sq 750 def
+/rn 500 def
+/ul 500 def
+/vr 0 def
+end
+
+DITfd begin
+/s2 500 def /s4 250 def /s3 333 def
+/a4p{arcto pop pop pop pop}def
+/2cx{2 copy exch}def
+/rls{rlineto stroke}def
+/currx{currentpoint pop}def
+/dround{transform round exch round exch itransform} def
+end
+end
+/DIThacks exch definefont pop
+ditstart
+(psc)xT
+576 1 1 xr
+1(Times-Roman)xf 1 f
+2(Times-Italic)xf 2 f
+3(Times-Bold)xf 3 f
+4(Times-BoldItalic)xf 4 f
+5(Helvetica)xf 5 f
+6(Helvetica-Bold)xf 6 f
+7(Courier)xf 7 f
+8(Courier-Bold)xf 8 f
+9(Symbol)xf 9 f
+10(DIThacks)xf 10 f
+10 s
+1 f
+xi
+%%EndProlog
+
+%%Page: 1 1
+10 s 10 xH 0 xS 1 f
+3 f
+14 s
+1205 1206(LIBTP:)N
+1633(Portable,)X
+2100(M)X
+2206(odular)X
+2551(Transactions)X
+3202(for)X
+3374(UNIX)X
+1 f
+11 s
+3661 1162(1)N
+2 f
+12 s
+2182 1398(Margo)N
+2467(Seltzer)X
+2171 1494(Michael)N
+2511(Olson)X
+1800 1590(University)N
+2225(of)X
+2324(California,)X
+2773(Berkeley)X
+3 f
+2277 1878(Abstract)N
+1 f
+10 s
+755 2001(Transactions)N
+1198(provide)X
+1475(a)X
+1543(useful)X
+1771(programming)X
+2239(paradigm)X
+2574(for)X
+2700(maintaining)X
+3114(logical)X
+3364(consistency,)X
+3790(arbitrating)X
+4156(con-)X
+555 2091(current)N
+808(access,)X
+1059(and)X
+1200(managing)X
+1540(recovery.)X
+1886(In)X
+1977(traditional)X
+2330(UNIX)X
+2555(systems,)X
+2852(the)X
+2974(only)X
+3140(easy)X
+3307(way)X
+3465(of)X
+3556(using)X
+3753(transactions)X
+4160(is)X
+4237(to)X
+555 2181(purchase)N
+876(a)X
+947(database)X
+1258(system.)X
+1554(Such)X
+1748(systems)X
+2035(are)X
+2168(often)X
+2367(slow,)X
+2572(costly,)X
+2817(and)X
+2967(may)X
+3139(not)X
+3275(provide)X
+3554(the)X
+3686(exact)X
+3890(functionality)X
+555 2271(desired.)N
+848(This)X
+1011(paper)X
+1210(presents)X
+1493(the)X
+1611(design,)X
+1860(implementation,)X
+2402(and)X
+2538(performance)X
+2965(of)X
+3052(LIBTP,)X
+3314(a)X
+3370(simple,)X
+3623(non-proprietary)X
+4147(tran-)X
+555 2361(saction)N
+809(library)X
+1050(using)X
+1249(the)X
+1373(4.4BSD)X
+1654(database)X
+1957(access)X
+2189(routines)X
+2473(\()X
+3 f
+2500(db)X
+1 f
+2588(\(3\)\).)X
+2775(On)X
+2899(a)X
+2961(conventional)X
+3401(transaction)X
+3779(processing)X
+4148(style)X
+555 2451(benchmark,)N
+959(its)X
+1061(performance)X
+1495(is)X
+1575(approximately)X
+2065(85%)X
+2239(that)X
+2386(of)X
+2480(the)X
+2604(database)X
+2907(access)X
+3139(routines)X
+3423(without)X
+3693(transaction)X
+4071(protec-)X
+555 2541(tion,)N
+725(200%)X
+938(that)X
+1084(of)X
+1177(using)X
+3 f
+1376(fsync)X
+1 f
+1554(\(2\))X
+1674(to)X
+1761(commit)X
+2030(modi\256cations)X
+2490(to)X
+2577(disk,)X
+2755(and)X
+2896(125%)X
+3108(that)X
+3253(of)X
+3345(a)X
+3406(commercial)X
+3810(relational)X
+4138(data-)X
+555 2631(base)N
+718(system.)X
+3 f
+555 2817(1.)N
+655(Introduction)X
+1 f
+755 2940(Transactions)N
+1186(are)X
+1306(used)X
+1474(in)X
+1557(database)X
+1855(systems)X
+2129(to)X
+2212(enable)X
+2443(concurrent)X
+2807(users)X
+2992(to)X
+3074(apply)X
+3272(multi-operation)X
+3790(updates)X
+4055(without)X
+555 3030(violating)N
+863(the)X
+985(integrity)X
+1280(of)X
+1371(the)X
+1493(database.)X
+1814(They)X
+2003(provide)X
+2271(the)X
+2392(properties)X
+2736(of)X
+2826(atomicity,)X
+3171(consistency,)X
+3588(isolation,)X
+3906(and)X
+4045(durabil-)X
+555 3120(ity.)N
+701(By)X
+816(atomicity,)X
+1160(we)X
+1276(mean)X
+1472(that)X
+1614(the)X
+1734(set)X
+1845(of)X
+1934(updates)X
+2200(comprising)X
+2581(a)X
+2638(transaction)X
+3011(must)X
+3187(be)X
+3284(applied)X
+3541(as)X
+3629(a)X
+3686(single)X
+3898(unit;)X
+4085(that)X
+4226(is,)X
+555 3210(they)N
+714(must)X
+890(either)X
+1094(all)X
+1195(be)X
+1292(applied)X
+1549(to)X
+1632(the)X
+1751(database)X
+2049(or)X
+2137(all)X
+2238(be)X
+2335(absent.)X
+2601(Consistency)X
+3013(requires)X
+3293(that)X
+3434(a)X
+3491(transaction)X
+3864(take)X
+4019(the)X
+4138(data-)X
+555 3300(base)N
+725(from)X
+908(one)X
+1051(logically)X
+1358(consistent)X
+1704(state)X
+1877(to)X
+1965(another.)X
+2272(The)X
+2423(property)X
+2721(of)X
+2814(isolation)X
+3115(requires)X
+3400(that)X
+3546(concurrent)X
+3916(transactions)X
+555 3390(yield)N
+750(results)X
+994(which)X
+1225(are)X
+1358(indistinguishable)X
+1938(from)X
+2128(the)X
+2260(results)X
+2503(which)X
+2733(would)X
+2967(be)X
+3077(obtained)X
+3387(by)X
+3501(running)X
+3784(the)X
+3916(transactions)X
+555 3480(sequentially.)N
+1002(Finally,)X
+1268(durability)X
+1599(requires)X
+1878(that)X
+2018(once)X
+2190(transactions)X
+2593(have)X
+2765(been)X
+2937(committed,)X
+3319(their)X
+3486(results)X
+3715(must)X
+3890(be)X
+3986(preserved)X
+555 3570(across)N
+776(system)X
+1018(failures)X
+1279([TPCB90].)X
+755 3693(Although)N
+1080(these)X
+1268(properties)X
+1612(are)X
+1734(most)X
+1912(frequently)X
+2265(discussed)X
+2595(in)X
+2680(the)X
+2801(context)X
+3060(of)X
+3150(databases,)X
+3501(they)X
+3661(are)X
+3782(useful)X
+4000(program-)X
+555 3783(ming)N
+750(paradigms)X
+1114(for)X
+1238(more)X
+1433(general)X
+1700(purpose)X
+1984(applications.)X
+2441(There)X
+2659(are)X
+2788(several)X
+3046(different)X
+3353(situations)X
+3689(where)X
+3916(transactions)X
+555 3873(can)N
+687(be)X
+783(used)X
+950(to)X
+1032(replace)X
+1285(current)X
+1533(ad-hoc)X
+1772(mechanisms.)X
+755 3996(One)N
+910(situation)X
+1206(is)X
+1280(when)X
+1475(multiple)X
+1762(\256les)X
+1916(or)X
+2004(parts)X
+2181(of)X
+2269(\256les)X
+2422(need)X
+2594(to)X
+2676(be)X
+2772(updated)X
+3046(in)X
+3128(an)X
+3224(atomic)X
+3462(fashion.)X
+3758(For)X
+3889(example,)X
+4201(the)X
+555 4086(traditional)N
+907(UNIX)X
+1131(\256le)X
+1256(system)X
+1501(uses)X
+1661(ordering)X
+1955(constraints)X
+2324(to)X
+2408(achieve)X
+2676(recoverability)X
+3144(in)X
+3228(the)X
+3348(face)X
+3505(of)X
+3594(crashes.)X
+3893(When)X
+4107(a)X
+4165(new)X
+555 4176(\256le)N
+678(is)X
+752(created,)X
+1026(its)X
+1122(inode)X
+1321(is)X
+1395(written)X
+1642(to)X
+1724(disk)X
+1877(before)X
+2103(the)X
+2221(new)X
+2375(\256le)X
+2497(is)X
+2570(added)X
+2782(to)X
+2864(the)X
+2982(directory)X
+3292(structure.)X
+3633(This)X
+3795(guarantees)X
+4159(that,)X
+555 4266(if)N
+627(the)X
+748(system)X
+993(crashes)X
+1253(between)X
+1544(the)X
+1665(two)X
+1808(I/O's,)X
+2016(the)X
+2137(directory)X
+2450(does)X
+2620(not)X
+2744(contain)X
+3002(a)X
+3060 0.4531(reference)AX
+3383(to)X
+3467(an)X
+3565(invalid)X
+3809(inode.)X
+4049(In)X
+4138(actu-)X
+555 4356(ality,)N
+741(the)X
+863(desired)X
+1119(effect)X
+1326(is)X
+1402(that)X
+1545(these)X
+1733(two)X
+1876(updates)X
+2144(have)X
+2319(the)X
+2440(transactional)X
+2873(property)X
+3168(of)X
+3258(atomicity)X
+3583(\(either)X
+3816(both)X
+3981(writes)X
+4200(are)X
+555 4446(visible)N
+790(or)X
+879(neither)X
+1124(is\).)X
+1266(Rather)X
+1501(than)X
+1660(building)X
+1947(special)X
+2191(purpose)X
+2466(recovery)X
+2769(mechanisms)X
+3186(into)X
+3331(the)X
+3450(\256le)X
+3573(system)X
+3816(or)X
+3904(related)X
+4144(tools)X
+555 4536(\()N
+2 f
+582(e.g.)X
+3 f
+726(fsck)X
+1 f
+864(\(8\)\),)X
+1033(one)X
+1177(could)X
+1383(use)X
+1518(general)X
+1783(purpose)X
+2064(transaction)X
+2443(recovery)X
+2752(protocols)X
+3077(after)X
+3252(system)X
+3501(failure.)X
+3778(Any)X
+3943(application)X
+555 4626(that)N
+705(needs)X
+918(to)X
+1010(keep)X
+1192(multiple,)X
+1508(related)X
+1757(\256les)X
+1920(\(or)X
+2044(directories\))X
+2440(consistent)X
+2790(should)X
+3032(do)X
+3141(so)X
+3241(using)X
+3443(transactions.)X
+3895(Source)X
+4147(code)X
+555 4716(control)N
+805(systems,)X
+1101(such)X
+1271(as)X
+1361(RCS)X
+1534(and)X
+1673(SCCS,)X
+1910(should)X
+2146(use)X
+2276(transaction)X
+2651(semantics)X
+2990(to)X
+3075(allow)X
+3276(the)X
+3397(``checking)X
+3764(in'')X
+3903(of)X
+3992(groups)X
+4232(of)X
+555 4806(related)N
+801(\256les.)X
+1001(In)X
+1095(this)X
+1237(way,)X
+1418(if)X
+1493(the)X
+1617 0.2841(``check-in'')AX
+2028(fails,)X
+2212(the)X
+2336(transaction)X
+2714(may)X
+2878(be)X
+2980(aborted,)X
+3267(backing)X
+3547(out)X
+3675(the)X
+3799(partial)X
+4030(``check-)X
+555 4896(in'')N
+691(leaving)X
+947(the)X
+1065(source)X
+1295(repository)X
+1640(in)X
+1722(a)X
+1778(consistent)X
+2118(state.)X
+755 5019(A)N
+842(second)X
+1094(situation)X
+1398(where)X
+1624(transactions)X
+2036(can)X
+2177(be)X
+2282(used)X
+2458(to)X
+2549(replace)X
+2811(current)X
+3068(ad-hoc)X
+3316(mechanisms)X
+3741(is)X
+3822(in)X
+3912(applications)X
+555 5109(where)N
+776(concurrent)X
+1144(updates)X
+1413(to)X
+1499(a)X
+1559(shared)X
+1793(\256le)X
+1919(are)X
+2042(desired,)X
+2318(but)X
+2444(there)X
+2629(is)X
+2706(logical)X
+2948(consistency)X
+3345(of)X
+3435(the)X
+3556(data)X
+3713(which)X
+3932(needs)X
+4138(to)X
+4223(be)X
+555 5199(preserved.)N
+928(For)X
+1059(example,)X
+1371(when)X
+1565(the)X
+1683(password)X
+2006(\256le)X
+2128(is)X
+2201(updated,)X
+2495(\256le)X
+2617(locking)X
+2877(is)X
+2950(used)X
+3117(to)X
+3199(disallow)X
+3490(concurrent)X
+3854(access.)X
+4120(Tran-)X
+555 5289(saction)N
+804(semantics)X
+1142(on)X
+1244(the)X
+1364(password)X
+1689(\256les)X
+1844(would)X
+2066(allow)X
+2266(concurrent)X
+2632(updates,)X
+2919(while)X
+3119(preserving)X
+3479(the)X
+3598(logical)X
+3837(consistency)X
+4232(of)X
+555 5379(the)N
+681(password)X
+1012(database.)X
+1357(Similarly,)X
+1702(UNIX)X
+1930(utilities)X
+2196(which)X
+2419(rewrite)X
+2674(\256les)X
+2834(face)X
+2996(a)X
+3059(potential)X
+3366(race)X
+3528(condition)X
+3857(between)X
+4152(their)X
+555 5469(rewriting)N
+871(a)X
+929(\256le)X
+1053(and)X
+1191(another)X
+1453(process)X
+1715(reading)X
+1977(the)X
+2096(\256le.)X
+2259(For)X
+2391(example,)X
+2704(the)X
+2823(compiler)X
+3129(\(more)X
+3342(precisely,)X
+3673(the)X
+3792(assembler\))X
+4161(may)X
+8 s
+10 f
+555 5541(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)N
+5 s
+1 f
+727 5619(1)N
+8 s
+763 5644(To)N
+850(appear)X
+1035(in)X
+1101(the)X
+2 f
+1195(Proceedings)X
+1530(of)X
+1596(the)X
+1690(1992)X
+1834(Winter)X
+2024(Usenix)X
+1 f
+2201(,)X
+2233(San)X
+2345(Francisco,)X
+2625(CA,)X
+2746(January)X
+2960(1992.)X
+
+2 p
+%%Page: 2 2
+8 s 8 xH 0 xS 1 f
+10 s
+3 f
+1 f
+555 630(have)N
+737(to)X
+829(rewrite)X
+1087(a)X
+1152(\256le)X
+1283(to)X
+1374(which)X
+1599(it)X
+1672(has)X
+1808(write)X
+2002(permission)X
+2382(in)X
+2473(a)X
+2538(directory)X
+2857(to)X
+2948(which)X
+3173(it)X
+3246(does)X
+3422(not)X
+3553(have)X
+3734(write)X
+3928(permission.)X
+555 720(While)N
+779(the)X
+904(``.o'')X
+1099(\256le)X
+1228(is)X
+1308(being)X
+1513(written,)X
+1787(another)X
+2055(utility)X
+2272(such)X
+2446(as)X
+3 f
+2540(nm)X
+1 f
+2651(\(1\))X
+2772(or)X
+3 f
+2866(ar)X
+1 f
+2942(\(1\))X
+3063(may)X
+3228(read)X
+3394(the)X
+3519(\256le)X
+3648(and)X
+3791(produce)X
+4077(invalid)X
+555 810(results)N
+790(since)X
+981(the)X
+1105(\256le)X
+1233(has)X
+1366(not)X
+1494(been)X
+1672(completely)X
+2054(written.)X
+2347(Currently,)X
+2700(some)X
+2895(utilities)X
+3160(use)X
+3293(special)X
+3542(purpose)X
+3821(code)X
+3998(to)X
+4085(handle)X
+555 900(such)N
+722(cases)X
+912(while)X
+1110(others)X
+1326(ignore)X
+1551(the)X
+1669(problem)X
+1956(and)X
+2092(force)X
+2278(users)X
+2463(to)X
+2545(live)X
+2685(with)X
+2847(the)X
+2965(consequences.)X
+755 1023(In)N
+845(this)X
+983(paper,)X
+1205(we)X
+1322(present)X
+1577(a)X
+1635(simple)X
+1870(library)X
+2106(which)X
+2324(provides)X
+2622(transaction)X
+2996(semantics)X
+3334(\(atomicity,)X
+3705(consistency,)X
+4121(isola-)X
+555 1113(tion,)N
+720(and)X
+857(durability\).)X
+1236(The)X
+1382(4.4BSD)X
+1658(database)X
+1956(access)X
+2182(methods)X
+2473(have)X
+2645(been)X
+2817(modi\256ed)X
+3121(to)X
+3203(use)X
+3330(this)X
+3465(library,)X
+3719(optionally)X
+4063(provid-)X
+555 1203(ing)N
+682(shared)X
+917(buffer)X
+1139(management)X
+1574(between)X
+1867(applications,)X
+2298(locking,)X
+2582(and)X
+2722(transaction)X
+3098(semantics.)X
+3478(Any)X
+3640(UNIX)X
+3865(program)X
+4161(may)X
+555 1293(transaction)N
+930(protect)X
+1176(its)X
+1274(data)X
+1430(by)X
+1532(requesting)X
+1888(transaction)X
+2262(protection)X
+2609(with)X
+2773(the)X
+3 f
+2893(db)X
+1 f
+2981(\(3\))X
+3097(library)X
+3333(or)X
+3422(by)X
+3524(adding)X
+3764(appropriate)X
+4152(calls)X
+555 1383(to)N
+646(the)X
+773(transaction)X
+1154(manager,)X
+1480(buffer)X
+1706(manager,)X
+2032(lock)X
+2199(manager,)X
+2525(and)X
+2670(log)X
+2801(manager.)X
+3147(The)X
+3301(library)X
+3543(routines)X
+3829(may)X
+3995(be)X
+4099(linked)X
+555 1473(into)N
+708(the)X
+834(host)X
+995(application)X
+1379(and)X
+1523(called)X
+1743(by)X
+1851(subroutine)X
+2217(interface,)X
+2547(or)X
+2642(they)X
+2808(may)X
+2974(reside)X
+3194(in)X
+3284(a)X
+3348(separate)X
+3640(server)X
+3865(process.)X
+4174(The)X
+555 1563(server)N
+772(architecture)X
+1172(provides)X
+1468(for)X
+1582(network)X
+1865(access)X
+2091(and)X
+2227(better)X
+2430(protection)X
+2775(mechanisms.)X
+3 f
+555 1749(2.)N
+655(Related)X
+938(Work)X
+1 f
+755 1872(There)N
+1000(has)X
+1164(been)X
+1373(much)X
+1608(discussion)X
+1998(in)X
+2117(recent)X
+2371(years)X
+2597(about)X
+2831(new)X
+3021(transaction)X
+3429(models)X
+3716(and)X
+3888(architectures)X
+555 1962 0.1172([SPEC88][NODI90][CHEN91][MOHA91].)AN
+2009(Much)X
+2220(of)X
+2310(this)X
+2448(work)X
+2636(focuses)X
+2900(on)X
+3003(new)X
+3160(ways)X
+3348(to)X
+3433(model)X
+3656(transactions)X
+4062(and)X
+4201(the)X
+555 2052(interactions)N
+953(between)X
+1245(them,)X
+1449(while)X
+1651(the)X
+1772(work)X
+1960(presented)X
+2291(here)X
+2453(focuses)X
+2717(on)X
+2820(the)X
+2941(implementation)X
+3466(and)X
+3605(performance)X
+4035(of)X
+4125(tradi-)X
+555 2142(tional)N
+757(transaction)X
+1129(techniques)X
+1492(\(write-ahead)X
+1919(logging)X
+2183(and)X
+2319(two-phase)X
+2669(locking\))X
+2956(on)X
+3056(a)X
+3112(standard)X
+3404(operating)X
+3727(system)X
+3969(\(UNIX\).)X
+755 2265(Such)N
+947(traditional)X
+1308(operating)X
+1643(systems)X
+1928(are)X
+2059(often)X
+2256(criticized)X
+2587(for)X
+2713(their)X
+2892(inability)X
+3190(to)X
+3283(perform)X
+3573(transaction)X
+3956(processing)X
+555 2355(adequately.)N
+971([STON81])X
+1342(cites)X
+1517(three)X
+1706(main)X
+1894(areas)X
+2088(of)X
+2183(inadequate)X
+2559(support:)X
+2849(buffer)X
+3074(management,)X
+3532(the)X
+3658(\256le)X
+3788(system,)X
+4058(and)X
+4201(the)X
+555 2445(process)N
+823(structure.)X
+1191(These)X
+1410(arguments)X
+1771(are)X
+1897(summarized)X
+2316(in)X
+2405(table)X
+2587(one.)X
+2769(Fortunately,)X
+3184(much)X
+3388(has)X
+3521(changed)X
+3815(since)X
+4006(1981.)X
+4232(In)X
+555 2535(the)N
+683(area)X
+848(of)X
+945(buffer)X
+1172(management,)X
+1632(most)X
+1817(UNIX)X
+2048(systems)X
+2331(provide)X
+2606(the)X
+2734(ability)X
+2968(to)X
+3060(memory)X
+3357(map)X
+3525(\256les,)X
+3708(thus)X
+3870(obviating)X
+4201(the)X
+555 2625(need)N
+734(for)X
+855(a)X
+918(copy)X
+1101(between)X
+1396(kernel)X
+1624(and)X
+1766(user)X
+1926(space.)X
+2171(If)X
+2251(a)X
+2313(database)X
+2616(system)X
+2864(is)X
+2943(going)X
+3151(to)X
+3239(use)X
+3372(the)X
+3496(\256le)X
+3624(system)X
+3872(buffer)X
+4095(cache,)X
+555 2715(then)N
+719(a)X
+781(system)X
+1029(call)X
+1171(is)X
+1250(required.)X
+1584(However,)X
+1924(if)X
+1998(buffering)X
+2322(is)X
+2400(provided)X
+2710(at)X
+2793(user)X
+2952(level)X
+3133(using)X
+3331(shared)X
+3566(memory,)X
+3878(as)X
+3970(in)X
+4057(LIBTP,)X
+555 2805(buffer)N
+776(management)X
+1210(is)X
+1287(only)X
+1452(as)X
+1542(slow)X
+1716(as)X
+1806(access)X
+2035(to)X
+2120(shared)X
+2353(memory)X
+2643(and)X
+2782(any)X
+2921(replacement)X
+3337(algorithm)X
+3671(may)X
+3832(be)X
+3931(used.)X
+4121(Since)X
+555 2895(multiple)N
+849(processes)X
+1185(can)X
+1325(access)X
+1559(the)X
+1685(shared)X
+1923(data,)X
+2105(prefetching)X
+2499(may)X
+2665(be)X
+2769(accomplished)X
+3238(by)X
+3346(separate)X
+3638(processes)X
+3973(or)X
+4067(threads)X
+555 2985(whose)N
+782(sole)X
+932(purpose)X
+1207(is)X
+1281(to)X
+1364(prefetch)X
+1649(pages)X
+1853(and)X
+1990(wait)X
+2149(on)X
+2250(them.)X
+2471(There)X
+2680(is)X
+2754(still)X
+2894(no)X
+2995(way)X
+3150(to)X
+3233(enforce)X
+3496(write)X
+3682(ordering)X
+3975(other)X
+4161(than)X
+555 3075(keeping)N
+829(pages)X
+1032(in)X
+1114(user)X
+1268(memory)X
+1555(and)X
+1691(using)X
+1884(the)X
+3 f
+2002(fsync)X
+1 f
+2180(\(3\))X
+2294(system)X
+2536(call)X
+2672(to)X
+2754(perform)X
+3033(synchronous)X
+3458(writes.)X
+755 3198(In)N
+845(the)X
+966(area)X
+1124(of)X
+1214(\256le)X
+1339(systems,)X
+1635(the)X
+1756(fast)X
+1895(\256le)X
+2020(system)X
+2265(\(FFS\))X
+2474([MCKU84])X
+2871(allows)X
+3103(allocation)X
+3442(in)X
+3527(units)X
+3704(up)X
+3806(to)X
+3890(64KBytes)X
+4232(as)X
+555 3288(opposed)N
+846(to)X
+932(the)X
+1054(4KByte)X
+1327(and)X
+1466(8KByte)X
+1738(\256gures)X
+1979(quoted)X
+2220(in)X
+2305([STON81].)X
+2711(The)X
+2859(measurements)X
+3341(in)X
+3426(this)X
+3564(paper)X
+3766(were)X
+3946(taken)X
+4143(from)X
+555 3378(an)N
+655(8KByte)X
+928(FFS,)X
+1104(but)X
+1230(as)X
+1320(LIBTP)X
+1565(runs)X
+1726(exclusively)X
+2114(in)X
+2199(user)X
+2356(space,)X
+2578(there)X
+2762(is)X
+2838(nothing)X
+3105(to)X
+3190(prevent)X
+3454(it)X
+3521(from)X
+3700(being)X
+3901(run)X
+4031(on)X
+4134(other)X
+555 3468(UNIX)N
+776(compatible)X
+1152(\256le)X
+1274(systems)X
+1547(\(e.g.)X
+1710(log-structured)X
+2180([ROSE91],)X
+2558(extent-based,)X
+3004(or)X
+3091(multi-block)X
+3484([SELT91]\).)X
+755 3591(Finally,)N
+1029(with)X
+1199(regard)X
+1433(to)X
+1523(the)X
+1648(process)X
+1916(structure,)X
+2244(neither)X
+2494(context)X
+2757(switch)X
+2993(time)X
+3162(nor)X
+3296(scheduling)X
+3670(around)X
+3920(semaphores)X
+555 3681(seems)N
+785(to)X
+881(affect)X
+1099(the)X
+1231(system)X
+1487(performance.)X
+1968(However,)X
+2317(the)X
+2449(implementation)X
+2984(of)X
+3084(semaphores)X
+3496(can)X
+3641(impact)X
+3892(performance)X
+555 3771(tremendously.)N
+1051(This)X
+1213(is)X
+1286(discussed)X
+1613(in)X
+1695(more)X
+1880(detail)X
+2078(in)X
+2160(section)X
+2407(4.3.)X
+755 3894(The)N
+908(Tuxedo)X
+1181(system)X
+1431(from)X
+1615(AT&T)X
+1861(is)X
+1941(a)X
+2004(transaction)X
+2383(manager)X
+2687(which)X
+2910(coordinates)X
+3307(distributed)X
+3676(transaction)X
+4055(commit)X
+555 3984(from)N
+738(a)X
+801(variety)X
+1051(of)X
+1145(different)X
+1449(local)X
+1632(transaction)X
+2011(managers.)X
+2386(At)X
+2493(this)X
+2634(time,)X
+2822(LIBTP)X
+3070(does)X
+3243(not)X
+3371(have)X
+3549(its)X
+3650(own)X
+3814(mechanism)X
+4205(for)X
+555 4074(distributed)N
+942(commit)X
+1231(processing,)X
+1639(but)X
+1786(could)X
+2009(be)X
+2130(used)X
+2322(as)X
+2434(a)X
+2515(local)X
+2716(transaction)X
+3113(agent)X
+3331(by)X
+3455(systems)X
+3752(such)X
+3943(as)X
+4054(Tuxedo)X
+555 4164([ANDR89].)N
+10 f
+863 4393(i)N
+870(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+903 4483(Buffer)N
+1133(Management)X
+10 f
+1672(g)X
+1 f
+1720(Data)X
+1892(must)X
+2067(be)X
+2163(copied)X
+2397(between)X
+2685(kernel)X
+2906(space)X
+3105(and)X
+3241(user)X
+3395(space.)X
+10 f
+1672 4573(g)N
+1 f
+1720(Buffer)X
+1950(pool)X
+2112(access)X
+2338(is)X
+2411(too)X
+2533(slow.)X
+10 f
+1672 4663(g)N
+1 f
+1720(There)X
+1928(is)X
+2001(no)X
+2101(way)X
+2255(to)X
+2337(request)X
+2589(prefetch.)X
+10 f
+1672 4753(g)N
+1 f
+1720(Replacement)X
+2159(is)X
+2232(usually)X
+2483(LRU)X
+2663(which)X
+2879(may)X
+3037(be)X
+3133(suboptimal)X
+3508(for)X
+3622(databases.)X
+10 f
+1672 4843(g)N
+1 f
+1720(There)X
+1928(is)X
+2001(no)X
+2101(way)X
+2255(to)X
+2337(guarantee)X
+2670(write)X
+2855(ordering.)X
+10 f
+863 4853(i)N
+870(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+903 4943(File)N
+1047(System)X
+10 f
+1672(g)X
+1 f
+1720(Allocation)X
+2078(is)X
+2151(done)X
+2327(in)X
+2409(small)X
+2602(blocks)X
+2831(\(usually)X
+3109(4K)X
+3227(or)X
+3314(8K\).)X
+10 f
+1672 5033(g)N
+1 f
+1720(Logical)X
+1985(organization)X
+2406(of)X
+2493(\256les)X
+2646(is)X
+2719(redundantly)X
+3122(expressed.)X
+10 f
+863 5043(i)N
+870(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+903 5133(Process)N
+1168(Structure)X
+10 f
+1672(g)X
+1 f
+1720(Context)X
+1993(switching)X
+2324(and)X
+2460(message)X
+2752(passing)X
+3012(are)X
+3131(too)X
+3253(slow.)X
+10 f
+1672 5223(g)N
+1 f
+1720(A)X
+1798(process)X
+2059(may)X
+2217(be)X
+2313(descheduled)X
+2730(while)X
+2928(holding)X
+3192(a)X
+3248(semaphore.)X
+10 f
+863 5233(i)N
+870(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+863(c)X
+5193(c)Y
+5113(c)Y
+5033(c)Y
+4953(c)Y
+4873(c)Y
+4793(c)Y
+4713(c)Y
+4633(c)Y
+4553(c)Y
+4473(c)Y
+3990 5233(c)N
+5193(c)Y
+5113(c)Y
+5033(c)Y
+4953(c)Y
+4873(c)Y
+4793(c)Y
+4713(c)Y
+4633(c)Y
+4553(c)Y
+4473(c)Y
+3 f
+1156 5446(Table)N
+1371(One:)X
+1560(Shortcomings)X
+2051(of)X
+2138(UNIX)X
+2363(transaction)X
+2770(support)X
+3056(cited)X
+3241(in)X
+3327([STON81].)X
+
+3 p
+%%Page: 3 3
+10 s 10 xH 0 xS 3 f
+1 f
+755 630(The)N
+901(transaction)X
+1274(architecture)X
+1675(presented)X
+2004(in)X
+2087([YOUN91])X
+2474(is)X
+2548(very)X
+2712(similar)X
+2955(to)X
+3038(that)X
+3179(implemented)X
+3618(in)X
+3701(the)X
+3820(LIBTP.)X
+4103(While)X
+555 720([YOUN91])N
+947(presents)X
+1236(a)X
+1298(model)X
+1524(for)X
+1644(providing)X
+1981(transaction)X
+2359(services,)X
+2663(this)X
+2803(paper)X
+3007(focuses)X
+3273(on)X
+3378(the)X
+3501(implementation)X
+4028(and)X
+4169(per-)X
+555 810(formance)N
+881(of)X
+970(a)X
+1028(particular)X
+1358(system.)X
+1642(In)X
+1731(addition,)X
+2034(we)X
+2149(provide)X
+2415(detailed)X
+2690(comparisons)X
+3116(with)X
+3279(alternative)X
+3639(solutions:)X
+3970(traditional)X
+555 900(UNIX)N
+776(services)X
+1055(and)X
+1191(commercial)X
+1590(database)X
+1887(management)X
+2317(systems.)X
+3 f
+555 1086(3.)N
+655(Architecture)X
+1 f
+755 1209(The)N
+906(library)X
+1146(is)X
+1224(designed)X
+1534(to)X
+1621(provide)X
+1891(well)X
+2054(de\256ned)X
+2315(interfaces)X
+2653(to)X
+2740(the)X
+2863(services)X
+3147(required)X
+3440(for)X
+3559(transaction)X
+3936(processing.)X
+555 1299(These)N
+777(services)X
+1066(are)X
+1195(recovery,)X
+1527(concurrency)X
+1955(control,)X
+2232(and)X
+2378(the)X
+2506(management)X
+2946(of)X
+3043(shared)X
+3283(data.)X
+3487(First)X
+3663(we)X
+3787(will)X
+3941(discuss)X
+4201(the)X
+555 1389(design)N
+795(tradeoffs)X
+1112(in)X
+1205(the)X
+1334(selection)X
+1650(of)X
+1748(recovery,)X
+2081(concurrency)X
+2510(control,)X
+2787(and)X
+2933(buffer)X
+3160(management)X
+3600(implementations,)X
+4183(and)X
+555 1479(then)N
+713(we)X
+827(will)X
+971(present)X
+1223(the)X
+1341(overall)X
+1584(library)X
+1818(architecture)X
+2218(and)X
+2354(module)X
+2614(descriptions.)X
+3 f
+555 1665(3.1.)N
+715(Design)X
+966(Tradeoffs)X
+1 f
+3 f
+555 1851(3.1.1.)N
+775(Crash)X
+1004(Recovery)X
+1 f
+755 1974(The)N
+909(recovery)X
+1220(protocol)X
+1516(is)X
+1598(responsible)X
+1992(for)X
+2115(providing)X
+2455(the)X
+2582(transaction)X
+2963(semantics)X
+3308(discussed)X
+3644(earlier.)X
+3919(There)X
+4136(are)X
+4263(a)X
+555 2064(wide)N
+739(range)X
+946(of)X
+1041(recovery)X
+1351(protocols)X
+1677(available)X
+1995([HAER83],)X
+2395(but)X
+2525(we)X
+2647(can)X
+2786(crudely)X
+3054(divide)X
+3281(them)X
+3468(into)X
+3619(two)X
+3766(main)X
+3953(categories.)X
+555 2154(The)N
+706(\256rst)X
+856(category)X
+1159(records)X
+1422(all)X
+1528(modi\256cations)X
+1989(to)X
+2077(the)X
+2201(database)X
+2504(in)X
+2592(a)X
+2653(separate)X
+2942(\256le,)X
+3089(and)X
+3230(uses)X
+3393(this)X
+3533(\256le)X
+3660(\(log\))X
+3841(to)X
+3928(back)X
+4105(out)X
+4232(or)X
+555 2244(reapply)N
+825(these)X
+1019(modi\256cations)X
+1483(if)X
+1561(a)X
+1626(transaction)X
+2007(aborts)X
+2232(or)X
+2328(the)X
+2455(system)X
+2706(crashes.)X
+3012(We)X
+3153(call)X
+3298(this)X
+3442(set)X
+3560(the)X
+3 f
+3687(logging)X
+3963(protocols)X
+1 f
+4279(.)X
+555 2334(The)N
+703(second)X
+949(category)X
+1249(avoids)X
+1481(the)X
+1602(use)X
+1732(of)X
+1822(a)X
+1881(log)X
+2006(by)X
+2109(carefully)X
+2418(controlling)X
+2792(when)X
+2989(data)X
+3146(are)X
+3268(written)X
+3518(to)X
+3603(disk.)X
+3799(We)X
+3934(call)X
+4073(this)X
+4210(set)X
+555 2424(the)N
+3 f
+673(non-logging)X
+1096(protocols)X
+1 f
+1412(.)X
+755 2547(Non-logging)N
+1185(protocols)X
+1504(hold)X
+1666(dirty)X
+1837(buffers)X
+2085(in)X
+2167(main)X
+2347(memory)X
+2634(or)X
+2721(temporary)X
+3071(\256les)X
+3224(until)X
+3390(commit)X
+3654(and)X
+3790(then)X
+3948(force)X
+4134(these)X
+555 2637(pages)N
+769(to)X
+862(disk)X
+1026(at)X
+1115(transaction)X
+1498(commit.)X
+1813(While)X
+2040(we)X
+2165(can)X
+2308(use)X
+2446(temporary)X
+2807(\256les)X
+2971(to)X
+3064(hold)X
+3237(dirty)X
+3418(pages)X
+3631(that)X
+3781(may)X
+3949(need)X
+4131(to)X
+4223(be)X
+555 2727(evicted)N
+810(from)X
+988(memory)X
+1277(during)X
+1508(a)X
+1566(long-running)X
+2006(transaction,)X
+2400(the)X
+2520(only)X
+2684(user-level)X
+3023(mechanism)X
+3410(to)X
+3494(force)X
+3682(pages)X
+3887(to)X
+3971(disk)X
+4126(is)X
+4201(the)X
+3 f
+555 2817(fsync)N
+1 f
+733(\(2\))X
+850(system)X
+1095(call.)X
+1274(Unfortunately,)X
+3 f
+1767(fsync)X
+1 f
+1945(\(2\))X
+2062(is)X
+2138(an)X
+2237(expensive)X
+2581(system)X
+2826(call)X
+2965(in)X
+3050(that)X
+3193(it)X
+3260(forces)X
+3480(all)X
+3583(pages)X
+3789(of)X
+3879(a)X
+3938(\256le)X
+4062(to)X
+4146(disk,)X
+555 2907(and)N
+691(transactions)X
+1094(that)X
+1234(manage)X
+1504(more)X
+1689(than)X
+1847(one)X
+1983(\256le)X
+2105(must)X
+2280(issue)X
+2460(one)X
+2596(call)X
+2732(per)X
+2855(\256le.)X
+755 3030(In)N
+853(addition,)X
+3 f
+1166(fsync)X
+1 f
+1344(\(2\))X
+1469(provides)X
+1776(no)X
+1887(way)X
+2051(to)X
+2143(control)X
+2400(the)X
+2528(order)X
+2728(in)X
+2820(which)X
+3046(dirty)X
+3227(pages)X
+3440(are)X
+3569(written)X
+3826(to)X
+3918(disk.)X
+4121(Since)X
+555 3120(non-logging)N
+976(protocols)X
+1304(must)X
+1489(sometimes)X
+1861(order)X
+2061(writes)X
+2287(carefully)X
+2603([SULL92],)X
+2987(they)X
+3155(are)X
+3284(dif\256cult)X
+3567(to)X
+3659(implement)X
+4030(on)X
+4139(Unix)X
+555 3210(systems.)N
+868(As)X
+977(a)X
+1033(result,)X
+1251(we)X
+1365(have)X
+1537(chosen)X
+1780(to)X
+1862(implement)X
+2224(a)X
+2280(logging)X
+2544(protocol.)X
+755 3333(Logging)N
+1050(protocols)X
+1372(may)X
+1534(be)X
+1634(categorized)X
+2029(based)X
+2236(on)X
+2340(how)X
+2502(information)X
+2904(is)X
+2981(logged)X
+3223(\(physically)X
+3602(or)X
+3692(logically\))X
+4022(and)X
+4161(how)X
+555 3423(much)N
+767(is)X
+854(logged)X
+1106(\(before)X
+1373(images,)X
+1654(after)X
+1836(images)X
+2097(or)X
+2198(both\).)X
+2441(In)X
+3 f
+2542(physical)X
+2855(logging)X
+1 f
+3103(,)X
+3157(images)X
+3417(of)X
+3517(complete)X
+3844(physical)X
+4144(units)X
+555 3513(\(pages)N
+786(or)X
+874(buffers\))X
+1150(are)X
+1270(recorded,)X
+1593(while)X
+1792(in)X
+3 f
+1875(logical)X
+2118(logging)X
+1 f
+2387(a)X
+2444(description)X
+2820(of)X
+2907(the)X
+3025(operation)X
+3348(is)X
+3421(recorded.)X
+3763(Therefore,)X
+4121(while)X
+555 3603(we)N
+675(may)X
+839(record)X
+1071(entire)X
+1280(pages)X
+1489(in)X
+1577(a)X
+1639(physical)X
+1932(log,)X
+2080(we)X
+2200(need)X
+2378(only)X
+2546(record)X
+2777(the)X
+2900(records)X
+3162(being)X
+3365(modi\256ed)X
+3674(in)X
+3761(a)X
+3822(logical)X
+4065(log.)X
+4232(In)X
+555 3693(fact,)N
+718(physical)X
+1006(logging)X
+1271(can)X
+1404(be)X
+1501(thought)X
+1766(of)X
+1854(as)X
+1942(a)X
+1999(special)X
+2243(case)X
+2403(of)X
+2491(logical)X
+2730(logging,)X
+3015(since)X
+3201(the)X
+3320 0.3125(``records'')AX
+3686(that)X
+3827(we)X
+3942(log)X
+4065(in)X
+4148(logi-)X
+555 3783(cal)N
+673(logging)X
+941(might)X
+1151(be)X
+1251(physical)X
+1542(pages.)X
+1789(Since)X
+1991(logical)X
+2233(logging)X
+2501(is)X
+2578(both)X
+2743(more)X
+2931(space-ef\256cient)X
+3423(and)X
+3562(more)X
+3750(general,)X
+4030(we)X
+4147(have)X
+555 3873(chosen)N
+798(it)X
+862(for)X
+976(our)X
+1103(logging)X
+1367(protocol.)X
+755 3996(In)N
+3 f
+843(before-image)X
+1315(logging)X
+1 f
+1563(,)X
+1604(we)X
+1719(log)X
+1842(a)X
+1899(copy)X
+2076(of)X
+2164(the)X
+2283(data)X
+2438(before)X
+2665(the)X
+2784(update,)X
+3039(while)X
+3238(in)X
+3 f
+3321(after-image)X
+3739(logging)X
+1 f
+3987(,)X
+4027(we)X
+4141(log)X
+4263(a)X
+555 4086(copy)N
+740(of)X
+836(the)X
+963(data)X
+1126(after)X
+1303(the)X
+1429(update.)X
+1711(If)X
+1793(we)X
+1915(log)X
+2045(only)X
+2215(before-images,)X
+2723(then)X
+2889(there)X
+3078(is)X
+3159(suf\256cient)X
+3485(information)X
+3891(in)X
+3981(the)X
+4107(log)X
+4237(to)X
+555 4176(allow)N
+761(us)X
+860(to)X
+3 f
+950(undo)X
+1 f
+1150(the)X
+1276(transaction)X
+1656(\(go)X
+1791(back)X
+1971(to)X
+2061(the)X
+2187(state)X
+2361(represented)X
+2759(by)X
+2866(the)X
+2991(before-image\).)X
+3514(However,)X
+3876(if)X
+3952(the)X
+4077(system)X
+555 4266(crashes)N
+814(and)X
+952(a)X
+1010(committed)X
+1374(transaction's)X
+1806(changes)X
+2087(have)X
+2261(not)X
+2385(reached)X
+2658(the)X
+2778(disk,)X
+2953(we)X
+3068(have)X
+3241(no)X
+3342(means)X
+3568(to)X
+3 f
+3651(redo)X
+1 f
+3828(the)X
+3947(transaction)X
+555 4356(\(reapply)N
+849(the)X
+973(updates\).)X
+1311(Therefore,)X
+1675(logging)X
+1945(only)X
+2113(before-images)X
+2599(necessitates)X
+3004(forcing)X
+3262(dirty)X
+3439(pages)X
+3648(at)X
+3732(commit)X
+4002(time.)X
+4210(As)X
+555 4446(mentioned)N
+913(above,)X
+1145(forcing)X
+1397(pages)X
+1600(at)X
+1678(commit)X
+1942(is)X
+2015(considered)X
+2383(too)X
+2505(costly.)X
+755 4569(If)N
+834(we)X
+953(log)X
+1080(only)X
+1247(after-images,)X
+1694(then)X
+1857(there)X
+2043(is)X
+2121(suf\256cient)X
+2444(information)X
+2847(in)X
+2934(the)X
+3057(log)X
+3184(to)X
+3271(allow)X
+3474(us)X
+3570(to)X
+3657(redo)X
+3825(the)X
+3947(transaction)X
+555 4659(\(go)N
+687(forward)X
+967(to)X
+1054(the)X
+1177(state)X
+1348(represented)X
+1743(by)X
+1847(the)X
+1969(after-image\),)X
+2411(but)X
+2537(we)X
+2655(do)X
+2759(not)X
+2885(have)X
+3061(the)X
+3183(information)X
+3585(required)X
+3877(to)X
+3963(undo)X
+4147(tran-)X
+555 4749(sactions)N
+845(which)X
+1073(aborted)X
+1346(after)X
+1526(dirty)X
+1709(pages)X
+1924(were)X
+2113(written)X
+2372(to)X
+2466(disk.)X
+2670(Therefore,)X
+3039(logging)X
+3314(only)X
+3487(after-images)X
+3920(necessitates)X
+555 4839(holding)N
+819(all)X
+919(dirty)X
+1090(buffers)X
+1338(in)X
+1420(main)X
+1600(memory)X
+1887(until)X
+2053(commit)X
+2317(or)X
+2404(writing)X
+2655(them)X
+2835(to)X
+2917(a)X
+2973(temporary)X
+3323(\256le.)X
+755 4962(Since)N
+956(neither)X
+1202(constraint)X
+1541(\(forcing)X
+1823(pages)X
+2029(on)X
+2132(commit)X
+2399(or)X
+2489(buffering)X
+2811(pages)X
+3016(until)X
+3184(commit\))X
+3477(was)X
+3624(feasible,)X
+3916(we)X
+4032(chose)X
+4237(to)X
+555 5052(log)N
+683(both)X
+851(before)X
+1083(and)X
+1225(after)X
+1399(images.)X
+1672(The)X
+1823(only)X
+1991(remaining)X
+2342(consideration)X
+2800(is)X
+2879(when)X
+3079(changes)X
+3363(get)X
+3486(written)X
+3738(to)X
+3825(disk.)X
+4023(Changes)X
+555 5142(affect)N
+764(both)X
+931(data)X
+1090(pages)X
+1298(and)X
+1438(the)X
+1560(log.)X
+1726(If)X
+1804(the)X
+1926(changed)X
+2218(data)X
+2376(page)X
+2552(is)X
+2629(written)X
+2880(before)X
+3110(the)X
+3232(log)X
+3358(page,)X
+3554(and)X
+3694(the)X
+3816(system)X
+4062(crashes)X
+555 5232(before)N
+787(the)X
+911(log)X
+1039(page)X
+1217(is)X
+1296(written,)X
+1569(the)X
+1693(log)X
+1820(will)X
+1969(contain)X
+2230(insuf\256cient)X
+2615(information)X
+3018(to)X
+3105(undo)X
+3290(the)X
+3413(change.)X
+3706(This)X
+3873(violates)X
+4147(tran-)X
+555 5322(saction)N
+803(semantics,)X
+1160(since)X
+1346(some)X
+1536(changed)X
+1825(data)X
+1980(pages)X
+2184(may)X
+2343(not)X
+2466(have)X
+2638(been)X
+2810(written,)X
+3077(and)X
+3213(the)X
+3331(database)X
+3628(cannot)X
+3862(be)X
+3958(restored)X
+4237(to)X
+555 5412(its)N
+650(pre-transaction)X
+1152(state.)X
+755 5535(The)N
+914(log)X
+1050(record)X
+1290(describing)X
+1658(an)X
+1768(update)X
+2016(must)X
+2205(be)X
+2315(written)X
+2576(to)X
+2672(stable)X
+2893(storage)X
+3159(before)X
+3398(the)X
+3529(modi\256ed)X
+3846(page.)X
+4071(This)X
+4246(is)X
+3 f
+555 5625(write-ahead)N
+992(logging)X
+1 f
+1240(.)X
+1307(If)X
+1388(log)X
+1517(records)X
+1781(are)X
+1907(safely)X
+2126(written)X
+2380(to)X
+2469(disk,)X
+2649(data)X
+2810(pages)X
+3020(may)X
+3185(be)X
+3288(written)X
+3542(at)X
+3627(any)X
+3770(time)X
+3939(afterwards.)X
+555 5715(This)N
+721(means)X
+950(that)X
+1094(the)X
+1216(only)X
+1382(\256le)X
+1508(that)X
+1652(ever)X
+1815(needs)X
+2022(to)X
+2108(be)X
+2208(forced)X
+2438(to)X
+2524(disk)X
+2681(is)X
+2758(the)X
+2880(log.)X
+3046(Since)X
+3248(the)X
+3370(log)X
+3495(is)X
+3571(append-only,)X
+4015(modi\256ed)X
+
+4 p
+%%Page: 4 4
+10 s 10 xH 0 xS 1 f
+3 f
+1 f
+555 630(pages)N
+760(always)X
+1005(appear)X
+1242(at)X
+1322(the)X
+1442(end)X
+1580(and)X
+1718(may)X
+1878(be)X
+1976(written)X
+2224(to)X
+2307(disk)X
+2461(ef\256ciently)X
+2807(in)X
+2890(any)X
+3027(\256le)X
+3150(system)X
+3393(that)X
+3534(favors)X
+3756(sequential)X
+4102(order-)X
+555 720(ing)N
+677(\()X
+2 f
+704(e.g.)X
+1 f
+820(,)X
+860(FFS,)X
+1032(log-structured)X
+1502(\256le)X
+1624(system,)X
+1886(or)X
+1973(an)X
+2069(extent-based)X
+2495(system\).)X
+3 f
+555 906(3.1.2.)N
+775(Concurrency)X
+1245(Control)X
+1 f
+755 1029(The)N
+918(concurrency)X
+1354(control)X
+1619(protocol)X
+1923(is)X
+2013(responsible)X
+2415(for)X
+2546(maintaining)X
+2965(consistency)X
+3376(in)X
+3475(the)X
+3610(presence)X
+3929(of)X
+4033(multiple)X
+555 1119(accesses.)N
+897(There)X
+1114(are)X
+1242(several)X
+1499(alternative)X
+1867(solutions)X
+2183(such)X
+2358(as)X
+2453(locking,)X
+2741(optimistic)X
+3088(concurrency)X
+3514(control)X
+3769([KUNG81],)X
+4183(and)X
+555 1209(timestamp)N
+912(ordering)X
+1208([BERN80].)X
+1619(Since)X
+1821(optimistic)X
+2164(methods)X
+2459(and)X
+2599(timestamp)X
+2956(ordering)X
+3252(are)X
+3374(generally)X
+3696(more)X
+3884(complex)X
+4183(and)X
+555 1299(restrict)N
+804(concurrency)X
+1228(without)X
+1498(eliminating)X
+1888(starvation)X
+2230(or)X
+2323(deadlocks,)X
+2690(we)X
+2810(chose)X
+3018(two-phase)X
+3373(locking)X
+3638(\(2PL\).)X
+3890(Strict)X
+4088(2PL)X
+4246(is)X
+555 1389(suboptimal)N
+935(for)X
+1054(certain)X
+1297(data)X
+1455(structures)X
+1791(such)X
+1962(as)X
+2053(B-trees)X
+2309(because)X
+2588(it)X
+2656(can)X
+2792(limit)X
+2966(concurrency,)X
+3408(so)X
+3503(we)X
+3621(use)X
+3752(a)X
+3812(special)X
+4059(locking)X
+555 1479(protocol)N
+842(based)X
+1045(on)X
+1145(one)X
+1281(described)X
+1609(in)X
+1691([LEHM81].)X
+755 1602(The)N
+901(B-tree)X
+1123(locking)X
+1384(protocol)X
+1672(we)X
+1787(implemented)X
+2226(releases)X
+2502(locks)X
+2691(at)X
+2769(internal)X
+3034(nodes)X
+3241(in)X
+3323(the)X
+3441(tree)X
+3582(as)X
+3669(it)X
+3733(descends.)X
+4083(A)X
+4161(lock)X
+555 1692(on)N
+658(an)X
+757(internal)X
+1025(page)X
+1200(is)X
+1276(always)X
+1522(released)X
+1808(before)X
+2036(a)X
+2094(lock)X
+2254(on)X
+2356(its)X
+2453(child)X
+2635(is)X
+2710(obtained)X
+3008(\(that)X
+3177(is,)X
+3272(locks)X
+3463(are)X
+3584(not)X
+3 f
+3708(coupled)X
+1 f
+3996([BAY77])X
+555 1782(during)N
+786(descent\).)X
+1116(When)X
+1330(a)X
+1388(leaf)X
+1531(\(or)X
+1647(internal\))X
+1941(page)X
+2115(is)X
+2190(split,)X
+2369(a)X
+2427(write)X
+2614(lock)X
+2774(is)X
+2849(acquired)X
+3148(on)X
+3250(the)X
+3370(parent)X
+3593(before)X
+3821(the)X
+3941(lock)X
+4100(on)X
+4201(the)X
+555 1872(just-split)N
+855(page)X
+1028(is)X
+1102(released)X
+1387(\(locks)X
+1604(are)X
+3 f
+1724(coupled)X
+1 f
+2011(during)X
+2241(ascent\).)X
+2530(Write)X
+2734(locks)X
+2924(on)X
+3025(internal)X
+3291(pages)X
+3495(are)X
+3615(released)X
+3899(immediately)X
+555 1962(after)N
+723(the)X
+841(page)X
+1013(is)X
+1086(updated,)X
+1380(but)X
+1502(locks)X
+1691(on)X
+1791(leaf)X
+1932(pages)X
+2135(are)X
+2254(held)X
+2412(until)X
+2578(the)X
+2696(end)X
+2832(of)X
+2919(the)X
+3037(transaction.)X
+755 2085(Since)N
+964(locks)X
+1164(are)X
+1294(released)X
+1589(during)X
+1828(descent,)X
+2119(the)X
+2247(structure)X
+2558(of)X
+2655(the)X
+2783(tree)X
+2934(may)X
+3102(change)X
+3360(above)X
+3582(a)X
+3648(node)X
+3834(being)X
+4042(used)X
+4219(by)X
+555 2175(some)N
+752(process.)X
+1061(If)X
+1143(that)X
+1291(process)X
+1560(must)X
+1743(later)X
+1914(ascend)X
+2161(the)X
+2287(tree)X
+2435(because)X
+2717(of)X
+2811(a)X
+2874(page)X
+3053(split,)X
+3237(any)X
+3380(such)X
+3554(change)X
+3809(must)X
+3991(not)X
+4120(cause)X
+555 2265(confusion.)N
+938(We)X
+1077(use)X
+1211(the)X
+1336(technique)X
+1675(described)X
+2010(in)X
+2099([LEHM81])X
+2487(which)X
+2710(exploits)X
+2989(the)X
+3113(ordering)X
+3411(of)X
+3504(data)X
+3664(on)X
+3770(a)X
+3832(B-tree)X
+4059(page)X
+4237(to)X
+555 2355(guarantee)N
+888(that)X
+1028(no)X
+1128(process)X
+1389(ever)X
+1548(gets)X
+1697(lost)X
+1832(as)X
+1919(a)X
+1975(result)X
+2173(of)X
+2260(internal)X
+2525(page)X
+2697(updates)X
+2962(made)X
+3156(by)X
+3256(other)X
+3441(processes.)X
+755 2478(If)N
+836(a)X
+899(transaction)X
+1278(that)X
+1425(updates)X
+1697(a)X
+1760(B-tree)X
+1988(aborts,)X
+2231(the)X
+2356(user-visible)X
+2757(changes)X
+3043(to)X
+3131(the)X
+3255(tree)X
+3402(must)X
+3583(be)X
+3685(rolled)X
+3898(back.)X
+4116(How-)X
+555 2568(ever,)N
+735(changes)X
+1015(to)X
+1097(the)X
+1215(internal)X
+1480(nodes)X
+1687(of)X
+1774(the)X
+1892(tree)X
+2033(need)X
+2205(not)X
+2327(be)X
+2423(rolled)X
+2630(back,)X
+2822(since)X
+3007(these)X
+3192(pages)X
+3395(contain)X
+3651(no)X
+3751(user-visible)X
+4145(data.)X
+555 2658(When)N
+771(rolling)X
+1008(back)X
+1184(a)X
+1244(transaction,)X
+1640(we)X
+1758(roll)X
+1893(back)X
+2069(all)X
+2173(leaf)X
+2318(page)X
+2494(updates,)X
+2783(but)X
+2909(no)X
+3013(internal)X
+3281(insertions)X
+3615(or)X
+3705(page)X
+3880(splits.)X
+4111(In)X
+4201(the)X
+555 2748(worst)N
+759(case,)X
+944(this)X
+1085(will)X
+1235(leave)X
+1431(a)X
+1493(leaf)X
+1640(page)X
+1818(less)X
+1964(than)X
+2128(half)X
+2279(full.)X
+2456(This)X
+2624(may)X
+2788(cause)X
+2993(poor)X
+3166(space)X
+3371(utilization,)X
+3741(but)X
+3869(does)X
+4042(not)X
+4170(lose)X
+555 2838(user)N
+709(data.)X
+755 2961(Holding)N
+1038(locks)X
+1228(on)X
+1329(leaf)X
+1471(pages)X
+1675(until)X
+1842(transaction)X
+2215(commit)X
+2480(guarantees)X
+2845(that)X
+2986(no)X
+3087(other)X
+3273(process)X
+3535(can)X
+3668(insert)X
+3866(or)X
+3953(delete)X
+4165(data)X
+555 3051(that)N
+711(has)X
+854(been)X
+1042(touched)X
+1332(by)X
+1448(this)X
+1598(process.)X
+1914(Rolling)X
+2188(back)X
+2375(insertions)X
+2721(and)X
+2872(deletions)X
+3196(on)X
+3311(leaf)X
+3467(pages)X
+3685(guarantees)X
+4064(that)X
+4219(no)X
+555 3141(aborted)N
+819(updates)X
+1087(are)X
+1209(ever)X
+1371(visible)X
+1607(to)X
+1692(other)X
+1880(transactions.)X
+2326(Leaving)X
+2612(page)X
+2787(splits)X
+2978(intact)X
+3179(permits)X
+3442(us)X
+3536(to)X
+3621(release)X
+3867(internal)X
+4134(write)X
+555 3231(locks)N
+744(early.)X
+965(Thus)X
+1145(transaction)X
+1517(semantics)X
+1853(are)X
+1972(preserved,)X
+2325(and)X
+2461(locks)X
+2650(are)X
+2769(held)X
+2927(for)X
+3041(shorter)X
+3284(periods.)X
+755 3354(The)N
+901(extra)X
+1083(complexity)X
+1464(introduced)X
+1828(by)X
+1929(this)X
+2065(locking)X
+2326(protocol)X
+2614(appears)X
+2881(substantial,)X
+3264(but)X
+3387(it)X
+3452(is)X
+3525(important)X
+3856(for)X
+3970(multi-user)X
+555 3444(execution.)N
+950(The)X
+1118(bene\256ts)X
+1410(of)X
+1520(non-two-phase)X
+2040(locking)X
+2323(on)X
+2446(B-trees)X
+2721(are)X
+2863(well)X
+3044(established)X
+3443(in)X
+3548(the)X
+3689(database)X
+4009(literature)X
+555 3534([BAY77],)N
+899([LEHM81].)X
+1320(If)X
+1394(a)X
+1450(process)X
+1711(held)X
+1869(locks)X
+2058(until)X
+2224(it)X
+2288(committed,)X
+2670(then)X
+2828(a)X
+2884(long-running)X
+3322(update)X
+3556(could)X
+3754(lock)X
+3912(out)X
+4034(all)X
+4134(other)X
+555 3624(transactions)N
+967(by)X
+1076(preventing)X
+1448(any)X
+1593(other)X
+1787(process)X
+2057(from)X
+2241(locking)X
+2509(the)X
+2635(root)X
+2792(page)X
+2972(of)X
+3067(the)X
+3193(tree.)X
+3382(The)X
+3535(B-tree)X
+3764(locking)X
+4032(protocol)X
+555 3714(described)N
+884(above)X
+1096(guarantees)X
+1460(that)X
+1600(locks)X
+1789(on)X
+1889(internal)X
+2154(pages)X
+2357(are)X
+2476(held)X
+2634(for)X
+2748(extremely)X
+3089(short)X
+3269(periods,)X
+3545(thereby)X
+3806(increasing)X
+4156(con-)X
+555 3804(currency.)N
+3 f
+555 3990(3.1.3.)N
+775(Management)X
+1245(of)X
+1332(Shared)X
+1596(Data)X
+1 f
Database systems permit many users to examine and update the same data concurrently. In order to provide this concurrent access and enforce the write-ahead logging protocol described in section 3.1.1, we use a shared memory buffer manager. Not only does this provide the guarantees we require, but a user-level buffer manager is frequently faster than using the file system buffer cache. Reads or writes involving the file system buffer cache often require copying data between user and kernel space while a user-level buffer manager can return pointers to data pages directly. Additionally, if more than one process uses the same page, then fewer copies may be required.

3.2. Module Architecture

The preceding sections described modules for managing the transaction log, locks, and a cache of shared buffers. In addition, we need to provide functionality for transaction begin, commit, and abort processing, necessitating a transaction manager. In order to arbitrate concurrent access to locks and buffers, we include a process management module which manages a collection of semaphores used to block and release processes. Finally, in order to provide a simple, standard interface we have modified the database access routines (db(3)). For the purposes of this paper we call the modified package the Record Manager. Figure one shows the main interfaces and architecture of LIBTP.

[Figure 1: Library module interfaces. The diagram places the Txn Manager and Record Manager above the Lock Manager, Log Manager, and Buffer Manager, with the Process Manager below them; the labeled calls between modules include log, log_commit, log_unroll, lock, unlock, unlock_all, buf_get, buf_pin, buf_unpin, sleep_on, and wake.]

3.2.1. The Log Manager

The Log Manager enforces the write-ahead logging protocol. Its primitive operations are log, log_commit, log_read, log_roll and log_unroll. The log call performs a buffered write of the specified log record and returns a unique log sequence number (LSN). This LSN may then be used to retrieve a record from the log using the log_read call. The log interface knows very little about the internal format of the log records it receives. Rather, all log records are referenced by a header structure, a log record type, and a character buffer containing the data to be logged. The log record type is used to call the appropriate redo and undo routines during abort and commit processing. While we have used the Log Manager to provide before and after image logging, it may also be used for any of the logging algorithms discussed.
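
As a concrete illustration of the header structure and LSN mentioned above, a log record might be laid out roughly as in the following sketch; the type and field names here (LSN, LOG_HDR, prev_lsn, and so on) are assumptions for illustration, not LIBTP's actual declarations.

    #include <sys/types.h>

    /* Hypothetical log sequence number: the record's offset in the log. */
    typedef u_long LSN;

    /* Hypothetical log record header.  Each record carries the type used
     * to select its undo/redo routines, a back pointer to the owning
     * transaction's previous record, and the length of the opaque data
     * (the character buffer) that follows the header in the log. */
    typedef struct {
            LSN     lsn;            /* LSN assigned to this record */
            LSN     prev_lsn;       /* previous record of the same txn */
            u_long  txn_id;         /* owning transaction */
            u_int   type;           /* log record type, e.g. BTREE_INSERT */
            u_int   len;            /* bytes of data following the header */
    } LOG_HDR;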
The log_commit operation behaves exactly like the log operation but guarantees that the log has been forced to disk before returning. A discussion of our commit strategy appears in the implementation section (section 4.2). Log_unroll reads log records from the log, following backward transaction pointers and calling the appropriate undo routines to implement transaction abort. In a similar manner, log_roll reads log records sequentially forward, calling the appropriate redo routines to recover committed transactions after a system crash.
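
A minimal sketch of the backward traversal performed during abort, reusing the hypothetical LOG_HDR above; log_read(), the undo_func table, NULL_LSN and MAX_LOG_DATA are assumed names rather than LIBTP's real interface, and a real routine would also handle I/O errors.

    #define NULL_LSN        0
    #define MAX_LOG_DATA    4096

    extern int log_read(LSN lsn, LOG_HDR *hdr, char *data);
    extern void (*undo_func[])(LOG_HDR *hdr, char *data);

    /* Undo one transaction by following its backward log pointers from
     * its most recent record back to its first. */
    void
    txn_undo(LSN last_lsn)
    {
            LOG_HDR hdr;
            char data[MAX_LOG_DATA];
            LSN lsn;

            for (lsn = last_lsn; lsn != NULL_LSN; lsn = hdr.prev_lsn) {
                    log_read(lsn, &hdr, data);              /* header + data */
                    (*undo_func[hdr.type])(&hdr, data);     /* type-specific undo */
            }
    }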

3.2.2. The Buffer Manager

The Buffer Manager uses a pool of shared memory to provide a least-recently-used (LRU) block cache. Although the current library provides an LRU cache, it would be simple to add alternate replacement policies as suggested by [CHOU85] or to provide multiple buffer pools with different policies. Transactions request pages from the buffer manager and keep them pinned to ensure that they are not written to disk while they are in a logically inconsistent state. When page replacement is necessary, the Buffer Manager finds an unpinned page and then checks with the Log Manager to ensure that the write-ahead protocol is enforced.
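
The write-ahead check made at replacement time can be pictured as below; the BUF fields and the log calls are hypothetical names standing in for what the text describes, and LSN is the hypothetical type from the Log Manager sketch.

    typedef struct {
            int     pinned;         /* pin count; pinned pages stay in memory */
            int     dirty;          /* modified since it was last written */
            LSN     page_lsn;       /* LSN of the newest change to this page */
    } BUF;

    extern LSN log_flushed_lsn(void);       /* highest LSN already on disk */
    extern void log_flush(LSN lsn);         /* force the log through this LSN */
    extern void write_page(BUF *bp);        /* write the data page itself */

    /* Evict one buffer, enforcing write-ahead logging. */
    int
    buf_evict(BUF *bp)
    {
            if (bp->pinned)
                    return (-1);            /* never evict a pinned page */
            if (bp->dirty) {
                    /* The log record describing the newest change to this
                     * page must reach disk before the page does. */
                    if (bp->page_lsn > log_flushed_lsn())
                            log_flush(bp->page_lsn);
                    write_page(bp);
            }
            return (0);
    }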

3.2.3. The Lock Manager

The Lock Manager supports general purpose locking (single writer, multiple readers) which is currently used to provide two-phase locking and high concurrency B-tree locking. However, the general purpose nature of the lock manager provides the ability to support a variety of locking protocols. Currently, all locks are issued at the granularity of a page (the size of a buffer in the buffer pool) which is identified by two 4-byte integers (a file id and page number). This provides the necessary information to extend the Lock Manager to perform hierarchical locking [GRAY76]. The current implementation does not support locks at other granularities and does not promote locks; these are obvious future additions to the system.
If an incoming lock request cannot be granted, the requesting process is queued for the lock and descheduled. When a lock is released, the wait queue is traversed and any newly compatible locks are granted. Locks are located via a file and page hash table and are chained both by object and by transaction, facilitating rapid traversal of the lock table during transaction commit and abort.
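
A lock table entry chained both ways might look like this sketch; the field names and the hash function are illustrative assumptions, with only the (file id, page number) object naming and the two chains taken from the description above.

    #include <sys/types.h>

    struct lock {
            u_long          fileid;         /* object: file id ... */
            u_long          pageno;         /* ... and page number */
            u_long          txn_id;         /* holding (or waiting) transaction */
            int             mode;           /* e.g. READ_LOCK or WRITE_LOCK */
            int             granted;        /* granted, or still on the wait queue */
            struct lock     *obj_next;      /* chain of locks on this object */
            struct lock     *txn_next;      /* chain of locks of this transaction */
    };

    /* Locks are located by hashing the file and page identifiers. */
    #define LOCK_HASH(fileid, pageno, tblsize) \
            ((((fileid) << 8) ^ (pageno)) % (tblsize))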
The primary interfaces to the lock manager are lock, unlock, and lock_unlock_all. Lock obtains a new lock for a specific object. There are also two variants of the lock request, lock_upgrade and lock_downgrade, which allow the caller to atomically trade a lock of one type for a lock of another. Unlock releases a specific mode of lock on a specific object. Lock_unlock_all releases all the locks associated with a specific transaction.

3.2.4. The Process Manager

The Process Manager acts as a user-level scheduler to make processes wait on unavailable locks and pending buffer cache I/O. For each process, a semaphore is maintained upon which that process waits when it needs to be descheduled. When a process needs to be run, its semaphore is cleared, and the operating system reschedules it. No sophisticated scheduling algorithm is applied; if the lock for which a process was waiting becomes available, the process is made runnable. It would have been possible to change the kernel's process scheduler to interact more efficiently with the lock manager, but doing so would have compromised our commitment to a user-level package.
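
Using System V semaphores (the mechanism section 4.3 says the Ultrix implementation relies on), the per-process sleep and wake operations could be sketched as follows; this is an assumption-laden illustration that presumes each process's semaphore is set to one before it decides to block, so that waiting for zero corresponds to waiting until the semaphore is cleared.

    #include <sys/types.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>

    /* Block the calling process until its semaphore is cleared to zero. */
    void
    proc_sleep_on(int semid, int semnum)
    {
            struct sembuf op;

            op.sem_num = (unsigned short)semnum;
            op.sem_op = 0;                  /* wait for the value to reach 0 */
            op.sem_flg = 0;
            (void)semop(semid, &op, 1);
    }

    /* Clear another process's semaphore, making that process runnable. */
    void
    proc_wake(int semid, int semnum)
    {
            (void)semctl(semid, semnum, SETVAL, 0);
    }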

3.2.5. The Transaction Manager

The Transaction Manager provides the standard interface of txn_begin, txn_commit, and txn_abort. It keeps track of all active transactions, assigns unique transaction identifiers, and directs the abort and commit processing. When a txn_begin is issued, the Transaction Manager assigns the next available transaction identifier, allocates a per-process transaction structure in shared memory, increments the count of active transactions, and returns the new transaction identifier to the calling process. The in-memory transaction structure contains a pointer into the lock table for locks held by this transaction, the last log sequence number, a transaction state (idle, running, aborting, or committing), an error code, and a semaphore identifier.
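
The per-process transaction structure just described might be declared roughly as below, reusing the hypothetical LSN and struct lock from the earlier sketches; the names are illustrative, while the fields mirror the list above.

    typedef enum { TXN_IDLE, TXN_RUNNING, TXN_ABORTING, TXN_COMMITTING } txn_state;

    typedef struct {
            u_long          txn_id;         /* unique transaction identifier */
            struct lock     *locks;         /* pointer into the lock table */
            LSN             last_lsn;       /* last log sequence number written */
            txn_state       state;          /* idle, running, aborting, committing */
            int             error;          /* error code, if any */
            int             semid;          /* semaphore this process sleeps on */
    } TXN;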
At commit, the Transaction Manager calls log_commit to record the end of transaction and to flush the log. Then it directs the Lock Manager to release all locks associated with the given transaction. If a transaction aborts, the Transaction Manager calls on log_unroll to read the transaction's log records and undo any modifications to the database. As in the commit case, it then calls lock_unlock_all to release the transaction's locks.
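
The commit and abort paths reduce to the call ordering described above; in this sketch only that ordering comes from the text, while the argument lists of the helper routines are guesses.

    extern void log_commit(u_long txn_id);          /* end record + log flush */
    extern void log_unroll(u_long txn_id, LSN lsn); /* undo from this LSN back */
    extern void lock_unlock_all(u_long txn_id);     /* drop all of a txn's locks */

    int
    txn_commit(TXN *t)
    {
            t->state = TXN_COMMITTING;
            log_commit(t->txn_id);          /* record end of txn, force the log */
            lock_unlock_all(t->txn_id);     /* then release every lock it holds */
            t->state = TXN_IDLE;
            return (0);
    }

    int
    txn_abort(TXN *t)
    {
            t->state = TXN_ABORTING;
            log_unroll(t->txn_id, t->last_lsn);     /* undo the txn's changes */
            lock_unlock_all(t->txn_id);             /* then release its locks */
            t->state = TXN_IDLE;
            return (0);
    }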

3.2.6. The Record Manager

The Record Manager supports the abstraction of reading and writing records to a database. We have modified the database access routines db(3) [BSD91] to call the log, lock, and buffer managers. In order to provide functionality to perform undo and redo, the Record Manager defines a collection of log record types and the associated undo and redo routines. The Log Manager performs a table lookup on the record type to call the appropriate routines. For example, the B-tree access method requires two log record types: insert and delete. A replace operation is implemented as a delete followed by an insert and is logged accordingly.
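
The table lookup described above can be pictured as an array indexed by log record type; the dispatch structure and the B-tree routine names are hypothetical, with only BTREE_INSERT-style record type names appearing later in the paper's own fragments.

    typedef void (*logproc)(LOG_HDR *hdr, char *data);

    extern void bt_insert_undo(LOG_HDR *, char *);
    extern void bt_insert_redo(LOG_HDR *, char *);
    extern void bt_delete_undo(LOG_HDR *, char *);
    extern void bt_delete_redo(LOG_HDR *, char *);

    enum { BTREE_INSERT, BTREE_DELETE };            /* record types for B-trees */

    static struct {
            logproc undo;           /* called while rolling back (abort) */
            logproc redo;           /* called while rolling forward (recovery) */
    } dispatch[] = {
            { bt_insert_undo, bt_insert_redo },     /* BTREE_INSERT */
            { bt_delete_undo, bt_delete_redo },     /* BTREE_DELETE */
    };

    /* Undoing a record is then a single table lookup on its type. */
    #define LOG_UNDO(hdr, data)     (*dispatch[(hdr)->type].undo)(hdr, data)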

3.3. Application Architectures

The structure of LIBTP allows application designers to trade off performance and protection. Since a large portion of LIBTP's functionality is provided by managing structures in shared memory, its structures are subject to corruption by applications when the library is linked directly with the application. For this reason, LIBTP is designed to allow compilation into a separate server process which may be accessed via a socket interface. In this way LIBTP's data structures are protected from application code, but communication overhead is increased. When applications are trusted, LIBTP may be compiled directly into the application providing improved performance. Figures two and three show the two alternate application architectures.

[Figure 2: Server Architecture. In this configuration, the library is loaded into a server process which is accessed via a socket interface.]

[Figure 3: Single Process Architecture. In this configuration, the library routines are loaded as part of the application and accessed via a subroutine interface. (The diagrams show an Application Program issuing txn_begin, txn_commit, txn_abort, and db_ops to a Server Process containing a driver and LIBTP over a socket interface, versus a Single Process containing the application and LIBTP directly.)]

There are potentially two modes in which one might use LIBTP in a server based architecture. In the first, the server would provide the capability to respond to requests to each of the low level modules (lock, log, buffer, and transaction managers). Unfortunately, the performance of such a system is likely to be blindingly slow, since modifying a piece of data would require three or possibly four separate communications: one to lock the data, one to obtain the data, one to log the modification, and possibly one to transmit the modified data. Figure four shows the relative performance for retrieving a single record using the record level call versus using the lower level buffer management and locking calls. The 2:1 ratio observed in the single process case reflects the additional overhead of parsing eight commands rather than one while the 3:1 ratio observed in the client/server architecture reflects both the parsing and the communication overhead. Although there may be applications which could tolerate such performance, it seems far more feasible to support a higher level interface, such as that provided by a query language (e.g. SQL [SQL86]).
Although LIBTP does not have an SQL parser, we have built a server application using the toolkit command language (TCL) [OUST90]. The server supports a command line interface similar to the subroutine interface defined in db(3). Since it is based on TCL, it provides control structures as well.

4. Implementation

4.1. Locking and Deadlock Detection

LIBTP uses two-phase locking for user data. Strictly speaking, the two phases in two-phase locking are a grow phase, during which locks are acquired, and a shrink phase, during which locks are released. No lock may ever be acquired during the shrink phase. The grow phase lasts until the first release, which marks the start of the shrink phase. In practice, the grow phase lasts for the duration of a transaction in LIBTP and in commercial database systems. The shrink phase takes place during transaction commit or abort. This means that locks are acquired on demand during the lifetime of a transaction, and held until commit time, at which point all locks are released.
If multiple transactions are active concurrently, deadlocks can occur and must be detected and resolved. The lock table can be thought of as a representation of a directed graph. The nodes in the graph are transactions. Edges represent the waits-for relation between transactions; if transaction A is waiting for a lock held by transaction B, then a directed edge exists from A to B in the graph. A deadlock exists if a cycle appears in the graph. By convention, no transaction ever waits for a lock it already holds, so reflexive edges are impossible.
A distinguished process monitors the lock table, searching for cycles. The frequency with which this process runs is user-settable; for the multi-user tests discussed in section 5.1.2, it has been set to wake up every second, but more sophisticated schedules are certainly possible. When a cycle is detected, one of the transactions in the cycle is nominated and aborted. When the transaction aborts, it rolls back its changes and releases its locks, thereby breaking the cycle in the graph.
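
A minimal cycle check over the waits-for graph might look like the following sketch. It assumes the detector has snapshotted the lock table into an array where waits_for[i] is the single transaction that transaction i is blocked on (or -1 if it is not waiting); with shared locks a transaction can actually wait on several holders at once, so this is a simplification of the real problem, not LIBTP's detector.

    #define NO_EDGE (-1)

    /* Return the index of a transaction on some cycle, or -1 if none.
     * With at most one outgoing edge per node, the usual two-pointer
     * walk finds any cycle reachable from each starting point. */
    int
    find_deadlock(const int *waits_for, int ntxns)
    {
            int i, slow, fast;

            for (i = 0; i < ntxns; i++) {
                    slow = fast = i;
                    while (fast != NO_EDGE && waits_for[fast] != NO_EDGE) {
                            slow = waits_for[slow];                 /* one step */
                            fast = waits_for[waits_for[fast]];      /* two steps */
                            if (slow == fast)
                                    return (slow);  /* on a cycle: victim candidate */
                    }
            }
            return (-1);
    }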

[Figure 4: Comparison of High and Low Level Interfaces. Elapsed time in seconds to perform a single record retrieval from a command line (rather than a procedural interface) is shown on the y axis. The ``component'' numbers reflect the timings when the record is retrieved by separate calls to the lock manager and buffer manager while the ``record'' timings were obtained by using a single call to the record manager. The 2:1 ratio observed for the single process case is a reflection of the parsing overhead for executing eight separate commands rather than one. The additional factor of one reflected in the 3:1 ratio for the client/server architecture is due to the communication overhead. The true ratios are actually worse since the component timings do not reflect the search times within each page or the time required to transmit the page between the two processes. (Bar chart: elapsed time from .1 to .6 seconds for Single Process and Client/Server, each broken into ``components'' and ``record'' bars.)]

4.2. Group Commit

Since the log must be flushed to disk at commit time, disk bandwidth fundamentally limits the rate at which transactions complete. Since most transactions write only a few small records to the log, the last page of the log will be flushed once by every transaction which writes to it. In the naive implementation, these flushes would happen serially.
LIBTP uses group commit [DEWI84] in order to amortize the cost of one synchronous disk write across multiple transactions. Group commit provides a way for a group of transactions to commit simultaneously. The first several transactions to commit write their changes to the in-memory log page, then sleep on a distinguished semaphore. Later, a committing transaction flushes the page to disk, and wakes up all its sleeping peers. The point at which changes are actually written is determined by three thresholds. The first is the group threshold and defines the minimum number of transactions which must be active in the system before transactions are forced to participate in a group commit. The second is the wait threshold which is expressed as the percentage of active transactions waiting to be committed. The last is the logdelay threshold which indicates how much unflushed log should be allowed to accumulate before a waiting transaction's commit record is flushed.
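
The three thresholds combine into a simple decision at commit time, roughly as sketched here; the structure, variable names, and the exact comparisons are assumptions, since the text defines only what each threshold means.

    struct gc_state {
            int     active;         /* transactions currently active */
            int     waiting;        /* transactions waiting to commit */
            int     group_thresh;   /* min active txns before grouping applies */
            int     wait_thresh;    /* percent of active txns that must wait */
            int     logdelay_max;   /* max unflushed log allowed to accumulate */
            int     unflushed;      /* unflushed log accumulated so far */
    };

    /* Return nonzero when the committing transaction should flush the log
     * itself rather than sleep and let a later committer flush for it. */
    int
    gc_should_flush(const struct gc_state *g)
    {
            if (g->active < g->group_thresh)
                    return (1);     /* too few transactions to bother grouping */
            if (g->waiting * 100 >= g->active * g->wait_thresh)
                    return (1);     /* enough of the group has gathered */
            if (g->unflushed >= g->logdelay_max)
                    return (1);     /* don't let unflushed log pile up further */
            return (0);             /* sleep on the group commit semaphore */
    }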
Group commit can substantially improve performance for high-concurrency environments. If only a few transactions are running, it is unlikely to improve things at all. The crossover point is the point at which the transaction commit rate is limited by the bandwidth of the device on which the log resides. If processes are trying to flush the log faster than the log disk can accept data, then group commit will increase the commit rate.

4.3. Kernel Intervention for Synchronization

Since LIBTP uses data in shared memory (e.g. the lock table and buffer pool) it must be possible for a process to acquire exclusive access to shared data in order to prevent corruption. In addition, the process manager must put processes to sleep when the lock or buffer they request is in use by some other process. In the LIBTP implementation under Ultrix 4.0 [2], we use System V semaphores to provide this synchronization. Semaphores implemented in this fashion turn out to be an expensive choice for synchronization, because each access traps to the kernel and executes atomically there.

[2] Ultrix and DEC are trademarks of Digital Equipment Corporation.
On architectures that support atomic test-and-set, a much better choice would be to attempt to obtain a spinlock with a test-and-set, and issue a system call only if the spinlock is unavailable. Since virtually all semaphores in LIBTP are uncontested and are held for very short periods of time, this would improve performance. For example, processes must acquire exclusive access to buffer pool metadata in order to find and pin a buffer in shared memory. This semaphore is requested most frequently in LIBTP. However, once it is acquired, only a few instructions must be executed before it is released. On one architecture for which we were able to gather detailed profiling information, the cost of the semaphore calls accounted for 25% of the total time spent updating the metadata. This was fairly consistent across most of the critical sections.
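
The spin-then-block acquisition suggested above might be sketched as follows; test_and_set() stands in for a machine-dependent atomic primitive and kernel_wait()/kernel_wakeup() for the system-call fallback, so none of these names are real LIBTP or Ultrix interfaces.

    #define SPIN_TRIES      100             /* brief spin before giving up */

    extern int test_and_set(volatile int *latchp);   /* returns previous value */
    extern void kernel_wait(volatile int *latchp);   /* block; returns holding latch */
    extern void kernel_wakeup(volatile int *latchp); /* wake a blocked waiter */

    void
    latch_acquire(volatile int *latchp)
    {
            int i;

            for (i = 0; i < SPIN_TRIES; i++)
                    if (test_and_set(latchp) == 0)
                            return;         /* uncontested: no system call made */
            kernel_wait(latchp);            /* rare contested case: trap to kernel */
    }

    void
    latch_release(volatile int *latchp)
    {
            *latchp = 0;
            kernel_wakeup(latchp);          /* a real latch would only do this
                                             * when a waiter is recorded */
    }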
In an attempt to quantify the overhead of kernel synchronization, we ran tests on a version of 4.3BSD-Reno which had been modified to support binary semaphore facilities similar to those described in [POSIX91]. The hardware platform consisted of an HP300 (33MHz MC68030) workstation with 16MBytes of main memory, and a 600MByte HP7959 SCSI disk (17 ms average seek time). We ran three sets of comparisons which are summarized in figure five. In each comparison we ran two tests, one using hardware spinlocks and the other using kernel call synchronization. Since the test was run single-user, none of the locks were contested. In the first two sets of tests, we ran the full transaction processing benchmark described in section 5.1. In one case we ran with both the database and log on the same disk (1 Disk) and in the second, we ran with the database and log on separate disks (2 Disk). In the last test, we wanted to create a CPU bound environment, so we used a database small enough to fit completely in the cache and issued read-only transactions. The results in figure five express the kernel call synchronization performance as a percentage of the spinlock performance. For example, in the 1 disk case, the kernel call implementation achieved 4.4 TPS (transactions per second) while the spinlock implementation achieved 4.6 TPS, and the relative performance of the kernel synchronization is 96% that of the spinlock (100 * 4.4 / 4.6). There are two striking observations from these results:

- even when the system is disk bound, the CPU cost of synchronization is noticeable, and
- when we are CPU bound, the difference is dramatic (67%).

4.4. Transaction Protected Access Methods

[Figure 5: Kernel Overhead for System Call Synchronization. The performance of the kernel call synchronization is expressed as a percentage of the spinlock synchronization performance. In disk bound cases (1 Disk and 2 Disks), we see that 4-6% of the performance is lost due to kernel calls while in the CPU bound case, we have lost 67% of the performance due to kernel calls. (Bar chart: throughput as a % of spinlock throughput, 20-100, for 1 Disk, 2 Disks, and Read-only.)]

The B-tree and fixed length recno (record number) access methods have been modified to provide transaction protection. Whereas the previously published interface to the access routines had separate open calls for each of the access methods, we now have an integrated open call with the following calling conventions:

    DB *dbopen (const char *file, int flags, int mode, DBTYPE type,
        int dbflags, const void *openinfo)
where file is the name of the file being opened, flags and mode are the standard arguments to open(2), type is one of the access method types, dbflags indicates the mode of the buffer pool and transaction protection, and openinfo is the access method specific information. Currently, the possible values for dbflags are DB_SHARED and DB_TP indicating that buffers should be kept in a shared buffer pool and that the file should be transaction protected.
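
For example, a transaction-protected B-tree might be opened as in the sketch below; the helper name and file name are placeholders, and combining DB_SHARED and DB_TP with a bitwise OR is an assumption about how dbflags is interpreted.

    #include <fcntl.h>
    #include <stddef.h>
    /* assumes the db(3) header declaring DB, DBTYPE, dbopen(), DB_SHARED, DB_TP */

    DB *
    open_protected(const char *name)
    {
            /* B-tree, shared buffer pool, transaction protected; default
             * access-method-specific openinfo. */
            return (dbopen(name, O_CREAT | O_RDWR, 0600, DB_BTREE,
                DB_SHARED | DB_TP, NULL));
    }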
The modifications required to add transaction protection to an access method are quite simple and localized.

1. Replace file open with buf_open.
2. Replace file read and write calls with buffer manager calls (buf_get, buf_unpin).
3. Precede buffer manager calls with an appropriate (read or write) lock call.
4. Before updates, issue a logging operation.
5. After data have been accessed, release the buffer manager pin.
6. Provide undo/redo code for each type of log record defined.

The following code fragments show how to transaction protect several updates to a B-tree. [3] In the unprotected case, an open call is followed by a read call to obtain the meta-data for the B-tree. Instead, we issue an open to the buffer manager to obtain a file id and a buffer request to obtain the meta-data as shown below.

    char *path;
    int fid, flags, len, mode;

    /* Obtain a file id with which to access the buffer pool */
    fid = buf_open(path, flags, mode);

    /* Read the meta data (page 0) for the B-tree */
    if (tp_lock(fid, 0, READ_LOCK))
            return error;
    meta_data_ptr = buf_get(fid, 0, BF_PIN, &len);

The BF_PIN argument to buf_get indicates that we wish to leave this page pinned in memory so that it is not swapped out while we are accessing it. The last argument to buf_get returns the number of bytes on the page that were valid so that the access method may initialize the page if necessary.
Next, consider inserting a record on a particular page of a B-tree. In the unprotected case, we read the page, call _bt_insertat, and write the page. Instead, we lock the page, request the buffer, log the change, modify the page, and release the buffer.

    int fid, len, pageno;       /* Identifies the buffer */
    int index;                  /* Location at which to insert the new pair */
    DBT *keyp, *datap;          /* Key/Data pair to be inserted */
    DATUM *d;                   /* Key/data structure to insert */

    /* Lock and request the buffer */
    if (tp_lock(fid, pageno, WRITE_LOCK))
            return error;
    buffer_ptr = buf_get(fid, pageno, BF_PIN, &len);

    /* Log and perform the update */
    log_insdel(BTREE_INSERT, fid, pageno, keyp, datap);
    _bt_insertat(buffer_ptr, d, index);
    buf_unpin(buffer_ptr);

Succinctly, the algorithm for turning unprotected code into protected code is to replace read operations with lock and buf_get operations and write operations with log and buf_unpin operations.

[3] The following code fragments are examples, but do not define the final interface. The final interface will be determined after LIBTP has been fully integrated with the most recent db(3) release from the Computer Systems Research Group at University of California, Berkeley.
5. Performance
In this section, we present the results of two very different benchmarks. The first is an online transaction processing benchmark similar to the standard TPCB, but adapted to run in a desktop environment. The second emulates a computer-aided design environment and provides more complex query processing.
5.1. Transaction Processing Benchmark
For this section, all performance numbers shown except for the commercial database system were obtained on a DECstation 5000/200 with 32MBytes of memory running Ultrix V4.0, accessing a DEC RZ57 1GByte disk drive. The commercial relational database system tests were run on a comparable machine, a Sparcstation 1+ with 32MBytes memory and a 1GByte external disk drive. The database, binaries and log resided on the same device. Reported times are the means of five tests and have standard deviations within two percent of the mean.
The test database was configured according to the TPCB scaling rules for a 10 transaction per second (TPS) system with 1,000,000 account records, 100 teller records, and 10 branch records. Where TPS numbers are reported, we are running a modified version of the industry standard transaction processing benchmark, TPCB. The TPCB benchmark simulates a withdrawal performed by a hypothetical teller at a hypothetical bank. The database consists of relations (files) for accounts, branches, tellers, and history. For each transaction, the account, teller, and branch balances must be updated to reflect the withdrawal and a history record is written which contains the account id, branch id, teller id, and the amount of the withdrawal [TPCB90].
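For reference, the sketch below shows the overall shape of one such withdrawal transaction. The txn_begin/txn_commit/txn_abort calls and the per-relation helpers are illustrative placeholders, not the benchmark's or LIBTP's actual code.

    /* Shape of one TPCB-style withdrawal: debit the account and reflect the
     * change in the teller and branch balances, then append a history record.
     * All of the calls below are illustrative placeholders. */
    extern int txn_begin(void), txn_commit(void), txn_abort(void);
    extern int update_account(long, long), update_teller(long, long),
               update_branch(long, long);
    extern int append_history(long, long, long, long);

    int
    tpcb_withdraw(long acct, long teller, long branch, long delta)
    {
            if (txn_begin() != 0)
                    return (-1);
            if (update_account(acct, delta) != 0 ||
                update_teller(teller, delta) != 0 ||
                update_branch(branch, delta) != 0 ||
                append_history(acct, teller, branch, delta) != 0) {
                    txn_abort();            /* undo any partial updates */
                    return (-1);
            }
            return (txn_commit());          /* make the withdrawal durable */
    }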
Our implementation of the benchmark differs from the specification in several aspects. The specification requires that the database keep redundant logs on different devices, but we use a single log. Furthermore, all tests were run on a single, centralized system so there is no notion of remote accesses. Finally, we calculated throughput by dividing the total elapsed time by the number of transactions processed rather than by computing the response time for each transaction.
The performance comparisons focus on traditional Unix techniques (unprotected, using flock(2) and using fsync(2)) and a commercial relational database system. Well-behaved applications using flock(2) are guaranteed that concurrent processes' updates do not interact with one another, but no guarantees about atomicity are made. That is, if the system crashes in mid-transaction, only parts of that transaction will be reflected in the after-crash state of the database. The use of fsync(2) at transaction commit time provides guarantees of durability after system failure. However, there is no mechanism to perform transaction abort.
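For concreteness, the conventional flock(2)/fsync(2) discipline being compared against looks roughly like the sketch below; this is an illustration of the technique, not code taken from the benchmark.

    #include <sys/types.h>
    #include <sys/file.h>
    #include <unistd.h>

    /* Update one record under the flock(2)/fsync(2) discipline: the exclusive
     * lock keeps concurrent updates from interleaving and the fsync forces the
     * update to disk before the lock is dropped, but a partially completed
     * update cannot be undone after a crash. */
    int
    update_record(int fd, off_t offset, const void *rec, size_t len)
    {
            if (flock(fd, LOCK_EX) < 0)
                    return (-1);
            if (lseek(fd, offset, SEEK_SET) < 0 ||
                write(fd, rec, len) != (ssize_t)len ||
                fsync(fd) < 0) {
                    (void)flock(fd, LOCK_UN);
                    return (-1);
            }
            return (flock(fd, LOCK_UN));
    }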
5.1.1. Single-User Tests
These tests compare LIBTP in a variety of configurations to traditional UNIX solutions and a commercial relational database system (RDBMS). To demonstrate the server architecture we built a front end test process that uses TCL [OUST90] to parse database access commands and call the database access routines. In one case (SERVER), frontend and backend processes were created which communicated via an IP socket. In the second case (TCL), a single process read queries from standard input, parsed them, and called the database access routines. The performance difference between the TCL and SERVER tests quantifies the communication overhead of the socket.

The RDBMS implementation used embedded SQL in C with stored database procedures. Therefore, its configuration is a hybrid of the single process architecture and the server architecture. The graph in figure six shows a comparison of the following six configurations:
    LIBTP     Uses the LIBTP library in a single application.
    TCL       Uses the LIBTP library in a single application, requires query parsing.
    SERVER    Uses the LIBTP library in a server configuration, requires query parsing.
    NOTP      Uses no locking, logging, or concurrency control.
    FLOCK     Uses flock(2) for concurrency control and nothing for durability.
    FSYNC     Uses fsync(2) for durability and nothing for concurrency control.
    RDBMS     Uses a commercial relational database system.
The results show that LIBTP, both in the procedural and parsed environments, is competitive with a commercial system (comparing LIBTP, TCL, and RDBMS). Compared to existing UNIX solutions, LIBTP is approximately 15% slower than using flock(2) or no protection but over 80% better than using fsync(2) (comparing LIBTP, FLOCK, NOTP, and FSYNC).
[Figure 6: Single-User Performance Comparison. Bar chart of single-user throughput in TPS (0-10) for the LIBTP, TCL, SERVER, NOTP, FLOCK, FSYNC, and RDBMS configurations.]
5.1.2. Multi-User Tests
While the single-user tests form a basis for comparing LIBTP to other systems, our goal in multi-user testing was to analyze its scalability. To this end, we have run the benchmark in three modes: the normal disk bound configuration (figure seven), a CPU bound configuration (figure eight, READ-ONLY), and lock contention bound (figure eight, NO-FSYNC). Since the normal configuration is completely disk bound (each transaction requires a random read, a random write, and a sequential write [4]) we expect to see little performance improvement as the multiprogramming level increases. In fact, figure seven reveals that we are able to overlap CPU and disk utilization slightly, producing approximately a 10% performance improvement with two processes. After that point, performance drops off, and at a multiprogramming level of 4, we are performing worse than in the single process case.
Similar behavior was reported on the commercial relational database system using the same configuration. The important conclusion to draw from this is that you cannot attain good multi-user scaling on a badly balanced system. If multi-user performance on applications of this sort is important, one must have a separate logging device and horizontally partition the database to allow a sufficiently high degree of multiprogramming that group commit can amortize the cost of log flushing.
By using a very small database (one that can be entirely cached in main memory) and read-only transactions, we generated a CPU bound environment. By using the same small database, the complete TPCB transaction, and no fsync(2) on the log at commit, we created a lock contention bound environment. The small database used an account file containing only 1000 records rather than the full 1,000,000 records and ran enough transactions to read the entire database into the buffer pool (2000) before beginning measurements. The read-only transaction consisted of three database reads (from the 1000 record account file, the 100 record teller file, and the 10 record branch file). Since no data were modified and no history records were written, no log records were written. For the contention bound configuration, we used the normal TPCB transaction (against the small database) and disabled the log flush. Figure eight shows both of these results.
The read-only test indicates that we barely scale at all in the CPU bound case. The explanation for that is that even with a single process, we are able to drive the CPU utilization to 96%. As a result, that gives us very little room for improvement, and it takes a multiprogramming level of four to approach 100% CPU saturation. In the case where we do perform writes, we are interested in detecting when lock contention becomes a dominant performance factor. Contention will cause two phenomena: we will see transactions queueing behind frequently accessed data, and we will see transaction abort rates increasing due to deadlock.
[4] Although the log is written sequentially, we do not get the benefit of sequentiality since the log and database reside on the same disk.
[Figure 7: Multi-user Performance. Throughput in TPS versus multiprogramming level (0-10) for LIBTP. Since the configuration is completely disk bound, we see only a small improvement by adding a second process. Adding any more concurrent processes causes performance degradation.]

[Figure 8: Multi-user Performance on a small database. Throughput in TPS versus multiprogramming level for the READ-ONLY and NO-FSYNC cases. With one process, we are driving the CPU at 96% utilization, leaving little room for improvement as the multiprogramming level increases. In the NO-FSYNC case, lock contention degrades performance as soon as a second process is added.]

[Figure 9: Abort rates on the TPCB Benchmark. Aborts per 500 transactions versus multiprogramming level for the SMALL and LARGE databases. The abort rate climbs more quickly for the large database test since processes are descheduled more frequently, allowing more processes to vie for the same locks.]
Given that the branch file contains only ten records, we expect contention to become a factor quickly, and the NO-FSYNC line in figure eight demonstrates this dramatically. Each additional process causes both more waiting and more deadlocking. Figure nine shows that in the small database case (SMALL), waiting is the dominant cause of declining performance (the number of aborts increases less steeply than the performance drops off in figure eight), while in the large database case (LARGE), deadlocking contributes more to the declining performance.
Deadlocks are more likely to occur in the LARGE test than in the SMALL test because there are more opportunities to wait. In the SMALL case, processes never do I/O and are less likely to be descheduled during a transaction. In the LARGE case, processes will frequently be descheduled since they have to perform I/O. This provides a window where a second process can request locks on already locked pages, thus increasing the likelihood of building up long chains of waiting processes. Eventually, this leads to deadlock.
5.2. The OO1 Benchmark
The TPCB benchmark described in the previous section measures performance under a conventional transaction processing workload. Other application domains, such as computer-aided design, have substantially different access patterns. In order to measure the performance of LIBTP under workloads of this type, we implemented the OO1 benchmark described in [CATT91].
The database models a set of electronics components with connections among them. One table stores parts and another stores connections. There are three connections originating at any given part. Ninety percent of these connections are to nearby parts (those with nearby ids) to model the spatial locality often exhibited in CAD applications. Ten percent of the connections are randomly distributed among all other parts in the database. Every part appears exactly three times in the from field of a connection record, and zero or more times in the to field. Parts have x and y locations set randomly in an appropriate range.
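As a concrete illustration of that schema, the two record types might be laid out as below; the field names and sizes are assumptions for illustration, not the benchmark's definition.

    /* Illustrative OO1 record layouts (field names and sizes assumed). */
    struct part {
            long    id;             /* unique part id */
            char    type[10];
            int     x, y;           /* location, chosen randomly */
            long    build_date;
    };

    struct connection {
            long    from;           /* originating part; exactly three per part */
            long    to;             /* destination part; zero or more per part */
            char    type[10];
            int     length;
    };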
The intent of OO1 is to measure the overall cost of a query mix characteristic of engineering database applications. There are three tests:
- Lookup generates 1,000 random part ids, fetches the corresponding parts from the database, and calls a null procedure in the host programming language with the parts' x and y positions.

- Traverse retrieves a random part from the database and follows connections from it to other parts. Each of those parts is retrieved, and all connections from it followed. This procedure is repeated depth-first for seven hops from the original part, for a total of 3280 parts. Backward traversal also exists, and follows all connections into a given part to their origin. (A sketch of this traversal follows the list.)

- Insert adds 100 new parts and their connections.
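The traversal test is thus a bounded depth-first search over the connection relation. The sketch below shows its structure; fetch_part() and connected_to() are hypothetical helpers standing in for the benchmark's database calls.

    #define FANOUT  3               /* outgoing connections per part */
    #define MAXHOPS 7               /* hops from the original part */

    extern void fetch_part(long);           /* hypothetical database helpers */
    extern long connected_to(long, int);

    /* Depth-first traversal for the Traverse test.  Visiting the original part
     * plus three children per level for seven levels touches
     * 1 + 3 + 9 + ... + 3^7 = 3280 parts. */
    void
    traverse(long part_id, int hops)
    {
            int i;

            fetch_part(part_id);            /* retrieve the part itself */
            if (hops == MAXHOPS)
                    return;
            for (i = 0; i < FANOUT; i++)
                    traverse(connected_to(part_id, i), hops + 1);
    }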
The benchmark is single-user, but multi-user access controls (locking and transaction protection) must be enforced. It is designed to be run on a database with 20,000 parts, and on one with 200,000 parts. Because we have insufficient disk space for the larger database, we report results only for the 20,000 part database.
5.2.1. Implementation
The LIBTP implementation of OO1 uses the TCL [OUST90] interface described earlier. The backend accepts commands over an IP socket and performs the requested database actions. The frontend opens and executes a TCL script. This script contains database accesses interleaved with ordinary program control statements. Database commands are submitted to the backend and results are bound to program variables.
+3105(variables.)X
+755 2454(The)N
+903(parts)X
+1082(table)X
+1261(was)X
+1409(stored)X
+1628(as)X
+1718(a)X
+1776(B-tree)X
+1999(indexed)X
+2275(by)X
+2 f
+2377(id)X
+1 f
+2439(.)X
+2501(The)X
+2648(connection)X
+3022(table)X
+3200(was)X
+3347(stored)X
+3565(as)X
+3654(a)X
+3712(set)X
+3823(of)X
+3912(\256xed-length)X
+555 2544(records)N
+824(using)X
+1029(the)X
+1159(4.4BSD)X
+1446(recno)X
+1657(access)X
+1895(method.)X
+2207(In)X
+2306(addition,)X
+2620(two)X
+2771(B-tree)X
+3003(indices)X
+3261(were)X
+3449(maintained)X
+3836(on)X
+3947(connection)X
+555 2634(table)N
+732(entries.)X
+1007(One)X
+1162(index)X
+1360(mapped)X
+1634(the)X
+2 f
+1752(from)X
+1 f
+1923(\256eld)X
+2085(to)X
+2167(a)X
+2223(connection)X
+2595(record)X
+2821(number,)X
+3106(and)X
+3242(the)X
+3360(other)X
+3545(mapped)X
+3819(the)X
+2 f
+3937(to)X
+1 f
+4019(\256eld)X
+4181(to)X
+4263(a)X
+555 2724(connection)N
+932(record)X
+1163(number.)X
+1473(These)X
+1690(indices)X
+1941(support)X
+2205(fast)X
+2345(lookups)X
+2622(on)X
+2726(connections)X
+3133(in)X
+3219(both)X
+3385(directions.)X
+3765(For)X
+3900(the)X
+4022(traversal)X
+555 2814(tests,)N
+743(the)X
+867(frontend)X
+1165(does)X
+1338(an)X
+1439(index)X
+1642(lookup)X
+1889(to)X
+1976(discover)X
+2273(the)X
+2396(connected)X
+2747(part's)X
+2 f
+2955(id)X
+1 f
+3017(,)X
+3062(and)X
+3203(then)X
+3366(does)X
+3538(another)X
+3804(lookup)X
+4051(to)X
+4138(fetch)X
+555 2904(the)N
+673(part)X
+818(itself.)X
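A minimal sketch of this layout in terms of the 4.4BSD db(3) interface appears below. The file names, the fixed record length, and the default tuning parameters are assumptions for illustration; the benchmark code itself also routes these operations through LIBTP's transaction calls.

    #include <sys/types.h>
    #include <fcntl.h>
    #include <string.h>
    #include <db.h>

    #define CONN_RECLEN     32      /* fixed size of a connection record (assumed) */

    DB *parts, *conns, *from_idx, *to_idx;

    /* Open the OO1 tables: the parts table as a B-tree keyed by part id, the
     * connection table as fixed-length recno records, and two B-tree indices
     * mapping the "from" and "to" part ids to connection record numbers. */
    int
    open_oo1_tables(void)
    {
            RECNOINFO rinfo;

            memset(&rinfo, 0, sizeof(rinfo));
            rinfo.flags = R_FIXEDLEN;
            rinfo.reclen = CONN_RECLEN;

            parts    = dbopen("parts.db",   O_CREAT | O_RDWR, 0666, DB_BTREE, NULL);
            conns    = dbopen("connect.db", O_CREAT | O_RDWR, 0666, DB_RECNO, &rinfo);
            from_idx = dbopen("from.idx",   O_CREAT | O_RDWR, 0666, DB_BTREE, NULL);
            to_idx   = dbopen("to.idx",     O_CREAT | O_RDWR, 0666, DB_BTREE, NULL);
            return (parts && conns && from_idx && to_idx ? 0 : -1);
    }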
5.2.2. Performance Measurements for OO1
We compare LIBTP's OO1 performance to that reported in [CATT91]. Those results were collected on a Sun 3/280 (25 MHz MC68020) with 16 MBytes of memory and two Hitachi 892MByte disks (15 ms average seek time) behind an SMD-4 controller. Frontends ran on an 8MByte Sun 3/260.
In order to measure performance on a machine of roughly equivalent processor power, we ran one set of tests on a standalone MC68030-based HP300 (33MHz MC68030). The database was stored on a 300MByte HP7959 SCSI disk (17 ms average seek time). Since this machine is not connected to a network, we ran local tests where the frontend and backend run on the same machine. We compare these measurements with Cattell's local Sun 3/280 numbers.
Because the benchmark requires remote access, we ran another set of tests on a DECstation 5000/200 with 32M of memory running Ultrix V4.0 and a DEC 1GByte RZ57 SCSI disk. We compare the local performance of OO1 on the DECstation to its remote performance. For the remote case, we ran the frontend on a DECstation 3100 with 16 MBytes of main memory.
The databases tested in [CATT91] are:
- INDEX, a highly-optimized access method package developed at Sun Microsystems.

- OODBMS, a beta release of a commercial object-oriented database management system.

- RDBMS, a UNIX-based commercial relational data manager at production release. The OO1 implementation used embedded SQL in C. Stored procedures were defined to reduce client-server traffic.
Table two shows the measurements from [CATT91] and LIBTP for a local test on the MC680x0-based hardware. All caches are cleared before each test. All times are in seconds.
+2725(seconds.)X
+755 5187(Table)N
+960(two)X
+1102(shows)X
+1324(that)X
+1466(LIBTP)X
+1710(outperforms)X
+2123(the)X
+2242(commercial)X
+2642(relational)X
+2966(system,)X
+3229(but)X
+3352(is)X
+3426(slower)X
+3661(than)X
+3820(OODBMS)X
+4183(and)X
+555 5277(INDEX.)N
+872(Since)X
+1077(the)X
+1202(caches)X
+1444(were)X
+1628(cleared)X
+1888(at)X
+1973(the)X
+2098(start)X
+2263(of)X
+2356(each)X
+2530(test,)X
+2687(disk)X
+2846(throughput)X
+3223(is)X
+3302(critical)X
+3551(in)X
+3639(this)X
+3780(test.)X
+3957(The)X
+4108(single)X
+555 5367(SCSI)N
+749(HP)X
+877(drive)X
+1068(used)X
+1241(by)X
+1347(LIBTP)X
+1595(is)X
+1674(approximately)X
+2163(13%)X
+2336(slower)X
+2576(than)X
+2739(the)X
+2862(disks)X
+3051(used)X
+3223(in)X
+3310([CATT91])X
+3678(which)X
+3899(accounts)X
+4205(for)X
+555 5457(part)N
+700(of)X
+787(the)X
+905(difference.)X
+755 5580(OODBMS)N
+1118(and)X
+1255(INDEX)X
+1525(outperform)X
+1906(LIBTP)X
+2148(most)X
+2323(dramatically)X
+2744(on)X
+2844(traversal.)X
+3181(This)X
+3343(is)X
+3416(because)X
+3691(we)X
+3805(use)X
+3932(index)X
+4130(look-)X
+555 5670(ups)N
+689(to)X
+774(\256nd)X
+921(connections,)X
+1347(whereas)X
+1634(the)X
+1755(other)X
+1942(two)X
+2084(systems)X
+2359(use)X
+2488(a)X
+2546(link)X
+2692(access)X
+2920(method.)X
+3222(The)X
+3369(index)X
+3569(requires)X
+3850(us)X
+3943(to)X
+4027(examine)X
Table 2: Local MC680x0 Performance of Several Systems on OO1.

    Measure      INDEX   OODBMS   RDBMS   LIBTP
    Lookup         5.4     12.9      27    27.2
    Traversal       13      9.8      90    47.3
    Insert         7.4      1.5      22     9.7

Table 3: Local vs. Remote Performance of LIBTP on OO1.

    Measure              Cache   Local   Remote
    Lookup               cold     15.7     20.6
                         warm      7.8     12.4
    Forward traversal    cold     28.4     52.6
                         warm     23.5     47.4
    Backward traversal   cold     24.2     47.4
                         warm     24.3     47.6
    Insert               cold      7.5     10.3
                         warm      6.7     10.9
The index requires us to examine two disk pages, but the links require only one, regardless of database size. Cattell reports that doing lookups through B-trees instead of links makes traversal take twice as long in INDEX. Adding a link access method to db(3) or using the existing hash method would apparently be a good idea.
Both OODBMS and INDEX issue coarser-granularity locks than LIBTP. This limits concurrency for multi-user applications, but helps single-user applications. In addition, the fact that LIBTP releases B-tree locks early is a drawback in OO1. Since there is no concurrency in the benchmark, high-concurrency strategies only show up as increased locking overhead. Finally, the architecture of the LIBTP implementation was substantially different from that of either OODBMS or INDEX. Both of those systems do the searches in the user's address space, and issue requests for pages to the server process. Pages are cached in the client, and many queries can be satisfied without contacting the server at all. LIBTP submits all the queries to the server process, and receives database records back; it does no client caching.
The RDBMS architecture is much closer to that of LIBTP. A server process receives queries and returns results to a client. The timing results in table two clearly show that the conventional database client/server model is expensive. LIBTP outperforms the RDBMS on traversal and insertion. We speculate that this is due in part to the overhead of query parsing, optimization, and repeated interpretation of the plan tree in the RDBMS' query executor.
Table three shows the differences between local and remote execution of LIBTP's OO1 implementation on a DECstation. We measured performance with a populated (warm) cache and an empty (cold) cache. Reported times are the means of twenty tests, and are in seconds. Standard deviations were within seven percent of the mean for remote, and two percent of the mean for local.
The 20ms overhead of TCP/IP on an Ethernet entirely accounts for the difference in speed. The remote traversal times are nearly double the local times because we do index lookups and part fetches in separate queries. It would make sense to do indexed searches on the server, but we were unwilling to hard-code knowledge of OO1 indices into our LIBTP TCL server. Cold and warm insertion times are identical since insertions do not benefit from caching.
One interesting difference shown by table three is the cost of forward versus backward traversal. When we built the database, we inserted parts in part id order. We built the indices at the same time. Therefore, the forward index had keys inserted in order, while the backward index had keys inserted more randomly. In-order insertion is pessimal for B-tree indices, so the forward index is much larger than the backward one [5]. This larger size shows up as extra disk reads in the cold benchmark.
+1524(benchmark.)X
+3 f
+555 5161(6.)N
+655(Conclusions)X
+1 f
+755 5284(LIBTP)N
+1006(provides)X
+1311(the)X
+1438(basic)X
+1632(building)X
+1927(blocks)X
+2165(to)X
+2256(support)X
+2525(transaction)X
+2906(protection.)X
+3300(In)X
+3396(comparison)X
+3799(with)X
+3970(traditional)X
+555 5374(Unix)N
+746(libraries)X
+1040(and)X
+1187(commercial)X
+1597(systems,)X
+1900(it)X
+1974(offers)X
+2192(a)X
+2258(variety)X
+2511(of)X
+2608(tradeoffs.)X
+2964(Using)X
+3185(complete)X
+3509(transaction)X
+3891(protection)X
+4246(is)X
+555 5464(more)N
+747(complicated)X
+1166(than)X
+1331(simply)X
+1575(adding)X
+3 f
+1820(fsync)X
+1 f
+1998(\(2\))X
+2119(and)X
+3 f
+2262(\257ock)X
+1 f
+2426(\(2\))X
+2547(calls)X
+2721(to)X
+2810(code,)X
+3008(but)X
+3136(it)X
+3206(is)X
+3285(faster)X
+3490(in)X
+3578(some)X
+3773(cases)X
+3969(and)X
+4111(offers)X
+8 s
+10 f
+555 5536(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)N
+5 s
+1 f
+727 5614(5)N
+8 s
+763 5639(The)N
+878(next)X
+1004(release)X
+1196(of)X
+1265(the)X
+1359(4.4BSD)X
+1580(access)X
+1758(method)X
+1966(will)X
+2082(automatically)X
+2446(detect)X
+2614(and)X
+2722(compensate)X
+3039(for)X
+3129(in-order)X
+3350(insertion,)X
+3606(eliminating)X
+3914(this)X
+4023(problem.)X
+
+16 p
+%%Page: 16 16
+8 s 8 xH 0 xS 1 f
+10 s
+3 f
+1 f
+555 630(stricter)N
+801(guarantees)X
+1168(\(atomicity,)X
+1540(consistency,)X
+1957(isolation,)X
+2275(and)X
+2414(durability\).)X
+2815(If)X
+2892(the)X
+3013(data)X
+3170(to)X
+3255(be)X
+3354(protected)X
+3676(are)X
+3798(already)X
+4058(format-)X
+555 720(ted)N
+675(\()X
+2 f
+702(i.e.)X
+1 f
+821(use)X
+949(one)X
+1086(of)X
+1174(the)X
+1293(database)X
+1591(access)X
+1818(methods\),)X
+2157(then)X
+2316(adding)X
+2555(transaction)X
+2928(protection)X
+3274(requires)X
+3554(no)X
+3655(additional)X
+3996(complex-)X
+555 810(ity,)N
+679(but)X
+801(incurs)X
+1017(a)X
+1073(performance)X
+1500(penalty)X
+1756(of)X
+1843(approximately)X
+2326(15%.)X
In comparison with commercial database systems, the tradeoffs are more complex. LIBTP does not currently support a standard query language. The TCL-based server process allows a certain ease of use which would be enhanced with a more user-friendly interface (e.g., a windows-based query-by-form application), for which we have a working prototype. When accesses do not require sophisticated query processing, the TCL interface is an adequate solution. What LIBTP fails to provide in functionality, it makes up for in performance and flexibility. Any application may make use of its record interface or the more primitive log, lock, and buffer calls.
+3496(calls.)X
+755 1506(Future)N
+987(work)X
+1175(will)X
+1322(focus)X
+1519(on)X
+1621(overcoming)X
+2026(some)X
+2217(of)X
+2306(the)X
+2426(areas)X
+2614(in)X
+2698(which)X
+2916(LIBTP)X
+3160(is)X
+3235(currently)X
+3547(de\256cient)X
+3845(and)X
+3983(extending)X
+555 1596(its)N
+652(transaction)X
+1026(model.)X
+1288(The)X
+1435(addition)X
+1719(of)X
+1808(an)X
+1905(SQL)X
+2077(parser)X
+2295(and)X
+2432(forms)X
+2640(front)X
+2817(end)X
+2954(will)X
+3099(improve)X
+3387(the)X
+3506(system's)X
+3807(ease)X
+3967(of)X
+4055(use)X
+4183(and)X
+555 1686(make)N
+750(it)X
+815(more)X
+1001(competitive)X
+1400(with)X
+1563(commercial)X
+1963(systems.)X
+2277(In)X
+2365(the)X
+2484(long)X
+2647(term,)X
+2835(we)X
+2950(would)X
+3170(like)X
+3310(to)X
+3392(add)X
+3528(generalized)X
+3919(hierarchical)X
+555 1776(locking,)N
+836(nested)X
+1062(transactions,)X
+1486(parallel)X
+1748(transactions,)X
+2171(passing)X
+2431(of)X
+2518(transactions)X
+2921(between)X
+3209(processes,)X
+3557(and)X
+3693(distributed)X
+4055(commit)X
+555 1866(handling.)N
+900(In)X
+992(the)X
+1115(short)X
+1300(term,)X
+1492(the)X
+1614(next)X
+1776(step)X
+1929(is)X
+2006(to)X
+2092(integrate)X
+2397(LIBTP)X
+2643(with)X
+2809(the)X
+2931(most)X
+3110(recent)X
+3331(release)X
+3579(of)X
+3670(the)X
+3792(database)X
+4093(access)X
+555 1956(routines)N
+833(and)X
+969(make)X
+1163(it)X
+1227(freely)X
+1435(available)X
+1745(via)X
+1863(anonymous)X
+2252(ftp.)X
7. Acknowledgements
We would like to thank John Wilkes and Carl Staelin of Hewlett-Packard Laboratories and Jon Krueger. John and Carl provided us with an extra disk for the HP testbed less than 24 hours after we requested it. Jon spent countless hours helping us understand the intricacies of commercial database products and their behavior under a variety of system configurations.
+3 f
+555 2721(8.)N
+655(References)X
+1 f
+555 2901([ANDR89])N
+942(Andrade,)X
+1265(J.,)X
+1361(Carges,)X
+1629(M.,)X
+1765(Kovach,)X
+2060(K.,)X
+2183(``Building)X
+2541(an)X
+2642(On-Line)X
+2939(Transaction)X
+3343(Processing)X
+3715(System)X
+3975(On)X
+4098(UNIX)X
+727 2991(System)N
+982(V'',)X
+2 f
+1134(CommUNIXations)X
+1 f
+1725(,)X
+1765 0.2188(November/December)AX
+2477(1989.)X
+555 3171([BAY77])N
+878(Bayer,)X
+1110(R.,)X
+1223(Schkolnick,)X
+1623(M.,)X
+1754(``Concurrency)X
+2243(of)X
+2330(Operations)X
+2702(on)X
+2802(B-Trees'',)X
+2 f
+3155(Acta)X
+3322(Informatica)X
+1 f
+3700(,)X
+3740(1977.)X
+555 3351([BERN80])N
+936(Bernstein,)X
+1297(P.,)X
+1415(Goodman,)X
+1785(N.,)X
+1917(``Timestamp)X
+2365(Based)X
+2595(Algorithms)X
+2992(for)X
+3119(Concurrency)X
+3567(Control)X
+3844(in)X
+3939(Distributed)X
+727 3441(Database)N
+1042(Systems'',)X
+2 f
+1402(Proceedings)X
+1823(6th)X
+1945(International)X
+2387(Conference)X
+2777(on)X
+2877(Very)X
+3049(Large)X
+3260(Data)X
+3440(Bases)X
+1 f
+3627(,)X
+3667(October)X
+3946(1980.)X
+555 3621([BSD91])N
+864(DB\(3\),)X
+2 f
+1109(4.4BSD)X
+1376(Unix)X
+1552(Programmer's)X
+2044(Manual)X
+2313(Reference)X
+2655(Guide)X
+1 f
+2851(,)X
+2891(University)X
+3249(of)X
+3336(California,)X
+3701(Berkeley,)X
+4031(1991.)X
+555 3801([CATT91])N
+923(Cattell,)X
+1181(R.G.G.,)X
+1455(``An)X
+1632(Engineering)X
+2049(Database)X
+2369(Benchmark'',)X
+2 f
+2838(The)X
+2983(Benchmark)X
+3373(Handbook)X
+3731(for)X
+3848(Database)X
+4179(and)X
+727 3891(Transaction)N
+1133(Processing)X
+1509(Systems)X
+1 f
+1763(,)X
+1803(J.)X
+1874(Gray,)X
+2075(editor,)X
+2302(Morgan)X
+2576(Kaufman)X
+2895(1991.)X
+555 4071([CHEN91])N
+929(Cheng,)X
+1180(E.,)X
+1291(Chang,)X
+1542(E.,)X
+1653(Klein,)X
+1872(J.,)X
+1964(Lee,)X
+2126(D.,)X
+2245(Lu,)X
+2375(E.,)X
+2485(Lutgardo,)X
+2820(A.,)X
+2939(Obermarck,)X
+3342(R.,)X
+3456(``An)X
+3629(Open)X
+3824(and)X
+3961(Extensible)X
+727 4161(Event-Based)N
+1157(Transaction)X
+1556(Manager'',)X
+2 f
+1936(Proceedings)X
+2357(1991)X
+2537(Summer)X
+2820(Usenix)X
+1 f
+3043(,)X
+3083(Nashville,)X
+3430(TN,)X
+3577(June)X
+3744(1991.)X
+555 4341([CHOU85])N
+943(Chou,)X
+1163(H.,)X
+1288(DeWitt,)X
+1570(D.,)X
+1694(``An)X
+1872(Evaluation)X
+2245(of)X
+2338(Buffer)X
+2574(Management)X
+3019(Strategies)X
+3361(for)X
+3481(Relational)X
+3836(Database)X
+4157(Sys-)X
+727 4431(tems'',)N
+2 f
+972(Proceedings)X
+1393(of)X
+1475(the)X
+1593(11th)X
+1755(International)X
+2197(Conference)X
+2587(on)X
+2687(Very)X
+2859(Large)X
+3070(Databases)X
+1 f
+3408(,)X
+3448(1985.)X
+555 4611([DEWI84])N
+925(DeWitt,)X
+1207(D.,)X
+1331(Katz,)X
+1529(R.,)X
+1648(Olken,)X
+1890(F.,)X
+2000(Shapiro,)X
+2295(L.,)X
+2410(Stonebraker,)X
+2843(M.,)X
+2979(Wood,)X
+3220(D.,)X
+3343(``Implementation)X
+3929(Techniques)X
+727 4701(for)N
+841(Main)X
+1030(Memory)X
+1326(Database)X
+1641(Systems'',)X
+2 f
+2001(Proceedings)X
+2422(of)X
+2504(SIGMOD)X
+1 f
+2812(,)X
+2852(pp.)X
+2972(1-8,)X
+3119(June)X
+3286(1984.)X
+555 4881([GRAY76])N
+944(Gray,)X
+1153(J.,)X
+1252(Lorie,)X
+1474(R.,)X
+1595(Putzolu,)X
+1887(F.,)X
+1999(and)X
+2143(Traiger,)X
+2428(I.,)X
+2522(``Granularity)X
+2973(of)X
+3067(locks)X
+3263(and)X
+3406(degrees)X
+3679(of)X
+3773(consistency)X
+4174(in)X
+4263(a)X
+727 4971(large)N
+909(shared)X
+1140(data)X
+1295(base'',)X
+2 f
+1533(Modeling)X
+1861(in)X
+1944(Data)X
+2125(Base)X
+2301(Management)X
+2740(Systems)X
+1 f
+2994(,)X
+3034(Elsevier)X
+3317(North)X
+3524(Holland,)X
+3822(New)X
+3994(York,)X
+4199(pp.)X
+727 5061(365-394.)N
+555 5241([HAER83])N
+931(Haerder,)X
+1235(T.)X
+1348(Reuter,)X
+1606(A.)X
+1728(``Principles)X
+2126(of)X
+2217(Transaction-Oriented)X
+2928(Database)X
+3246(Recovery'',)X
+2 f
+3651(Computing)X
+4029(Surveys)X
+1 f
+4279(,)X
+727 5331(15\(4\);)N
+943(237-318,)X
+1250(1983.)X
+555 5511([KUNG81])N
+943(Kung,)X
+1162(H.)X
+1261(T.,)X
+1371(Richardson,)X
+1777(J.,)X
+1869(``On)X
+2042(Optimistic)X
+2400(Methods)X
+2701(for)X
+2816(Concurrency)X
+3252(Control'',)X
+2 f
+3591(ACM)X
+3781(Transactions)X
+4219(on)X
+727 5601(Database)N
+1054(Systems)X
+1 f
+1328(6\(2\);)X
+1504(213-226,)X
+1811(1981.)X
+
+17 p
+%%Page: 17 17
+10 s 10 xH 0 xS 1 f
+3 f
+1 f
+555 630([LEHM81])N
+939(Lehman,)X
+1245(P.,)X
+1352(Yao,)X
+1529(S.,)X
+1636(``Ef\256cient)X
+1989(Locking)X
+2279(for)X
+2396(Concurrent)X
+2780(Operations)X
+3155(on)X
+3258(B-trees'',)X
+2 f
+3587(ACM)X
+3779(Transactions)X
+4219(on)X
+727 720(Database)N
+1054(Systems)X
+1 f
+1308(,)X
+1348(6\(4\),)X
+1522(December)X
+1873(1981.)X
+555 900([MOHA91])N
+964(Mohan,)X
+1241(C.,)X
+1364(Pirahesh,)X
+1690(H.,)X
+1818(``ARIES-RRH:)X
+2366(Restricted)X
+2721(Repeating)X
+3076(of)X
+3173(History)X
+3442(in)X
+3533(the)X
+3660(ARIES)X
+3920(Transaction)X
+727 990(Recovery)N
+1055(Method'',)X
+2 f
+1398(Proceedings)X
+1819(7th)X
+1941(International)X
+2383(Conference)X
+2773(on)X
+2873(Data)X
+3053(Engineering)X
+1 f
+3449(,)X
+3489(Kobe,)X
+3703(Japan,)X
+3926(April)X
+4115(1991.)X
+555 1170([NODI90])N
+914(Nodine,)X
+1194(M.,)X
+1328(Zdonik,)X
+1602(S.,)X
+1709(``Cooperative)X
+2178(Transaction)X
+2580(Hierarchies:)X
+2996(A)X
+3077(Transaction)X
+3479(Model)X
+3711(to)X
+3796(Support)X
+4072(Design)X
+727 1260(Applications'',)N
+2 f
+1242(Proceedings)X
+1675(16th)X
+1849(International)X
+2303(Conference)X
+2704(on)X
+2815(Very)X
+2998(Large)X
+3220(Data)X
+3411(Bases)X
+1 f
+3598(,)X
+3649(Brisbane,)X
+3985(Australia,)X
+727 1350(August)N
+978(1990.)X
+555 1530([OUST90])N
+923(Ousterhout,)X
+1324(J.,)X
+1420(``Tcl:)X
+1648(An)X
+1771(Embeddable)X
+2197(Command)X
+2555(Language'',)X
+2 f
+2971(Proceedings)X
+3396(1990)X
+3580(Winter)X
+3822(Usenix)X
+1 f
+4045(,)X
+4089(Wash-)X
+727 1620(ington,)N
+971(D.C.,)X
+1162(January)X
+1432(1990.)X
+555 1800([POSIX91])N
+955(``Unapproved)X
+1441(Draft)X
+1645(for)X
+1773(Realtime)X
+2096(Extension)X
+2450(for)X
+2578(Portable)X
+2879(Operating)X
+3234(Systems'',)X
+3608(Draft)X
+3812(11,)X
+3946(October)X
+4239(7,)X
+727 1890(1991,)N
+927(IEEE)X
+1121(Computer)X
+1461(Society.)X
+555 2070([ROSE91])N
+925(Rosenblum,)X
+1341(M.,)X
+1484(Ousterhout,)X
+1892(J.,)X
+1995(``The)X
+2206(Design)X
+2464(and)X
+2611(Implementation)X
+3149(of)X
+3247(a)X
+3314(Log-Structured)X
+3835(File)X
+3990(System'',)X
+2 f
+727 2160(Proceedings)N
+1148(of)X
+1230(the)X
+1348(13th)X
+1510(Symposium)X
+1895(on)X
+1995(Operating)X
+2344(Systems)X
+2618(Principles)X
+1 f
+2947(,)X
+2987(1991.)X
+555 2340([SELT91])N
+904(Seltzer,)X
+1171(M.,)X
+1306(Stonebraker,)X
+1738(M.,)X
+1873(``Read)X
+2116(Optimized)X
+2478(File)X
+2626(Systems:)X
+2938(A)X
+3020(Performance)X
+3454(Evaluation'',)X
+2 f
+3898(Proceedings)X
+727 2430(7th)N
+849(Annual)X
+1100(International)X
+1542(Conference)X
+1932(on)X
+2032(Data)X
+2212(Engineering)X
+1 f
+2608(,)X
+2648(Kobe,)X
+2862(Japan,)X
+3085(April)X
+3274(1991.)X
+555 2610([SPEC88])N
+907(Spector,)X
+1200(Rausch,)X
+1484(Bruell,)X
+1732(``Camelot:)X
+2107(A)X
+2192(Flexible,)X
+2501(Distributed)X
+2888(Transaction)X
+3294(Processing)X
+3668(System'',)X
+2 f
+4004(Proceed-)X
+727 2700(ings)N
+880(of)X
+962(Spring)X
+1195(COMPCON)X
+1606(1988)X
+1 f
+(,)S
+1806(February)X
+2116(1988.)X
+555 2880([SQL86])N
+862(American)X
+1201(National)X
+1499(Standards)X
+1836(Institute,)X
+2139(``Database)X
+2509(Language)X
+2847(SQL'',)X
+3093(ANSI)X
+3301(X3.135-1986)X
+3747(\(ISO)X
+3924(9075\),)X
+4152(May)X
+727 2970(1986.)N
+555 3150([STON81])N
+919(Stonebraker,)X
+1348(M.,)X
+1480(``Operating)X
+1876(System)X
+2132(Support)X
+2406(for)X
+2520(Database)X
+2835(Management'',)X
+2 f
+3348(Communications)X
+3910(of)X
+3992(the)X
+4110(ACM)X
+1 f
+4279(,)X
+727 3240(1981.)N
+555 3420([SULL92])N
+925(Sullivan,)X
+1247(M.,)X
+1394(Olson,)X
+1641(M.,)X
+1788(``An)X
+1976(Index)X
+2195(Implementation)X
+2737(Supporting)X
+3127(Fast)X
+3295(Recovery)X
+3638(for)X
+3767(the)X
+3900(POSTGRES)X
+727 3510(Storage)N
+1014(System'',)X
+1365(to)X
+1469(appear)X
+1726(in)X
+2 f
+1830(Proceedings)X
+2272(8th)X
+2415(Annual)X
+2687(International)X
+3150(Conference)X
+3561(on)X
+3682(Data)X
+3883(Engineering)X
+1 f
+4279(,)X
+727 3600(Tempe,)N
+990(Arizona,)X
+1289(February)X
+1599(1992.)X
+555 3780([TPCB90])N
+914(Transaction)X
+1319(Processing)X
+1692(Performance)X
+2129(Council,)X
+2428(``TPC)X
+2653(Benchmark)X
+3048(B'',)X
+3200(Standard)X
+3510(Speci\256cation,)X
+3973(Waterside)X
+727 3870(Associates,)N
+1110(Fremont,)X
+1421(CA.,)X
+1592(1990.)X
+555 4050([YOUN91])N
+947(Young,)X
+1211(M.)X
+1328(W.,)X
+1470(Thompson,)X
+1858(D.)X
+1962(S.,)X
+2072(Jaffe,)X
+2274(E.,)X
+2388(``A)X
+2525(Modular)X
+2826(Architecture)X
+3253(for)X
+3372(Distributed)X
+3757(Transaction)X
+4161(Pro-)X
+727 4140(cessing'',)N
+2 f
+1057(Proceedings)X
+1478(1991)X
+1658(Winter)X
+1896(Usenix)X
+1 f
+2119(,)X
+2159(Dallas,)X
+2404(TX,)X
+2551(January)X
+2821(1991.)X
+3 f
+755 4263(Margo)N
+1008(I.)X
+1080(Seltzer)X
+1 f
+1338(is)X
+1411(a)X
+1467(Ph.D.)X
+1669(student)X
+1920(in)X
+2002(the)X
+2120(Department)X
+2519(of)X
+2606(Electrical)X
+2934(Engineering)X
+3346(and)X
+3482(Computer)X
+3822(Sciences)X
+4123(at)X
+4201(the)X
+555 4353(University)N
+919(of)X
+1012(California,)X
+1383(Berkeley.)X
+1739(Her)X
+1886(research)X
+2181(interests)X
+2474(include)X
+2735(\256le)X
+2862(systems,)X
+3160(databases,)X
+3513(and)X
+3654(transaction)X
+4031(process-)X
+555 4443(ing)N
+686(systems.)X
+1008(She)X
+1157(spent)X
+1355(several)X
+1612(years)X
+1811(working)X
+2107(at)X
+2194(startup)X
+2441(companies)X
+2813(designing)X
+3153(and)X
+3298(implementing)X
+3771(\256le)X
+3902(systems)X
+4183(and)X
+555 4533(transaction)N
+929(processing)X
+1294(software)X
+1592(and)X
+1729(designing)X
+2061(microprocessors.)X
+2648(Ms.)X
+2791(Seltzer)X
+3035(received)X
+3329(her)X
+3453(AB)X
+3585(in)X
+3668(Applied)X
+3947(Mathemat-)X
+555 4623(ics)N
+664(from)X
+840 0.1953(Harvard/Radcliffe)AX
+1445(College)X
+1714(in)X
+1796(1983.)X
+755 4746(In)N
+845(her)X
+971(spare)X
+1163(time,)X
+1347(Margo)X
+1583(can)X
+1717(usually)X
+1970(be)X
+2068(found)X
+2277(preparing)X
+2607(massive)X
+2887(quantities)X
+3220(of)X
+3309(food)X
+3478(for)X
+3594(hungry)X
+3843(hordes,)X
+4099(study-)X
+555 4836(ing)N
+677(Japanese,)X
+1003(or)X
+1090(playing)X
+1350(soccer)X
+1576(with)X
+1738(an)X
+1834(exciting)X
+2112(Bay)X
+2261(Area)X
+2438(Women's)X
+2770(Soccer)X
+3009(team,)X
+3205(the)X
+3323(Berkeley)X
+3633(Bruisers.)X
+3 f
+755 5049(Michael)N
+1056(A.)X
+1159(Olson)X
+1 f
+1383(is)X
+1461(a)X
+1522(Master's)X
+1828(student)X
+2084(in)X
+2170(the)X
+2292(Department)X
+2695(of)X
+2786(Electrical)X
+3118(Engineering)X
+3534(and)X
+3674(Computer)X
+4018(Sciences)X
+555 5139(at)N
+645(the)X
+774(University)X
+1143(of)X
+1241(California,)X
+1617(Berkeley.)X
+1978(His)X
+2120(primary)X
+2405(interests)X
+2703(are)X
+2833(database)X
+3141(systems)X
+3425(and)X
+3572(mass)X
+3763(storage)X
+4026(systems.)X
+555 5229(Mike)N
+759(spent)X
+963(two)X
+1118(years)X
+1323(working)X
+1625(for)X
+1754(a)X
+1825(commercial)X
+2239(database)X
+2551(system)X
+2808(vendor)X
+3066(before)X
+3307(joining)X
+3567(the)X
+3699(Postgres)X
+4004(Research)X
+555 5319(Group)N
+780(at)X
+858(Berkeley)X
+1168(in)X
+1250(1988.)X
+1470(He)X
+1584(received)X
+1877(his)X
+1990(B.A.)X
+2161(in)X
+2243(Computer)X
+2583(Science)X
+2853(from)X
+3029(Berkeley)X
+3339(in)X
+3421(May)X
+3588(1991.)X
+755 5442(Mike)N
+945(only)X
+1108(recently)X
+1388(transferred)X
+1758(into)X
+1903(Sin)X
+2030(City,)X
+2208(but)X
+2330(is)X
+2403(rapidly)X
+2650(adopting)X
+2950(local)X
+3126(customs)X
+3408(and)X
+3544(coloration.)X
+3929(In)X
+4016(his)X
+4129(spare)X
+555 5532(time,)N
+742(he)X
+843(organizes)X
+1176(informal)X
+1477(Friday)X
+1711(afternoon)X
+2043(study)X
+2240(groups)X
+2482(to)X
+2568(discuss)X
+2823(recent)X
+3044(technical)X
+3358(and)X
+3498(economic)X
+3834(developments.)X
+555 5622(Among)N
+815(his)X
+928(hobbies)X
+1197(are)X
+1316(Charles)X
+1581(Dickens,)X
+1884(Red)X
+2033(Rock,)X
+2242(and)X
+2378(speaking)X
+2683(Dutch)X
+2899(to)X
+2981(anyone)X
+3233(who)X
+3391(will)X
+3535(permit)X
+3764(it.)X
+
+17 p
+%%Trailer
+xt
+
+xs
+
diff --git a/bdb/docs/ref/refs/refs.html b/bdb/docs/ref/refs/refs.html
new file mode 100644
index 00000000000..9e321b938c5
--- /dev/null
+++ b/bdb/docs/ref/refs/refs.html
@@ -0,0 +1,75 @@
+<!--$Id: refs.so,v 10.24 2000/12/19 18:54:17 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Additional references</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Additional References</dl></h3></td>
+<td width="1%"><a href="../../ref/distrib/layout.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Additional references</h1>
+<p>For more information on Berkeley DB, or on database systems theory in general,
+we recommend the sources listed below.
+<h3>Technical Papers on Berkeley DB</h3>
+<p>These papers have appeared in refereed conference proceedings, and are
+subject to copyrights held by the conference organizers and the authors
+of the papers. Sleepycat Software makes them available here as a courtesy
+with the permission of the copyright holders.
+<p><dl compact>
+<p><dt><i>Berkeley DB</i> (<a href="bdb_usenix.html">HTML</a>, <a href="bdb_usenix.ps">Postscript</a>)<dd>Michael Olson, Keith Bostic, and Margo Seltzer, Proceedings of the 1999
+Summer Usenix Technical Conference, Monterey, California, June 1999. This
+paper describes recent commercial releases of Berkeley DB, its most important
+features, the history of the software, and Sleepycat's Open Source
+licensing policies.
+<p><dt><i>Challenges in Embedded Database System Administration</i>
+(<a href="embedded.html">HTML</a>)<dd>Margo Seltzer and Michael Olson, First Workshop on Embedded Systems,
+Cambridge, Massachusetts, March 1999. This paper describes the challenges
+that face embedded systems developers, and how Berkeley DB has been designed to
+address them.
+<p><dt><i>LIBTP: Portable Modular Transactions for UNIX</i>
+(<a href="libtp_usenix.ps">Postscript</a>)<dd>Margo Seltzer and Michael Olson, USENIX Conference Proceedings, Winter
+1992. This paper describes an early prototype of the transactional system
+for Berkeley DB.
+<p><dt><i>A New Hashing Package for UNIX</i>
+(<a href="hash_usenix.ps">Postscript</a>)<dd>Margo Seltzer and Oz Yigit, USENIX Conference Proceedings, Winter 1991.
+This paper describes the Extended Linear Hashing techniques used by Berkeley DB.
+</dl>
+<h3>Background on Berkeley DB Features</h3>
+<p>These papers, while not specific to Berkeley DB, give a good overview of how
+different Berkeley DB features were implemented.
+<p><dl compact>
+<p><dt><i>Operating System Support for Database Management</i><dd>Michael Stonebraker, Communications of the ACM 24(7), 1981, pp. 412-418.
+<p><dt><i>Dynamic Hash Tables</i><dd>Per-Ake Larson, Communications of the ACM, April 1988.
+<p><dt><i>Linear Hashing: A New Tool for File and Table Addressing</i><dd><a href="witold.html">Witold Litwin</a>, Proceedings of the 6th International
+Conference on Very Large Databases (VLDB), 1980.
+<p><dt><i>The Ubiquitous B-tree</i><dd>Douglas Comer, ACM Comput. Surv. 11, 2 (June 1979), pp. 121-138.
+<p><dt><i>Prefix B-trees</i><dd>Bayer and Unterauer, ACM Transactions on Database Systems, Vol. 2, 1
+(March 1977), pp. 11-26.
+<p><dt><i>The Art of Computer Programming Vol. 3: Sorting and Searching</i><dd>D.E. Knuth, 1968, pp. 471-480.
+<p><dt><i>Document Processing in a Relational Database System</i><dd>Michael Stonebraker, Heidi Stettner, Joseph Kalash, Antonin Guttman,
+Nadene Lynn, Memorandum No. UCB/ERL M82/32, May 1982.
+</dl>
+<h3>Database Systems Theory</h3>
+<p>These publications are standard reference works on the design and
+implementation of database systems. Berkeley DB uses many of the ideas they
+describe.
+<p><dl compact>
+<p><dt><i>Transaction Processing: Concepts and Techniques</i><dd>by Jim Gray and Andreas Reuter, Morgan Kaufmann Publishers.
+We recommend chapters 1, 4 (skip 4.6, 4.7, 4.9, 4.10 and 4.11),
+7, 9, 10.3, and 10.4.
+<p><dt><i>An Introduction to Database Systems, Volume 1</i><dd>by C.J. Date, Addison Wesley Longman Publishers.
+In the 5th Edition, we recommend chapters 1, 2, 3, 16 and 17.
+<p><dt><i>Concurrency Control and Recovery in Database Systems</i><dd>by Bernstein, Goodman, Hadzilacos. Currently out of print, but available
+from <a href="http://research.microsoft.com/pubs/ccontrol/">http://research.microsoft.com/pubs/ccontrol/</a>.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/distrib/layout.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/refs/witold.html b/bdb/docs/ref/refs/witold.html
new file mode 100644
index 00000000000..d81065e66c4
--- /dev/null
+++ b/bdb/docs/ref/refs/witold.html
@@ -0,0 +1,16 @@
+<!--$Id: witold.so,v 10.4 1999/11/19 17:21:03 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Witold Litwin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Witold Litwin</h1>
+Witold is a hell of a guy to take you on a late-night high-speed car
+chase up the mountains of Austria in search of very green wine.
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/rpc/client.html b/bdb/docs/ref/rpc/client.html
new file mode 100644
index 00000000000..e8eb90dcfe1
--- /dev/null
+++ b/bdb/docs/ref/rpc/client.html
@@ -0,0 +1,75 @@
+<!--$Id: client.so,v 1.6 2000/03/18 21:43:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Client program</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>RPC Client/Server</dl></h3></td>
+<td width="1%"><a href="../../ref/rpc/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/server.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Client program</h1>
+<p>Changing a Berkeley DB application to remotely call a server program requires
+only a few changes on the client side:
+<p><ol>
+<p><li>The client application must create and use a Berkeley DB environment;
+that is, it cannot simply call the <a href="../../api_c/db_create.html">db_create</a> interface, but must
+first call the <a href="../../api_c/env_create.html">db_env_create</a> interface to create an environment in
+which the database will live.
+<p><li>The client application must call <a href="../../api_c/env_create.html">db_env_create</a> using the
+<a href="../../api_c/env_create.html#DB_CLIENT">DB_CLIENT</a> flag.
+<p><li>The client application must call the additional DB_ENV
+method <a href="../../api_c/env_set_server.html">DBENV-&gt;set_server</a> to specify the database server. This call
+must be made before opening the environment with the <a href="../../api_c/env_open.html">DBENV-&gt;open</a>
+call.
+</ol>
+<p>The client application provides three pieces of information to Berkeley DB as
+part of the <a href="../../api_c/env_set_server.html">DBENV-&gt;set_server</a> call
+(a brief sketch follows this list):
+<p><ol>
+<p><li>The hostname of the server. The hostname format is not
+specified by Berkeley DB, but must be in a format acceptable to the local
+network support, specifically, the RPC clnt_create interface.
+<p><li>The client timeout. This is the number of seconds the client
+will wait for the server to respond to its requests. A default is used
+if this value is zero.
+<p><li>The server timeout. This is the number of seconds the server
+will allow client resources to remain idle before releasing those
+resources. The resources this applies to are transactions and cursors,
+as those objects hold locks and if a client dies, the server needs to
+release those resources in a timely manner. This value
+is really a hint to the server, as the server may choose to override this
+value with its own.
+</ol>
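+<p>As a rough sketch (not itself part of the Berkeley DB distribution or manual
+pages), the client-side setup described above might look as follows; the
+hostname "dbserver.example.com", the environment name "myenv", the timeout
+values and the environment open flags are all placeholders to be replaced
+with whatever your application actually requires:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+<p>
+int
+main()
+{
+    DB_ENV *dbenv;
+    int ret;
+<p>
+    /* Create the environment handle in client mode. */
+    if ((ret = db_env_create(&dbenv, DB_CLIENT)) != 0) {
+        fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
+        exit (1);
+    }
+<p>
+    /*
+     * Name the server before opening the environment: hostname,
+     * client timeout (seconds), server timeout (seconds), flags.
+     */
+    if ((ret = dbenv-&gt;set_server(dbenv,
+        "dbserver.example.com", 120, 1200, 0)) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DBENV-&gt;set_server");
+        exit (1);
+    }
+<p>
+    /* Open the environment by name (see the discussion of home directories below). */
+    if ((ret = dbenv-&gt;open(dbenv, "myenv",
+        DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DBENV-&gt;open: myenv");
+        exit (1);
+    }
+<p>
+    /* Create and use DB handles as usual; the calls are forwarded to the server. */
+<p>
+    (void)dbenv-&gt;close(dbenv, 0);
+    return (0);
+}
+</pre></blockquote>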
+<p>The only other item of interest to the client is the home directory
+that is given to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> call.
+The server is started with a list of allowed home directories.
+The client must use one of those names (where a name is the last
+component of the home directory). This allows the pathname structure
+on the server to change without client applications needing to be
+aware of it.
+<p>Once the <a href="../../api_c/env_set_server.html">DBENV-&gt;set_server</a> call has been made, the client is
+connected to the server and all subsequent Berkeley DB
+operations will be forwarded to the server. The client does not need to
+be otherwise aware that it is using a database server rather than
+accessing the database locally.
+<p>It is important to realize that the client portion of the Berkeley DB library
+acts as a simple conduit, forwarding Berkeley DB interface arguments to the
+server without interpretation. This has two important implications.
+First, all pathnames must be specified relative to the server. For
+example, the home directory and other configuration information passed by
+the application when creating its environment or databases must be
+pathnames for the server, not the client system. Second, because there
+is no logical bundling of operations at the server, performance is usually
+significantly less than when Berkeley DB is embedded within the client's address
+space, even if the RPC is to a local address.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/rpc/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/server.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/rpc/intro.html b/bdb/docs/ref/rpc/intro.html
new file mode 100644
index 00000000000..25e4f4aea61
--- /dev/null
+++ b/bdb/docs/ref/rpc/intro.html
@@ -0,0 +1,62 @@
+<!--$Id: intro.so,v 1.6 2000/12/04 21:51:04 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>RPC Client/Server</dl></h3></td>
+<td width="1%"><a href="../../ref/txn/other.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/client.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>Berkeley DB includes a basic implementation of a client-server protocol, using
+Sun Microsystems' Remote Procedure Call Protocol. RPC support is only
+available for UNIX systems, and is not included in the Berkeley DB library by
+default, but must be enabled during configuration. See
+<a href="../../ref/build_unix/conf.html">Configuring Berkeley DB</a> for more
+information. For more information on RPC itself, see your UNIX system
+documentation or <i>RPC: Remote Procedure Call Protocol
+Specification, RFC1832, Sun Microsystems, Inc., USC-ISI</i>.
+<p>Only some of the complete Berkeley DB functionality is available when using RPC.
+The following functionality is available:
+<p><ol>
+<li>The <a href="../../api_c/env_create.html">db_env_create</a> interface and the DB_ENV
+handle methods.
+<li>The <a href="../../api_c/db_create.html">db_create</a> interface and the DB handle
+methods.
+<li>The <a href="../../api_c/txn_begin.html">txn_begin</a>, <a href="../../api_c/txn_commit.html">txn_commit</a> and
+<a href="../../api_c/txn_abort.html">txn_abort</a> interfaces.
+</ol>
+<p>The RPC client/server code does not support any of the user-defined
+comparison or allocation functions, e.g., an application using the RPC
+support may not specify its own Btree comparison function. If your
+application only requires those portions of Berkeley DB, then using RPC is
+fairly simple. If your application requires other Berkeley DB functionality,
+such as direct access to locking, logging, or shared memory buffer
+pools, then your application cannot use the RPC support.
+<p><b>The Berkeley DB RPC support does not provide any security or authentication of
+any kind.</b> Sites needing any kind of data security measures must modify
+the client and server code to provide whatever level of security they
+require.
+<p>One particularly interesting use of the RPC support is for debugging Berkeley DB
+applications. The seamless nature of the interface means that with very
+minor application code changes, an application can run outside of the
+Berkeley DB address space, making it far easier to track down many types of
+errors such as memory misuse.
+<p>Using the RPC mechanisms in Berkeley DB involves two basic steps:
+<p><ol>
+<p><li>Modify your Berkeley DB application to act as a client and call the
+RPC server.
+<li>Run the <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> server program on the system
+where the database resides.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/txn/other.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/client.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/rpc/server.html b/bdb/docs/ref/rpc/server.html
new file mode 100644
index 00000000000..64572a90d30
--- /dev/null
+++ b/bdb/docs/ref/rpc/server.html
@@ -0,0 +1,54 @@
+<!--$Id: server.so,v 1.6 2000/03/18 21:43:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Server program</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>RPC Client/Server</dl></h3></td>
+<td width="1%"><a href="../../ref/rpc/client.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/conf.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Server program</h1>
+<p>The Berkeley DB server utility, <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a>, handles all of the
+client application requests.
+<p>Currently, the <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> utility is single-threaded,
+limiting the number of requests that it can handle. Modifying the server
+implementation to run in multi-thread or multi-process mode will require
+modification of the server code automatically generated by the rpcgen
+program.
+<p>There are two different types of timeouts used by <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a>.
+The first timeout (which can be modified within some constraints by the
+client application) is the resource timeout. When clients use
+transactions or cursors, those resources hold locks in Berkeley DB across calls
+to the server. If a client application dies or loses its connection to
+the server while holding those resources, it prevents any other client
+from acquiring them. Therefore, it is important to detect that a client
+has not used a resource for some period of time and to release it. In the
+case of transactions, the server aborts the transaction. In the case of
+cursors, the server closes the cursor.
+<p>The second timeout is an idle timeout. A client application may remain
+idle with an open handle to an environment and a database. Doing so
+simply consumes some memory; it does not hold locks. However, the Berkeley DB
+server may want to eventually reclaim resources if a client dies or
+remains disconnected for a long period of time, so there is a separate
+idle timeout for open Berkeley DB handles.
+<p>The home directories specified to <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> are the
+only ones client applications are allowed to use. When
+<a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> is started, it is given a list of pathnames.
+Clients are expected to specify the name of the home directory (defined
+as the last component in the directory pathname) as the database
+environment they are opening. In this manner, clients need only know the
+name of their home environment, and not its full pathname on the server
+machine. This means, of course, that only one environment of a particular
+name is allowed on the server at any given time.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/rpc/client.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/conf.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/sendmail/intro.html b/bdb/docs/ref/sendmail/intro.html
new file mode 100644
index 00000000000..9dc1b4a141e
--- /dev/null
+++ b/bdb/docs/ref/sendmail/intro.html
@@ -0,0 +1,51 @@
+<!--$Id: intro.so,v 10.20 2001/01/09 18:48:06 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Using Berkeley DB with Sendmail</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Sendmail</dl></h3></td>
+<td width="1%"><a href="../../ref/tcl/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/utility.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Using Berkeley DB with Sendmail</h1>
+<p>If you are attempting to use Berkeley DB with Sendmail 8.8.X, you must use
+Berkeley DB version 1.85 (see the Sleepycat Software web site's
+<a href="http://www.sleepycat.com/historic.html">historic releases</a>
+of Berkeley DB page for more information).
+<p>Berkeley DB versions 2.0 and later are only supported by Sendmail versions 8.9.X
+and later.
+<p>Berkeley DB versions 3.0 and later are only supported by Sendmail versions
+8.10.X and later.
+<p>We strongly recommend that you not use Berkeley DB version 1.85. It is no longer
+maintained or supported and has known bugs that can cause Sendmail to
+fail. Instead, please upgrade to Sendmail version 8.9.X or later and use
+a later version of Berkeley DB. For more information on using Berkeley DB with
+Sendmail, please review the README and src/README files in the Sendmail
+distribution.
+<p>To link Sendmail against Berkeley DB, add the following lines to
+BuildTools/Site/site.config.m4:
+<p><blockquote><pre>APPENDDEF(`confINCDIRS', `-I/usr/local/BerkeleyDB/include')
+APPENDDEF(`confLIBDIRS', `-L/usr/local/BerkeleyDB/lib')</pre></blockquote>
+<p>where those are the directories containing the db.h include file and the libdb.a library, respectively.
+Then, run "Build -c" from the src directory.
+<p>Note that this Build script will use -DNEWDB on the compiles
+and -L/path/to/libdb/directory -ldb on the link if it can find libdb.a;
+the search path is $LIBDIRS:/lib:/usr/lib:/usr/shlib. $LIBDIRS is
+NULL by default for most systems, but some set it in BuildTools/OS/foo.
+Anyone can append to it as above (confLIBDIRS is the m4 variable name;
+LIBDIRS is the shell-script variable name).
+<p>To download Sendmail, or to obtain more information on Sendmail, see the
+<a href="http://www.sendmail.org">Sendmail home page</a>, which includes
+FAQ pages and addresses for reporting problems.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/tcl/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/utility.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/close.html b/bdb/docs/ref/simple_tut/close.html
new file mode 100644
index 00000000000..a268a591c7d
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/close.html
@@ -0,0 +1,102 @@
+<!--$Id: close.so,v 10.22 2000/12/18 21:05:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Closing a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/del.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Closing a database</h1>
+<p>The only other operation that we need for our simple example is closing
+the database, and cleaning up the DB handle.
+<p>It is necessary that the database be closed. The most important reason
+for this is that Berkeley DB runs on top of an underlying buffer cache. If
+the modified database pages are never explicitly flushed to disk and
+the database is never closed, changes made to the database may never
+make it out to disk, because they are held in the Berkeley DB cache. As the
+default behavior of the close function is to flush the Berkeley DB cache,
+closing the database will update the on-disk information.
+<p>The <a href="../../api_c/db_close.html">DB-&gt;close</a> interface takes two arguments:
+<p><dl compact>
+<p><dt>db<dd>The database handle returned by <a href="../../api_c/db_create.html">db_create</a>.
+<p><dt>flags<dd>Optional flags modifying the underlying behavior of the <a href="../../api_c/db_close.html">DB-&gt;close</a>
+interface.
+</dl>
+<p>Here's what the code to call <a href="../../api_c/db_close.html">DB-&gt;close</a> looks like:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ DBT key, data;
+ <b>int ret, t_ret;</b>
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(
+ dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+<p>
+ if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+ }
+<p>
+ if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;get");
+ goto err;
+ }
+<p>
+ if ((ret = dbp-&gt;del(dbp, NULL, &key, 0)) == 0)
+ printf("db: %s: key was deleted.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;del");
+ goto err;
+ }
+<p>
+ if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else
+ dbp-&gt;err(dbp, ret, "DB-&gt;get");
+<p><b>err: if ((t_ret = dbp-&gt;close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret; </b>
+<p>
+ exit(ret);
+}
+</pre></blockquote>
+<p>Note that we do not necessarily overwrite the <b>ret</b> variable, as it
+may contain error return information from a previous Berkeley DB call.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/del.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/del.html b/bdb/docs/ref/simple_tut/del.html
new file mode 100644
index 00000000000..ac4d4126033
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/del.html
@@ -0,0 +1,93 @@
+<!--$Id: del.so,v 10.20 2000/03/18 21:43:17 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Removing elements from a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/get.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Removing elements from a database</h1>
+<p>The simplest way to remove elements from a database is the <a href="../../api_c/db_del.html">DB-&gt;del</a>
+interface.
+<p>The <a href="../../api_c/db_del.html">DB-&gt;del</a> interface takes four of the same five arguments that
+the <a href="../../api_c/db_get.html">DB-&gt;get</a> and <a href="../../api_c/db_put.html">DB-&gt;put</a> interfaces take. The difference
+is that there is no need to specify a data item, as the delete operation
+is only interested in the key that you want to remove.
+<p><dl compact>
+<p><dt>db<dd>The database handle returned by <a href="../../api_c/db_create.html">db_create</a>.
+<p><dt>txnid<dd>A transaction ID.
+In our simple case, we aren't expecting to recover the database after
+application or system crash, so we aren't using transactions, and will
+leave this argument NULL.
+<p><dt>key<dd>The key item for the key/data pair that we want to delete from the
+database.
+<p><dt>flags<dd>Optional flags modifying the underlying behavior of the <a href="../../api_c/db_del.html">DB-&gt;del</a>
+interface. There are currently no available flags for this interface,
+so the flags argument should always be set to 0.
+</dl>
+<p>Here's what the code to call <a href="../../api_c/db_del.html">DB-&gt;del</a> looks like:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(
+ dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+<p>
+ if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+ }
+<p>
+ if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;get");
+ goto err;
+ }
+<p><b> if ((ret = dbp-&gt;del(dbp, NULL, &key, 0)) == 0)
+ printf("db: %s: key was deleted.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;del");
+ goto err;
+ }
+</b></pre></blockquote>
+<p>After the <a href="../../api_c/db_del.html">DB-&gt;del</a> call returns, the entry referenced by the key
+fruit has been removed from the database.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/get.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/errors.html b/bdb/docs/ref/simple_tut/errors.html
new file mode 100644
index 00000000000..bb7e8a67184
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/errors.html
@@ -0,0 +1,46 @@
+<!--$Id: errors.so,v 10.19 2000/12/14 21:42:18 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Error returns</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/handles.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Error returns</h1>
+<p>The Berkeley DB interfaces always return a value of 0 on success. If the
+operation does not succeed for any reason, the return value will be
+non-zero.
+<p>If a system error occurred (e.g., Berkeley DB ran out of disk space, or
+permission to access a file was denied, or an illegal argument was
+specified to one of the interfaces), Berkeley DB returns an <b>errno</b>
+value. All of the possible values of <b>errno</b> are greater than
+0.
+<p>If the operation didn't fail due to a system error, but wasn't
+successful either, Berkeley DB returns a special error value. For example,
+if you tried to retrieve the data item associated with the key
+<b>fruit</b>, and there was no such key/data pair in the database,
+Berkeley DB would return <a href="../../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>, a special error value that means
+the requested key does not appear in the database. All of the possible
+special error values are less than 0.
+<p>Berkeley DB also offers programmatic support for displaying error return values.
+First, the <a href="../../api_c/env_strerror.html">db_strerror</a> interface returns a pointer to the error
+message corresponding to any Berkeley DB error return, similar to the ANSI C
+strerror interface, but is able to handle both system error returns and
+Berkeley DB-specific return values.
+<p>Second, there are two error functions, <a href="../../api_c/db_err.html">DB-&gt;err</a> and <a href="../../api_c/db_err.html">DB-&gt;errx</a>.
+These functions work like the ANSI C printf interface, taking a
+printf-style format string and argument list, and optionally appending
+the standard error string to a message constructed from the format string
+and other arguments.
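+<p>As a small, hedged fragment (assuming a database handle <b>dbp</b> and key and
+data items that have already been initialized, as in the example program
+developed later in this tutorial), the three kinds of return value and both
+error-reporting styles might be used together as follows:
+<p><blockquote><pre>int ret;
+<p>
+switch (ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) {
+case 0:
+    /* Success: the key was found. */
+    printf("db: %s: key retrieved.\n", (char *)key.data);
+    break;
+case DB_NOTFOUND:
+    /* A special error value: not a system error, the key is simply absent. */
+    printf("db: %s: key not found.\n", (char *)key.data);
+    break;
+default:
+    /* A system error: report it (both styles shown; normally pick one). */
+    fprintf(stderr, "DB-&gt;get: %s\n", db_strerror(ret));
+    dbp-&gt;err(dbp, ret, "DB-&gt;get: %s", (char *)key.data);
+    break;
+}
+</pre></blockquote>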
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/handles.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/example.txt b/bdb/docs/ref/simple_tut/example.txt
new file mode 100644
index 00000000000..e610648d1c1
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/example.txt
@@ -0,0 +1,73 @@
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+
+#define DATABASE "access.db"
+
+int
+main()
+{
+ DB *dbp;
+ DBT key, data;
+ int ret, t_ret;
+
+ /* Create the database handle and open the underlying database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret =
+ dbp->open(dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+
+ /* Initialize key/data structures. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+
+ /* Store a key/data pair. */
+ if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp->err(dbp, ret, "DB->put");
+ goto err;
+ }
+
+ /* Retrieve a key/data pair. */
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else {
+ dbp->err(dbp, ret, "DB->get");
+ goto err;
+ }
+
+ /* Delete a key/data pair. */
+ if ((ret = dbp->del(dbp, NULL, &key, 0)) == 0)
+ printf("db: %s: key was deleted.\n", (char *)key.data);
+ else {
+ dbp->err(dbp, ret, "DB->del");
+ goto err;
+ }
+
+ /* Retrieve a key/data pair. */
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else
+ dbp->err(dbp, ret, "DB->get");
+
+err: if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ exit(ret);
+}
diff --git a/bdb/docs/ref/simple_tut/get.html b/bdb/docs/ref/simple_tut/get.html
new file mode 100644
index 00000000000..697aa8f511c
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/get.html
@@ -0,0 +1,97 @@
+<!--$Id: get.so,v 10.23 2000/12/14 21:42:18 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving elements from a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/del.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving elements from a database</h1>
+<p>The simplest way to retrieve elements from a database is the
+<a href="../../api_c/db_get.html">DB-&gt;get</a> interface.
+<p>The <a href="../../api_c/db_get.html">DB-&gt;get</a> interface takes the same five arguments that the
+<a href="../../api_c/db_put.html">DB-&gt;put</a> interface takes:
+<p><dl compact>
+<p><dt>db<dd>The database handle returned by <a href="../../api_c/db_create.html">db_create</a>.
+<p><dt>txnid<dd>A transaction ID. In our simple case, we aren't expecting to recover
+the database after application or system crash, so we aren't using
+transactions, and will leave this argument NULL.
+<p><dt>key<dd>The key item for the key/data pair that we want to retrieve from the
+database.
+<p><dt>data<dd>The data item for the key/data pair that we want to retrieve from the
+database.
+<p><dt>flags<dd>Optional flags modifying the underlying behavior of the <a href="../../api_c/db_get.html">DB-&gt;get</a>
+interface.
+</dl>
+<p>Here's what the code to call <a href="../../api_c/db_get.html">DB-&gt;get</a> looks like:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(
+ dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+<p>
+ if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+ }
+<p><b> if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;get");
+ goto err;
+ }
+</b></pre></blockquote>
+<p>It is not usually necessary to clear the <a href="../../api_c/dbt.html">DBT</a> structures passed
+to the Berkeley DB functions between calls. However, this is not true when
+some of the less commonly used flags for <a href="../../api_c/dbt.html">DBT</a> structures are
+used. The <a href="../../api_c/dbt.html">DBT</a> manual page specifies the details of those cases.
+<p>It is possible, of course, to distinguish between system errors and the
+key/data pair simply not existing in the database. There are three
+standard returns from <a href="../../api_c/db_get.html">DB-&gt;get</a>:
+<p><ol>
+<p><li>The call might be successful and the key found, in which case the return
+value will be 0.
+<li>The call might be successful, but the key not found, in which case the
+return value will be <a href="../../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<li>The call might not be successful, in which case the return value will
+be a system error.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/del.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/handles.html b/bdb/docs/ref/simple_tut/handles.html
new file mode 100644
index 00000000000..2396a224ee9
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/handles.html
@@ -0,0 +1,29 @@
+<!--$Id: handles.so,v 10.8 2000/03/18 21:43:17 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Handles</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/keydata.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/errors.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Handles</h1>
+<p>With a few minor exceptions, Berkeley DB functionality is accessed by creating
+a structure and then calling functions that are fields in that structure.
+This is, of course, similar to object-oriented concepts of instances and
+methods on them. For simplicity, we will often refer to these structure
+fields as methods of the handle.
+<p>The manual pages will show these methods as C structure references. For
+example, the open-a-database method for a database handle is represented
+as <a href="../../api_c/db_open.html">DB-&gt;open</a>.
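+<p>For example (a fragment only; the calls shown here are introduced in detail
+in the sections that follow), an application creates a database handle with
+<a href="../../api_c/db_create.html">db_create</a> and then calls the handle's own open method:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+/* db_create fills in the handle; the handle's fields are its methods. */
+if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+    fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+    exit (1);
+}
+<p>
+/* The DB-&gt;open method is called through the handle itself. */
+if ((ret = dbp-&gt;open(dbp, "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+    dbp-&gt;err(dbp, ret, "DB-&gt;open: access.db");
+</pre></blockquote>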
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/keydata.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/errors.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/intro.html b/bdb/docs/ref/simple_tut/intro.html
new file mode 100644
index 00000000000..a9b6f648cf5
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/intro.html
@@ -0,0 +1,40 @@
+<!--$Id: intro.so,v 10.20 2000/12/04 18:05:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/intro/products.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/keydata.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>As an introduction to Berkeley DB, we will present a few Berkeley DB programming
+concepts, and then a simple database application.
+<p>The programming concepts are:
+<ul type=disc>
+<li><a href="keydata.html">Key/data pairs</a>
+<li><a href="handles.html">Object handles</a>
+<li><a href="errors.html">Error returns</a>
+</ul>
+<p>This database application will:
+<ul type=disc>
+<li><a href="open.html">Create a simple database</a>
+<li><a href="put.html">Store items</a>
+<li><a href="get.html">Retrieve items</a>
+<li><a href="del.html">Remove items</a>
+<li><a href="close.html">Close the database</a>
+</ul>
+<p>The introduction will be presented using the programming language C. The
+<a href="example.txt">complete source</a> of the final version of the
+example program is included in the Berkeley DB distribution.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/intro/products.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/keydata.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/keydata.html b/bdb/docs/ref/simple_tut/keydata.html
new file mode 100644
index 00000000000..38d34aebc5a
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/keydata.html
@@ -0,0 +1,48 @@
+<!--$Id: keydata.so,v 10.19 2000/12/14 21:42:18 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Key/data pairs</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/handles.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Key/data pairs</h1>
+<p>Berkeley DB uses key/data pairs to identify elements in the database.
+That is, in the general case, whenever you call a Berkeley DB interface,
+you present a key to identify the key/data pair on which you intend
+to operate.
+<p>For example, you might store some key/data pairs as follows:
+<p><table border=1>
+<tr><th>Key:</th><th>Data:</th></tr>
+<tr><td>fruit</td><td>apple</td></tr>
+<tr><td>sport</td><td>cricket</td></tr>
+<tr><td>drink</td><td>water</td></tr>
+</table>
+<p>In each case, the first element of the pair is the key, and the second is
+the data. To store the first of these key/data pairs into the database,
+you would call the Berkeley DB interface to store items, with <b>fruit</b> as
+the key, and <b>apple</b> as the data. At some future time, you could
+then retrieve the data item associated with <b>fruit</b>, and the Berkeley DB
+retrieval interface would return <b>apple</b> to you. While there are
+many variations and some subtleties, all accesses to data in Berkeley DB come
+down to key/data pairs.
+<p>Both key and data items are stored in simple structures (called
+<a href="../../api_c/dbt.html">DBT</a>s) that contain a reference to memory and a length, counted
+in bytes. (The name <a href="../../api_c/dbt.html">DBT</a> is an acronym for <i>database
+thang</i>, chosen because nobody could think of a sensible name that wasn't
+already in use somewhere else.) Key and data items can be arbitrary
+binary data of practically any length, including 0 bytes. There is a
+single data item for each key item, by default, but databases can be
+configured to support multiple data items for each key item.
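+<p>For example, here is a minimal sketch of the <a href="../../api_c/dbt.html">DBT</a> structures for the
+<b>fruit</b>/<b>apple</b> pair shown above; the same structures appear in
+the example program later in this tutorial:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+main()
+{
+	DBT key, data;
+
+	/* Clear the structures before use. */
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+
+	/* Each DBT is a reference to memory plus a length, in bytes. */
+	key.data = "fruit";
+	key.size = sizeof("fruit");
+	data.data = "apple";
+	data.size = sizeof("apple");
+	return (0);
+}</pre></blockquote>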
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/handles.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/open.html b/bdb/docs/ref/simple_tut/open.html
new file mode 100644
index 00000000000..24df8a8e17f
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/open.html
@@ -0,0 +1,90 @@
+<!--$Id: open.so,v 10.27 2000/12/14 21:42:18 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/errors.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening a database</h1>
+<p>Opening a database is done in two steps: first, a DB handle is
+created using the Berkeley DB <a href="../../api_c/db_create.html">db_create</a> interface, and then the
+actual database is opened using the <a href="../../api_c/db_open.html">DB-&gt;open</a> function.
+<p>The <a href="../../api_c/db_create.html">db_create</a> interface takes three arguments:
+<p><dl compact>
+<p><dt>dbp<dd>A location to store a reference to the created structure.
+<p><dt>environment<dd>A location to specify an enclosing Berkeley DB environment, not used in our
+example.
+<p><dt>flags<dd>A placeholder for flags, not used in our example.
+</dl>
+<p>The <a href="../../api_c/db_open.html">DB-&gt;open</a> interface takes five arguments:
+<p><dl compact>
+<p><dt>file<dd>The name of the database file to be opened.
+<p><dt>database<dd>The optional database name, not used in this example.
+<p><dt>type<dd>The type of database to open. This value will be one of the four access
+methods Berkeley DB supports: DB_BTREE, DB_HASH, DB_QUEUE or DB_RECNO, or the
+special value DB_UNKNOWN, which allows you to open an existing file
+without knowing its type.
+<p><dt>flags<dd>Various flags that modify the behavior of <a href="../../api_c/db_open.html">DB-&gt;open</a>. In our
+simple case, the only interesting flag is <a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a>. This flag
+behaves similarly to the IEEE/ANSI Std 1003.1 (POSIX) O_CREAT flag to the open system
+call, causing Berkeley DB to create the underlying database if it does not
+yet exist.
+<p><dt>mode<dd>The file mode of any underlying files that <a href="../../api_c/db_open.html">DB-&gt;open</a> will create.
+The mode behaves as does the IEEE/ANSI Std 1003.1 (POSIX) mode argument to the open
+system call, and specifies file read, write and execute permissions.
+Of course, only the read and write permissions are relevant to Berkeley DB.
+</dl>
+<p>Here's what the code to create the handle and then call <a href="../../api_c/db_open.html">DB-&gt;open</a>
+looks like:
+<p><blockquote><pre><b>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ int ret;
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(
+ dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }</b>
+</pre></blockquote>
+<p>If the call to <a href="../../api_c/db_create.html">db_create</a> is successful, the variable <b>dbp</b>
+will contain a database handle that will be used to configure and access
+an underlying database.
+<p>As you see, the program opens a database named <b>access.db</b>. The
+underlying database is a Btree. Because the <a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a> flag was
+specified, the file will be created if it does not already exist. The
+mode of any created files will be 0664 (i.e., readable and writeable by
+the owner and the group, and readable by everyone else).
+<p>One additional function call is used in this code sample, <a href="../../api_c/db_err.html">DB-&gt;err</a>.
+This method works like the ANSI C printf interface. The second argument
+is the error return from a Berkeley DB function, and the rest of the arguments
+are a printf-style format string and argument list. The error message
+associated with the error return will be appended to a message constructed
+from the format string and other arguments. In the above code, if the
+<a href="../../api_c/db_open.html">DB-&gt;open</a> call were to fail, the message it would display would be
+something like
+<p><blockquote><pre>access.db: Operation not permitted</pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/errors.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/simple_tut/put.html b/bdb/docs/ref/simple_tut/put.html
new file mode 100644
index 00000000000..8ecdfa6cabf
--- /dev/null
+++ b/bdb/docs/ref/simple_tut/put.html
@@ -0,0 +1,127 @@
+<!--$Id: put.so,v 10.31 2000/12/18 21:05:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Adding elements to a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td width="1%"><a href="../../ref/simple_tut/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/get.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Adding elements to a database</h1>
+<p>The simplest way to add elements to a database is the <a href="../../api_c/db_put.html">DB-&gt;put</a>
+interface.
+<p>The <a href="../../api_c/db_put.html">DB-&gt;put</a> interface takes five arguments:
+<p><dl compact>
+<p><dt>db<dd>The database handle returned by <a href="../../api_c/db_create.html">db_create</a>.
+<p><dt>txnid<dd>A transaction handle. In our simple case, we aren't expecting to
+recover the database after application or system crash, so we aren't
+using transactions, and will leave this argument NULL.
+<p><dt>key<dd>The key item for the key/data pair that we want to add to the database.
+<p><dt>data<dd>The data item for the key/data pair that we want to add to the database.
+<p><dt>flags<dd>Optional flags modifying the underlying behavior of the <a href="../../api_c/db_put.html">DB-&gt;put</a>
+interface.
+</dl>
+<p>Here's what the code to call <a href="../../api_c/db_put.html">DB-&gt;put</a> looks like:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ <b>DBT key, data;</b>
+ int ret;
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(
+ dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+<p><b> memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+<p>
+ if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+ }
+</b></pre></blockquote>
+<p>The first thing to notice about this new code is that we clear the
+<a href="../../api_c/dbt.html">DBT</a> structures that we're about to pass as arguments to Berkeley DB
+functions. This is very important, and being careful to do so will
+result in fewer errors in your programs. All Berkeley DB structures
+instantiated in the application and handed to Berkeley DB should be cleared
+before use, without exception. This is necessary so that future
+versions of Berkeley DB may add additional fields to the structures. If
+applications clear the structures before use, it will be possible for
+Berkeley DB to change those structures without requiring that the applications
+be rewritten to be aware of the changes.
+<p>Notice also that we're storing the trailing nul byte found in the C
+strings <b>"fruit"</b> and <b>"apple"</b> in both the key and data
+items, that is, the trailing nul byte is part of the stored key, and
+therefore has to be specified in order to access the data item. There is
+no requirement to store the trailing nul byte; it simply makes it easier
+for us to display strings that we've stored in programming languages that
+use nul bytes to terminate strings.
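+<p>For example, a sketch of the same assignments storing the strings
+without their trailing nul bytes would use strlen (declared in
+&lt;string.h&gt;) in place of sizeof:
+<p><blockquote><pre>key.data = "fruit";
+key.size = strlen("fruit");	/* 5 bytes: no trailing nul stored. */
+data.data = "apple";
+data.size = strlen("apple");	/* 5 bytes: no trailing nul stored. */</pre></blockquote>
+<p>In that case, the stored key would be the 5-byte string <b>fruit</b>,
+and any later retrieval would have to specify exactly those 5 bytes.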
+<p>In many applications, it is important not to overwrite existing
+data. For example, we might not want to store the key/data pair
+<b>fruit/apple</b> if it already existed, e.g., if someone had
+previously stored the key/data pair <b>fruit/cherry</b> into the
+database.
+<p>This is easily accomplished by adding the <a href="../../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a> flag to
+the <a href="../../api_c/db_put.html">DB-&gt;put</a> call:
+<p><blockquote><pre><b>if ((ret =
+ dbp-&gt;put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+}</b></pre></blockquote>
+<p>This flag causes the underlying database functions to not overwrite any
+previously existing key/data pair. (Note that the value of the previously
+existing data doesn't matter in this case. The only question is whether a
+key/data pair already exists where the key matches the key that we are
+trying to store.)
+<p>Specifying <a href="../../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a> opens up the possibility of a new
+Berkeley DB return value from the <a href="../../api_c/db_put.html">DB-&gt;put</a> function, <a href="../../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a>,
+which means we were unable to add the key/data pair to the database
+because the key already existed in the database. The above sample
+code simply displays a message in this case:
+<p><blockquote><pre>DB-&gt;put: DB_KEYEXIST: Key/data pair already exists</pre></blockquote>
+<p>The following code shows an explicit check for this possibility:
+<p><blockquote><pre><b>switch (ret =
+ dbp-&gt;put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+case 0:
+ printf("db: %s: key stored.\n", (char *)key.data);
+ break;
+case DB_KEYEXIST:
+ printf("db: %s: key previously stored.\n",
+ (char *)key.data);
+ break;
+default:
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+}</b></pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/simple_tut/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/get.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/tcl/error.html b/bdb/docs/ref/tcl/error.html
new file mode 100644
index 00000000000..3d1de037d52
--- /dev/null
+++ b/bdb/docs/ref/tcl/error.html
@@ -0,0 +1,69 @@
+<!--$Id: error.so,v 11.13 2001/01/09 18:48:06 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Tcl error handling</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl</dl></h3></td>
+<td width="1%"><a href="../../ref/tcl/program.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Tcl error handling</h1>
+<p>The Tcl interfaces to Berkeley DB generally return TCL_OK on success and throw
+a Tcl error on failure, using the appropriate Tcl interfaces to provide
+the user with an informative error message. There are some "expected"
+failures, however, for which no Tcl error will be thrown and for which
+Tcl commands will return TCL_OK. These failures include when a
+searched-for key is not found, a requested key/data pair was previously
+deleted, or a key/data pair cannot be written because the key already
+exists.
+<p>These failures can be detected by searching the Berkeley DB error message that
+is returned. For example, to detect that an attempt to put a record into
+the database failed because the key already existed:
+<p><blockquote><pre>% berkdb open -create -btree a.db
+db0
+% db0 put dog cat
+0
+% set ret [db0 put -nooverwrite dog newcat]
+DB_KEYEXIST: Key/data pair already exists
+% if { [string first DB_KEYEXIST $ret] != -1 } {
+ puts "This was an error; the key existed"
+}
+This was an error; the key existed
+% db0 close
+0
+% exit</pre></blockquote>
+<p>To simplify parsing, it is recommended that the initial Berkeley DB error name
+be checked, e.g., DB_KEYEXIST in the above example. These values will
+not change in future releases of Berkeley DB to ensure that Tcl scripts are not
+broken by upgrading to new releases of Berkeley DB. There are currently only
+three such "expected" error returns. They are:
+<p><blockquote><pre>DB_NOTFOUND: No matching key/data pair found
+DB_KEYEMPTY: Non-existent key/data pair
+DB_KEYEXIST: Key/data pair already exists</pre></blockquote>
+<p>Finally, in some cases, when a Berkeley DB error occurs, Berkeley DB will output
+additional error information. By default, all Berkeley DB error messages will
+be prefixed with the created command in whose context the error occurred
+(e.g., "env0", "db2", etc.). There are several ways to capture and
+access this information.
+<p>First, if Berkeley DB invokes the error callback function, the additional
+information will be placed in the error result returned from the
+command and in the errorInfo backtrace variable in Tcl.
+<p>Also, the two calls to open an environment and
+open a database take an option, <b>-errfile filename</b>, which sets an
+output file to which these additional error messages should be written.
+<p>Additionally, the two calls to open an environment and
+open a database take an option, <b>-errpfx string</b>, which sets the
+error prefix to the given string. This option may be useful
+when a more descriptive prefix, or a constant prefix indicating
+an error, is desired.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/tcl/program.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/tcl/faq.html b/bdb/docs/ref/tcl/faq.html
new file mode 100644
index 00000000000..29f63b42385
--- /dev/null
+++ b/bdb/docs/ref/tcl/faq.html
@@ -0,0 +1,60 @@
+<!--$Id: faq.so,v 11.2 2001/01/15 17:50:48 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Frequently Asked Questions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a> <a name="3"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl API</dl></h3></td>
+<td width="1%"><a href="../../ref/tcl/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/sendmail/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Frequently Asked Questions</h1>
+<p><ol>
+<p><li><b>I have several versions of Tcl installed. How do I configure
+Berkeley DB to use a particular version?</b>
+<p>To compile the Tcl interface with a particular version of Tcl, use the
+--with-tcl option to specify the Tcl installation directory that contains
+the tclConfig.sh file.
+<p>See <a href="../../ref/build_unix/flags.html">Changing compile or load options</a>
+for more information.
+<hr size=1 noshade>
+<p><li><b>Berkeley DB was configured using --enable-tcl or --with-tcl and fails
+to build.</b>
+<p>The Berkeley DB Tcl interface requires Tcl version 8.1 or greater. You can
+download a copy of Tcl from the
+<a href="http://www.ajubasolutions.com/home.html">Ajuba Solutions</a>
+corporate web site.
+<hr size=1 noshade>
+<p><li><b>Berkeley DB was configured using --enable-tcl or --with-tcl and fails
+to build.</b>
+<p>If the Tcl installation was moved after it was configured and installed,
+try re-configuring and re-installing Tcl.
+<p>Also, some systems do not search for shared libraries by default, or do
+not search for shared libraries named the way the Tcl installation names
+them, or are searching for a different kind of library than those in
+your Tcl installation. For example, Linux systems often require linking
+"libtcl.a" to "libtcl#.#.a", while AIX systems often require adding the
+"-brtl" flag to the linker. A simpler solution that almost always works
+on all systems is to create a link from "libtcl.#.#.a" or "libtcl.so"
+(or whatever you happen to have) to "libtcl.a" and reconfigure.
+<hr size=1 noshade>
+<p><li><b>Loading the Berkeley DB library into Tcl on AIX causes a core dump.</b>
+<p>In some versions of Tcl, the "tclConfig.sh" autoconfiguration script
+created by the Tcl installation does not work properly under AIX. To
+build a working Berkeley DB Tcl API when this happens, use the "--enable-tcl"
+flag to configure Berkeley DB (rather than "--with-tcl"). In addition, you
+will have to specify any necessary include and library paths and linker
+flags needed to build with Tcl by setting the CPPFLAGS, LIBS and LDFLAGS
+environment variables before running configure.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/tcl/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/sendmail/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/tcl/intro.html b/bdb/docs/ref/tcl/intro.html
new file mode 100644
index 00000000000..6484eaac6b8
--- /dev/null
+++ b/bdb/docs/ref/tcl/intro.html
@@ -0,0 +1,70 @@
+<!--$Id: intro.so,v 11.14 2000/12/04 20:49:18 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Loading Berkeley DB with Tcl</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl</dl></h3></td>
+<td width="1%"><a href="../../ref/perl/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/using.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Loading Berkeley DB with Tcl</h1>
+<p>Berkeley DB includes a dynamically loadable Tcl API. The Tcl API requires that
+Tcl/Tk 8.1 or later already be installed on your system. We recommend
+that you install later releases of Tcl/Tk than 8.1, if possible,
+especially on Windows platforms, as we found that we had to make local
+fixes to the 8.1 release in a few cases. You can download a copy of
+Tcl from the <a href="http://www.ajubasolutions.com/home.html">Ajuba
+Solutions</a> corporate web site.
+<p>This document assumes that you have already configured Berkeley DB for Tcl
+support and you have built and installed everything where you want it
+to be. If you have not done so, see
+<a href="../../ref/build_unix/conf.html">Configuring Berkeley DB</a> or
+<a href="../../ref/build_win/intro.html">Building for Win32</a> for more
+information.
+<h3>Installing as a Tcl Package</h3>
+<p>Once enabled, the Berkeley DB shared library for Tcl is automatically installed
+as part of the standard installation process. However, if you wish to be
+able to dynamically load it as a Tcl package into your script, there are
+several steps that must be performed:
+<p><ol>
+<p><li>Run the Tcl shell in the install directory
+<li>Append this directory to your auto_path variable
+<li>Run the pkg_mkIndex proc giving the name of the Berkeley DB Tcl library
+</ol>
+<p>For example:
+<p><blockquote><pre># tclsh8.1
+% lappend auto_path /usr/local/BerkeleyDB/lib
+% pkg_mkIndex /usr/local/BerkeleyDB/lib libdb_tcl-3.2.so libdb-3.2.so</pre></blockquote>
+<p>Note that your Tcl and Berkeley DB version numbers may differ from the example,
+and so your tclsh and library names may be different.
+<h3>Loading Berkeley DB with Tcl</h3>
+<p>The Berkeley DB package may be loaded into the user's interactive Tcl script
+(or wish session) via the "load" command. For example:
+<p><blockquote><pre>load /usr/local/BerkeleyDB/lib/libdb_tcl-3.2.so</pre></blockquote>
+<p>Note that your Berkeley DB version numbers may differ from the example, and so
+the library name may be different.
+<p>If you installed your library to run as a Tcl package, Tcl application
+scripts should use the "package" command to indicate to the Tcl
+interpreter that it needs the Berkeley DB package and where to find it. For
+example:
+<p><blockquote><pre>lappend auto_path "/usr/local/BerkeleyDB/lib"
+package require Db_tcl</pre></blockquote>
+<p>No matter which way the library gets loaded, it creates a command named
+<b>berkdb</b>. All of the Berkeley DB functionality is accessed via this
+command and additional commands it creates on behalf of the application.
+A simple test to determine if everything is loaded and ready is to ask
+for the version:
+<p><blockquote><pre>berkdb version -string</pre></blockquote>
+<p>This should return the Berkeley DB version as a string.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/perl/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/using.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/tcl/program.html b/bdb/docs/ref/tcl/program.html
new file mode 100644
index 00000000000..881c8848bac
--- /dev/null
+++ b/bdb/docs/ref/tcl/program.html
@@ -0,0 +1,33 @@
+<!--$Id: program.so,v 11.9 2000/12/04 18:05:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Tcl API programming notes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl</dl></h3></td>
+<td width="1%"><a href="../../ref/tcl/using.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Tcl API programming notes</h1>
+<p>The Tcl API closely parallels the Berkeley DB programmatic interfaces. If you
+are already familiar with one of those interfaces there will not be many
+surprises in the Tcl API.
+<p>Several pieces of Berkeley DB functionality are not available in the Tcl API.
+Functions that require a user-provided function are not
+supported via the Tcl API. For example, there is no equivalent to the
+<a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> or the <a href="../../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a>
+methods.
+<p>The Berkeley DB Tcl API always turns on the DB_THREAD flag for environments and
+databases, making no assumptions about the presence or absence of
+thread support in current or future releases of Tcl.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/tcl/using.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/tcl/using.html b/bdb/docs/ref/tcl/using.html
new file mode 100644
index 00000000000..6c927477c2c
--- /dev/null
+++ b/bdb/docs/ref/tcl/using.html
@@ -0,0 +1,53 @@
+<!--$Id: using.so,v 11.6 2000/03/18 21:43:17 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Using Berkeley DB with Tcl</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl</dl></h3></td>
+<td width="1%"><a href="../../ref/tcl/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/program.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Using Berkeley DB with Tcl</h1>
+<p>All commands in the Berkeley DB Tcl interface are of the form:
+<p><blockquote><pre>command_handle operation options</pre></blockquote>
+<p>The <i>command handle</i> is <b>berkdb</b> or one of the additional
+commands that may be created. The <i>operation</i> is what you want
+to do to that handle and the <i>options</i> apply to the operation.
+Commands that get created on behalf of the application have their own sets
+of operations. Generally any calls in DB that result in new object
+handles will translate into a new command handle in Tcl. Then the user
+can access the operations of the handle via the new Tcl command handle.
+<p>Newly created commands are named with an abbreviated form of their objects
+followed by a number. Some created commands are subcommands of other
+created commands and are named with the parent command, followed by a
+period ('.'), followed by the new subcommand. For example, suppose you
+have an existing database called my_data.db. The following example shows the
+commands created when you open the database, and when you open a cursor:
+<p><blockquote><pre># First open the database and get a database command handle
+% berkdb open my_data.db
+db0
+#Get some data from that database
+% db0 get my_key
+{{my_key my_data0}{my_key my_data1}}
+#Open a cursor in this database, get a new cursor handle
+% db0 cursor
+db0.c0
+#Get the first data from the cursor
+% db0.c0 get -first
+{{first_key first_data}}</pre></blockquote>
+<p>All commands in the library support a special option <b>-?</b> that will
+list the correct operations for a command or the correct options.
+<p>A list of commands and operations can be found in the
+<a href="../../api_tcl/tcl_index.html">Tcl Interface</a> documentation.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/tcl/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/program.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/test/faq.html b/bdb/docs/ref/test/faq.html
new file mode 100644
index 00000000000..ec5d2d3f061
--- /dev/null
+++ b/bdb/docs/ref/test/faq.html
@@ -0,0 +1,32 @@
+<!--$Id: faq.so,v 10.2 2000/08/10 17:54:49 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Test suite FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Test Suite</dl></h3></td>
+<td width="1%"><a href="../../ref/test/run.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/distrib/layout.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Test suite FAQ</h1>
+<p><ol>
+<p><li><b>The test suite has been running for over a day. What's wrong?</b>
+<p>The test suite can take anywhere from several hours to several
+days to run, depending on your hardware configuration. As long as the
+run is making forward progress and new lines are being written to the
+<b>ALL.OUT</b> file, everything is probably fine.
+<p><li><b>The test suite hangs.</b>
+<p>The test suite requires Tcl 8.1 or greater, preferably at least Tcl 8.3.
+If you are using an earlier version of Tcl, the test suite may simply
+hang at some point.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/test/run.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/distrib/layout.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/test/run.html b/bdb/docs/ref/test/run.html
new file mode 100644
index 00000000000..078951a05ea
--- /dev/null
+++ b/bdb/docs/ref/test/run.html
@@ -0,0 +1,78 @@
+<!--$Id: run.so,v 10.34 2000/11/28 21:27:49 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Running the test suite</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Test Suite</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Running the test suite</h1>
+<p>Once you have started tclsh and have loaded the test.tcl source file (see
+<a href="../../ref/build_unix/test.html">Running the test suite under UNIX</a>
+and <a href="../../ref/build_win/test.html">Running the test suite under
+Windows</a> for more information), you are ready to run the test suite. At
+the tclsh prompt, to run the entire test suite, enter:
+<p><blockquote><pre>% run_all</pre></blockquote>
+<p>Running all the tests can take from several hours to a few days to
+complete, depending on your hardware. For this reason, the output from
+this command is redirected to a file in the current directory named
+<b>ALL.OUT</b>. Periodically, a line will be written to the standard
+output indicating what test is being run. When the suite has finished,
+a single message indicating that the test suite completed successfully or
+that it failed will be written. If the run failed, you should review the
+file ALL.OUT to determine which tests failed. Any errors will appear in
+that file as output lines beginning with the string: FAIL.
+<p>It is also possible to run specific tests or tests for a particular
+subsystem:
+<p><blockquote><pre>% r archive
+% r btree
+% r env
+% r frecno
+% r hash
+% r join
+% r lock
+% r log
+% r mpool
+% r mutex
+% r queue
+% r rbtree
+% r recno
+% r rrecno
+% r subdb
+% r txn</pre></blockquote>
+<p>Or to run a single, individual test:
+<p><blockquote><pre>% test001 btree</pre></blockquote>
+<p>It is also possible to modify the test run based on arguments on the
+command line. For example, the command:
+<p><blockquote><pre>% test001 btree 10</pre></blockquote>
+<p>will run a greatly abbreviated form of test001, doing 10 operations
+instead of 10,000.
+<p>In all cases, when not running the entire test suite as described above,
+a successful test run will return you to the tclsh prompt (%). On
+failure, a message is displayed indicating what failed.
+<p>Tests are run, by default, in the directory <b>TESTDIR</b>. However,
+the test files are often very large. To use a different directory for
+the test directory, edit the file include.tcl in your build directory,
+and change the line:
+<p><blockquote><pre>set testdir ./TESTDIR</pre></blockquote>
+<p>to a more appropriate value for your system, e.g.:
+<p><blockquote><pre>set testdir /var/tmp/db.test</pre></blockquote>
+<p>Alternatively, you can create a symbolic link named TESTDIR in your build
+directory to an appropriate location for running the tests. Regardless
+of where you run the tests, the TESTDIR directory should be on a local
+filesystem; using a remote filesystem (e.g., NFS) will almost certainly
+cause spurious test failures.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/toc.html b/bdb/docs/ref/toc.html
new file mode 100644
index 00000000000..e56ee5d4859
--- /dev/null
+++ b/bdb/docs/ref/toc.html
@@ -0,0 +1,310 @@
+<!--$Id: toc.so,v 10.166 2001/01/18 20:31:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Reference Guide Table of Contents</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Reference Guide Table of Contents</h1>
+<ol>
+<font size="+1"><li>Introduction</font>
+ <ol>
+ <li><a href="intro/data.html">An introduction to data management</a>
+ <li><a href="intro/terrain.html">Mapping the terrain: theory and practice</a>
+ <li><a href="intro/dbis.html">What is Berkeley DB?</a>
+ <li><a href="intro/dbisnot.html">What is Berkeley DB not?</a>
+ <li><a href="intro/need.html">Do you need Berkeley DB?</a>
+ <li><a href="intro/what.html">What other services does Berkeley DB provide?</a>
+ <li><a href="intro/distrib.html">What does the Berkeley DB distribution include?</a>
+ <li><a href="intro/where.html">Where does Berkeley DB run?</a>
+ <li><a href="intro/products.html">Sleepycat Software's Berkeley DB products</a>
+ </ol>
+<font size="+1"><li>Getting Started: A Simple Tutorial</font>
+ <ol>
+ <li><a href="simple_tut/intro.html">Introduction</a>
+ <li><a href="simple_tut/keydata.html">Key/data pairs</a>
+ <li><a href="simple_tut/handles.html">Object handles</a>
+ <li><a href="simple_tut/errors.html">Error returns</a>
+ <li><a href="simple_tut/open.html">Opening a database</a>
+ <li><a href="simple_tut/put.html">Adding elements to a database</a>
+ <li><a href="simple_tut/get.html">Retrieving elements from a database</a>
+ <li><a href="simple_tut/del.html">Removing elements from a database</a>
+ <li><a href="simple_tut/close.html">Closing a database</a>
+ </ol>
+<font size="+1"><li>Access Method Configuration</font>
+ <ol>
+ <li><a href="am_conf/intro.html">What are the available access methods?</a>
+ <li><a href="am_conf/select.html">Selecting an access method</a>
+ <li><a href="am_conf/logrec.html">Logical record numbers</a>
+ <li>General access method configuration
+ <ol>
+ <li><a href="am_conf/pagesize.html">Selecting a page size</a>
+ <li><a href="am_conf/cachesize.html">Selecting a cache size</a>
+ <li><a href="am_conf/byteorder.html">Selecting a byte order</a>
+ <li><a href="am_conf/dup.html">Duplicate data items</a>
+ <li><a href="am_conf/malloc.html">Non-local memory allocation</a>
+ </ol>
+ <li>Btree access method specific configuration
+ <ol>
+ <li><a href="am_conf/bt_compare.html">Btree comparison</a>
+ <li><a href="am_conf/bt_prefix.html">Btree prefix comparison</a>
+ <li><a href="am_conf/bt_minkey.html">Minimum keys per page</a>
+ <li><a href="am_conf/bt_recnum.html">Retrieving Btree records by logical record number</a>
+ </ol>
+ <li>Hash access method specific configuration
+ <ol>
+ <li><a href="am_conf/h_ffactor.html">Page fill factor</a>
+ <li><a href="am_conf/h_hash.html">Specifying a database hash</a>
+ <li><a href="am_conf/h_nelem.html">Hash table size</a>
+ </ol>
+ <li>Queue and Recno access method specific configuration
+ <ol>
+ <li><a href="am_conf/recno.html">Managing record-based databases</a>
+ <li><a href="am_conf/extentsize.html">Selecting a Queue extent size</a>
+ <li><a href="am_conf/re_source.html">Flat-text backing files</a>
+ <li><a href="am_conf/renumber.html">Logically renumbering records</a>
+ </ol>
+ </ol>
+<font size="+1"><li>Access Method Operations</font>
+ <ol>
+ <li><a href="am/ops.html">Access method operations</a>
+ <li><a href="am/open.html">Opening a database</a>
+ <li><a href="am/opensub.html">Opening multiple databases in a single file</a>
+ <li><a href="am/upgrade.html">Upgrading databases</a>
+ <li><a href="am/get.html">Retrieving records</a>
+ <li><a href="am/put.html">Storing records</a>
+ <li><a href="am/delete.html">Deleting records</a>
+ <li><a href="am/sync.html">Flushing the database cache</a>
+ <li><a href="am/stat.html">Database statistics</a>
+ <li><a href="am/close.html">Closing a database</a>
+ <li><a href="am/cursor.html">Database cursors</a>
+ <ol>
+ <li><a href="am/curget.html">Retrieving records with a cursor</a>
+ <li><a href="am/curput.html">Storing records with a cursor</a>
+ <li><a href="am/curdel.html">Deleting records with a cursor</a>
+ <li><a href="am/curdup.html">Duplicating a cursor</a>
+ <li><a href="am/join.html">Logical join</a>
+ <li><a href="am/count.html">Data item count</a>
+ <li><a href="am/curclose.html">Closing a cursor</a>
+ <li><a href="am/stability.html">Cursor stability</a>
+ </ol>
+ <li><a href="am/partial.html">Partial record storage and retrieval</a>
+ <li><a href="am/verify.html">Database verification and salvage</a>
+ <li><a href="am/error.html">Error support</a>
+ </ol>
+<font size="+1"><li>Berkeley DB Architecture</font>
+ <ol>
+ <li><a href="arch/bigpic.html">The big picture</a>
+ <li><a href="arch/progmodel.html">Programming model</a>
+ <li><a href="arch/apis.html">Programmatic APIs</a>
+ <li><a href="arch/script.html">Scripting languages</a>
+ <li><a href="arch/utilities.html">Supporting utilities</a>
+ </ol>
+<font size="+1"><li>The Berkeley DB Environment</font>
+ <ol>
+ <li><a href="env/intro.html">Introduction</a>
+ <li><a href="env/create.html">Creating an environment</a>
+ <li><a href="env/naming.html">File naming</a>
+ <li><a href="env/security.html">Security</a>
+ <li><a href="env/region.html">Shared memory regions</a>
+ <li><a href="env/remote.html">Remote filesystems</a>
+ <li><a href="env/open.html">Opening databases within the environment</a>
+ <li><a href="env/error.html">Error support</a>
+ </ol>
+<font size="+1"><li>Berkeley DB Concurrent Data Store Applications</font>
+ <ol>
+ <li><a href="cam/intro.html">Building Berkeley DB Concurrent Data Store applications</a>
+ </ol>
+<font size="+1"><li>Berkeley DB Transactional Data Store Applications</font>
+ <ol>
+ <li><a href="transapp/intro.html">Building Berkeley DB Transactional Data Store applications</a>
+ <li><a href="transapp/why.html">Why transactions?</a>
+ <li><a href="transapp/term.html">Terminology</a>
+ <li><a href="transapp/app.html">Application structure</a>
+ <li><a href="transapp/env_open.html">Opening the environment</a>
+ <li><a href="transapp/data_open.html">Opening the databases</a>
+ <li><a href="transapp/put.html">Recoverability and deadlock avoidance</a>
+ <li><a href="transapp/inc.html">Atomicity</a>
+ <li><a href="transapp/read.html">Repeatable reads</a>
+ <li><a href="transapp/cursor.html">Transactional cursors</a>
+ <li><a href="transapp/admin.html">Environment infrastructure</a>
+ <li><a href="transapp/deadlock.html">Deadlock detection</a>
+ <li><a href="transapp/checkpoint.html">Checkpoints</a>
+ <li><a href="transapp/archival.html">Database and log file archival</a>
+ <li><a href="transapp/logfile.html">Log file removal</a>
+ <li><a href="transapp/recovery.html">Recovery procedures</a>
+ <li><a href="transapp/filesys.html">Recovery and filesystem operations</a>
+ <li><a href="transapp/reclimit.html">Berkeley DB recoverability</a>
+ <li><a href="transapp/throughput.html">Transaction throughput</a>
+ </ol>
+<font size="+1"><li>XA Resource Manager</font>
+ <ol>
+ <li><a href="xa/intro.html">Introduction</a>
+ <li><a href="xa/config.html">Configuring Berkeley DB with The Tuxedo System</a>
+ <li><a href="xa/faq.html">Frequently Asked Questions</a>
+ </ol>
+<font size="+1"><li>Programmer Notes</font>
+ <ol>
+ <li><a href="program/appsignals.html">Application signal handling</a>
+ <li><a href="program/errorret.html">Error returns to applications</a>
+ <li><a href="program/environ.html">Environmental variables</a>
+ <li><a href="program/mt.html">Building multi-threaded applications</a>
+ <li><a href="program/scope.html">Berkeley DB handles</a>
+ <li><a href="program/namespace.html">Name spaces</a>
+ <li><a href="program/copy.html">Copying databases</a>
+ <li><a href="program/version.html">Library version information</a>
+ <li><a href="program/dbsizes.html">Database limits</a>
+ <li><a href="program/byteorder.html">Byte ordering</a>
+ <li><a href="program/diskspace.html">Disk space requirements</a>
+ <li><a href="program/compatible.html">Compatibility with historic interfaces</a>
+ <li><a href="program/recimp.html">Recovery implementation</a>
+ <li><a href="program/extending.html">Application-specific logging and recovery</a>
+ <li><a href="program/runtime.html">Run-time configuration</a>
+ </ol>
+<font size="+1"><li>The Locking Subsystem</font>
+ <ol>
+ <li><a href="lock/intro.html">Berkeley DB and locking</a>
+ <li><a href="lock/page.html">Page locks</a>
+ <ol>
+ <li><a href="lock/stdmode.html">Standard lock modes</a>
+ <li><a href="lock/notxn.html">Locking without transactions</a>
+ <li><a href="lock/twopl.html">Locking with transactions: two-phase locking</a>
+ </ol>
+ <li><a href="lock/am_conv.html">Access method locking conventions</a>
+ <li><a href="lock/cam_conv.html">Berkeley DB Concurrent Data Store locking conventions</a>
+ <li><a href="lock/dead.html">Deadlocks and deadlock avoidance</a>
+ <li><a href="lock/config.html">Configuring locking</a>
+ <li><a href="lock/max.html">Configuring locking: sizing the system</a>
+ <li><a href="lock/nondb.html">Locking and non-Berkeley DB applications</a>
+ </ol>
+<font size="+1"><li>The Logging Subsystem</font>
+ <ol>
+ <li><a href="log/intro.html">Berkeley DB and logging</a>
+ <li><a href="log/config.html">Configuring logging</a>
+ <li><a href="log/limits.html">Log file limits</a>
+ </ol>
+<font size="+1"><li>The Memory Pool Subsystem</font>
+ <ol>
+ <li><a href="mp/intro.html">Berkeley DB and the memory pool</a>
+ <li><a href="mp/config.html">Configuring the memory pool</a>
+ </ol>
+<font size="+1"><li>The Transaction Subsystem</font>
+ <ol>
+ <li><a href="txn/intro.html">Berkeley DB and transactions</a>
+ <li><a href="txn/nested.html">Nested transactions</a>
+ <li><a href="txn/limits.html">Transaction limits</a>
+ <li><a href="txn/config.html">Configuring transactions</a>
+ <li><a href="txn/other.html">Transactions and non-Berkeley DB applications</a>
+ </ol>
+<font size="+1"><li>RPC Client/Server</font>
+ <ol>
+ <li><a href="rpc/intro.html">Introduction</a>
+ <li><a href="rpc/client.html">Client program</a>
+ <li><a href="rpc/server.html">Server program</a>
+ </ol>
+<font size="+1"><li>Java API</font>
+ <ol>
+ <li><a href="java/conf.html">Configuration</a>
+ <li><a href="java/compat.html">Compatibility</a>
+ <li><a href="java/program.html">Programming notes</a>
+ <li><a href="java/faq.html">Java FAQ</a>
+ </ol>
+<font size="+1"><li>Perl API</font>
+ <ol>
+ <li><a href="perl/intro.html">Using Berkeley DB with Perl</a>
+ </ol>
+<font size="+1"><li>Tcl API</font>
+ <ol>
+ <li><a href="tcl/intro.html">Loading Berkeley DB with Tcl</a>
+ <li><a href="tcl/using.html">Using Berkeley DB with Tcl</a>
+ <li><a href="tcl/program.html">Tcl API programming notes</a>
+ <li><a href="tcl/error.html">Tcl error handling</a>
+ <li><a href="tcl/faq.html">Tcl FAQ</a>
+ </ol>
+<font size="+1"><li>Sendmail</font>
+ <ol>
+ <li><a href="sendmail/intro.html">Using Berkeley DB with Sendmail</a>
+ </ol>
+<font size="+1"><li>Dumping and Reloading Databases</font>
+ <ol>
+ <li><a href="dumpload/utility.html">The db_dump and db_load utilities</a>
+ <li><a href="dumpload/format.html">Dump output formats</a>
+ <li><a href="dumpload/text.html">Loading text into databases</a>
+ </ol>
+<font size="+1"><li>System Installation Notes</font>
+ <ol>
+ <li><a href="install/file.html">File utility /etc/magic information</a>
+ </ol>
+<font size="+1"><li>Debugging Applications</font>
+ <ol>
+ <li><a href="debug/intro.html">Introduction</a>
+ <li><a href="debug/compile.html">Compile-time configuration</a>
+ <li><a href="debug/runtime.html">Run-time error information</a>
+ <li><a href="debug/printlog.html">Reviewing Berkeley DB log files</a>
+ <li><a href="debug/common.html">Common errors</a>
+ </ol>
+<font size="+1"><li>Building Berkeley DB for UNIX and QNX systems</font>
+ <ol>
+ <li><a href="build_unix/intro.html">Building for UNIX</a>
+ <li><a href="build_unix/conf.html">Configuring Berkeley DB</a>
+ <li><a href="build_unix/flags.html">Changing compile or load options</a>
+ <li><a href="build_unix/install.html">Installing Berkeley DB</a>
+ <li><a href="build_unix/shlib.html">Dynamic shared libraries</a>
+ <li><a href="build_unix/test.html">Running the test suite under UNIX</a>
+ <li><a href="build_unix/notes.html">Architecture independent FAQ</a>
+ <li>Architecture specific FAQs
+ <ol>
+ <li><a href="build_unix/aix.html">AIX</a>
+ <li><a href="build_unix/freebsd.html">FreeBSD</a>
+ <li><a href="build_unix/hpux.html">HP-UX</a>
+ <li><a href="build_unix/irix.html">IRIX</a>
+ <li><a href="build_unix/linux.html">Linux</a>
+ <li><a href="build_unix/osf1.html">OSF/1</a>
+ <li><a href="build_unix/qnx.html">QNX</a>
+ <li><a href="build_unix/sco.html">SCO</a>
+ <li><a href="build_unix/solaris.html">Solaris</a>
+ <li><a href="build_unix/sunos.html">SunOS</a>
+ <li><a href="build_unix/ultrix.html">Ultrix</a>
+ </ol>
+ </ol>
+<font size="+1"><li>Building Berkeley DB for Win32 platforms</font>
+ <ol>
+ <li><a href="build_win/intro.html">Building for Win32</a>
+ <li><a href="build_win/test.html">Running the test suite under Windows</a>
+ <li><a href="build_win/notes.html">Windows notes</a>
+ <li><a href="build_win/faq.html">Windows FAQ</a>
+ </ol>
+<font size="+1"><li>Building Berkeley DB for VxWorks systems</font>
+ <ol>
+ <li><a href="build_vxworks/intro.html">Building for VxWorks</a>
+ <li><a href="build_vxworks/notes.html">VxWorks notes</a>
+ <li><a href="build_vxworks/faq.html">VxWorks FAQ</a>
+ </ol>
+<font size="+1"><li>Upgrading Berkeley DB Applications</font>
+ <ol>
+ <li><a href="upgrade/process.html">Upgrading Berkeley DB installations</a>
+ <li><a href="upgrade.2.0/toc.html">Upgrading Berkeley DB 1.XX applications to Berkeley DB 2.0</a>
+ <li><a href="upgrade.3.0/toc.html">Upgrading Berkeley DB 2.X.X applications to Berkeley DB 3.0</a>
+ <li><a href="upgrade.3.1/toc.html">Upgrading Berkeley DB 3.0.X applications to Berkeley DB 3.1</a>
+ <li><a href="upgrade.3.2/toc.html">Upgrading Berkeley DB 3.1.X applications to Berkeley DB 3.2</a>
+ </ol>
+<font size="+1"><li>Test Suite</font>
+ <ol>
+ <li><a href="test/run.html">Running the test suite</a>
+ <li><a href="test/faq.html">Test suite FAQ</a>
+ </ol>
+<font size="+1"><li>Distribution</font>
+ <ol>
+ <li><a href="distrib/layout.html">Source code layout</a>
+ </ol>
+<font size="+1"><li>Additional References</font>
+ <ol>
+ <li><a href="refs/refs.html">Additional references</a>
+ </ol>
+</ol>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/admin.html b/bdb/docs/ref/transapp/admin.html
new file mode 100644
index 00000000000..c908a7a33a2
--- /dev/null
+++ b/bdb/docs/ref/transapp/admin.html
@@ -0,0 +1,47 @@
+<!--$Id: admin.so,v 10.14 2000/08/16 17:50:39 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Environment infrastructure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/cursor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/deadlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Environment infrastructure</h1>
+<p>When building transactional applications, it is usually necessary to
+build an administrative infrastructure around the database environment.
+There are five components to this infrastructure, and each is
+supported by the Berkeley DB package in two different ways: a standalone
+utility and one or more library interfaces.
+<ul type=disc>
+<li>Deadlock detection: <a href="../../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../../api_c/lock_detect.html">lock_detect</a>, <a href="../../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a>
+<li>Checkpoints: <a href="../../utility/db_checkpoint.html">db_checkpoint</a>, <a href="../../api_c/txn_checkpoint.html">txn_checkpoint</a>
+<li>Database and log file archival:
+<a href="../../utility/db_archive.html">db_archive</a>, <a href="../../api_c/log_archive.html">log_archive</a>
+<li>Log file removal: <a href="../../utility/db_archive.html">db_archive</a>, <a href="../../api_c/log_archive.html">log_archive</a>
+<li>Recovery procedures: <a href="../../utility/db_recover.html">db_recover</a>, <a href="../../api_c/env_open.html">DBENV-&gt;open</a>
+</ul>
+<p>When writing multi-threaded server applications and/or applications
+intended for download from the web, it is usually simpler to create
+local threads that are responsible for administration of the database
+environment, as scheduling is often simpler in a single-process model,
+and only a single binary need be installed and run. However, the
+supplied utilities can be generally useful tools even when the
+application is responsible for doing its own administration, as
+applications rarely offer external interfaces to database
+administration. The utilities are required when programming to a Berkeley DB
+scripting interface, as the scripting APIs do not always offer
+interfaces to the administrative functionality.
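+<p>As a sketch of how the library interfaces might be used for the log file
+removal component (the function name log_trim and its error handling are
+illustrative only, following the conventions of the other fragments in this
+guide), an application could ask <a href="../../api_c/log_archive.html">log_archive</a> for the log files that
+are no longer needed and remove them with the ANSI C remove function.
+Production applications normally archive these files before removing them.
+<p><blockquote><pre>void
+log_trim(DB_ENV *dbenv)
+{
+	int ret;
+	char **begin, **list;
+<p>
+	/* Get the list of log files no longer needed for recovery. */
+	if ((ret = log_archive(dbenv, &list, DB_ARCH_ABS, NULL)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "log_archive: DB_ARCH_ABS");
+		exit (1);
+	}
+<p>
+	/* Remove each file, then free the list allocated by the library. */
+	if (list != NULL) {
+		for (begin = list; *list != NULL; ++list)
+			if (remove(*list) != 0) {
+				dbenv-&gt;err(dbenv, errno, "remove: %s", *list);
+				exit (1);
+			}
+		free (begin);
+	}
+}</pre></blockquote>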
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/cursor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/deadlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/app.html b/bdb/docs/ref/transapp/app.html
new file mode 100644
index 00000000000..3c946989b50
--- /dev/null
+++ b/bdb/docs/ref/transapp/app.html
@@ -0,0 +1,117 @@
+<!--$Id: app.so,v 10.4 2000/07/25 16:31:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Application structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/term.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/env_open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Application structure</h1>
+<p>When building transactionally protected applications, there are some
+special issues that must be considered. The most important one is that,
+if any thread of control exits for any reason while holding Berkeley DB
+resources, recovery must be performed to:
+<ul type=disc>
+<li>recover the Berkeley DB resources,
+<li>release any locks or mutexes that may have been held to avoid starvation
+as the remaining threads of control convoy behind the failed thread's
+locks, and
+<li>clean up any partially completed operations that may have left a
+database in an inconsistent or corrupted state.
+</ul>
+<p>Complicating this problem is the fact that the Berkeley DB library itself
+cannot determine whether recovery is required; the application itself
+<b>must</b> make that decision. A further complication is that
+recovery must be single-threaded, that is, one thread of control or
+process must perform recovery before any other thread of control or
+process attempts to create or join the Berkeley DB environment.
+<p>There are two approaches to handling this problem:
+<p><dl compact>
+<p><dt>The hard way:<dd>An application can track its own state carefully enough that it knows
+when recovery needs to be performed. Specifically, the rule to use is
+that recovery must be performed before using a Berkeley DB environment any
+time the threads of control previously using the Berkeley DB environment did
+not shut the environment down cleanly before exiting, for whatever
+reason (including application or system failure).
+<p>Requirements for shutting down the environment cleanly differ depending
+on the type of environment created. If the environment is public and
+persistent (i.e., the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag was not specified to the
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> function), recovery must be performed if any transaction was
+not committed or aborted, or the <a href="../../api_c/env_close.html">DBENV-&gt;close</a> function was not called for
+any open DB_ENV handle.
+<p>If the environment is private and temporary (i.e., the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a>
+flag was specified to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function), recovery must be performed
+if any transaction was not committed or aborted, or the <a href="../../api_c/env_close.html">DBENV-&gt;close</a> function
+was not called for any open DB_ENV handle. In addition, at least
+one transaction checkpoint must be performed after all existing
+transactions have been committed or aborted.
+<p><dt>The easy way:<dd>It greatly simplifies matters that recovery may be performed regardless
+of whether recovery strictly needs to be performed, that is, it is not
+an error to run recovery on a database where no recovery is necessary.
+Because of this fact, it is almost invariably simpler to ignore the
+above rules about shutting an application down cleanly, and simply run
+recovery each time a thread of control accessing a database environment
+fails for any reason, as well as before accessing any database
+environment after system reboot.
+</dl>
+<p>There are two common ways to build transactionally protected Berkeley DB
+applications. The most common way is as a single, usually
+multi-threaded, process. This architecture is simplest because it
+requires no monitoring of other threads of control. When the
+application starts, it opens and potentially creates the environment,
+runs recovery (whether it was needed or not), and then opens its
+databases. From then on, the application can create new threads of
+control as it chooses. All threads of control share the open Berkeley DB
+DB_ENV and DB handles. In this model, databases are
+rarely opened or closed when more than a single thread of control is
+running, that is, they are opened when only a single thread is running,
+and closed after all threads but one have exited. The last thread of
+control to exit closes the databases and the environment.
+<p>An alternative way to build Berkeley DB applications is as a set of
+cooperating processes, which may or may not be multi-threaded. This
+architecture is more complicated.
+<p>First, this architecture requires that the order in which threads of
+control are created and subsequently access the Berkeley DB environment be
+controlled, because recovery must be single-threaded. The first thread
+of control to access the environment must run recovery, and no other
+thread should attempt to access the environment until recovery is
+complete. (Note that this ordering requirement does not apply to
+environment creation without recovery. If multiple threads attempt to
+create a Berkeley DB environment, only one will perform the creation and the
+others will join the already existing environment.)
+<p>Second, this architecture requires that threads of control be monitored.
+If any thread of control that owns Berkeley DB resources exits without first
+cleanly discarding those resources, recovery is usually necessary.
+Before running recovery, all threads using the Berkeley DB environment must
+relinquish all of their Berkeley DB resources (it does not matter if they do
+so gracefully or because they are forced to exit). Then recovery can
+be run and the threads of control continued or re-started.
+<p>We have found that the safest way to structure groups of cooperating
+processes is to first create a single process (often a shell script)
+that opens/creates the Berkeley DB environment and runs recovery, and then
+creates the processes or threads that will actually perform work.
+The initial thread has no further responsibilities other than to monitor
+the threads of control it has created, to ensure that none of them
+unexpectedly exits. If one exits, the initial process then forces all
+of the threads of control using the Berkeley DB environment to exit, runs
+recovery, and restarts the working threads of control.
+<p>If it is not practical to have a single parent for the processes sharing
+a Berkeley DB environment, each process sharing the environment should log
+its connection to and exit from the environment in some fashion that
+permits a monitoring process to detect if a thread of control may have
+potentially acquired Berkeley DB resources and never released them.
+<p>Obviously, it is important that the monitoring process in either case
+be as simple and well-tested as possible, because there is no recourse should
+it fail.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/term.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/env_open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/archival.html b/bdb/docs/ref/transapp/archival.html
new file mode 100644
index 00000000000..2e88158504d
--- /dev/null
+++ b/bdb/docs/ref/transapp/archival.html
@@ -0,0 +1,149 @@
+<!--$Id: archival.so,v 10.41 2000/12/05 20:36:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database and log file archival</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/checkpoint.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/logfile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database and log file archival</h1>
+ <a name="3"><!--meow--></a>
+<p>The third component of the administrative infrastructure, archival for
+catastrophic recovery, concerns the recoverability of the database in
+the face of catastrophic failure. Recovery after catastrophic failure
+is intended to minimize data loss when physical hardware has been
+destroyed, for example, loss of a disk that contains databases or log
+files. While the application may still experience data loss in this
+case, it is possible to minimize it.
+<p>First, you may want to periodically create snapshots (i.e., backups) of
+your databases to make it possible to recover from catastrophic failure.
+These snapshots are either a standard backup, which creates a consistent
+picture of the databases as of a single instant in time, or an on-line
+backup (also known as a <i>hot</i> backup), which creates a
+consistent picture of the databases as of an unspecified instant during
+the period of time when the snapshot was made. The advantage of a hot
+backup is that applications may continue to read and write the databases
+while the snapshot is being taken. The disadvantage of a hot backup is
+that more information must be archived, and recovery based on a hot
+backup is to an unspecified time between the start of the backup and
+when the backup is completed.
+<p>Second, after taking a snapshot, you should periodically archive the
+log files being created in the environment. It is often helpful to
+think of database archival in terms of full and incremental filesystem
+backups. A snapshot is a full backup, while the periodic archival of
+the current log files is an incremental. For example, it might be
+reasonable to take a full snapshot of a database environment weekly or
+monthly, and then archive additional log files daily. Using both the
+snapshot and the log files, a catastrophic crash at any time can be
+recovered to the time of the most recent log archival, a time long after
+the original snapshot.
+<p>To create a standard backup of your database that can be used to recover
+from catastrophic failure, take the following steps:
+<p><ol>
+<p><li>Commit or abort all on-going transactions.
+<p><li>Force an environment checkpoint (see <a href="../../utility/db_checkpoint.html">db_checkpoint</a> for more
+information).
+<p><li>Stop writing your databases until the backup has completed. Read-only
+operations are permitted, but no write operations and no filesystem
+operations may be performed (e.g., the <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> and
+<a href="../../api_c/db_open.html">DB-&gt;open</a> functions may not be called).
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-s</b> to identify all of the database data
+files, and copy them to a backup device, such as CDROM, alternate disk,
+or tape. Obviously, the reliability of your archive media will affect
+the safety of your data.
+<p>If the database files are stored in a separate directory from the other
+Berkeley DB files, it may be simpler to archive the directory itself instead
+of the individual files (see <a href="../../api_c/env_set_data_dir.html">DBENV-&gt;set_data_dir</a> for additional
+information). If you are performing a hot backup, the utility you use
+to copy the files must read database pages atomically (as described by
+<a href="../../ref/transapp/reclimit.html">Berkeley DB recoverability</a>).
+<p><b>Note: if any of the database files did not have an open DB
+handle during the lifetime of the current log files, <a href="../../utility/db_archive.html">db_archive</a>
+will not list them in its output!</b> For this reason, it may be simpler
+to use a separate database file directory, and archive the entire
+directory instead of only the files listed by <a href="../../utility/db_archive.html">db_archive</a>.
+</ol>
+<p>To create a <i>hot</i> backup of your database that can be used to
+recover from catastrophic failure, take the following steps:
+<p><ol>
+<p><li>Archive your databases as described in Step #4 above. You
+do not have to halt on-going transactions or force a checkpoint.
+<p><li>When performing a hot backup, you must additionally archive the active
+log files. Note that the order of these two operations matters:
+the database files must be archived before the log files. This
+means that if the database files and log files are in the same
+directory, you cannot simply archive the directory; you must make sure
+that the correct order of archival is maintained.
+<p>To archive your log files, run the <a href="../../utility/db_archive.html">db_archive</a> utility, using
+the <b>-l</b> option, to identify all of the database log files, and
+copy them to your backup media. If the database log files are stored
+in a separate directory from the other database files, it may be simpler
+to archive the directory itself instead of the individual files (see
+the <a href="../../api_c/env_set_lg_dir.html">DBENV-&gt;set_lg_dir</a> function for more information).
+</ol>
+<p>Once these steps are completed, your database can be recovered from
+catastrophic failure (see <a href="recovery.html">Recovery procedures</a> for
+more information).
+<p>To update your snapshot so that recovery from catastrophic failure is
+possible up to a new point in time, repeat step #2 under the hot backup
+instructions, copying all existing log files to a backup device. This
+is applicable to both standard and hot backups, that is, you can update
+snapshots made in either way. Each time both the database and log files
+are copied to backup media, you may discard all previous database
+snapshots and saved log files. Archiving additional log files does not
+allow you to discard either previous database snapshots or log files.
+<p>The time to restore from catastrophic failure is a function of the
+number of log records that have been written since the snapshot was
+originally created. Perhaps more importantly, the more separate pieces
+of backup media you use, the more likely that you will have a problem
+reading from one of them. For these reasons, it is often best to make
+snapshots on a regular basis.
+<p><b>For archival safety, ensure that you have multiple copies of your
+database backups, verify that your archival media is error-free and
+readable, and that copies of your backups are stored off-site!</b>
+<p>The functionality provided by the <a href="../../utility/db_archive.html">db_archive</a> utility is also
+available directly from the Berkeley DB library. The following code fragment
+prints out a list of log and database files that need to be archived.
+<p><blockquote><pre>void
+log_archlist(DB_ENV *dbenv)
+{
+ int ret;
+ char **begin, **list;
+<p>
+ /* Get the list of database files. */
+ if ((ret = log_archive(dbenv,
+ &list, DB_ARCH_ABS | DB_ARCH_DATA, NULL)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "log_archive: DB_ARCH_DATA");
+ exit (1);
+ }
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ printf("database file: %s\n", *list);
+ free (begin);
+ }
+<p>
+ /* Get the list of log files. */
+ if ((ret = log_archive(dbenv,
+ &list, DB_ARCH_ABS | DB_ARCH_LOG, NULL)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "log_archive: DB_ARCH_LOG");
+ exit (1);
+ }
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ printf("log file: %s\n", *list);
+ free (begin);
+ }
+}</pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/checkpoint.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/logfile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/checkpoint.html b/bdb/docs/ref/transapp/checkpoint.html
new file mode 100644
index 00000000000..b9bd81a3ed6
--- /dev/null
+++ b/bdb/docs/ref/transapp/checkpoint.html
@@ -0,0 +1,127 @@
+<!--$Id: checkpoint.so,v 10.13 2000/08/16 17:50:40 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Checkpoints</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/deadlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/archival.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Checkpoints</h1>
+<p>The second component of the infrastructure is performing checkpoints of
+the log files. As transactions commit, change records are written into
+the log files, but the actual changes to the database are not
+necessarily written to disk. When a checkpoint is performed, the
+changes to the database that are part of committed transactions are
+written into the backing database file.
+<p>Performing checkpoints is necessary for two reasons. First, you can
+only remove the Berkeley DB log files from your system after a checkpoint.
+Second, the frequency of your checkpoints is inversely proportional to
+the amount of time it takes to run database recovery after a system or
+application failure.
+<p>Once the database pages are written, log files can be archived and removed
+from the system because they will never be needed for anything other than
+catastrophic failure. In addition, recovery after system or application
+failure only has to redo or undo changes since the last checkpoint, since
+changes before the checkpoint have all been flushed to the filesystem.
+<p>Berkeley DB provides a separate utility, <a href="../../utility/db_checkpoint.html">db_checkpoint</a>, which can be
+used to perform checkpoints. Alternatively, applications can write
+their own checkpoint utility using the underlying <a href="../../api_c/txn_checkpoint.html">txn_checkpoint</a>
+function. The following code fragment checkpoints the database
+environment every 60 seconds:
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+<b> /* Start a checkpoint thread. */
+ if ((errno = pthread_create(
+ &ptid, NULL, checkpoint_thread, (void *)dbenv)) != 0) {
+ fprintf(stderr,
+ "txnapp: failed spawning checkpoint thread: %s\n",
+ strerror(errno));
+ exit (1);
+ }</b>
+<p>
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, address, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+<p>
+ add_fruit(dbenv, db_fruit, "apple", "yellow delicious");
+<p>
+ add_color(dbenv, db_color, "blue", 0);
+ add_color(dbenv, db_color, "blue", 3);
+<p>
+ add_cat(dbenv, db_cats,
+ "Amy Adams",
+ "Sleepycat Software",
+ "394 E. Riding Dr., Carlisle, MA 01741, USA",
+ "abyssinian",
+ "bengal",
+ "chartreaux",
+ NULL);
+<p>
+ return (0);
+}
+<p>
+<b>void *
+checkpoint_thread(void *arg)
+{
+ DB_ENV *dbenv;
+ int ret;
+<p>
+ dbenv = arg;
+ dbenv-&gt;errx(dbenv, "Checkpoint thread: %lu", (u_long)pthread_self());
+<p>
+ /* Checkpoint once a minute. */
+ for (;; sleep(60))
+ switch (ret = txn_checkpoint(dbenv, 0, 0, 0)) {
+ case 0:
+ case DB_INCOMPLETE:
+ break;
+ default:
+ dbenv-&gt;err(dbenv, ret, "checkpoint thread");
+ exit (1);
+ }
+<p>
+ /* NOTREACHED */
+}</b></pre></blockquote>
+<p>As checkpoints can be quite expensive, choosing how often to perform a
+checkpoint is a common tuning parameter for Berkeley DB applications.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/deadlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/archival.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/cursor.html b/bdb/docs/ref/transapp/cursor.html
new file mode 100644
index 00000000000..bb1aff98a8c
--- /dev/null
+++ b/bdb/docs/ref/transapp/cursor.html
@@ -0,0 +1,169 @@
+<!--$Id: cursor.so,v 1.2 2000/08/16 17:50:40 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transactional cursors</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/read.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/admin.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transactional cursors</h1>
+<p>Berkeley DB cursors may be used inside a transaction, exactly like any other
+DB method. The enclosing transaction ID must be specified when
+the cursor is created, but it does not then need to be further specified
+on operations performed using the cursor. One important point to
+remember is that a cursor <b>must be closed</b> before the enclosing
+transaction is committed or aborted.
+<p>The following code fragment uses a cursor to store a new key in the cats
+database with four associated data items. The key is a name. The data
+items are a company name, an address, and a list of the breeds of cat
+owned. Each of the data entries is stored as a duplicate data item.
+In this example, transactions are necessary to ensure that either all or none
+of the data items appear in case of system or application failure.
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, address, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+<p>
+ add_fruit(dbenv, db_fruit, "apple", "yellow delicious");
+<p>
+ add_color(dbenv, db_color, "blue", 0);
+ add_color(dbenv, db_color, "blue", 3);
+<p>
+<b> add_cat(dbenv, db_cats,
+ "Amy Adams",
+ "Sleepycat Software",
+ "394 E. Riding Dr., Carlisle, MA 01741, USA",
+ "abyssinian",
+ "bengal",
+ "chartreaux",
+ NULL);</b>
+<p>
+ return (0);
+}
+<p>
+<b>void
+add_cat(DB_ENV *dbenv, DB *db, char *name, ...)
+{
+ va_list ap;
+ DBC *dbc;
+ DBT key, data;
+ DB_TXN *tid;
+ int ret;
+ char *s;
+<p>
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = name;
+ key.size = strlen(name);
+<p>
+retry: /* Begin the transaction. */
+ if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_begin");
+ exit (1);
+ }
+<p>
+ /* Delete any previously existing item. */
+ switch (ret = db-&gt;del(db, tid, &key, 0)) {
+ case 0:
+ case DB_NOTFOUND:
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ goto retry;
+ default:
+ dbenv-&gt;err(dbenv, ret, "db-&gt;del: %s", name);
+ exit (1);
+ }
+<p>
+ /* Create a cursor. */
+ if ((ret = db-&gt;cursor(db, tid, &dbc, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "db-&gt;cursor");
+ exit (1);
+ }
+<p>
+ /* Append the items, in order. */
+ va_start(ap, name);
+ while ((s = va_arg(ap, char *)) != NULL) {
+ data.data = s;
+ data.size = strlen(s);
+ switch (ret = dbc-&gt;c_put(dbc, &key, &data, DB_KEYLAST)) {
+ case 0:
+ break;
+ case DB_LOCK_DEADLOCK:
+ va_end(ap);
+<p>
+ /* Deadlock: retry the operation. */
+ if ((ret = dbc-&gt;c_close(dbc)) != 0) {
+ dbenv-&gt;err(
+ dbenv, ret, "dbc-&gt;c_close");
+ exit (1);
+ }
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ goto retry;
+ default:
+ /* Error: run recovery. */
+ dbenv-&gt;err(dbenv, ret, "dbc-&gt;put: %s/%s", name, s);
+ exit (1);
+ }
+ }
+ va_end(ap);
+<p>
+ /* Success: commit the change. */
+ if ((ret = dbc-&gt;c_close(dbc)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "dbc-&gt;c_close");
+ exit (1);
+ }
+ if ((ret = txn_commit(tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_commit");
+ exit (1);
+ }
+}</b></pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/read.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/admin.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/data_open.html b/bdb/docs/ref/transapp/data_open.html
new file mode 100644
index 00000000000..904778c3558
--- /dev/null
+++ b/bdb/docs/ref/transapp/data_open.html
@@ -0,0 +1,119 @@
+<!--$Id: data_open.so,v 1.3 2000/08/16 17:50:40 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening the databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/env_open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening the databases</h1>
+<p>Next, we open three databases ("color", "fruit", and "cats") in the
+database environment. Again, our DB database handles are
+declared to be free-threaded using the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag, and so
+may be used by any number of threads we subsequently create.
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+<b> /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, address, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);</b>
+<p>
+ return (0);
+}
+<p>
+<b>void
+db_open(DB_ENV *dbenv, DB **dbp, char *name, int dups)
+{
+ DB *db;
+ int ret;
+<p>
+ /* Create the database handle. */
+ if ((ret = db_create(&db, dbenv, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "db_create");
+ exit (1);
+ }
+<p>
+ /* Optionally, turn on duplicate data items. */
+ if (dups && (ret = db-&gt;set_flags(db, DB_DUP)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "db-&gt;set_flags: DB_DUP");
+ exit (1);
+ }
+<p>
+ /*
+ * Open a database in the environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * read/write owner only
+ */
+ if ((ret = db-&gt;open(db, name, NULL,
+ DB_BTREE, DB_CREATE | DB_THREAD, S_IRUSR | S_IWUSR)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "db-&gt;open: %s", name);
+ exit (1);
+ }
+<p>
+ *dbp = db;
+}</b></pre></blockquote>
+<p>There is no reason to wrap database opens inside of transactions. All
+database opens are transaction protected internally to Berkeley DB, and
+applications using transaction-protected environments can simply rely on
+files either being successfully re-created in a recovered environment,
+or not appearing at all.
+<p>After running this initial code, we can use the <a href="../../utility/db_stat.html">db_stat</a> utility
+to display information about a database we have created:
+<p><blockquote><pre>prompt&gt; db_stat -h TXNAPP -d color
+53162 Btree magic number.
+8 Btree version number.
+Flags:
+2 Minimum keys per-page.
+8192 Underlying database page size.
+1 Number of levels in the tree.
+0 Number of unique keys in the tree.
+0 Number of data items in the tree.
+0 Number of tree internal pages.
+0 Number of bytes free in tree internal pages (0% ff).
+1 Number of tree leaf pages.
+8166 Number of bytes free in tree leaf pages (0.% ff).
+0 Number of tree duplicate pages.
+0 Number of bytes free in tree duplicate pages (0% ff).
+0 Number of tree overflow pages.
+0 Number of bytes free in tree overflow pages (0% ff).
+0 Number of pages on the free list.</pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/env_open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/deadlock.html b/bdb/docs/ref/transapp/deadlock.html
new file mode 100644
index 00000000000..65765ec5903
--- /dev/null
+++ b/bdb/docs/ref/transapp/deadlock.html
@@ -0,0 +1,92 @@
+<!--$Id: deadlock.so,v 10.15 2000/08/10 17:54:49 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deadlock detection</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/admin.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/checkpoint.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deadlock detection</h1>
+<p>The first component of the infrastructure, deadlock detection, is not
+so much a requirement specific to transaction protected applications,
+but rather is necessary for almost all applications where more than a
+single thread of control will be accessing the database at one time.
+While Berkeley DB automatically handles database locking, deadlocks can
+still normally occur. Deadlock detection is not required by all
+transactional applications, but exceptions are rare.
+<p>When a deadlock occurs, two (or more) threads of control each request
+additional locks that can never be granted because one of the waiting
+threads of control holds the requested resource.
+<p>For example, consider two processes A and B. Let's say that A obtains
+an exclusive lock on item X, and B obtains an exclusive lock on item Y.
+Then, A requests a lock on Y and B requests a lock on X. A will wait
+until resource Y becomes available and B will wait until resource X
+becomes available. Unfortunately, since both A and B are waiting,
+neither will release the locks they hold and neither will ever obtain
+the resource on which it is waiting. In order to detect that deadlock
+has happened, a separate process or thread must review the locks
+currently held in the database. If deadlock has occurred, a victim must
+be selected, and that victim will then return the error
+<a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> from whatever Berkeley DB call it was making.
+<p>Berkeley DB provides a separate UNIX-style utility which can be used to
+perform this deadlock detection, named <a href="../../utility/db_deadlock.html">db_deadlock</a>.
+Alternatively, applications can create their own deadlock utility or
+thread using the underlying <a href="../../api_c/lock_detect.html">lock_detect</a> function, or specify
+that Berkeley DB run the deadlock detector internally whenever there is a
+conflict over a lock (see <a href="../../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a> for more
+information). The following code fragment does the latter:
+<p><blockquote><pre>void
+env_open(DB_ENV **dbenvp)
+{
+ DB_ENV *dbenv;
+ int ret;
+<p>
+ /* Create the environment handle. */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "txnapp: db_env_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+<p>
+ /* Set up error handling. */
+ dbenv-&gt;set_errpfx(dbenv, "txnapp");
+<p>
+<b> /* Do deadlock detection internally. */
+ if ((ret = dbenv-&gt;set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "set_lk_detect: DB_LOCK_DEFAULT");
+ exit (1);
+ }</b>
+<p>
+ /*
+ * Open a transactional environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * run recovery
+ * read/write owner only
+ */
+ if ((ret = dbenv-&gt;open(dbenv, ENV_DIRECTORY,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_THREAD,
+ S_IRUSR | S_IWUSR)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "dbenv-&gt;open: %s", ENV_DIRECTORY);
+ exit (1);
+ }
+<p>
+ *dbenvp = dbenv;
+}</pre></blockquote>
+<p>Deciding how often to run the deadlock detector and which of the
+deadlocked transactions will be forced to abort when the deadlock is
+detected is a common tuning parameter for Berkeley DB applications.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/admin.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/checkpoint.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/env_open.html b/bdb/docs/ref/transapp/env_open.html
new file mode 100644
index 00000000000..7209a3fef5f
--- /dev/null
+++ b/bdb/docs/ref/transapp/env_open.html
@@ -0,0 +1,174 @@
+<!--$Id: env_open.so,v 1.1 2000/07/25 17:56:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening the environment</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/app.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/data_open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening the environment</h1>
+<p>Creating transaction-protected applications using the Berkeley DB library is
+quite easy. Applications first use <a href="../../api_c/env_open.html">DBENV-&gt;open</a> to initialize
+the database environment. Transaction-protected applications normally
+require all four Berkeley DB subsystems, so the <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>,
+<a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>, <a href="../../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a> and <a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> flags
+should be specified.
+<p>Once the application has called <a href="../../api_c/env_open.html">DBENV-&gt;open</a>, it opens its
+databases within the environment. Once the databases are opened, the
+application makes changes to the databases inside of transactions. Each
+set of changes that entail a unit of work should be surrounded by the
+appropriate <a href="../../api_c/txn_begin.html">txn_begin</a>, <a href="../../api_c/txn_commit.html">txn_commit</a> and <a href="../../api_c/txn_abort.html">txn_abort</a>
+calls. The Berkeley DB access methods will make the appropriate calls into
+the lock, log and memory pool subsystems in order to guarantee
+transaction semantics. When the application is ready to exit, all
+outstanding transactions should have been committed or aborted.
+<p>Databases accessed by a transaction must not be closed during the
+transaction. Once all outstanding transactions are finished, all open
+Berkeley DB files should be closed. When the Berkeley DB database files have been
+closed, the environment should be closed by calling <a href="../../api_c/env_close.html">DBENV-&gt;close</a>.
+<p>The following code fragment creates the database environment directory,
+then opens the environment, running recovery. Our DB_ENV
+database environment handle is declared to be free-threaded using the
+<a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag, and so may be used by any number of threads that
+we may subsequently create.
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;sys/stat.h&gt;
+<p>
+#include &lt;errno.h&gt;
+#include &lt;pthread.h&gt;
+#include &lt;stdarg.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;unistd.h&gt;
+<p>
+#include &lt;db.h&gt;
+<p>
+#define ENV_DIRECTORY "TXNAPP"
+<p>
+void env_dir_create(void);
+void env_open(DB_ENV **);
+<p>
+int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+ return (0);
+}
+<p>
+void
+env_dir_create()
+{
+ struct stat sb;
+<p>
+ /*
+ * If the directory exists, we're done. We do not further check
+ * the type of the file, DB will fail appropriately if it's the
+ * wrong type.
+ */
+ if (stat(ENV_DIRECTORY, &sb) == 0)
+ return;
+<p>
+ /* Create the directory, read/write/access owner only. */
+ if (mkdir(ENV_DIRECTORY, S_IRWXU) != 0) {
+ fprintf(stderr,
+ "txnapp: mkdir: %s: %s\n", ENV_DIRECTORY, strerror(errno));
+ exit (1);
+ }
+}
+<p>
+void
+env_open(DB_ENV **dbenvp)
+{
+ DB_ENV *dbenv;
+ int ret;
+<p>
+ /* Create the environment handle. */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "txnapp: db_env_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+<p>
+ /* Set up error handling. */
+ dbenv-&gt;set_errpfx(dbenv, "txnapp");
+<p>
+ /*
+ * Open a transactional environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * run recovery
+ * read/write owner only
+ */
+ if ((ret = dbenv-&gt;open(dbenv, ENV_DIRECTORY,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_THREAD,
+ S_IRUSR | S_IWUSR)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "dbenv-&gt;open: %s", ENV_DIRECTORY);
+ exit (1);
+ }
+<p>
+ *dbenvp = dbenv;
+}</pre></blockquote>
+<p>After running this initial program, we can use the <a href="../../utility/db_stat.html">db_stat</a>
+utility to display the contents of the environment directory:
+<p><blockquote><pre>prompt&gt; db_stat -e -h TXNAPP
+3.2.1 Environment version.
+120897 Magic number.
+0 Panic value.
+1 References.
+6 Locks granted without waiting.
+0 Locks granted after waiting.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Mpool Region: 4.
+264KB Size (270336 bytes).
+-1 Segment ID.
+1 Locks granted without waiting.
+0 Locks granted after waiting.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Log Region: 3.
+96KB Size (98304 bytes).
+-1 Segment ID.
+3 Locks granted without waiting.
+0 Locks granted after waiting.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Lock Region: 2.
+240KB Size (245760 bytes).
+-1 Segment ID.
+1 Locks granted without waiting.
+0 Locks granted after waiting.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Txn Region: 5.
+8KB Size (8192 bytes).
+-1 Segment ID.
+1 Locks granted without waiting.
+0 Locks granted after waiting.</pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/app.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/data_open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/filesys.html b/bdb/docs/ref/transapp/filesys.html
new file mode 100644
index 00000000000..fc68089e90f
--- /dev/null
+++ b/bdb/docs/ref/transapp/filesys.html
@@ -0,0 +1,62 @@
+<!--$Id: filesys.so,v 10.30 2000/07/25 16:31:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Recovery and filesystem operations</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/recovery.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/reclimit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Recovery and filesystem operations</h1>
+<p>When running in a transaction-protected environment, database creation
+and deletion are logged as stand-alone transactions internal to Berkeley DB.
+That is, for each such operation a new transaction is begun and aborted
+or committed internally, so that they will be recovered during recovery.
+<p>The Berkeley DB API supports removing and renaming files. Renaming files is
+supported by the <a href="../../api_c/db_rename.html">DB-&gt;rename</a> method, and removing files by the
+<a href="../../api_c/db_remove.html">DB-&gt;remove</a> method. Berkeley DB does not permit specifying the
+<a href="../../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a> flag when opening a file in a transaction protected
+environment. This is an implicit file deletion, but one that does not
+always require the same operating system file permissions as does deleting
+and creating a file.
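+<p>As an illustrative sketch only (the wrapper function and its error handling
+are not part of the example application built in this chapter), removing a
+database file through the library looks roughly as follows; renaming with
+<a href="../../api_c/db_rename.html">DB-&gt;rename</a> is handled the same way, with the new file name as an
+additional argument. The handle is created solely for this call and may not
+be used again afterward.
+<p><blockquote><pre>void
+db_remove_file(DB_ENV *dbenv, char *name)
+{
+	DB *db;
+	int ret;
+<p>
+	/* Create a handle; it must not have been opened. */
+	if ((ret = db_create(&db, dbenv, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "db_create");
+		exit (1);
+	}
+<p>
+	/*
+	 * Remove the underlying file. The operation is logged internally
+	 * by the library, and the handle is discarded whether or not the
+	 * call succeeds.
+	 */
+	if ((ret = db-&gt;remove(db, name, NULL, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "DB-&gt;remove: %s", name);
+		exit (1);
+	}
+}</pre></blockquote>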
+<p>If you have changed the name of a file or deleted it outside of the Berkeley DB
+library (e.g., you explicitly removed a file using your normal operating
+system utilities), then it is possible that recovery will not be able to
+find a database referenced in the log. In this case, <a href="../../utility/db_recover.html">db_recover</a>
+will produce a warning message saying it was unable to locate a file it
+expected to find. This message is only a warning, as the file may have
+been subsequently deleted as part of normal database operations before
+the failure occurred, and so is not necessarily a problem.
+<p>Generally, any filesystem operations that are performed outside the Berkeley DB
+interface should be performed at the same time as making a snapshot of
+the database. To perform filesystem operations correctly:
+<p><ol>
+<p><li>Cleanly shut down database operations.
+<p>To shut down database operations cleanly, all applications accessing the
+database environment must be shut down and a transaction checkpoint must
+be taken. If the applications are not implemented such that they can be
+shut down gracefully (i.e., closing all references to the database
+environment), recovery must be performed after all applications have been
+killed to ensure that the underlying databases are consistent on disk.
+<p><li>Perform the filesystem operations, e.g., remove or rename one
+or more files.
+<p><li>Make an archival snapshot of the database.
+<p>While this step is not strictly necessary, it is strongly recommended.
+If this step is not performed, recovery from catastrophic failure will
+require that recovery first be performed up to the time of the
+filesystem operations, the filesystem operations be redone, and then
+recovery be performed from the filesystem operations forward.
+<p><li>Restart the database applications.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/recovery.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/reclimit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/inc.html b/bdb/docs/ref/transapp/inc.html
new file mode 100644
index 00000000000..35cf67d7efa
--- /dev/null
+++ b/bdb/docs/ref/transapp/inc.html
@@ -0,0 +1,201 @@
+<!--$Id: inc.so,v 1.6 2000/08/08 19:58:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Atomicity</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/read.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Atomicity</h1>
+<p>The third reason listed for using transactions was atomicity. Consider
+an application suite where multiple threads of control (multiple
+processes or threads in one or more processes) are changing the values
+associated with a key in one or more databases. Specifically, they are
+taking the current value, incrementing it, and then storing it back into
+the database.
+<p>Such an application requires atomicity. Since we want to change a value
+in the database, we must make sure that once we read it, no other thread
+of control modifies it. For example, assume that both thread #1 and
+thread #2 are doing similar operations in the database, where thread #1
+is incrementing records by 3, and thread #2 is incrementing records by
+5. We want to increment the record by a total of 8. If the operations
+interleave in the right (well, wrong) order, that is not what will
+happen:
+<p><blockquote><pre>thread #1 <b>read</b> record: the value is 2
+thread #2 <b>read</b> record: the value is 2
+thread #2 <b>write</b> record + 5 back into the database (new value 7)
+thread #1 <b>write</b> record + 3 back into the database (new value 5)</pre></blockquote>
+<p>As you can see, instead of incrementing the record by a total of 8,
+we've only incremented it by 3, because thread #1 overwrote thread #2's
+change. By wrapping the operations in transactions, we ensure that this
+cannot happen. In a transaction, when the first thread reads the
+record, locks are acquired that will not be released until the
+transaction finishes, guaranteeing that all other readers and writers
+will block, waiting for the first thread's transaction to complete (or
+to be aborted).
+<p>Here is an example function that does transaction-protected increments
+on database records to ensure atomicity.
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, address, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+<p>
+ add_fruit(dbenv, db_fruit, "apple", "yellow delicious");
+<p>
+<b> add_color(dbenv, db_color, "blue", 0);
+ add_color(dbenv, db_color, "blue", 3);</b>
+<p>
+ return (0);
+}
+<p>
+<b>void
+add_color(DB_ENV *dbenv, DB *dbp, char *color, int increment)
+{
+ DBT key, data;
+ DB_TXN *tid;
+ int original, ret;
+	char buf[64];
+<p>
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ key.data = color;
+ key.size = strlen(color);
+ memset(&data, 0, sizeof(data));
+ data.flags = DB_DBT_MALLOC;
+<p>
+ for (;;) {
+ /* Begin the transaction. */
+ if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_begin");
+ exit (1);
+ }
+<p>
+ /*
+ * Get the key. If it exists, we increment the value. If it
+ * doesn't exist, we create it.
+ */
+ switch (ret = dbp-&gt;get(dbp, tid, &key, &data, 0)) {
+ case 0:
+ original = atoi(data.data);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ continue;
+ case DB_NOTFOUND:
+ original = 0;
+ break;
+ default:
+ /* Error: run recovery. */
+ dbenv-&gt;err(
+ dbenv, ret, "dbc-&gt;get: %s/%d", color, increment);
+ exit (1);
+ }
+ if (data.data != NULL)
+ free(data.data);
+<p>
+ /* Create the new data item. */
+ (void)snprintf(buf, sizeof(buf), "%d", original + increment);
+ data.data = buf;
+ data.size = strlen(buf) + 1;
+<p>
+ /* Store the new value. */
+ switch (ret = dbp-&gt;put(dbp, tid, &key, &data, 0)) {
+ case 0:
+ /* Success: commit the change. */
+ if ((ret = txn_commit(tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_commit");
+ exit (1);
+ }
+ return;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ break;
+ default:
+ /* Error: run recovery. */
+ dbenv-&gt;err(
+ dbenv, ret, "dbc-&gt;put: %s/%d", color, increment);
+ exit (1);
+ }
+ }
+}</b></pre></blockquote>
+<p>Any number of operations, on any number of databases, can be included
+in a single transaction to ensure atomicity of the operations. There
+is, however, a trade-off between the number of operations included in
+a single transaction and both throughput and the possibility of
+deadlock. This is because transactions acquire locks
+throughout their lifetime, and do not release them until transaction
+commit or abort. So, the more operations included in a transaction,
+the more likely that a transaction will block other operations and that
+deadlock will occur. However, each transaction commit requires a
+synchronous disk I/O, so grouping multiple operations into a transaction
+can increase overall throughput. (There is one exception to this. The
+<a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> option causes transactions to exhibit the ACI
+(atomicity, consistency and isolation) properties, but not D
+(durability), avoiding the synchronous disk I/O on transaction commit
+and greatly increasing transaction throughput for some applications.)
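+<p>As a minimal sketch of that option (assuming the environment open call
+used by this chapter's example program, with DB_TXN_NOSYNC as the only
+addition to the flag list):
+<p><blockquote><pre>/*
+ * Hypothetical variant of the example program's environment open:
+ * adding DB_TXN_NOSYNC trades the D in ACID for commit throughput.
+ */
+if ((ret = dbenv-&gt;open(dbenv, ENV_DIRECTORY,
+    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+    DB_INIT_TXN | DB_RECOVER | DB_THREAD | DB_TXN_NOSYNC,
+    S_IRUSR | S_IWUSR)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "dbenv-&gt;open: %s", ENV_DIRECTORY);
+	exit (1);
+}</pre></blockquote>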
+<p>When applications do create complex transactions, they often avoid
+having more than one complex transaction at a time, as simple operations
+like a single <a href="../../api_c/db_put.html">DB-&gt;put</a> are unlikely to deadlock with each other
+or the complex transaction, while multiple complex transactions are
+likely to deadlock with each other as they will both acquire many locks
+over their lifetime. Alternatively, complex transactions can be broken
+up into smaller sets of operations, and each of those sets may be
+encapsulated in a nested transaction. Because nested transactions may
+be individually aborted and retried without causing the entire
+transaction to be aborted, this allows complex transactions to proceed
+even in the face of heavy contention, repeatedly trying the
+sub-operations until they succeed.
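+<p>A minimal sketch of that structure follows (the do_sub_operation()
+helper is hypothetical and stands in for any group of database
+operations; error handling is abbreviated):
+<p><blockquote><pre>DB_TXN *child, *parent;
+int ret;
+<p>
+/* Begin the enclosing transaction. */
+if ((ret = txn_begin(dbenv, NULL, &parent, 0)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "txn_begin");
+	exit (1);
+}
+<p>
+/*
+ * Run one set of operations as a nested transaction; aborting the
+ * child does not abort the parent, so the set can simply be retried.
+ */
+for (;;) {
+	if ((ret = txn_begin(dbenv, parent, &child, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "txn_begin");
+		exit (1);
+	}
+	if ((ret = do_sub_operation(child)) == 0) {
+		if ((ret = txn_commit(child, 0)) != 0) {
+			dbenv-&gt;err(dbenv, ret, "txn_commit");
+			exit (1);
+		}
+		break;
+	}
+	/* DB_LOCK_DEADLOCK or similar: abort the child and retry. */
+	if ((ret = txn_abort(child)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "txn_abort");
+		exit (1);
+	}
+}
+<p>
+/* Commit the enclosing transaction once all the sets have succeeded. */
+if ((ret = txn_commit(parent, 0)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "txn_commit");
+	exit (1);
+}</pre></blockquote>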
+<p>It is also helpful to order operations within a transaction, that is,
+access the databases and items within the databases in the same order,
+to the extent possible, in all transactions. Accessing databases and
+items in different orders greatly increases the likelihood of operations
+being blocked and failing due to deadlocks.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/read.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/intro.html b/bdb/docs/ref/transapp/intro.html
new file mode 100644
index 00000000000..758169e8552
--- /dev/null
+++ b/bdb/docs/ref/transapp/intro.html
@@ -0,0 +1,42 @@
+<!--$Id: intro.so,v 10.35 2000/12/04 18:05:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building transaction protected applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/cam/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/why.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building transaction protected applications</h1>
+<p>It is difficult to write a useful transactional tutorial and still keep
+within reasonable bounds of documentation, that is, without writing a
+book on transactional programming. We have two goals in this section:
+to familiarize readers with the transactional interfaces of Berkeley DB and
+to provide code building blocks that will be useful in creating
+applications.
+<p>We have not attempted to present this information using a real-world
+application. First, transactional applications are often complex and
+time consuming to explain. Also, one of our goals is to give you an
+understanding of the wide variety of tools Berkeley DB makes available to you,
+and no single application would use most of the interfaces included in
+the Berkeley DB library. For these reasons, we have chosen to simply present
+the Berkeley DB data structures and programming solutions, using examples that
+differ from page to page. All of the examples are included in a
+standalone program you can examine, modify and run, and from which you
+will be able to extract code blocks for your own applications.
+Fragments of the program will be presented throughout this chapter, and
+the complete text of the <a href="transapp.txt">example program</a>
+for IEEE/ANSI Std 1003.1 (POSIX) standard systems is included in the Berkeley DB
+distribution.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/cam/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/why.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/logfile.html b/bdb/docs/ref/transapp/logfile.html
new file mode 100644
index 00000000000..64d8a96475e
--- /dev/null
+++ b/bdb/docs/ref/transapp/logfile.html
@@ -0,0 +1,104 @@
+<!--$Id: logfile.so,v 11.1 2000/07/25 16:31:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Log file removal</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/archival.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/recovery.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Log file removal</h1>
+<p>The fourth component of the infrastructure, log file removal, concerns
+the ongoing disk consumption of the database log files. Depending on
+the rate at which the application writes to the databases and the
+available disk space, the number of log files may increase quickly
+enough that disk space will be a resource problem. For this reason,
+you will periodically want to remove log files in order to conserve disk
+space. This procedure is distinct from database and log file archival
+for catastrophic recovery, and you cannot remove the current log files
+simply because you have created a database snapshot or copied log files
+to archival media.
+<p>Log files may be removed at any time, as long as:
+<ul type=disc>
+<li>the log file is not involved in an active transaction
+<li>at least two checkpoints have been written subsequent to the
+log file's creation, and
+<li>the log file is not the only log file in the environment.
+</ul>
+<p>Obviously, if you are preparing for catastrophic failure, you will want
+to copy the log files to archival media before you remove them.
+<p>To remove log files, take the following steps:
+<p><ol>
+<p><li>If you are concerned with catastrophic failure, first copy the log files
+to backup media as described in <a href="archival.html">Archival for
+catastrophic recovery</a>.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> without options to identify all of the log files
+that are no longer in use (e.g., no longer involved in an active
+transaction).
+<p><li>Remove those log files from the system.
+</ol>
+<p>The functionality provided by the <a href="../../utility/db_archive.html">db_archive</a> utility is also
+available directly from the Berkeley DB library. The following code fragment
+removes log files that are no longer needed by the database
+environment.
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ ...
+<p>
+<b> /* Start a logfile removal thread. */
+ if ((errno = pthread_create(
+ &ptid, NULL, logfile_thread, (void *)dbenv)) != 0) {
+ fprintf(stderr,
+ "txnapp: failed spawning log file removal thread: %s\n",
+ strerror(errno));
+ exit (1);
+ }</b>
+<p>
+ ...
+}
+<p>
+<b>void *
+logfile_thread(void *arg)
+{
+ DB_ENV *dbenv;
+ int ret;
+ char **begin, **list;
+<p>
+ dbenv = arg;
+ dbenv-&gt;errx(dbenv,
+ "Log file removal thread: %lu", (u_long)pthread_self());
+<p>
+ /* Check once every 5 minutes. */
+ for (;; sleep(300)) {
+ /* Get the list of log files. */
+ if ((ret = log_archive(dbenv, &list, DB_ARCH_ABS, NULL)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "log_archive");
+ exit (1);
+ }
+<p>
+ /* Remove the log files. */
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ if ((ret = remove(*list)) != 0) {
+ dbenv-&gt;err(dbenv,
+ ret, "remove %s", *list);
+ exit (1);
+ }
+ free (begin);
+ }
+ }
+ /* NOTREACHED */
+}</b></pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/archival.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/recovery.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/put.html b/bdb/docs/ref/transapp/put.html
new file mode 100644
index 00000000000..e04a04f70bb
--- /dev/null
+++ b/bdb/docs/ref/transapp/put.html
@@ -0,0 +1,151 @@
+<!--$Id: put.so,v 1.3 2000/08/16 17:50:40 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Recoverability and deadlock avoidance</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/data_open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/inc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Recoverability and deadlock avoidance</h1>
+<p>The first reason listed for using transactions was recoverability. Any
+logical change to a database may require multiple changes to underlying
+data structures. For example, modifying a record in a Btree may require
+leaf and internal pages to split, and so a single <a href="../../api_c/db_put.html">DB-&gt;put</a> method
+call can potentially require that multiple physical database pages be
+written. If only some of those pages are written and then the system
+or application fails, the database is left inconsistent and cannot be
+used until it has been recovered, that is, until the partially completed
+changes have been undone.
+<p>Write-ahead logging is the term that describes the underlying
+implementation that Berkeley DB uses to ensure recoverability. What it means
+is that before any change is made to a database, information about the
+change is written to a database log. During recovery, the log is read,
+and databases are checked to ensure that changes described in the log
+for committed transactions appear in the database. Changes that appear
+in the database but are related to aborted or unfinished transactions
+in the log are undone from the database.
+<p>For recoverability after application or system failure, operations that
+modify the database must be protected by transactions. More
+specifically, operations are not recoverable unless a transaction is
+begun and each operation is associated with the transaction via the
+Berkeley DB interfaces, and the transaction is then successfully committed. This
+is true even if logging is turned on in the database environment.
+<p>Here is an example function that updates a record in a database in a
+transactionally protected manner. The function takes a key and data
+items as arguments, and then attempts to store them into the database.
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, address, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+<p>
+<b> add_fruit(dbenv, db_fruit, "apple", "yellow delicious");</b>
+<p>
+ return (0);
+}
+<p>
+<b>void
+add_fruit(DB_ENV *dbenv, DB *db, char *fruit, char *name)
+{
+ DBT key, data;
+ DB_TXN *tid;
+ int ret;
+<p>
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = fruit;
+ key.size = strlen(fruit);
+ data.data = name;
+ data.size = strlen(name);
+<p>
+ for (;;) {
+ /* Begin the transaction. */
+ if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_begin");
+ exit (1);
+ }
+<p>
+ /* Store the value. */
+ switch (ret = db-&gt;put(db, tid, &key, &data, 0)) {
+ case 0:
+ /* Success: commit the change. */
+ if ((ret = txn_commit(tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_commit");
+ exit (1);
+ }
+ return;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ break;
+ default:
+ /* Error: run recovery. */
+ dbenv-&gt;err(dbenv, ret, "dbc-&gt;put: %s/%s", fruit, name);
+ exit (1);
+ }
+ }
+}</b></pre></blockquote>
+<p>The second reason listed for using transactions was deadlock avoidance.
+There is a new error return in this function that you may not have seen
+before. In transactional (not Concurrent Data Store) applications
+supporting both readers and writers or just multiple writers, Berkeley DB
+functions have an additional possible error return:
+<a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>. This return means that our thread of control
+deadlocked with another thread of control, and our thread was selected
+to discard all of its Berkeley DB resources in order to resolve the problem.
+In the sample code, any time the <a href="../../api_c/db_put.html">DB-&gt;put</a> function returns
+<a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, the transaction is aborted (by calling
+<a href="../../api_c/txn_abort.html">txn_abort</a>, which releases the transaction's Berkeley DB resources and
+undoes any partial changes to the databases), and then the transaction
+is retried from the beginning.
+<p>There is no requirement that the transaction be attempted again, but
+that is a common course of action for applications. Applications may
+want to set an upper boundary on the number of times an operation will
+be retried, as some operations on some data sets may simply be unable
+to succeed. For example, updating all of the pages on a large web site
+during prime business hours may simply be impossible because of the high
+access rate to the database.
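+<p>As a minimal sketch of bounding the retries (the RETRY_MAX limit and
+the error-return convention are assumptions; the add_fruit() example
+above retries indefinitely), the same loop might be written as:
+<p><blockquote><pre>#define	RETRY_MAX	5		/* Hypothetical retry limit. */
+<p>
+int
+add_fruit_bounded(DB_ENV *dbenv, DB *db, char *fruit, char *name)
+{
+	DBT key, data;
+	DB_TXN *tid;
+	int attempts, ret;
+<p>
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	key.data = fruit;
+	key.size = strlen(fruit);
+	data.data = name;
+	data.size = strlen(name);
+<p>
+	for (attempts = 0; attempts &lt; RETRY_MAX; ++attempts) {
+		if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
+			return (ret);
+		switch (ret = db-&gt;put(db, tid, &key, &data, 0)) {
+		case 0:
+			/* Success: commit and return. */
+			return (txn_commit(tid, 0));
+		case DB_LOCK_DEADLOCK:
+			/* Deadlock: abort and try again, up to the limit. */
+			if ((ret = txn_abort(tid)) != 0)
+				return (ret);
+			break;
+		default:
+			/* Other error: abort and hand it to the caller. */
+			(void)txn_abort(tid);
+			return (ret);
+		}
+	}
+	return (DB_LOCK_DEADLOCK);	/* Give up after RETRY_MAX attempts. */
+}</pre></blockquote>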
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/data_open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/inc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/read.html b/bdb/docs/ref/transapp/read.html
new file mode 100644
index 00000000000..912401e8758
--- /dev/null
+++ b/bdb/docs/ref/transapp/read.html
@@ -0,0 +1,40 @@
+<!--$Id: read.so,v 1.1 2000/07/25 17:56:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Repeatable reads</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/inc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/cursor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Repeatable reads</h1>
+<p>The fourth reason listed for using transactions was repeatable reads.
+Generally, for performance reasons, most applications do not place reads
+inside a transaction. The problem is that a
+transactionally protected cursor, reading each key/data pair in a
+database, will acquire a read lock on most of the pages in the database
+and so will gradually block all write operations on the databases until
+the transaction commits or aborts. Note, however, that if there are update
+transactions present in the application, the reading transactions must
+still use locking, and should be prepared to repeat any operation
+(possibly closing and reopening a cursor) which fails with a return
+value of <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>.
+<p>The exceptions to this rule are when the application is doing a
+read-modify-write operation and so requires atomicity, and when an
+application requires the ability to repeatedly access a data item
+knowing that it will not have changed. A repeatable read simply means
+that, for the life of the transaction, every time a request is made by
+any thread of control to read a data item, it will be unchanged from
+its previous value, that is, that the value will not change until the
+transaction commits or aborts.
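+<p>A minimal sketch of a reading transaction that handles
+<a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> as described above follows (process_pair() is a
+hypothetical callback; the point is the retry structure around the
+cursor and its transaction):
+<p><blockquote><pre>void
+read_all(DB_ENV *dbenv, DB *db)
+{
+	DBC *dbc;
+	DBT key, data;
+	DB_TXN *tid;
+	int ret;
+<p>
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+<p>
+retry:	/* Begin the transaction and open a cursor within it. */
+	if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "txn_begin");
+		exit (1);
+	}
+	if ((ret = db-&gt;cursor(db, tid, &dbc, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "db-&gt;cursor");
+		exit (1);
+	}
+<p>
+	/* Walk the database. */
+	while ((ret = dbc-&gt;c_get(dbc, &key, &data, DB_NEXT)) == 0)
+		process_pair(&key, &data);
+	(void)dbc-&gt;c_close(dbc);
+<p>
+	switch (ret) {
+	case DB_NOTFOUND:		/* End of the database. */
+		if ((ret = txn_commit(tid, 0)) != 0) {
+			dbenv-&gt;err(dbenv, ret, "txn_commit");
+			exit (1);
+		}
+		return;
+	case DB_LOCK_DEADLOCK:		/* Deadlock: close, abort, retry. */
+		if ((ret = txn_abort(tid)) != 0) {
+			dbenv-&gt;err(dbenv, ret, "txn_abort");
+			exit (1);
+		}
+		goto retry;
+	default:
+		dbenv-&gt;err(dbenv, ret, "dbc-&gt;c_get");
+		exit (1);
+	}
+}</pre></blockquote>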
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/inc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/cursor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/reclimit.html b/bdb/docs/ref/transapp/reclimit.html
new file mode 100644
index 00000000000..559f8ed11b3
--- /dev/null
+++ b/bdb/docs/ref/transapp/reclimit.html
@@ -0,0 +1,106 @@
+<!--$Id: reclimit.so,v 11.19 2000/08/16 17:50:40 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB recoverability</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/filesys.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/throughput.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB recoverability</h1>
+<p>Berkeley DB recovery is based on write-ahead logging. What this means is that,
+when a change is made to a database page, a description of the change is
+written into a log file. This description in the log file is guaranteed
+to be written to stable storage before the database pages that were
+changed are written to stable storage. This is the fundamental feature
+of the logging system that makes durability and rollback work.
+<p>If the application or system crashes, the log is reviewed during recovery.
+Any database changes described in the log that were part of committed
+transactions, and that were never written to the actual database itself,
+are written to the database as part of recovery. Any database changes
+described in the log that were never committed, and that were written to
+the actual database itself, are backed out of the database as part of
+recovery. This design allows the database to be written lazily, and only
+blocks from the log file have to be forced to disk as part of transaction
+commit.
+<p>There are two interfaces that are a concern when considering Berkeley DB
+recoverability:
+<p><ol>
+<p><li>The interface between Berkeley DB and the operating system/filesystem.
+<li>The interface between the operating system/filesystem and the
+underlying stable storage hardware.
+</ol>
+<p>Berkeley DB uses the operating system interfaces and its underlying filesystem
+when writing its files. This means that Berkeley DB can fail if the underlying
+filesystem fails in some unrecoverable way. Otherwise, the interface
+requirements here are simple: the system call that Berkeley DB uses to flush
+data to disk (normally <b>fsync</b>(2)), must guarantee that all the
+information necessary for a file's recoverability has been written to
+stable storage before it returns to Berkeley DB, and that no possible
+application or system crash can cause that file to be unrecoverable.
+<p>In addition, Berkeley DB implicitly uses the interface between the operating
+system and the underlying hardware. The interface requirements here are
+not as simple.
+<p>First, it is necessary to consider the underlying page size of the Berkeley DB
+databases. The Berkeley DB library performs all database writes using the page
+size specified by the application. These pages are not checksummed and
+Berkeley DB assumes that they are written atomically. This means that if the
+operating system performs filesystem I/O in different sized blocks than
+the database page size, it may increase the possibility for database
+corruption. For example, assume that Berkeley DB is writing 32KB pages for a
+database and the operating system does filesystem I/O in 16KB blocks. If
+the operating system writes the first 16KB of the database page
+successfully, but crashes before being able to write the second 16KB of
+the database, the database has been corrupted and this corruption will
+not be detected during recovery. For this reason, it may be important
+to select database page sizes that will be written as single block
+transfers by the underlying operating system.
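+<p>As a minimal sketch (the 8KB value is purely illustrative and should be
+matched to the filesystem's block size; DB-&gt;set_pagesize must be called
+before the database is opened):
+<p><blockquote><pre>DB *db;
+int ret;
+<p>
+if ((ret = db_create(&db, dbenv, 0)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "db_create");
+	exit (1);
+}
+<p>
+/* Use 8KB pages so each page is written as a single filesystem block. */
+if ((ret = db-&gt;set_pagesize(db, 8 * 1024)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "db-&gt;set_pagesize");
+	exit (1);
+}
+<p>
+if ((ret = db-&gt;open(db, "color", NULL,
+    DB_BTREE, DB_CREATE | DB_THREAD, S_IRUSR | S_IWUSR)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "db-&gt;open: color");
+	exit (1);
+}</pre></blockquote>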
+<p>Second, it is necessary to consider the behavior of the system's underlying
+stable storage hardware. For example, consider a SCSI controller that
+has been configured to cache data and return to the operating system that
+the data has been written to stable storage, when, in fact, it has only
+been written into the controller RAM cache. If power is lost before the
+controller is able to flush its cache to disk, and the controller cache
+is not stable (i.e., the writes will not be flushed to disk when power
+returns), the writes will be lost. If the writes include database blocks,
+there is no loss as recovery will correctly update the database. If the
+writes include log file blocks, it is possible that transactions that were
+already committed may not appear in the recovered database, although the
+recovered database will be coherent after a crash.
+<p>If the underlying hardware can fail in any way such that only part of the
+block was written, the failure conditions are the same as those described
+above for an operating system failure that only writes part of a logical
+database block.
+<p>For these reasons, it is important to select hardware that does not do
+partial writes and does not cache data writes (or does not return that
+the data has been written to stable storage until it either has been
+written to stable storage or the actual writing of all of the data is
+guaranteed barring catastrophic hardware failure, e.g., your disk drive
+exploding). You should also be aware that Berkeley DB does not protect against
+all cases of stable storage hardware failure, nor does it protect against
+hardware misbehavior.
+<p>If the disk drive on which you are storing your databases explodes, you
+can perform normal Berkeley DB catastrophic recovery, as that requires only a
+snapshot of your databases plus all of the log files you have archived
+since those snapshots were taken. In this case, you will lose no database
+changes at all. If the disk drive on which you are storing your log files
+explodes, you can still perform catastrophic recovery, but you will lose
+any database changes that were part of transactions committed since your
+last archival of the log files. For this reason, storing your databases
+and log files on different disks should be considered a safety measure as
+well as a performance enhancement.
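+<p>A minimal sketch of separating them (the pathnames are examples only;
+these calls must precede DBENV-&gt;open, and the same effect can normally
+be achieved with set_data_dir and set_lg_dir lines in the environment's
+DB_CONFIG file):
+<p><blockquote><pre>/* Keep databases on one disk and log files on another. */
+if ((ret = dbenv-&gt;set_data_dir(dbenv, "/disk1/databases")) != 0) {
+	dbenv-&gt;err(dbenv, ret, "set_data_dir");
+	exit (1);
+}
+if ((ret = dbenv-&gt;set_lg_dir(dbenv, "/disk2/logs")) != 0) {
+	dbenv-&gt;err(dbenv, ret, "set_lg_dir");
+	exit (1);
+}</pre></blockquote>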
+<p>Finally, if your hardware misbehaves, for example, a SCSI controller
+writes incorrect data to the disk, Berkeley DB will not detect this and your
+data may be corrupted.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/filesys.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/throughput.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/recovery.html b/bdb/docs/ref/transapp/recovery.html
new file mode 100644
index 00000000000..5be94bf417c
--- /dev/null
+++ b/bdb/docs/ref/transapp/recovery.html
@@ -0,0 +1,91 @@
+<!--$Id: recovery.so,v 10.26 2000/08/16 17:50:40 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Recovery procedures</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/logfile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/filesys.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Recovery procedures</h1>
+<p>The fifth component of the infrastructure, recovery procedures, concerns
+the recoverability of the database. After any application or system
+failure, there are two possible approaches to database recovery:
+<p><ol>
+<p><li>There is no need for recoverability and all databases can be recreated
+from scratch. While these applications may still need transaction
+protection for other reasons, recovery usually consists of removing the
+Berkeley DB environment home directory and all files it contains, and then
+restarting the application.
+<p><li>It is necessary to recover information after system or application
+failure. In this case, recovery processing must be performed on any
+database environments that were active at the time of the failure.
+Recovery processing involves running the <a href="../../utility/db_recover.html">db_recover</a> utility or
+calling the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function with the <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> or
+<a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a> flags.
+<p>During recovery processing, all database changes made by aborted or
+unfinished transactions are undone and all database changes made by
+committed transactions are redone, as necessary. Database applications
+must not be restarted until recovery completes. After recovery
+finishes, the environment is properly initialized so that applications
+may be restarted.
+</ol>
+<p>If you intend to do recovery, there are two possible types of recovery
+processing:
+<p><ol>
+<p><li><i>catastrophic</i> recovery. A failure that requires catastrophic
+recovery is a failure where either the database or log files have been
+destroyed or corrupted. For example, catastrophic failure includes the
+case where the disk drive on which either the database or logs are
+stored has been physically destroyed, or when the system's normal
+filesystem recovery on startup is unable to bring the database and log
+files to a consistent state. This is often difficult to detect, and
+perhaps the most common sign of the need for catastrophic recovery is
+when the normal recovery procedures fail.
+<p>To restore your database environment after catastrophic failure, take
+the following steps:
+<p><ol>
+<p><li>Restore the most recent snapshots of the database and log files from
+the backup media into the system directory where recovery will be
+performed.
+<p><li>If any log files were archived since the last snapshot was made, they
+should be restored into the Berkeley DB environment directory where recovery
+will be performed. Make sure you restore them in the order in which
+they were written. The order is important because it's possible that
+the same log file appears on multiple backups and you want to run
+recovery using the most recent version of each log file.
+<p><li>Run the <a href="../../utility/db_recover.html">db_recover</a> utility, specifying its <b>-c</b> option,
+or call the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function specifying the <a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a>
+flag. The catastrophic recovery process will review the logs and
+database files to bring the environment databases to a consistent state
+as of the time of the last uncorrupted log file that is found. It is
+important to realize that only transactions committed before that date
+will appear in the databases.
+<p>It is possible to recreate the database in a location different than
+the original, by specifying appropriate pathnames to the <b>-h</b>
+option of the <a href="../../utility/db_recover.html">db_recover</a> utility. In order for this to work
+properly, it is important that your application reference files by
+names relative to the database home directory or the pathname(s) specified
+in calls to <a href="../../api_c/env_set_data_dir.html">DBENV-&gt;set_data_dir</a>, instead of using full path names.
+</ol>
+<p><li><i>non-catastrophic</i> or <i>normal</i> recovery. If the
+failure is non-catastrophic and the database files and log are both
+accessible on a stable filesystem, run the <a href="../../utility/db_recover.html">db_recover</a> utility
+without the <b>-c</b> option or call the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function
+specifying the <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> flag. The normal recovery process
+will review the logs and database files to ensure that all changes
+associated with committed transactions appear in the databases and that
+all uncommitted transactions do not.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/logfile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/filesys.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/term.html b/bdb/docs/ref/transapp/term.html
new file mode 100644
index 00000000000..d6d54a44d29
--- /dev/null
+++ b/bdb/docs/ref/transapp/term.html
@@ -0,0 +1,60 @@
+<!--$Id: term.so,v 10.16 2000/08/16 17:50:40 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Terminology</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/why.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/app.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Terminology</h1>
+<p>Here are some definitions that will be helpful in understanding
+transactions:
+<p><dl compact>
+<p><dt>Thread of control<dd>Berkeley DB is indifferent to the type or style of threads being used by the
+application, or, for that matter, if threads are being used at all, as
+Berkeley DB supports multi-process access. In the Berkeley DB documentation, any
+time we refer to a "thread of control", that can be read as a true
+thread (one of many in an application's address space), or, a process.
+<p><dt>Free-threaded<dd>A Berkeley DB handle that can be used by multiple threads simultaneously
+without any application-level synchronization is called free-threaded.
+<p><dt>Transaction<dd>A transaction is one or more operations on one or more databases that
+should be treated as a single unit of work. For example, changes to a
+set of databases, where either all of the changes must be applied to
+the database(s) or none of them should. Applications specify when each
+transaction starts, what database operations are included in it, and
+when it ends.
+<p><dt>Transaction abort/commit<dd>Every transaction ends by <i>committing</i> or <i>aborting</i>.
+If a transaction commits, then Berkeley DB guarantees that any database
+changes included in the transaction will never be lost, even after
+system or application failure. If a transaction aborts, or is
+uncommitted when the system or application fails, then the changes
+involved will never appear in the database.
+<p><dt>System or application failure<dd>This is the phrase that we will use to describe when something bad
+happens near your data. It can be an application dumping core, being
+interrupted by a signal, the disk filling up, or the entire system
+crashing. In any case, for whatever reason, the application can no
+longer make forward progress, and its databases were left in an unknown
+state.
+<p><dt>Recovery<dd>Whenever system or application failure occurs, the application must run
+recovery. Recovery is what makes the database consistent, that is, the
+recovery process includes review of log files and databases to ensure
+that the changes from each committed transaction appear in the database,
+and that no changes from an unfinished (or aborted) transaction do.
+<p><dt>Deadlock<dd>Deadlock, in its simplest form, happens when one thread of control owns
+resource A, but needs resource B, while another thread of control owns
+resource B, but needs resource A. Neither thread of control can make
+progress, and so one has to give up and release all of its resources,
+at which time the remaining thread of control can make forward progress.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/why.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/app.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/throughput.html b/bdb/docs/ref/transapp/throughput.html
new file mode 100644
index 00000000000..734f3c7f9ab
--- /dev/null
+++ b/bdb/docs/ref/transapp/throughput.html
@@ -0,0 +1,117 @@
+<!--$Id: throughput.so,v 10.24 2000/12/04 18:05:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transaction throughput</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/reclimit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transaction throughput</h1>
+<p>Generally, the speed of a database system is measured by the transaction
+throughput, expressed as the number of transactions per second. The two
+gating factors for Berkeley DB performance in a transactional system are usually
+the underlying database files and the log file. Both are factors because
+they require disk I/O, which is slow relative to other system resources
+like CPU.
+<p>In the worst case scenario:
+<ul type=disc>
+<li>Database access is truly random and the database is too large to fit into
+the cache, resulting in a single I/O per requested key/data pair.
+<li>Both the database and the log are on a single disk.
+</ul>
+<p>This means that for each transaction, Berkeley DB is potentially performing
+several filesystem operations:
+<ul type=disc>
+<li>Disk seek to database file.
+<li>Database file read.
+<li>Disk seek to log file.
+<li>Log file write.
+<li>Flush log file information to disk.
+<li>Disk seek to update log file metadata (e.g., inode).
+<li>Log metadata write.
+<li>Flush log file metadata to disk.
+</ul>
+<p>There are a number of ways to increase transactional throughput, all of
+which attempt to decrease the number of filesystem operations per
+transaction:
+<ul type=disc>
+<li>Tune the size of the database cache. If the Berkeley DB key/data pairs used
+during the transaction are found in the database cache, the seek and read
+from the database are no longer necessary, resulting in two fewer
+filesystem operations per transaction. To determine if your cache size
+is too small, see <a href="../../ref/am_conf/cachesize.html">Selecting a
+cache size</a>.
+<li>Put the database and the log files on different disks. This allows reads
+and writes to the log files and the database files to be performed
+concurrently.
+<li>Set the filesystem configuration so that file access and modification
+times are not updated. Note, although the file access and modification
+times are not used by Berkeley DB, this may affect other programs, so be
+careful.
+<li>Upgrade your hardware. When considering the hardware on which to run your
+application, however, it is important to consider the entire system. The
+controller and bus can have as much to do with the disk performance as
+the disk itself. It is also important to remember that raw disk throughput
+(transfer rate) is rarely the limiting factor, and that disk seek times are
+normally the true performance issue for Berkeley DB.
+<li>Turn on the <a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag. This changes the Berkeley DB behavior
+so that the log files are not flushed when transactions are committed.
+While this change will greatly increase your transaction throughput, it
+means that transactions will exhibit the ACI (atomicity, consistency and
+isolation) properties, but not D (durability). Database integrity will
+be maintained but it is possible that some number of the most recently
+committed transactions may be undone during recovery instead of being
+redone.
+</ul>
+<p>If you are bottlenecked on logging, the following test will help you
+confirm that the number of transactions per second that your application
+does is reasonable for the hardware on which you're running. Your test
+program should repeatedly perform the following operations:
+<ul type=disc>
+<li>Seek to the beginning of a file.
+<li>Write to the file.
+<li>Flush the file write to disk.
+</ul>
+<p>The number of times that you can perform these three operations per second
+is a rough measure of the number of transactions per second of which the
+hardware is capable. This test simulates the operations applied to the
+log file. (As a simplifying assumption in this experiment, we assume that
+the database files are either on a separate disk, or that they fit, with
+some few exceptions, into the database cache.) We do not have to directly
+simulate updating the log file directory information, as it will normally
+be updated and flushed to disk as a result of flushing the log file write
+to disk.
+<p>Running this test program, where we write 256 bytes, for 1000 operations,
+on reasonably standard commodity hardware (Pentium II CPU, SCSI disk),
+returned the following results:
+<p><blockquote><pre>% testfile -b256 -o1000
+running: 1000 ops
+Elapsed time: 16.641934 seconds
+1000 ops: 60.09 ops per second</pre></blockquote>
+<p>Note that the number of bytes being written to the log as part of each
+transaction can dramatically affect the transaction throughput. The
+above test run used 256, which is a reasonable size log write. Your
+log writes may be different. To determine your average log write size,
+use the <a href="../../utility/db_stat.html">db_stat</a> utility to display your log statistics.
+<p>As a quick sanity check, for this particular disk, the average seek time
+is 9.4 msec, and the average latency is 4.17 msec. That results in a
+minimum requirement for a data transfer to the disk of 13.57 msec, or a
+maximum of 74 transfers per second. This is close enough to the above 60
+operations per second (which wasn't done on a quiescent disk) that the
+number is believable.
+<p>An implementation of the above <a href="writetest.txt">example test
+program</a> for IEEE/ANSI Std 1003.1 (POSIX) standard systems is included in the Berkeley DB
+distribution.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/reclimit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/transapp.txt b/bdb/docs/ref/transapp/transapp.txt
new file mode 100644
index 00000000000..afd441c59f8
--- /dev/null
+++ b/bdb/docs/ref/transapp/transapp.txt
@@ -0,0 +1,492 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#define ENV_DIRECTORY "TXNAPP"
+
+void add_cat(DB_ENV *, DB *, char *, ...);
+void add_color(DB_ENV *, DB *, char *, int);
+void add_fruit(DB_ENV *, DB *, char *, char *);
+void *checkpoint_thread(void *);
+void log_archlist(DB_ENV *);
+void *logfile_thread(void *);
+void db_open(DB_ENV *, DB **, char *, int);
+void env_dir_create(void);
+void env_open(DB_ENV **);
+void usage(void);
+
+int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ env_dir_create();
+ env_open(&dbenv);
+
+ /* Start a checkpoint thread. */
+ if ((errno = pthread_create(
+ &ptid, NULL, checkpoint_thread, (void *)dbenv)) != 0) {
+ fprintf(stderr,
+ "txnapp: failed spawning checkpoint thread: %s\n",
+ strerror(errno));
+ exit (1);
+ }
+
+ /* Start a logfile removal thread. */
+ if ((errno = pthread_create(
+ &ptid, NULL, logfile_thread, (void *)dbenv)) != 0) {
+ fprintf(stderr,
+ "txnapp: failed spawning log file removal thread: %s\n",
+ strerror(errno));
+ exit (1);
+ }
+
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, address, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+
+ add_fruit(dbenv, db_fruit, "apple", "yellow delicious");
+
+ add_color(dbenv, db_color, "blue", 0);
+ add_color(dbenv, db_color, "blue", 3);
+
+ add_cat(dbenv, db_cats,
+ "Amy Adams",
+ "Sleepycat Software",
+ "394 E. Riding Dr., Carlisle, MA 01741, USA",
+ "abyssinian",
+ "bengal",
+ "chartreaux",
+ NULL);
+
+ return (0);
+}
+
+void
+env_dir_create()
+{
+ struct stat sb;
+
+ /*
+ * If the directory exists, we're done. We do not further check
+ * the type of the file, DB will fail appropriately if it's the
+ * wrong type.
+ */
+ if (stat(ENV_DIRECTORY, &sb) == 0)
+ return;
+
+ /* Create the directory, read/write/access owner only. */
+ if (mkdir(ENV_DIRECTORY, S_IRWXU) != 0) {
+ fprintf(stderr,
+ "txnapp: mkdir: %s: %s\n", ENV_DIRECTORY, strerror(errno));
+ exit (1);
+ }
+}
+
+void
+env_open(DB_ENV **dbenvp)
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /* Create the environment handle. */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "txnapp: db_env_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+
+ /* Set up error handling. */
+ dbenv->set_errpfx(dbenv, "txnapp");
+
+ /* Do deadlock detection internally. */
+ if ((ret = dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0) {
+ dbenv->err(dbenv, ret, "set_lk_detect: DB_LOCK_DEFAULT");
+ exit (1);
+ }
+
+ /*
+ * Open a transactional environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * run recovery
+ * read/write owner only
+ */
+ if ((ret = dbenv->open(dbenv, ENV_DIRECTORY,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_THREAD,
+ S_IRUSR | S_IWUSR)) != 0) {
+ dbenv->err(dbenv, ret, "dbenv->open: %s", ENV_DIRECTORY);
+ exit (1);
+ }
+
+ *dbenvp = dbenv;
+}
+
+void *
+checkpoint_thread(void *arg)
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = arg;
+ dbenv->errx(dbenv, "Checkpoint thread: %lu", (u_long)pthread_self());
+
+ /* Checkpoint once a minute. */
+ for (;; sleep(60))
+ switch (ret = txn_checkpoint(dbenv, 0, 0, 0)) {
+ case 0:
+ case DB_INCOMPLETE:
+ break;
+ default:
+ dbenv->err(dbenv, ret, "checkpoint thread");
+ exit (1);
+ }
+
+ /* NOTREACHED */
+}
+
+void *
+logfile_thread(void *arg)
+{
+ DB_ENV *dbenv;
+ int ret;
+ char **begin, **list;
+
+ dbenv = arg;
+ dbenv->errx(dbenv,
+ "Log file removal thread: %lu", (u_long)pthread_self());
+
+ /* Check once every 5 minutes. */
+ for (;; sleep(300)) {
+ /* Get the list of log files. */
+ if ((ret = log_archive(dbenv, &list, DB_ARCH_ABS, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "log_archive");
+ exit (1);
+ }
+
+ /* Remove the log files. */
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ if ((ret = remove(*list)) != 0) {
+ dbenv->err(dbenv,
+ ret, "remove %s", *list);
+ exit (1);
+ }
+ free (begin);
+ }
+ }
+ /* NOTREACHED */
+}
+
+void
+log_archlist(DB_ENV *dbenv)
+{
+ int ret;
+ char **begin, **list;
+
+ /* Get the list of database files. */
+ if ((ret = log_archive(dbenv,
+ &list, DB_ARCH_ABS | DB_ARCH_DATA, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "log_archive: DB_ARCH_DATA");
+ exit (1);
+ }
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ printf("database file: %s\n", *list);
+ free (begin);
+ }
+
+ /* Get the list of log files. */
+ if ((ret = log_archive(dbenv,
+ &list, DB_ARCH_ABS | DB_ARCH_LOG, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "log_archive: DB_ARCH_LOG");
+ exit (1);
+ }
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ printf("log file: %s\n", *list);
+ free (begin);
+ }
+}
+
+void
+db_open(DB_ENV *dbenv, DB **dbp, char *name, int dups)
+{
+ DB *db;
+ int ret;
+
+ /* Create the database handle. */
+ if ((ret = db_create(&db, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ exit (1);
+ }
+
+ /* Optionally, turn on duplicate data items. */
+ if (dups && (ret = db->set_flags(db, DB_DUP)) != 0) {
+ dbenv->err(dbenv, ret, "db->set_flags: DB_DUP");
+ exit (1);
+ }
+
+ /*
+ * Open a database in the environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * read/write owner only
+ */
+ if ((ret = db->open(db, name, NULL,
+ DB_BTREE, DB_CREATE | DB_THREAD, S_IRUSR | S_IWUSR)) != 0) {
+ dbenv->err(dbenv, ret, "db->open: %s", name);
+ exit (1);
+ }
+
+ *dbp = db;
+}
+
+void
+add_fruit(DB_ENV *dbenv, DB *db, char *fruit, char *name)
+{
+ DBT key, data;
+ DB_TXN *tid;
+ int ret;
+
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = fruit;
+ key.size = strlen(fruit);
+ data.data = name;
+ data.size = strlen(name);
+
+ for (;;) {
+ /* Begin the transaction. */
+ if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_begin");
+ exit (1);
+ }
+
+ /* Store the value. */
+ switch (ret = db->put(db, tid, &key, &data, 0)) {
+ case 0:
+ /* Success: commit the change. */
+ if ((ret = txn_commit(tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_commit");
+ exit (1);
+ }
+ return;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ break;
+ default:
+ /* Error: run recovery. */
+ dbenv->err(dbenv, ret, "dbc->put: %s/%s", fruit, name);
+ exit (1);
+ }
+ }
+}
+
+void
+add_color(DB_ENV *dbenv, DB *dbp, char *color, int increment)
+{
+ DBT key, data;
+ DB_TXN *tid;
+ int original, ret;
+ char buf[64];
+
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ key.data = color;
+ key.size = strlen(color);
+ memset(&data, 0, sizeof(data));
+ data.flags = DB_DBT_MALLOC;
+
+ for (;;) {
+ /* Begin the transaction. */
+ if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_begin");
+ exit (1);
+ }
+
+ /*
+ * Get the key. If it exists, we increment the value. If it
+ * doesn't exist, we create it.
+ */
+ switch (ret = dbp->get(dbp, tid, &key, &data, 0)) {
+ case 0:
+ original = atoi(data.data);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ continue;
+ case DB_NOTFOUND:
+ original = 0;
+ break;
+ default:
+ /* Error: run recovery. */
+ dbenv->err(
+ dbenv, ret, "dbc->get: %s/%d", color, increment);
+ exit (1);
+ }
+ if (data.data != NULL)
+ free(data.data);
+
+ /* Create the new data item. */
+ (void)snprintf(buf, sizeof(buf), "%d", original + increment);
+ data.data = buf;
+ data.size = strlen(buf) + 1;
+
+ /* Store the new value. */
+ switch (ret = dbp->put(dbp, tid, &key, &data, 0)) {
+ case 0:
+ /* Success: commit the change. */
+ if ((ret = txn_commit(tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_commit");
+ exit (1);
+ }
+ return;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ break;
+ default:
+ /* Error: run recovery. */
+ dbenv->err(
+ dbenv, ret, "dbc->put: %s/%d", color, increment);
+ exit (1);
+ }
+ }
+}
+
+void
+add_cat(DB_ENV *dbenv, DB *db, char *name, ...)
+{
+ va_list ap;
+ DBC *dbc;
+ DBT key, data;
+ DB_TXN *tid;
+ int ret;
+ char *s;
+
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = name;
+ key.size = strlen(name);
+
+retry: /* Begin the transaction. */
+ if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_begin");
+ exit (1);
+ }
+
+ /* Delete any previously existing item. */
+ switch (ret = db->del(db, tid, &key, 0)) {
+ case 0:
+ case DB_NOTFOUND:
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ goto retry;
+ default:
+ dbenv->err(dbenv, ret, "db->del: %s", name);
+ exit (1);
+ }
+
+ /* Create a cursor. */
+ if ((ret = db->cursor(db, tid, &dbc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db->cursor");
+ exit (1);
+ }
+
+ /* Append the items, in order. */
+ va_start(ap, name);
+ while ((s = va_arg(ap, char *)) != NULL) {
+ data.data = s;
+ data.size = strlen(s);
+ switch (ret = dbc->c_put(dbc, &key, &data, DB_KEYLAST)) {
+ case 0:
+ break;
+ case DB_LOCK_DEADLOCK:
+ va_end(ap);
+
+ /* Deadlock: retry the operation. */
+ if ((ret = dbc->c_close(dbc)) != 0) {
+ dbenv->err(
+ dbenv, ret, "dbc->c_close");
+ exit (1);
+ }
+ if ((ret = txn_abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "txn_abort");
+ exit (1);
+ }
+ goto retry;
+ default:
+ /* Error: run recovery. */
+ dbenv->err(dbenv, ret, "dbc->put: %s/%s", name, s);
+ exit (1);
+ }
+ }
+ va_end(ap);
+
+ /* Success: commit the change. */
+ if ((ret = dbc->c_close(dbc)) != 0) {
+ dbenv->err(dbenv, ret, "dbc->c_close");
+ exit (1);
+ }
+ if ((ret = txn_commit(tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_commit");
+ exit (1);
+ }
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: txnapp\n");
+ exit(1);
+}
diff --git a/bdb/docs/ref/transapp/why.html b/bdb/docs/ref/transapp/why.html
new file mode 100644
index 00000000000..8fee1308246
--- /dev/null
+++ b/bdb/docs/ref/transapp/why.html
@@ -0,0 +1,49 @@
+<!--$Id: why.so,v 1.1 2000/07/25 17:56:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Why transactions?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Protected Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/term.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Why transactions?</h1>
+<p>Perhaps the first question to answer is "Why transactions?" There are
+a number of reasons for including transactional support in your
+applications. The most common ones are:
+<p><dl compact>
+<p><dt>Recoverability<dd>Applications often need to ensure that, no matter how the system or
+application fails, previously saved data is available the next time the
+application runs.
+<p><dt>Deadlock avoidance<dd>When multiple threads of control change the database at the same time,
+there is usually the possibility of deadlock, that is, a state in which
+each thread of control owns a resource another thread wants, so no thread
+is able to make forward progress because all are waiting for a resource.
+Deadlocks are resolved by having one of the operations involved release
+the resources it controls so the other operations can proceed. (The
+operation releasing its resources usually just tries again later.)
+Transactions are necessary so that any changes that were already made
+to the database can be undone as part of releasing the held resources.
+<p><dt>Atomicity<dd>Applications often need to make multiple changes to one or more
+databases, but want to ensure that either all of the changes happen, or
+none of them happen. Transactions guarantee that a group of changes
+are atomic, that is, if the application or system fails, either all of
+the changes to the databases will appear when the application next runs,
+or none of them.
+<p><dt>Repeatable reads<dd>Applications sometimes need to ensure that, while doing a group of
+operations on a database, the value returned as a result of a database
+retrieval doesn't change, that is, if you retrieve the same key more
+than once, the data item will be the same each time. Transactions
+guarantee this behavior.
+</dl>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/term.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/transapp/writetest.txt b/bdb/docs/ref/transapp/writetest.txt
new file mode 100644
index 00000000000..b86c1b6ce66
--- /dev/null
+++ b/bdb/docs/ref/transapp/writetest.txt
@@ -0,0 +1,100 @@
+/*
+ * writetest --
+ *
+ * $Id: writetest.txt,v 10.3 1999/11/19 17:21:06 bostic Exp $
+ */
+#include <sys/types.h>
+#include <sys/time.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+void usage __P((void));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ struct timeval start_time, end_time;
+ long usecs;
+ int bytes, ch, cnt, fd, ops;
+ char *fname, buf[100 * 1024];
+
+ bytes = 256;
+ fname = "testfile";
+ ops = 1000;
+ while ((ch = getopt(argc, argv, "b:f:o:")) != EOF)
+ switch (ch) {
+ case 'b':
+			if ((bytes = atoi(optarg)) > (int)sizeof(buf)) {
+				fprintf(stderr, "max -b option %lu\n",
+				    (unsigned long)sizeof(buf));
+ exit (1);
+ }
+ break;
+ case 'f':
+ fname = optarg;
+ break;
+ case 'o':
+ if ((ops = atoi(optarg)) <= 0) {
+ fprintf(stderr, "illegal -o option value\n");
+ exit (1);
+ }
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ (void)unlink(fname);
+ if ((fd = open(fname, O_RDWR | O_CREAT, 0666)) == -1) {
+ perror(fname);
+ exit (1);
+ }
+
+ memset(buf, 0, bytes);
+
+ printf("running: %d ops\n", ops);
+
+ (void)gettimeofday(&start_time, NULL);
+ for (cnt = 0; cnt < ops; ++cnt) {
+ if (write(fd, buf, bytes) != bytes) {
+ fprintf(stderr, "write: %s\n", strerror(errno));
+ exit (1);
+ }
+ if (lseek(fd, (off_t)0, SEEK_SET) == -1) {
+ fprintf(stderr, "lseek: %s\n", strerror(errno));
+ exit (1);
+ }
+ if (fsync(fd) != 0) {
+ fprintf(stderr, "fsync: %s\n", strerror(errno));
+ exit (1);
+ }
+ }
+ (void)gettimeofday(&end_time, NULL);
+
+ usecs = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
+ end_time.tv_usec - start_time.tv_usec;
+ printf("Elapsed time: %ld.%06ld seconds\n",
+ usecs / 1000000, usecs % 1000000);
+ printf("%d ops: %7.2f ops per second\n",
+ ops, (float)1000000 * ops/usecs);
+
+ (void)unlink(fname);
+ exit (0);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr,
+	    "usage: writetest [-b bytes] [-f file] [-o ops]\n");
+ exit(1);
+}
diff --git a/bdb/docs/ref/txn/config.html b/bdb/docs/ref/txn/config.html
new file mode 100644
index 00000000000..beb73859fe8
--- /dev/null
+++ b/bdb/docs/ref/txn/config.html
@@ -0,0 +1,37 @@
+<!--$Id: config.so,v 10.14 2000/10/03 17:17:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring transactions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/txn/limits.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/other.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring transactions</h1>
+<p>There is only a single parameter used in configuring transactions, the
+<a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag. Setting the <a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag to
+<a href="../../api_c/env_set_flags.html">DBENV-&gt;set_flags</a> when opening a transaction region changes the
+behavior of transactions so that the log is not synchronously flushed
+during transaction commit.
+<p>This change will significantly increase application transactional
+throughput. However, it means that while transactions will continue to
+exhibit the ACI (atomicity, consistency and isolation) properties, they
+will not have D (durability). Database integrity will be maintained but
+it is possible that some number of the most recently committed
+transactions may be undone during recovery instead of being redone.
+<p>The application may also limit the number of simultaneous outstanding
+transactions supported by the environment by calling the
+<a href="../../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a> function. When this number is met, additional calls to
+<a href="../../api_c/txn_begin.html">txn_begin</a> will fail until some active transactions complete.
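+<p>For example, under the assumption that the environment handle has been
+created but not yet opened (and treating the limit of 1000 outstanding
+transactions as purely illustrative), these parameters might be configured
+with code similar to the following sketch:
+<p><blockquote><pre>DB_ENV *dbenv;
+int ret;
+<p>
+	/* Do not synchronously flush the log on transaction commit. */
+	if ((ret = dbenv-&gt;set_flags(dbenv, DB_TXN_NOSYNC, 1)) != 0)
+		dbenv-&gt;err(dbenv, ret, "set_flags: DB_TXN_NOSYNC");
+<p>
+	/* Support no more than 1000 simultaneous active transactions. */
+	if ((ret = dbenv-&gt;set_tx_max(dbenv, 1000)) != 0)
+		dbenv-&gt;err(dbenv, ret, "set_tx_max");</pre></blockquote>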
+<table><tr><td><br></td><td width="1%"><a href="../../ref/txn/limits.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/other.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/txn/intro.html b/bdb/docs/ref/txn/intro.html
new file mode 100644
index 00000000000..557481509e0
--- /dev/null
+++ b/bdb/docs/ref/txn/intro.html
@@ -0,0 +1,86 @@
+<!--$Id: intro.so,v 10.14 2000/03/18 21:43:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB and transactions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/mp/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/nested.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB and transactions</h1>
+<p>The transaction subsystem makes operations atomic, consistent, isolated,
+and durable in the face of system and application failures. The subsystem
+requires that the data be properly logged and locked in order to attain
+these properties. Berkeley DB contains all the components necessary to
+transaction-protect the Berkeley DB access methods and other forms of data may
+be protected if they are logged and locked appropriately.
+<p>The transaction subsystem is created, initialized, and opened by calls to
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> with the <a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> flag specified. Note that
+enabling transactions automatically enables logging, but does not enable
+locking, as a single thread of control that needed atomicity and
+recoverability would not require it.
+<p>The <a href="../../api_c/txn_begin.html">txn_begin</a> function starts a transaction, returning an opaque
+handle to a transaction. If the parent parameter to <a href="../../api_c/txn_begin.html">txn_begin</a> is
+non-NULL, then the new transaction is a child of the designated parent
+transaction.
+<p>The <a href="../../api_c/txn_abort.html">txn_abort</a> function ends the designated transaction and causes
+all updates performed by the transaction to be undone. The end result is
+that the database is left in a state identical to the state that existed
+prior to the <a href="../../api_c/txn_begin.html">txn_begin</a>. If the aborting transaction has any child
+transactions associated with it (even ones that have already been
+committed), they are also aborted. Any transactions that are unresolved
+(i.e., neither committed nor aborted) when the application or system fails
+are aborted during recovery.
+<p>The <a href="../../api_c/txn_commit.html">txn_commit</a> function ends the designated transaction and makes
+all the updates performed by the transaction permanent, even in the face
+of application or system failure. If this is a parent transaction
+committing, then all child transactions that individually committed or
+had not been resolved are also committed.
+<p>Transactions are identified by 32-bit unsigned integers. The ID
+associated with any transaction can be obtained using the <a href="../../api_c/txn_id.html">txn_id</a>
+function. If an application is maintaining information outside of Berkeley DB
+that it wishes to transaction-protect, it should use this transaction ID
+as the locking ID.
+<p>The <a href="../../api_c/txn_checkpoint.html">txn_checkpoint</a> function causes a transaction checkpoint. A
+checkpoint is performed relative to a specific log sequence number (LSN),
+referred to as the checkpoint LSN. When a checkpoint completes
+successfully, it means that all data buffers whose updates are described
+by LSNs less than the checkpoint LSN have been written to disk. This, in
+turn, means that the log records less than the checkpoint LSN are no
+longer necessary for normal recovery (although they would be required for
+catastrophic recovery should the database files be lost) and all log files
+containing only records prior to the checkpoint LSN may be safely archived
+and removed.
+<p>It is possible that in order to complete a transaction checkpoint, it will
+be necessary to write a buffer that is currently in use (i.e., is actively
+being read or written by some transaction). In this case,
+<a href="../../api_c/txn_checkpoint.html">txn_checkpoint</a> will not be able to write the buffer, as doing so
+might cause an inconsistent version of the page to be written to disk,
+and instead of completing successfully will return with an error code of
+<a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a>. In such cases, the checkpoint can simply be
+retried after a short delay.
+<p>The interval between successive checkpoints is directly proportional to
+the length of time required to run normal recovery. If the interval
+between checkpoints is long, then a large number of updates that are
+recorded in the log may not yet be written to disk and recovery may take
+longer to run. If the interval is short, then data is being written to
+disk more frequently, but the recovery time will be shorter. Often, the
+checkpoint interval will be tuned for each specific application.
+<p>The <a href="../../api_c/txn_stat.html">txn_stat</a> function returns information about the status of
+the transaction subsystem. It is the programmatic interface used by the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<p>The transaction system is closed by a call to <a href="../../api_c/env_close.html">DBENV-&gt;close</a>.
+<p>Finally, the entire transaction system may be removed using the
+<a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> interface.
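+<p>As a brief sketch, assuming the database and environment handles have
+already been opened with transaction support and that the key and data
+DBTs have been initialized, a single update and a retried checkpoint have
+the following general shape (error handling is abbreviated):
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+DB_TXN *tid;
+DBT key, data;
+int ret;
+<p>
+	/* Protect a single update with a transaction. */
+	if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
+		dbenv-&gt;err(dbenv, ret, "txn_begin");
+	else if ((ret = dbp-&gt;put(dbp, tid, &key, &data, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "DB-&gt;put");
+		(void)txn_abort(tid);		/* Undo the update. */
+	} else if ((ret = txn_commit(tid, 0)) != 0)
+		dbenv-&gt;err(dbenv, ret, "txn_commit");
+<p>
+	/* Checkpoint, retrying while pinned buffers cannot be written. */
+	while ((ret = txn_checkpoint(dbenv, 0, 0, 0)) == DB_INCOMPLETE)
+		sleep(1);
+	if (ret != 0)
+		dbenv-&gt;err(dbenv, ret, "txn_checkpoint");</pre></blockquote>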
+<table><tr><td><br></td><td width="1%"><a href="../../ref/mp/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/nested.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/txn/limits.html b/bdb/docs/ref/txn/limits.html
new file mode 100644
index 00000000000..0ed97806667
--- /dev/null
+++ b/bdb/docs/ref/txn/limits.html
@@ -0,0 +1,66 @@
+<!--$Id: limits.so,v 10.29 2001/01/10 17:33:53 margo Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transaction limits</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/txn/nested.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transaction limits</h1>
+<h3>Transaction IDs</h3>
+<p>Transactions are identified uniquely by 32-bit unsigned integers. The
+high-order bit of the transaction ID is reserved (and defined to be 1)
+resulting in just over two billion unique transaction IDs. Each time
+that recovery is run, the beginning transaction ID is reset with new
+transactions being numbered starting from 1. This means that recovery
+must be run at least once every two billion transactions.
+<p>It is possible that some environments may need to be aware of this
+limitation. Consider an application performing 600 transactions a second
+for 15 hours a day. The transaction ID space will run out in roughly 66
+days:
+<p><blockquote><pre>2^31 / (600 * 15 * 60 * 60) = 66</pre></blockquote>
+<p>Doing only 100 transactions a second, again for 15 hours a day, exhausts
+the transaction ID space in roughly one year.
+<p>The transaction ID name space is initialized each time
+a database environment is created or recovered. If you
+reach the end of the transaction ID name space, it must
+be handled as if an application or system failure had
+occurred. The most recently allocated transaction ID
+is the <b>st_last_txnid</b> value in the transaction
+statistics information, and is displayed by the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<h3>Cursors</h3>
+<p>When using transactions, cursors are localized to a single transaction.
+That is, a cursor may not span transactions and must be opened and
+closed within a single transaction. In addition, intermingling
+transaction-protected cursor operations and non-transaction-protected
+cursor operations on the same database in a single thread of control is
+practically guaranteed to deadlock as the locks obtained for transactions
+and non-transactions can conflict.
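+<p>As a sketch of the required pattern (assuming open database and
+environment handles), the cursor is created and closed entirely within
+the transaction that protects it:
+<p><blockquote><pre>DB *dbp;
+DBC *dbc;
+DB_ENV *dbenv;
+DB_TXN *tid;
+int ret;
+<p>
+	if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
+		exit (1);
+	if ((ret = dbp-&gt;cursor(dbp, tid, &dbc, 0)) != 0)
+		exit (1);
+<p>
+	/* ... transaction-protected cursor operations ... */
+<p>
+	/* The cursor must be closed before the transaction resolves. */
+	if ((ret = dbc-&gt;c_close(dbc)) != 0)
+		exit (1);
+	if ((ret = txn_commit(tid, 0)) != 0)
+		exit (1);</pre></blockquote>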
+<h3>Multiple Threads of Control</h3>
+<p>Since transactions must hold all their locks until commit, a single
+transaction may accumulate a large number of long-term locks during its
+lifetime. As a result, when two concurrently running transactions access
+the same database, there is strong potential for conflict. While Berkeley
+DB allows an application to have multiple outstanding transactions active
+within a single thread of control, great care must be taken to ensure that
+the transactions do not interfere with each other (e.g., attempt to obtain
+conflicting locks on the same data). If two concurrently active
+transactions in the same thread of control do encounter a lock conflict,
+the thread of control will deadlock in such a manner that the deadlock
+detector will be unable to resolve the problem. In this case, there is
+no true deadlock, but because the transaction on which a transaction is
+waiting is in the same thread of control, no forward progress can be made.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/txn/nested.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/txn/nested.html b/bdb/docs/ref/txn/nested.html
new file mode 100644
index 00000000000..a635abf52a8
--- /dev/null
+++ b/bdb/docs/ref/txn/nested.html
@@ -0,0 +1,66 @@
+<!--$Id: nested.so,v 10.17 2000/12/31 19:26:22 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Nested transactions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/txn/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/limits.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Nested transactions</h1>
+<p>Berkeley DB provides support for nested transactions. Nested transactions
+allow an application to decompose a large or long-running transaction
+into smaller units that may be independently aborted.
+<p>Normally, when beginning a transaction, the application will pass a NULL
+value for the parent argument to <a href="../../api_c/txn_begin.html">txn_begin</a>. If, however, the
+parent argument is a DB_TXN handle, then the newly created
+transaction will be treated as a nested transaction within the parent.
+Transactions may nest arbitrarily deeply. For the purposes of this
+discussion, transactions created with a parent identifier will be called
+child transactions.
+<p>Once a transaction becomes a parent, as long as any of its child
+transactions are unresolved (i.e., they have neither committed nor
+aborted), the parent may not issue any Berkeley DB calls except to begin more
+child transactions or to commit or abort. That is, it may not issue
+any access method or cursor calls. Once all of a parent's children have
+committed or aborted, the parent may again request operations on its
+own behalf.
+<p>The semantics of nested transactions are as follows. When a child
+transaction is begun, it inherits all the locks of its parent. This
+means that the child will never block waiting on a lock held by its
+parent. However, if a parent attempts to obtain locks after it has
+begun a child, the parental locks can conflict with those held by a
+child. Furthermore, locks held by two different children will also
+conflict. To make this concrete, consider the following set of
+transactions and lock acquisitions.
+<p>Transaction T1 is the parent transaction. It acquires an exclusive lock
+on item A and then begins two child transactions, C1 and C2. C1 also
+wishes to acquire a write lock on A; this succeeds. Now, let's say that
+C1 acquires a write lock on B. If C2 now attempts to obtain a lock on
+B, it will block. However, let's now assume that C1 commits. Its locks
+are anti-inherited, which means they are now given to T1. At this
+point, either T1 or C2 is allowed to acquire a lock on B. If, however,
+transaction T1 aborts, then its locks are released. Future requests by
+T1 or C2 will also succeed, but they will be obtaining new locks as
+opposed to piggy-backing off a lock already held by T1.
+<p>Child transactions are entirely subservient to their parent transaction.
+They may abort, undoing their operations regardless of the eventual fate
+of the parent. However, even if a child transaction commits, if its
+parent transaction is eventually aborted, the child's changes are undone
+and the child's transaction is effectively aborted. Any child
+transactions that are not yet resolved when the parent commits or aborts
+are resolved based on the parent's resolution, committing if the parent
+commits and aborting if the parent aborts. Any child transactions that
+are not yet resolved when the parent prepares are also prepared.
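+<p>In outline, and assuming an already-opened transactional environment,
+the parent/child structure looks like the following sketch (the operations
+performed on the child's behalf are omitted):
+<p><blockquote><pre>DB_ENV *dbenv;
+DB_TXN *parent, *child;
+int ret;
+<p>
+	if ((ret = txn_begin(dbenv, NULL, &parent, 0)) != 0)
+		exit (1);
+<p>
+	/* The child inherits the parent's locks when it is created. */
+	if ((ret = txn_begin(dbenv, parent, &child, 0)) != 0)
+		exit (1);
+<p>
+	/* ... operations performed using the child handle ... */
+<p>
+	/* On commit, the child's locks are given to the parent. */
+	if ((ret = txn_commit(child, 0)) != 0)
+		exit (1);
+<p>
+	/* Aborting the parent undoes the committed child's changes too. */
+	if ((ret = txn_abort(parent)) != 0)
+		exit (1);</pre></blockquote>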
+<table><tr><td><br></td><td width="1%"><a href="../../ref/txn/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/limits.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/txn/other.html b/bdb/docs/ref/txn/other.html
new file mode 100644
index 00000000000..e4678c2cbb0
--- /dev/null
+++ b/bdb/docs/ref/txn/other.html
@@ -0,0 +1,67 @@
+<!--$Id: other.so,v 10.16 2000/03/18 21:43:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transactions and non-Berkeley DB applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td width="1%"><a href="../../ref/txn/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transactions and non-Berkeley DB applications</h1>
+<p>It is possible to use the locking, logging and transaction subsystems
+of Berkeley DB to provide transaction semantics on objects other than those
+described by the Berkeley DB access methods. In these cases, the application
+will need more explicit customization of the subsystems as well as the
+development of appropriate data-structure-specific recovery functions.
+<p>For example, consider an application that provides transaction semantics
+on data stored in plain UNIX files accessed using the POSIX read and write
+system calls. The operations for which transaction protection is desired
+are bracketed by calls to <a href="../../api_c/txn_begin.html">txn_begin</a> and <a href="../../api_c/txn_commit.html">txn_commit</a>.
+<p>Before data are referenced, the application must make a call to the lock
+manager, <a href="../../api_c/lock_get.html">lock_get</a>, for a lock of the appropriate type (e.g.,
+read) on the object being locked. The object might be a page in the file,
+a byte, a range of bytes, or some key. It is up to the application to
+ensure that appropriate locks are acquired. Before a write is performed,
+the application should acquire a write lock on the object, by making an
+appropriate call to the lock manager, <a href="../../api_c/lock_get.html">lock_get</a>. Then, the
+application should make a call to the log manager, <a href="../../api_c/log_put.html">log_put</a>, to
+record enough information to redo the operation in case of failure after
+commit and to undo the operation in case of abort.
+<p>It is important, when designing applications that will use the log
+subsystem, to remember that the application is responsible for providing
+any necessary structure to the log record. For example, the application
+must understand what part of the log record is an operation code, what
+part identifies the file being modified, what part is redo information,
+and what part is undo information.
+<p>After the log message is written, the application may issue the write
+system call. After all requests are issued, the application may call
+<a href="../../api_c/txn_commit.html">txn_commit</a>. When <a href="../../api_c/txn_commit.html">txn_commit</a> returns, the caller is
+guaranteed that all necessary log writes have been written to disk.
+<p>At any time, the application may call <a href="../../api_c/txn_abort.html">txn_abort</a>, which will result
+in restoration of the database to a consistent pre-transaction state.
+(The application may specify its own recovery function for this purpose
+using the <a href="../../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a> function. The recovery function must be
+able to either re-apply or undo the update depending on the context, for
+each different type of log record.)
+<p>If the application should crash, the recovery process uses the log to
+restore the database to a consistent state.
+<p>The <a href="../../api_c/txn_prepare.html">txn_prepare</a> function provides the core functionality to
+implement distributed transactions, but it does not manage the
+notification of distributed transaction managers. The caller is
+responsible for issuing <a href="../../api_c/txn_prepare.html">txn_prepare</a> calls to all sites
+participating in the transaction. If all responses are positive, the
+caller can issue a <a href="../../api_c/txn_commit.html">txn_commit</a>. If any of the responses are
+negative, the caller should issue a <a href="../../api_c/txn_abort.html">txn_abort</a>. In general, the
+<a href="../../api_c/txn_prepare.html">txn_prepare</a> call requires that the transaction log be flushed to
+disk.
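+<p>The overall structure of one such update is sketched below; the calls
+that acquire the lock and write the log record are summarized as comments
+because their arguments depend entirely on the application's own object
+naming scheme and log record format:
+<p><blockquote><pre>DB_ENV *dbenv;
+DB_TXN *tid;
+int ret;
+<p>
+	if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
+		exit (1);
+<p>
+	/*
+	 * 1. Call lock_get, using txn_id(tid) as the locker ID, to lock
+	 *    the object about to be modified.
+	 * 2. Call log_put to record the redo and undo information, then
+	 *    issue the write system call itself.
+	 */
+<p>
+	/* All necessary log records are flushed to disk before return. */
+	if ((ret = txn_commit(tid, 0)) != 0)
+		exit (1);</pre></blockquote>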
+<table><tr><td><br></td><td width="1%"><a href="../../ref/txn/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.2.0/convert.html b/bdb/docs/ref/upgrade.2.0/convert.html
new file mode 100644
index 00000000000..ad5685368dc
--- /dev/null
+++ b/bdb/docs/ref/upgrade.2.0/convert.html
@@ -0,0 +1,74 @@
+<!--$Id: convert.so,v 11.6 2000/03/18 21:43:19 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 2.0: converting applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.2.0/system.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 2.0: converting applications</h1>
+<p>Mapping the Berkeley DB 1.85 functionality into Berkeley DB version 2 is almost always
+simple. The <a href="../../api_c/db_open.html">DB-&gt;open</a> manual page replaces the Berkeley DB 1.85 manual
+pages <b>dbopen</b>(3), <b>btree</b>(3), <b>hash</b>(3) and
+<b>recno</b>(3). You should be able to convert each 1.85 function
+call into a Berkeley DB version 2 function call using just the <a href="../../api_c/db_open.html">DB-&gt;open</a>
+documentation.
+<p>Some guidelines and things to watch out for:
+<p><ol>
+<p><li>Most access method functions have exactly the same semantics as in Berkeley DB
+1.85, although the arguments to the functions have changed in some cases.
+To get your code to compile, the most common change is to add the
+transaction ID as an argument (NULL, since Berkeley DB 1.85 did not support
+transactions.)
+<p><li>You must always initialize DBT structures to zero before using them with
+any Berkeley DB version 2 function. (They do not normally have to be
+reinitialized each time, only when they are first allocated. Do this by
+declaring the DBT structure external or static, or by calling the C
+library routine <b>bzero</b>(3) or <b>memset</b>(3).)
+<p><li>The error returns are completely different in the two versions. In Berkeley DB
+1.85, &lt; 0 meant an error, and &gt; 0 meant a minor Berkeley DB exception.
+In Berkeley DB 2.0, &gt; 0 means an error (the Berkeley DB version 2 functions
+return <b>errno</b> on error) and &lt; 0 means a Berkeley DB exception.
+See <a href="../../ref/program/errorret.html">Error Returns to Applications</a>
+for more information.
+<p><li>The Berkeley DB 1.85 DB-&gt;seq function has been replaced by cursors in Berkeley DB
+version 2. The semantics are approximately the same, but cursors require
+the creation of an extra object (the DBC object), which is then used to
+access the database.
+<p>Specifically, the partial key match and range search functionality of the
+R_CURSOR flag in DB-&gt;seq has been replaced by the
+<a href="../../api_c/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a> flag in <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>.
+<p><li>In version 2 of the Berkeley DB library, additions or deletions into Recno
+(fixed and variable-length record) databases no longer automatically
+logically renumber all records after the add/delete point, by default.
+The default behavior is that deleting records does not cause subsequent
+records to be renumbered, and it is an error to attempt to add new records
+between records already in the database. Applications wanting the
+historic Recno access method semantics should call the
+<a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> function with the <a href="../../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag.
+<p><li>Opening a database in Berkeley DB version 2 is a much heavier-weight operation
+than it was in Berkeley DB 1.85. Therefore, if your historic applications were
+written to open a database, perform a single operation, and close the
+database, you may observe performance degradation. In most cases, this
+is due to the expense of creating the environment upon each open. While
+we encourage restructuring your application to avoid repeated opens and
+closes, you can probably recover most of the lost performance by simply
+using a persistent environment across invocations.
+</ol>
+<p>While simply converting Berkeley DB 1.85 function calls to Berkeley DB version 2
+function calls will work, we recommend that you eventually reconsider your
+application's interface to the Berkeley DB database library in light of the
+additional functionality supplied by Berkeley DB version 2, as it is likely to
+result in enhanced application performance.
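+<p>As a small illustration (the key value shown is invented), the DBT
+initialization and the cursor-based replacement for a DB-&gt;seq partial-key
+search might look like the following, assuming a cursor has already been
+created on the open database:
+<p><blockquote><pre>DBC *dbc;
+DBT key, data;
+int ret;
+<p>
+	/* DBTs must be zeroed before first use with the version 2 API. */
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	key.data = "app";
+	key.size = 3;
+<p>
+	/*
+	 * Replaces DB-&gt;seq with R_CURSOR: position the cursor at the
+	 * smallest key greater than or equal to the supplied key.
+	 */
+	if ((ret = dbc-&gt;c_get(dbc, &key, &data, DB_SET_RANGE)) != 0)
+		exit (1);</pre></blockquote>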
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.2.0/system.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.2.0/disk.html b/bdb/docs/ref/upgrade.2.0/disk.html
new file mode 100644
index 00000000000..8e7aeabc718
--- /dev/null
+++ b/bdb/docs/ref/upgrade.2.0/disk.html
@@ -0,0 +1,27 @@
+<!--$Id: disk.so,v 11.6 2000/12/05 20:36:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 2.0: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.2.0/convert.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 2.0: upgrade requirements</h1>
+<p>You will need to upgrade your on-disk databases, as all access method
+database formats changed in the Berkeley DB 2.0 release. For information on
+converting databases from Berkeley DB 1.85 to Berkeley DB 2.0, see the
+<a href="../../utility/db_dump.html">db_dump185</a> and <a href="../../utility/db_load.html">db_load</a> documentation. As database
+environments did not exist prior to the 2.0 release, there is no
+question of upgrading existing database environments.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.2.0/convert.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.2.0/intro.html b/bdb/docs/ref/upgrade.2.0/intro.html
new file mode 100644
index 00000000000..1bebc81cbf5
--- /dev/null
+++ b/bdb/docs/ref/upgrade.2.0/intro.html
@@ -0,0 +1,32 @@
+<!--$Id: intro.so,v 11.8 2000/12/21 18:33:44 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 2.0: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade/process.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/system.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 2.0: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 1.85 and 1.86 release interfaces to the Berkeley DB 2.0 release
+interfaces. They do not describe how to upgrade to the current Berkeley DB
+release interfaces.
+<p>It is not difficult to upgrade Berkeley DB 1.85 applications to use the Berkeley DB
+version 2 library. The Berkeley DB version 2 library has a Berkeley DB 1.85
+compatibility API, which you can use by either recompiling your
+application's source code or by relinking its object files against the
+version 2 library. The underlying databases must be converted, however,
+as the Berkeley DB version 2 library has a different underlying database format.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade/process.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/system.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.2.0/system.html b/bdb/docs/ref/upgrade.2.0/system.html
new file mode 100644
index 00000000000..60a11c9bdf0
--- /dev/null
+++ b/bdb/docs/ref/upgrade.2.0/system.html
@@ -0,0 +1,84 @@
+<!--$Id: system.so,v 11.5 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 2.0: system integration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.2.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/convert.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 2.0: system integration</h1>
+<p><ol>
+<p><li>It is possible to maintain both the Berkeley DB 1.85 and Berkeley DB version 2
+libraries on your system. However, the <b>db.h</b> include file that
+was distributed with Berkeley DB 1.85 is not compatible with the <b>db.h</b>
+file distributed with Berkeley DB version 2, so you will have to install them
+in different locations. In addition, both the Berkeley DB 1.85 and Berkeley DB
+version 2 libraries are named <b>libdb.a</b>.
+<p>As the Berkeley DB 1.85 library did not have an installation target in the
+Makefile, there's no way to know exactly where it was installed on the
+system. In addition, many vendors included it in the C library instead
+of as a separate library, and so it may actually be part of libc and the
+<b>db.h</b> include file may be installed in <b>/usr/include</b>.
+<p>For these reasons, the simplest way to maintain both libraries is to
+install Berkeley DB version 2 in a completely separate area of your system.
+The Berkeley DB version 2 installation process allows you to install into a
+standalone directory hierarchy on your system. See the
+<a href="../../ref/build_unix/intro.html">Building for UNIX systems</a>
+documentation for more information and instructions on how to install the
+Berkeley DB version 2 library, include files and documentation into specific
+locations.
+<p><li>Alternatively, you can replace Berkeley DB 1.85 on your system with Berkeley DB
+version 2. In this case, you'll probably want to install Berkeley DB version
+2 in the normal place on your system, wherever that may be, and delete
+the Berkeley DB 1.85 include files, manual pages and libraries.
+<p>To replace 1.85 with version 2, you must either convert your 1.85
+applications to use the version 2 API or build the Berkeley DB version 2 library
+to include Berkeley DB 1.85 interface compatibility code. Whether converting
+your applications to use the version 2 interface or using the version 1.85
+compatibility API, you will need to recompile or relink your 1.85
+applications, and you must convert any persistent application databases
+to the Berkeley DB version 2 database formats.
+<p>If you want to recompile your Berkeley DB 1.85 applications, you will have to
+change them to include the file <b>db_185.h</b> instead of
+<b>db.h</b>. (The <b>db_185.h</b> file is automatically installed
+during the Berkeley DB version 2 installation process.) You can then recompile
+the applications, linking them against the Berkeley DB version 2 library.
+<p>For more information on compiling the Berkeley DB 1.85 compatibility code into
+the Berkeley DB version 2 library, see <a href="../../ref/build_unix/intro.html">Building for UNIX platforms</a>.
+<p>For more information on converting databases from the Berkeley DB 1.85 formats
+to the Berkeley DB version 2 formats, see the <a href="../../utility/db_dump.html">db_dump185</a> and
+<a href="../../utility/db_load.html">db_load</a> documentation.
+<p><li>Finally, although we certainly do not recommend it, it is possible to
+load both Berkeley DB 1.85 and Berkeley DB version 2 into the same library.
+Similarly, it is possible to use both Berkeley DB 1.85 and Berkeley DB version 2
+within a single application, although it is not possible to use them from
+within the same file.
+<p>The name space in Berkeley DB version 2 has been changed from that of previous
+Berkeley DB versions, notably version 1.85, for portability and consistency
+reasons. The only name collisions in the two libraries are the names used
+by the historic <a href="../../api_c/dbm.html">dbm</a>, <a href="../../api_c/dbm.html">ndbm</a> and <a href="../../api_c/hsearch.html">hsearch</a> interfaces,
+and the Berkeley DB 1.85 compatibility interfaces in the Berkeley DB version 2
+library.
+<p>If you are loading both Berkeley DB 1.85 and Berkeley DB version 2 into a single
+library, remove the historic interfaces from one of the two library
+builds, and configure the Berkeley DB version 2 build to not include the Berkeley DB
+1.85 compatibility API; otherwise you could have collisions and undefined
+behavior. This can be done by editing the library Makefiles and
+reconfiguring and rebuilding the Berkeley DB version 2 library. Obviously, if
+you use the historic interfaces, you will get the version in the library
+from which you did not remove them. Similarly, you will not be able to
+access Berkeley DB version 2 files using the Berkeley DB 1.85 compatibility interface,
+since you have removed that from the library as well.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.2.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/convert.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.2.0/toc.html b/bdb/docs/ref/upgrade.2.0/toc.html
new file mode 100644
index 00000000000..68502be59cb
--- /dev/null
+++ b/bdb/docs/ref/upgrade.2.0/toc.html
@@ -0,0 +1,20 @@
+<!--$Id: toc.so,v 11.2 2000/12/05 20:36:25 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Upgrading Berkeley DB 1.XX applications to Berkeley DB 2.0</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Upgrading Berkeley DB 1.XX applications to Berkeley DB 2.0</h1>
+<ol>
+<li><a href="intro.html">Release 2.0: introduction</a>
+<li><a href="system.html">Release 2.0: system integration</a>
+<li><a href="convert.html">Release 2.0: converting applications</a>
+<li><a href="disk.html">Release 2.0: upgrade requirements</a>
+</ol>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/close.html b/bdb/docs/ref/upgrade.3.0/close.html
new file mode 100644
index 00000000000..620e4babb8b
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/close.html
@@ -0,0 +1,34 @@
+<!--$Id: close.so,v 11.9 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB-&gt;sync and DB-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB-&gt;sync and DB-&gt;close</h1>
+<p>In previous Berkeley DB releases, the <a href="../../api_c/db_close.html">DB-&gt;close</a> and <a href="../../api_c/db_sync.html">DB-&gt;sync</a> functions
+discarded any return of <a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> from the underlying buffer
+pool interfaces, and returned success to their callers. (The
+<a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> error will be returned if the buffer pool functions
+are unable to flush all of the database's dirty blocks from the pool.
+This often happens if another thread is reading or writing the database's
+pages in the pool.)
+<p>In the 3.X release, <a href="../../api_c/db_sync.html">DB-&gt;sync</a> and <a href="../../api_c/db_close.html">DB-&gt;close</a> will return
+<a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> to the application. The best solution is to not
+call <a href="../../api_c/db_sync.html">DB-&gt;sync</a> and specify the <a href="../../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag to the
+<a href="../../api_c/db_close.html">DB-&gt;close</a> function when multiple threads are expected to be accessing the
+database. Alternatively, the caller can ignore any error return of
+<a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a>.
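+<p>For example, a handle shared by several threads of control might be
+closed with code similar to the following sketch:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+	/* Other threads may have pages pinned: skip the flush on close. */
+	if ((ret = dbp-&gt;close(dbp, DB_NOSYNC)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB-&gt;close");</pre></blockquote>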
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/cxx.html b/bdb/docs/ref/upgrade.3.0/cxx.html
new file mode 100644
index 00000000000..7f6c1ab7ea9
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/cxx.html
@@ -0,0 +1,31 @@
+<!--$Id: cxx.so,v 11.5 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: additional C++ changes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/db_cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: additional C++ changes</h1>
+<p>The Db::set_error_model method is gone. The way to change the C++ API to
+return errors rather than throw exceptions is via a flag on the DbEnv or
+Db constructor. For example:
+<p><blockquote><pre>int dberr;
+DbEnv *dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS);</pre></blockquote>
+<p>creates an environment that will never throw exceptions, and method
+returns should be checked instead.
+<p>There are a number of smaller changes to the API that bring the C, C++
+and Java APIs much closer in terms of functionality and usage. Please
+refer to the pages for upgrading C applications for further details.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/db_cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/db.html b/bdb/docs/ref/upgrade.3.0/db.html
new file mode 100644
index 00000000000..a086b589e1b
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/db.html
@@ -0,0 +1,48 @@
+<!--$Id: db.so,v 11.9 2000/12/01 17:57:34 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the DB structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/xa.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbinfo.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the DB structure</h1>
+<p>The DB structure is now opaque for applications in the Berkeley DB 3.0
+release. Accesses to any fields within that structure by the application
+should be replaced with method calls. The following example illustrates
+this using the historic type structure field. In the Berkeley DB 2.X releases,
+applications could find the type of an underlying database using code
+similar to the following:
+<p><blockquote><pre>DB *db;
+DB_TYPE type;
+<p>
+ type = db-&gt;type;</pre></blockquote>
+<p>in the Berkeley DB 3.X releases, this should be done using the
+<a href="../../api_c/db_get_type.html">DB-&gt;get_type</a> method, as follows:
+<p><blockquote><pre>DB *db;
+DB_TYPE type;
+<p>
+ type = db-&gt;get_type(db);</pre></blockquote>
+<p>The following table lists the DB fields previously used by
+applications and the methods that should now be used to get or set them.
+<p><table border=1 align=center>
+<tr><th>DB field</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>byteswapped</td><td><a href="../../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a></td></tr>
+<tr><td>db_errcall</td><td><a href="../../api_c/db_set_errcall.html">DB-&gt;set_errcall</a></td></tr>
+<tr><td>db_errfile</td><td><a href="../../api_c/db_set_errfile.html">DB-&gt;set_errfile</a></td></tr>
+<tr><td>db_errpfx</td><td><a href="../../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a></td></tr>
+<tr><td>db_paniccall</td><td><a href="../../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a></td></tr>
+<tr><td>type</td><td><a href="../../api_c/db_get_type.html">DB-&gt;get_type</a></td></tr>
+</table>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/xa.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbinfo.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/db_cxx.html b/bdb/docs/ref/upgrade.3.0/db_cxx.html
new file mode 100644
index 00000000000..e3a794e3865
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/db_cxx.html
@@ -0,0 +1,47 @@
+<!--$Id: db_cxx.so,v 11.9 2000/03/22 22:02:14 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the Db class for C++ and Java</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/dbenv_cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the Db class for C++ and Java</h1>
+<p>The static Db::open method and the DbInfo class have been removed in the
+Berkeley DB 3.0 release. The way to open a database file is to use the new Db
+constructor with two arguments, followed by set_XXX methods to configure
+the Db object, and finally a call to the new (nonstatic) Db::open(). In
+comparing the Berkeley DB 3.0 release open method with the 2.X static open
+method, the second argument is new. It is a database name, which can
+be null. The DbEnv argument has been removed, as the environment is now
+specified in the constructor. The open method no longer returns a Db,
+since it operates on one.
+<p>Here's a C++ example opening a Berkeley DB database using the 2.X interface:
+<p><blockquote><pre>// Note: by default, errors are thrown as exceptions
+Db *table;
+Db::open("lookup.db", DB_BTREE, DB_CREATE, 0644, dbenv, 0, &table);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>// Note: by default, errors are thrown as exceptions
+Db *table = new Db(dbenv, 0);
+table-&gt;open("lookup.db", NULL, DB_BTREE, DB_CREATE, 0644);</pre></blockquote>
+<p>Here's a Java example opening a Berkeley DB database using the 2.X interface:
+<p><blockquote><pre>// Note: errors are thrown as exceptions
+Db table = Db.open("lookup.db", Db.DB_BTREE, Db.DB_CREATE, 0644, dbenv, 0);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>// Note: errors are thrown as exceptions
+Db table = new Db(dbenv, 0);
+table.open("lookup.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);</pre></blockquote>
+<p>Note that if the dbenv argument is null, the database will not exist
+within an environment.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/dbenv_cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/dbenv.html b/bdb/docs/ref/upgrade.3.0/dbenv.html
new file mode 100644
index 00000000000..08b6ec149ef
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/dbenv.html
@@ -0,0 +1,68 @@
+<!--$Id: dbenv.so,v 11.9 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the DB_ENV structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/func.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the DB_ENV structure</h1>
+<p>The DB_ENV structure is now opaque for applications in the Berkeley DB
+3.0 release. Accesses to any fields within that structure by the
+application should be replaced with method calls. The following example
+illustrates this using the historic errpfx structure field. In the Berkeley DB
+2.X releases, applications set error prefixes using code similar to the
+following:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ dbenv-&gt;errpfx = "my prefix";</pre></blockquote>
+<p>In the Berkeley DB 3.X releases, this should be done using the
+<a href="../../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a> method, as follows:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ dbenv-&gt;set_errpfx(dbenv, "my prefix");</pre></blockquote>
+<p>The following table lists the DB_ENV fields previously used by
+applications and the methods that should now be used to set them.
+<p><table border=1 align=center>
+<tr><th>DB_ENV field</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>db_errcall</td><td><a href="../../api_c/env_set_errcall.html">DBENV-&gt;set_errcall</a></td></tr>
+<tr><td>db_errfile</td><td><a href="../../api_c/env_set_errfile.html">DBENV-&gt;set_errfile</a></td></tr>
+<tr><td>db_errpfx</td><td><a href="../../api_c/env_set_errpfx.html">DBENV-&gt;set_errpfx</a></td></tr>
+<tr><td>db_lorder</td><td>This field was removed from the DB_ENV structure in the Berkeley DB
+3.0 release as no application should have ever used it. Any code using
+it should be evaluated for potential bugs.</td></tr>
+<tr><td>db_paniccall</td><td><a href="../../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a></td></tr>
+<tr><td>db_verbose</td><td><a href="../../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a>
+<p>Note: the db_verbose field was a simple boolean toggle, while the
+<a href="../../api_c/env_set_verbose.html">DBENV-&gt;set_verbose</a> method takes arguments that specify exactly
+which verbose messages are desired.</td></tr>
+<tr><td>lg_max</td><td><a href="../../api_c/env_set_lg_max.html">DBENV-&gt;set_lg_max</a></td></tr>
+<tr><td>lk_conflicts</td><td><a href="../../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a></td></tr>
+<tr><td>lk_detect</td><td><a href="../../api_c/env_set_lk_detect.html">DBENV-&gt;set_lk_detect</a></td></tr>
+<tr><td>lk_max</td><td><a href="../../api_c/env_set_lk_max.html">DBENV-&gt;set_lk_max</a></td></tr>
+<tr><td>lk_modes</td><td><a href="../../api_c/env_set_lk_conflicts.html">DBENV-&gt;set_lk_conflicts</a></td></tr>
+<tr><td>mp_mmapsize</td><td><a href="../../api_c/env_set_mp_mmapsize.html">DBENV-&gt;set_mp_mmapsize</a></td></tr>
+<tr><td>mp_size</td><td><a href="../../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a>
+<p>Note: the <a href="../../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a> function takes additional arguments.
+Setting both the second argument (the number of GB in the pool) and the
+last argument (the number of memory pools to create) to 0 will result in
+behavior that is backward compatible with previous Berkeley DB releases.</td></tr>
+<tr><td>tx_info</td><td>This field was used by applications as an argument to the transaction
+subsystem functions. As those functions take references to a
+DB_ENV structure as arguments in the Berkeley DB 3.0 release, it should
+no longer be used by any application.</td></tr>
+<tr><td>tx_max</td><td><a href="../../api_c/env_set_tx_max.html">DBENV-&gt;set_tx_max</a></td></tr>
+<tr><td>tx_recover</td><td><a href="../../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a></td></tr>
+</table>
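+<p>As an illustration of the <a href="../../api_c/env_set_cachesize.html">DBENV-&gt;set_cachesize</a> replacement listed
+above (the cache size shown here is arbitrary, and the call's error return
+is ignored for brevity), an application that previously set the mp_size
+field directly might now configure a backward-compatible cache before
+opening the environment, as follows:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ /* Illustrative values: a 1MB cache, 0GB, in a single pool. */
+ dbenv-&gt;set_cachesize(dbenv, 0, 1024 * 1024, 0);</pre></blockquote>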
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/func.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/dbenv_cxx.html b/bdb/docs/ref/upgrade.3.0/dbenv_cxx.html
new file mode 100644
index 00000000000..8839d640897
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/dbenv_cxx.html
@@ -0,0 +1,72 @@
+<!--$Id: dbenv_cxx.so,v 11.10 2000/12/01 17:59:32 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the DbEnv class for C++ and Java</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/value_set.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/db_cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the DbEnv class for C++ and Java</h1>
+<p>The DbEnv::appinit() method and two constructors for the DbEnv class are
+gone. There is now a single way to create and initialize an environment:
+use the new DbEnv constructor with one argument, configure the DbEnv with
+the various set_XXX methods, and finally call DbEnv::open to initialize
+the environment.
+<p>Here's a C++ example creating a Berkeley DB environment using the 2.X interface:
+<p><blockquote><pre>int dberr;
+DbEnv *dbenv = new DbEnv();
+<p>
+dbenv-&gt;set_error_stream(&cerr);
+dbenv-&gt;set_errpfx("myprog");
+<p>
+if ((dberr = dbenv-&gt;appinit("/database/home",
+ NULL, DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL)) != 0) {
+ cerr &lt;&lt; "failure: " &lt;&lt; strerror(dberr);
+ exit (1);
+}</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>int dberr;
+DbEnv *dbenv = new DbEnv(0);
+<p>
+dbenv-&gt;set_error_stream(&cerr);
+dbenv-&gt;set_errpfx("myprog");
+<p>
+if ((dberr = dbenv-&gt;open("/database/home",
+ NULL, DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0)) != 0) {
+ cerr &lt;&lt; "failure: " &lt;&lt; dbenv-&gt;strerror(dberr);
+ exit (1);
+}</pre></blockquote>
+<p>Here's a Java example creating a Berkeley DB environment using the 2.X interface:
+<p><blockquote><pre>int dberr;
+DbEnv dbenv = new DbEnv();
+<p>
+dbenv.set_error_stream(System.err);
+dbenv.set_errpfx("myprog");
+<p>
+dbenv.appinit("/database/home",
+ null, Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>int dberr;
+DbEnv dbenv = new DbEnv(0);
+<p>
+dbenv.set_error_stream(System.err);
+dbenv.set_errpfx("myprog");
+<p>
+dbenv.open("/database/home",
+ null, Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);</pre></blockquote>
+<p>In the Berkeley DB 2.X release, DbEnv had accessors to obtain "managers" of type
+DbLockTab, DbLog, DbMpool and DbTxnMgr. If you used any of these managers,
+all of their methods are now found directly in the DbEnv class.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/value_set.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/db_cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/dbinfo.html b/bdb/docs/ref/upgrade.3.0/dbinfo.html
new file mode 100644
index 00000000000..da1f8460d80
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/dbinfo.html
@@ -0,0 +1,72 @@
+<!--$Id: dbinfo.so,v 11.8 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the DB_INFO structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/db.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/join.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the DB_INFO structure</h1>
+<p>The DB_INFO structure has been removed from the Berkeley DB 3.0 release.
+Accesses to any fields within that structure by the application should be
+replaced with method calls on the DB handle. The following
+example illustrates this using the historic db_cachesize structure field.
+In the Berkeley DB 2.X releases, applications could set the size of an
+underlying database cache using code similar to the following:
+<p><blockquote><pre>DB_INFO dbinfo;
+<p>
+ memset(&dbinfo, 0, sizeof(dbinfo));
+ dbinfo.db_cachesize = 1024 * 1024;</pre></blockquote>
+<p>In the Berkeley DB 3.X releases, this should be done using the
+<a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> method, as follows:
+<p><blockquote><pre>DB *db;
+int ret;
+<p>
+ ret = db-&gt;set_cachesize(db, 0, 1024 * 1024, 0);</pre></blockquote>
+<p>The DB_INFO structure is no longer used in any way by the Berkeley DB 3.0
+release, and should be removed from the application.
+<p>The following table lists the DB_INFO fields previously used by
+applications and the methods that should now be used to set
+them. Because these calls provide configuration for the
+database open, they must precede the call to <a href="../../api_c/db_open.html">DB-&gt;open</a>.
+Calling them after the call to <a href="../../api_c/db_open.html">DB-&gt;open</a> will return an
+error.
+<p><table border=1 align=center>
+<tr><th>DB_INFO field</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>bt_compare</td><td><a href="../../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a></td></tr>
+<tr><td>bt_minkey</td><td><a href="../../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a></td></tr>
+<tr><td>bt_prefix</td><td><a href="../../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a></td></tr>
+<tr><td>db_cachesize</td><td><a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>
+<p>Note: the <a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> function takes additional arguments.
+Setting both the second argument (the number of GB in the pool) and the
+last argument (the number of memory pools to create) to 0 will result in
+behavior that is backward compatible with previous Berkeley DB releases.</td></tr>
+<tr><td>db_lorder</td><td><a href="../../api_c/db_set_lorder.html">DB-&gt;set_lorder</a></td></tr>
+<tr><td>db_malloc</td><td><a href="../../api_c/db_set_malloc.html">DB-&gt;set_malloc</a></td></tr>
+<tr><td>db_pagesize</td><td><a href="../../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a></td></tr>
+<tr><td>dup_compare</td><td><a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a></td></tr>
+<tr><td>flags</td><td><a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a>
+<p>Note: the DB_DELIMITER, DB_FIXEDLEN and DB_PAD flags no longer need to be
+set as there are specific methods off the DB handle that set the
+file delimiter, the length of fixed-length records and the fixed-length
+record pad character. They should simply be discarded from the application.</td></tr>
+<tr><td>h_ffactor</td><td><a href="../../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a></td></tr>
+<tr><td>h_hash</td><td><a href="../../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a></td></tr>
+<tr><td>h_nelem</td><td><a href="../../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a></td></tr>
+<tr><td>re_delim</td><td><a href="../../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a></td></tr>
+<tr><td>re_len</td><td><a href="../../api_c/db_set_re_len.html">DB-&gt;set_re_len</a></td></tr>
+<tr><td>re_pad</td><td><a href="../../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a></td></tr>
+<tr><td>re_source</td><td><a href="../../api_c/db_set_re_source.html">DB-&gt;set_re_source</a></td></tr>
+</table>
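+<p>As an illustration of the ordering requirement described above (the file
+name, page size and flag are placeholders, dbenv is an already-created
+environment handle or NULL, and error checking of the set_XXX calls is
+omitted for brevity), configuration that was previously expressed through
+DB_INFO fields is now done with method calls on the DB handle between
+<a href="../../api_c/db_create.html">db_create</a> and <a href="../../api_c/db_open.html">DB-&gt;open</a>:
+<p><blockquote><pre>DB *db;
+int ret;
+<p>
+ /* Illustrative fragment: configure the handle before DB-&gt;open. */
+ if ((ret = db_create(&db, dbenv, 0)) != 0)
+  return (ret);
+ db-&gt;set_pagesize(db, 8 * 1024);
+ db-&gt;set_flags(db, DB_DUP);
+ if ((ret = db-&gt;open(db,
+     "file.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+  return (ret);</pre></blockquote>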
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/db.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/join.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/disk.html b/bdb/docs/ref/upgrade.3.0/disk.html
new file mode 100644
index 00000000000..f6ea2799be9
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/disk.html
@@ -0,0 +1,30 @@
+<!--$Id: disk.so,v 11.15 2000/12/21 18:37:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: upgrade requirements</h1>
+<p>Log file formats and the Btree, Recno and Hash Access Method database
+formats changed in the Berkeley DB 3.0 release. (The on-disk Btree/Recno
+format changed from version 6 to version 7. The on-disk Hash format
+changed from version 5 to version 6.) Until the underlying databases
+are upgraded, the <a href="../../api_c/db_open.html">DB-&gt;open</a> function will return a <a href="../../api_c/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a>
+error.
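+<p>As an illustration only (the database name is a placeholder and the error
+handling is left to the application), the return from <a href="../../api_c/db_open.html">DB-&gt;open</a> can be
+checked for this condition:
+<p><blockquote><pre>DB *db;
+int ret;
+<p>
+ /* Illustrative fragment. */
+ if ((ret = db-&gt;open(db,
+     "file.db", NULL, DB_BTREE, 0, 0664)) == DB_OLD_VERSION) {
+  /* The database must be upgraded before it can be opened. */
+ }</pre></blockquote>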
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB
+installations</a>.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/eacces.html b/bdb/docs/ref/upgrade.3.0/eacces.html
new file mode 100644
index 00000000000..b7fb3e8598a
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/eacces.html
@@ -0,0 +1,28 @@
+<!--$Id: eacces.so,v 11.7 2000/12/01 17:58:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: EACCES</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/eagain.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/jump_set.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: EACCES</h1>
+<p>Previous releases of the Berkeley DB documentation incorrectly stated that
+the <a href="../../api_c/lock_put.html">lock_put</a> and <a href="../../api_c/lock_vec.html">lock_vec</a> interfaces could
+return EACCES as an error to indicate that a lock could not be released
+because it was held by another locker. The application should be
+searched for any occurrences of EACCES. For each occurrence that checks
+for an EACCES return from <a href="../../api_c/lock_put.html">lock_put</a> or <a href="../../api_c/lock_vec.html">lock_vec</a>,
+the test and any associated error handling should be removed.
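+<p>As an illustration only (the 3.0 calling convention for <a href="../../api_c/lock_put.html">lock_put</a> is
+shown, and the variable names are placeholders), a check such as:
+<p><blockquote><pre> /* Illustrative fragment. */
+ if ((ret = lock_put(dbenv, &lock)) == EACCES) {
+  /* Handle "lock held by another locker". */
+ }</pre></blockquote>
+<p>can simply become:
+<p><blockquote><pre> ret = lock_put(dbenv, &lock);</pre></blockquote>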
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/eagain.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/jump_set.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/eagain.html b/bdb/docs/ref/upgrade.3.0/eagain.html
new file mode 100644
index 00000000000..e998c1b4351
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/eagain.html
@@ -0,0 +1,34 @@
+<!--$Id: eagain.so,v 11.5 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: EAGAIN</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/eacces.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: EAGAIN</h1>
+<p>Historically, the Berkeley DB interfaces have returned the POSIX error value
+EAGAIN to indicate a deadlock. This has been removed from the Berkeley DB 3.0
+release in order to make it possible for applications to distinguish
+between EAGAIN errors returned by the system and returns from Berkeley DB
+indicating deadlock.
+<p>The application should be searched for any occurrences of EAGAIN. For
+each of these, any that are checking for a deadlock return from Berkeley DB
+should be changed to check for the DB_LOCK_DEADLOCK return value.
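+<p>As an illustration only (using the DB-&gt;get method; the variable names
+are placeholders), a deadlock check written as:
+<p><blockquote><pre> /* Illustrative fragment. */
+ if ((ret = dbp-&gt;get(dbp, txn, &key, &data, 0)) == EAGAIN) {
+  /* Handle deadlock. */
+ }</pre></blockquote>
+<p>should now be written as:
+<p><blockquote><pre> if ((ret = dbp-&gt;get(dbp, txn, &key, &data, 0)) == DB_LOCK_DEADLOCK) {
+  /* Handle deadlock. */
+ }</pre></blockquote>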
+<p>If, for any reason, this is a difficult change for the application to
+make, the <b>include/db.src</b> distribution file should be modified to
+translate all returns of DB_LOCK_DEADLOCK to EAGAIN. Search for the
+string EAGAIN in that file; a comment there describes how to make
+the change.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/eacces.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/envopen.html b/bdb/docs/ref/upgrade.3.0/envopen.html
new file mode 100644
index 00000000000..3c20a0e9e21
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/envopen.html
@@ -0,0 +1,156 @@
+<!--$Id: envopen.so,v 11.12 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: environment open/close/unlink</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/func.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: environment open/close/unlink</h1>
+<p>The hardest part of upgrading your application from a 2.X code base to
+the 3.0 release is translating the Berkeley DB environment open, close and
+remove calls.
+<p>There were two logical changes in this part of the Berkeley DB interface.
+First, in Berkeley DB 3.0, there are no longer separate structures that
+represent each subsystem (e.g., DB_LOCKTAB or DB_TXNMGR) and an overall
+DB_ENV environment structure. Instead there is only the
+DB_ENV structure. This means that DB_ENV references should
+be passed around by your application instead of passing around DB_LOCKTAB
+or DB_TXNMGR references. This is likely to be a simple change for most
+applications as few applications use the lock_XXX, log_XXX,
+memp_XXX or txn_XXX interfaces to create Berkeley DB environments.
+<p>The second change is that there are no longer separate open, close, and
+unlink interfaces to the Berkeley DB subsystems. For example, in previous
+releases it was possible to open a lock subsystem using either db_appinit
+or the lock_open call. In the 3.0 release the XXX_open interfaces to the
+subsystems have been removed, and subsystems must now be opened using the
+3.0 replacement for the db_appinit call.
+<p>To upgrade your application, first find each place your application opens,
+closes and/or removes a Berkeley DB environment. This will be code of the form:
+<p><blockquote><pre>db_appinit, db_appexit
+lock_open, lock_close, lock_unlink
+log_open, log_close, log_unlink
+memp_open, memp_close, memp_unlink
+txn_open, txn_close, txn_unlink</pre></blockquote>
+<p>Each of these groups of calls should be replaced with calls to:
+<p><blockquote><pre><a href="../../api_c/env_create.html">db_env_create</a>, <a href="../../api_c/env_open.html">DBENV-&gt;open</a>, <a href="../../api_c/env_close.html">DBENV-&gt;close</a>,
+<a href="../../api_c/env_remove.html">DBENV-&gt;remove</a></pre></blockquote>
+<p>The <a href="../../api_c/env_create.html">db_env_create</a> call and the call to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a>
+method replace the db_appinit, lock_open, log_open, memp_open and txn_open
+calls. The <a href="../../api_c/env_close.html">DBENV-&gt;close</a> method replaces the db_appexit,
+lock_close, log_close, memp_close and txn_close calls. The
+<a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> call replaces the lock_unlink, log_unlink,
+memp_unlink and txn_unlink calls.
+<p>Here's an example creating a Berkeley DB environment using the 2.X interface:
+<p><blockquote><pre>/*
+ * db_init --
+ * Initialize the environment.
+ */
+DB_ENV *
+db_init(home)
+ char *home;
+{
+ DB_ENV *dbenv;
+<p>
+ if ((dbenv = (DB_ENV *)calloc(sizeof(DB_ENV), 1)) == NULL)
+ return (NULL);
+<p>
+ if ((errno = db_appinit(home, NULL, dbenv,
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
+ DB_USE_ENVIRON)) == 0)
+ return (dbenv);
+<p>
+ free(dbenv);
+ return (NULL);
+}</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(home, dbenvp)
+ char *home;
+ DB_ENV **dbenvp;
+{
+ int ret;
+ DB_ENV *dbenv;
+<p>
+ if ((ret = db_env_create(&dbenv, 0)) != 0)
+ return (ret);
+<p>
+ if ((ret = dbenv-&gt;open(dbenv, home, NULL,
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
+ DB_USE_ENVIRON, 0)) == 0) {
+ *dbenvp = dbenv;
+ return (0);
+ }
+<p>
+ (void)dbenv-&gt;close(dbenv, 0);
+ return (ret);
+}</pre></blockquote>
+<p>As you can see, the arguments to db_appinit and to <a href="../../api_c/env_open.html">DBENV-&gt;open</a> are
+largely the same. There is some minor re-organization: the mapping is
+that arguments #1, 2, 3, and 4 to db_appinit become arguments #2, 3, 1
+and 4 to <a href="../../api_c/env_open.html">DBENV-&gt;open</a>. There is one additional argument to
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a>, argument #5. For backward compatibility with the 2.X
+Berkeley DB releases, simply set that argument to 0.
+<p>It is only slightly more complex to translate calls to XXX_open to the
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> method. Here's an example of creating a lock region
+using the 2.X interface:
+<p><blockquote><pre>lock_open(dir, DB_CREATE, 0664, dbenv, &regionp);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>if ((ret = db_env_create(&dbenv, 0)) != 0)
+ return (ret);
+<p>
+if ((ret = dbenv-&gt;open(dbenv,
+ dir, NULL, DB_CREATE | DB_INIT_LOCK, 0664)) == 0) {
+ *dbenvp = dbenv;
+ return (0);
+}</pre></blockquote>
+<p>Note that in this example, you no longer need the DB_LOCKTAB structure
+reference that was required in Berkeley DB 2.X releases.
+<p>The final issue with upgrading the db_appinit call is the DB_MPOOL_PRIVATE
+option previously provided for the db_appinit interface. If your
+application is using this flag, it should almost certainly use the new
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> interface. Regardless,
+you should carefully consider this change before converting to use the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag.
+<p>Translating db_appexit or XXX_close calls to <a href="../../api_c/env_close.html">DBENV-&gt;close</a> is equally
+simple. Instead of taking a reference to a per-subsystem structure such
+as DB_LOCKTAB or DB_TXNMGR, all calls take a reference to a DB_ENV
+structure. The calling sequence is otherwise unchanged. Note that as
+the application no longer allocates the memory for the DB_ENV structure,
+application code to discard it after the call to db_appexit() is no longer
+needed.
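+<p>As an illustration only (the variable name is a placeholder), a 2.X
+shutdown sequence such as:
+<p><blockquote><pre> /* Illustrative fragment. */
+ (void)db_appexit(dbenv);
+ free(dbenv);</pre></blockquote>
+<p>would now be written as:
+<p><blockquote><pre> (void)dbenv-&gt;close(dbenv, 0);</pre></blockquote>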
+<p>Translating XXX_unlink calls to <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> is slightly more complex.
+As with <a href="../../api_c/env_close.html">DBENV-&gt;close</a>, the call takes a reference to a DB_ENV
+structure instead of a per-subsystem structure. The calling sequence is
+slightly different, however. Here is an example of removing a lock region
+using the 2.X interface:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ret = lock_unlink(dir, 1, dbenv);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code fragment would be written as:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ret = dbenv-&gt;remove(dbenv, dir, NULL, DB_FORCE);</pre></blockquote>
+<p>The additional argument to the <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> function is a
+configuration argument similar to that previously taken by db_appinit and
+now taken by the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> method. For backward compatibility
+this new argument should simply be set to NULL. The force argument to
+XXX_unlink is now a flag value that is set by bitwise inclusively <b>OR</b>'ing it into the
+<a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> flag argument.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/func.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/func.html b/bdb/docs/ref/upgrade.3.0/func.html
new file mode 100644
index 00000000000..b6f7d816b49
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/func.html
@@ -0,0 +1,69 @@
+<!--$Id: func.so,v 11.8 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: function arguments</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/envopen.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbenv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: function arguments</h1>
+<p>In Berkeley DB 3.0, there are no longer separate structures that
+represent each subsystem (e.g., DB_LOCKTAB or DB_TXNMGR), and an overall
+DB_ENV environment structure. Instead there is only the
+DB_ENV structure. This means that DB_ENV references should
+be passed around by your application instead of passing around DB_LOCKTAB
+or DB_TXNMGR references.
+<p>Each of the following functions:
+<p><blockquote><pre>lock_detect
+lock_get
+lock_id
+lock_put
+lock_stat
+lock_vec</pre></blockquote>
+<p>should have its first argument, a reference to the DB_LOCKTAB structure,
+replaced with a reference to the enclosing DB_ENV structure. For
+example, the following line of code from a Berkeley DB 2.X application:
+<p><blockquote><pre>DB_LOCKTAB *lt;
+DB_LOCK lock;
+ ret = lock_put(lt, lock);</pre></blockquote>
+<p>should now be written as follows:
+<p><blockquote><pre>DB_ENV *dbenv;
+DB_LOCK lock;
+ ret = lock_put(dbenv, &lock);</pre></blockquote>
+<p>Similarly, all of the functions:
+<p><blockquote><pre>log_archive
+log_compare
+log_file
+log_flush
+log_get
+log_put
+log_register
+log_stat
+log_unregister</pre></blockquote>
+<p>should have their DB_LOG argument replaced with a reference to a
+DB_ENV structure, and the functions:
+<p><blockquote><pre>memp_fopen
+memp_register
+memp_stat
+memp_sync
+memp_trickle</pre></blockquote>
+<p>should have their DB_MPOOL argument replaced with a reference to a
+DB_ENV structure.
+<p>You should remove all references to the DB_LOCKTAB, DB_LOG, DB_MPOOL, and
+DB_TXNMGR structures from your application; they are no longer useful
+in any way. In fact, a simple way to identify all of the places that
+need to be upgraded is to remove all such structures and the variables
+they declare, and then compile: the compiler will warn about each
+remaining use that needs to be upgraded.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/envopen.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbenv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/intro.html b/bdb/docs/ref/upgrade.3.0/intro.html
new file mode 100644
index 00000000000..a74e40f4ee7
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/intro.html
@@ -0,0 +1,26 @@
+<!--$Id: intro.so,v 11.6 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.2.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/envopen.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 2.X release interfaces to the Berkeley DB 3.0 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.2.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/envopen.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/java.html b/bdb/docs/ref/upgrade.3.0/java.html
new file mode 100644
index 00000000000..3997095bc96
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/java.html
@@ -0,0 +1,34 @@
+<!--$Id: java.so,v 11.8 2000/12/01 18:33:56 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: additional Java changes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: additional Java changes</h1>
+<p>There are several additional types of exceptions thrown in the Berkeley DB 3.0
+Java API.
+<p>DbMemoryException and DbDeadlockException can be caught independently of
+DbException if you want to do special handling for these kinds of errors.
+Because they are subclassed from DbException, a try block that catches
+DbException will catch them as well, so existing code is not required to
+change. If you do catch them separately, the catch clauses for these new
+exceptions must appear before the catch clause for DbException.
+<p>You will need to add a catch clause for java.io.FileNotFoundException,
+since that can be thrown by the <a href="../../api_java/db_open.html">Db.open</a> and <a href="../../api_java/env_open.html">DbEnv.open</a> functions.
+<p>There are a number of smaller changes to the API that bring the C, C++
+and Java APIs much closer in terms of functionality and usage. Please
+refer to the pages for upgrading C applications for further details.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/join.html b/bdb/docs/ref/upgrade.3.0/join.html
new file mode 100644
index 00000000000..82c9019fa1b
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/join.html
@@ -0,0 +1,28 @@
+<!--$Id: join.so,v 11.9 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB-&gt;join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/dbinfo.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB-&gt;join</h1>
+<p>Historically, the last two arguments to the Berkeley DB <a href="../../api_c/db_join.html">DB-&gt;join</a>
+interface were a flags value followed by a reference to a memory location
+to store the returned cursor object. In the Berkeley DB 3.0 release, the
+order of those two arguments has been swapped for consistency with other
+Berkeley DB interfaces.
+<p>The application should be searched for any occurrences of <a href="../../api_c/db_join.html">DB-&gt;join</a>.
+For each of these, the order of the last two arguments should be swapped.
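+<p>As an illustration only (the variable names are placeholders), a 2.X
+call of the form:
+<p><blockquote><pre> /* Illustrative fragment. */
+ ret = dbp-&gt;join(dbp, curslist, 0, &join_curs);</pre></blockquote>
+<p>should now be written as:
+<p><blockquote><pre> ret = dbp-&gt;join(dbp, curslist, &join_curs, 0);</pre></blockquote>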
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/dbinfo.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/jump_set.html b/bdb/docs/ref/upgrade.3.0/jump_set.html
new file mode 100644
index 00000000000..c93e7270ee6
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/jump_set.html
@@ -0,0 +1,48 @@
+<!--$Id: jump_set.so,v 11.6 2000/03/18 21:43:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: db_jump_set</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/eacces.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/value_set.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: db_jump_set</h1>
+<p>The db_jump_set interface has been removed from the Berkeley DB 3.0 release,
+replaced by method calls on the DB_ENV handle.
+<p>The following table lists the db_jump_set arguments previously used by
+applications and the methods that should now be used instead.
+<p><table border=1 align=center>
+<tr><th>db_jump_set argument</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>DB_FUNC_CLOSE</td><td><a href="../../api_c/set_func_close.html">db_env_set_func_close</a></td></tr>
+<tr><td>DB_FUNC_DIRFREE</td><td><a href="../../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a></td></tr>
+<tr><td>DB_FUNC_DIRLIST</td><td><a href="../../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a></td></tr>
+<tr><td>DB_FUNC_EXISTS</td><td><a href="../../api_c/set_func_exists.html">db_env_set_func_exists</a></td></tr>
+<tr><td>DB_FUNC_FREE</td><td><a href="../../api_c/set_func_free.html">db_env_set_func_free</a></td></tr>
+<tr><td>DB_FUNC_FSYNC</td><td><a href="../../api_c/set_func_fsync.html">db_env_set_func_fsync</a></td></tr>
+<tr><td>DB_FUNC_IOINFO</td><td><a href="../../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a></td></tr>
+<tr><td>DB_FUNC_MALLOC</td><td><a href="../../api_c/set_func_malloc.html">db_env_set_func_malloc</a></td></tr>
+<tr><td>DB_FUNC_MAP</td><td><a href="../../api_c/set_func_map.html">db_env_set_func_map</a></td></tr>
+<tr><td>DB_FUNC_OPEN</td><td><a href="../../api_c/set_func_open.html">db_env_set_func_open</a></td></tr>
+<tr><td>DB_FUNC_READ</td><td><a href="../../api_c/set_func_read.html">db_env_set_func_read</a></td></tr>
+<tr><td>DB_FUNC_REALLOC</td><td><a href="../../api_c/set_func_realloc.html">db_env_set_func_realloc</a></td></tr>
+<tr><td>DB_FUNC_RUNLINK</td><td>The DB_FUNC_RUNLINK functionality has been removed from the Berkeley DB
+3.0 release, and should be removed from the application.</td></tr>
+<tr><td>DB_FUNC_SEEK</td><td><a href="../../api_c/set_func_seek.html">db_env_set_func_seek</a></td></tr>
+<tr><td>DB_FUNC_SLEEP</td><td><a href="../../api_c/set_func_sleep.html">db_env_set_func_sleep</a></td></tr>
+<tr><td>DB_FUNC_UNLINK</td><td><a href="../../api_c/set_func_unlink.html">db_env_set_func_unlink</a></td></tr>
+<tr><td>DB_FUNC_UNMAP</td><td><a href="../../api_c/set_func_unmap.html">db_env_set_func_unmap</a></td></tr>
+<tr><td>DB_FUNC_WRITE</td><td><a href="../../api_c/set_func_write.html">db_env_set_func_write</a></td></tr>
+<tr><td>DB_FUNC_YIELD</td><td><a href="../../api_c/set_func_yield.html">db_env_set_func_yield</a></td></tr>
+</table>
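+<p>As an illustration of the replacements listed above (my_malloc is a
+placeholder application function, and the 2.X fragment is shown for
+illustration only), a 2.X call such as:
+<p><blockquote><pre> /* Illustrative fragment. */
+ db_jump_set((void *)my_malloc, DB_FUNC_MALLOC);</pre></blockquote>
+<p>should now be written as:
+<p><blockquote><pre> db_env_set_func_malloc(my_malloc);</pre></blockquote>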
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/eacces.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/value_set.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/lock_detect.html b/bdb/docs/ref/upgrade.3.0/lock_detect.html
new file mode 100644
index 00000000000..4ff00a8a6b0
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/lock_detect.html
@@ -0,0 +1,24 @@
+<!--$Id: lock_detect.so,v 11.8 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: lock_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/lock_put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: lock_detect</h1>
+<p>An additional argument has been added to the <a href="../../api_c/lock_detect.html">lock_detect</a> interface.
+<p>The application should be searched for any occurrences of <a href="../../api_c/lock_detect.html">lock_detect</a>.
+For each one, a NULL argument should be appended to the current arguments.
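+<p>As an illustration only (this fragment also reflects the DB_LOCKTAB to
+DB_ENV argument change described elsewhere in this upgrade guide), a 2.X
+call such as:
+<p><blockquote><pre> /* Illustrative fragment. */
+ ret = lock_detect(lt, 0, DB_LOCK_DEFAULT);</pre></blockquote>
+<p>should now be written as:
+<p><blockquote><pre> ret = lock_detect(dbenv, 0, DB_LOCK_DEFAULT, NULL);</pre></blockquote>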
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/lock_put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/lock_notheld.html b/bdb/docs/ref/upgrade.3.0/lock_notheld.html
new file mode 100644
index 00000000000..3f11738563e
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/lock_notheld.html
@@ -0,0 +1,27 @@
+<!--$Id: lock_notheld.so,v 11.7 2000/12/01 17:58:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB_LOCK_NOTHELD</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/rmw.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/eagain.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB_LOCK_NOTHELD</h1>
+<p>Historically, the Berkeley DB <a href="../../api_c/lock_put.html">lock_put</a> and <a href="../../api_c/lock_vec.html">lock_vec</a> interfaces
+could return the DB_LOCK_NOTHELD error to indicate that a lock could
+not be released as it was held by another locker. This error can no
+longer be returned under any circumstances. The application should be
+searched for any occurrences of DB_LOCK_NOTHELD. For each of these,
+the test and any error processing should be removed.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/rmw.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/eagain.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/lock_put.html b/bdb/docs/ref/upgrade.3.0/lock_put.html
new file mode 100644
index 00000000000..d6057f8e291
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/lock_put.html
@@ -0,0 +1,25 @@
+<!--$Id: lock_put.so,v 11.8 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: lock_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_detect.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: lock_put</h1>
+<p>An argument change has been made in the <a href="../../api_c/lock_put.html">lock_put</a> interface.
+<p>The application should be searched for any occurrences of <a href="../../api_c/lock_put.html">lock_put</a>.
+For each one, instead of passing a DB_LOCK variable as the last argument
+to the function, the address of the DB_LOCK variable should be passed.
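+<p>As an illustration only (this fragment also reflects the DB_LOCKTAB to
+DB_ENV argument change described elsewhere in this upgrade guide), a 2.X
+call such as:
+<p><blockquote><pre> /* Illustrative fragment. */
+DB_LOCK lock;
+ ret = lock_put(lt, lock);</pre></blockquote>
+<p>should now be written as:
+<p><blockquote><pre>DB_LOCK lock;
+ ret = lock_put(dbenv, &lock);</pre></blockquote>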
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_detect.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/lock_stat.html b/bdb/docs/ref/upgrade.3.0/lock_stat.html
new file mode 100644
index 00000000000..80504db3bdf
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/lock_stat.html
@@ -0,0 +1,24 @@
+<!--$Id: lock_stat.so,v 11.3 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: lock_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/lock_detect.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: lock_stat</h1>
+<p>The <b>st_magic</b>, <b>st_version</b>, <b>st_numobjs</b> and
+<b>st_refcnt</b> fields returned from the <a href="../../api_c/lock_stat.html">lock_stat</a> interface
+have been removed, and this information is no longer available.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/lock_detect.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/log_register.html b/bdb/docs/ref/upgrade.3.0/log_register.html
new file mode 100644
index 00000000000..3a856275ff0
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/log_register.html
@@ -0,0 +1,25 @@
+<!--$Id: log_register.so,v 11.8 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: log_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/lock_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/log_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: log_register</h1>
+<p>An argument has been removed from the <a href="../../api_c/log_register.html">log_register</a> interface.
+The application should be searched for any occurrences of
+<a href="../../api_c/log_register.html">log_register</a>. In each of these, the DBTYPE argument (it is the
+fourth argument) should be removed.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/lock_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/log_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/log_stat.html b/bdb/docs/ref/upgrade.3.0/log_stat.html
new file mode 100644
index 00000000000..8c023bfe26f
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/log_stat.html
@@ -0,0 +1,23 @@
+<!--$Id: log_stat.so,v 11.3 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: log_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/memp_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: log_stat</h1>
+<p>The <b>st_refcnt</b> field returned from the <a href="../../api_c/log_stat.html">log_stat</a> interface
+has been removed, and this information is no longer available.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/memp_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/memp_stat.html b/bdb/docs/ref/upgrade.3.0/memp_stat.html
new file mode 100644
index 00000000000..ff61fa745d6
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/memp_stat.html
@@ -0,0 +1,26 @@
+<!--$Id: memp_stat.so,v 11.3 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: memp_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/log_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_begin.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: memp_stat</h1>
+<p>The <b>st_refcnt</b> field returned from the <a href="../../api_c/memp_stat.html">memp_stat</a> interface
+has been removed, and this information is no longer available.
+<p>The <b>st_cachesize</b> field returned from the <a href="../../api_c/memp_stat.html">memp_stat</a>
+interface has been replaced with two new fields, <b>st_gbytes</b> and
+<b>st_bytes</b>.
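+<p>If your application reported the cache size, the two new fields can simply
+be combined. For example (a sketch assuming <b>sp</b> is the DB_MPOOL_STAT
+pointer returned by <a href="../../api_c/memp_stat.html">memp_stat</a>):
+<p><blockquote><pre>printf("cache size: %lu GB + %lu bytes\n",
+    (u_long)sp-&gt;st_gbytes, (u_long)sp-&gt;st_bytes);</pre></blockquote>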
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/log_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_begin.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/open.html b/bdb/docs/ref/upgrade.3.0/open.html
new file mode 100644
index 00000000000..3730ab4749d
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/open.html
@@ -0,0 +1,65 @@
+<!--$Id: open.so,v 11.10 2000/03/18 21:43:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: database open/close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/dbenv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/xa.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: database open/close</h1>
+<p>Database opens were changed in the Berkeley DB 3.0 release in a similar way to
+environment opens.
+<p>To upgrade your application, first find each place your application opens
+a database, that is, calls the db_open function. Each of these calls
+should be replaced with calls to <a href="../../api_c/db_create.html">db_create</a> and <a href="../../api_c/db_open.html">DB-&gt;open</a>.
+<p>Here's an example creating a Berkeley DB database using the 2.X interface:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_open(DATABASE,
+ DB_BTREE, DB_CREATE, 0664, dbenv, NULL, &dbp)) != 0)
+ return (ret);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+<p>
+if ((ret = dbp-&gt;open(dbp,
+ DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ (void)dbp-&gt;close(dbp, 0);
+ return (ret);
+}</pre></blockquote>
+<p>As you can see, the arguments to db_open and to <a href="../../api_c/db_open.html">DB-&gt;open</a> are
+largely the same. There is some re-organization, and note that the
+enclosing DB_ENV structure is specified when the DB object
+is created using the <a href="../../api_c/db_create.html">db_create</a> interface. There is one
+additional argument to <a href="../../api_c/db_open.html">DB-&gt;open</a>, argument #3. For backward
+compatibility with the 2.X Berkeley DB releases, simply set that argument to
+NULL.
+<p>There are two additional issues with the db_open call.
+<p>First, it was possible in the 2.X releases for an application to provide
+an environment that did not contain a shared memory buffer pool as the
+database environment, and Berkeley DB would create a private one automatically.
+This functionality is no longer available; applications must specify the
+<a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a> flag if databases are going to be opened in the
+environment.
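+<p>For example (a sketch only; the remaining arguments follow the 3.0
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> documentation and the home directory is a placeholder),
+an environment in which databases will be opened might be opened as:
+<p><blockquote><pre>if ((ret = dbenv-&gt;open(dbenv,
+    home, NULL, DB_CREATE | DB_INIT_MPOOL, 0)) != 0)
+	return (ret);</pre></blockquote>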
+<p>The final issue with upgrading the db_open call is that the DB_INFO
+structure is no longer used, having been replaced by individual methods
+on the DB handle. That change is discussed in detail later in
+this chapter.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/dbenv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/xa.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/rmw.html b/bdb/docs/ref/upgrade.3.0/rmw.html
new file mode 100644
index 00000000000..a1a30da5ecf
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/rmw.html
@@ -0,0 +1,31 @@
+<!--$Id: rmw.so,v 11.9 2000/03/18 21:43:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB_RMW</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/txn_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_notheld.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB_RMW</h1>
+<p>The following change applies only to applications using the
+Berkeley DB Concurrent Data Store product. If your application is not using that product,
+you can ignore this change.
+<p>Historically, the Berkeley DB <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> interface took the DB_RMW flag
+to indicate that the created cursor would be used for write operations on
+the database. This flag has been renamed to the <a href="../../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a>
+flag.
+<p>The application should be searched for any occurrences of DB_RMW. For
+each of these, any that are arguments to the <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> function
+should be changed to pass in the <a href="../../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag instead.
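+<p>For example (the database handle and cursor variables are placeholders),
+a call that appeared as:
+<p><blockquote><pre>if ((ret = dbp-&gt;cursor(dbp, NULL, &dbc, DB_RMW)) != 0)
+	return (ret);</pre></blockquote>
+<p>would be changed to:
+<p><blockquote><pre>if ((ret = dbp-&gt;cursor(dbp, NULL, &dbc, DB_WRITECURSOR)) != 0)
+	return (ret);</pre></blockquote>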
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/txn_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_notheld.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/stat.html b/bdb/docs/ref/upgrade.3.0/stat.html
new file mode 100644
index 00000000000..735e235d9cd
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/stat.html
@@ -0,0 +1,24 @@
+<!--$Id: stat.so,v 11.3 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB-&gt;stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/join.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB-&gt;stat</h1>
+<p>The <b>bt_flags</b> field returned from the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> interface
+for Btree and Recno databases has been removed, and this information is
+no longer available.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/join.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/toc.html b/bdb/docs/ref/upgrade.3.0/toc.html
new file mode 100644
index 00000000000..189d7c0a657
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/toc.html
@@ -0,0 +1,47 @@
+<!--$Id: toc.so,v 11.2 2000/12/05 20:36:26 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Upgrading Berkeley DB 2.X.X applications to Berkeley DB 3.0</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Upgrading Berkeley DB 2.X.X applications to Berkeley DB 3.0</h1>
+<ol>
+<li><a href="intro.html">Release 3.0: introduction</a>
+<li><a href="envopen.html">Release 3.0: environment open/close/unlink</a>
+<li><a href="func.html">Release 3.0: function arguments</a>
+<li><a href="dbenv.html">Release 3.0: the DB_ENV structure</a>
+<li><a href="open.html">Release 3.0: database open/close</a>
+<li><a href="xa.html">Release 3.0: db_xa_open</a>
+<li><a href="db.html">Release 3.0: the DB structure</a>
+<li><a href="dbinfo.html">Release 3.0: the DBINFO structure</a>
+<li><a href="join.html">Release 3.0: DB-&gt;join</a>
+<li><a href="stat.html">Release 3.0: DB-&gt;stat</a>
+<li><a href="close.html">Release 3.0: DB-&gt;sync and DB-&gt;close</a>
+<li><a href="lock_put.html">Release 3.0: lock_put</a>
+<li><a href="lock_detect.html">Release 3.0: lock_detect</a>
+<li><a href="lock_stat.html">Release 3.0: lock_stat</a>
+<li><a href="log_register.html">Release 3.0: log_register</a>
+<li><a href="log_stat.html">Release 3.0: log_stat</a>
+<li><a href="memp_stat.html">Release 3.0: memp_stat</a>
+<li><a href="txn_begin.html">Release 3.0: txn_begin</a>
+<li><a href="txn_commit.html">Release 3.0: txn_commit</a>
+<li><a href="txn_stat.html">Release 3.0: txn_stat</a>
+<li><a href="rmw.html">Release 3.0: DB_RMW</a>
+<li><a href="lock_notheld.html">Release 3.0: DB_LOCK_NOTHELD</a>
+<li><a href="eagain.html">Release 3.0: EAGAIN</a>
+<li><a href="eacces.html">Release 3.0: EACCES</a>
+<li><a href="jump_set.html">Release 3.0: db_jump_set</a>
+<li><a href="value_set.html">Release 3.0: db_value_set</a>
+<li><a href="dbenv_cxx.html">Release 3.0: the DbEnv class for C++ and Java</a>
+<li><a href="db_cxx.html">Release 3.0: the Db class for C++ and Java</a>
+<li><a href="cxx.html">Release 3.0: additional C++ changes</a>
+<li><a href="java.html">Release 3.0: additional Java changes</a>
+<li><a href="disk.html">Release 3.0: upgrade requirements</a>
+</ol>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/txn_begin.html b/bdb/docs/ref/upgrade.3.0/txn_begin.html
new file mode 100644
index 00000000000..3fb9a6527d4
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/txn_begin.html
@@ -0,0 +1,25 @@
+<!--$Id: txn_begin.so,v 11.7 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: txn_begin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/memp_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: txn_begin</h1>
+<p>An additional argument has been added to the <a href="../../api_c/txn_begin.html">txn_begin</a> interface.
+<p>The application should be searched for any occurrences of
+<a href="../../api_c/txn_begin.html">txn_begin</a>. For each one, an argument of 0 should be appended to
+the current arguments.
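+<p>For example (a sketch only; the handle argument shown is a placeholder,
+see the section on function arguments for the handle changes), a call such as:
+<p><blockquote><pre>if ((ret = txn_begin(dbenv, NULL, &tid)) != 0)
+	return (ret);</pre></blockquote>
+<p>would be changed to:
+<p><blockquote><pre>if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
+	return (ret);</pre></blockquote>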
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/memp_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/txn_commit.html b/bdb/docs/ref/upgrade.3.0/txn_commit.html
new file mode 100644
index 00000000000..8090b1e3b84
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/txn_commit.html
@@ -0,0 +1,25 @@
+<!--$Id: txn_commit.so,v 11.8 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: txn_commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/txn_begin.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: txn_commit</h1>
+<p>An additional argument has been added to the <a href="../../api_c/txn_commit.html">txn_commit</a> interface.
+<p>The application should be searched for any occurrences of
+<a href="../../api_c/txn_commit.html">txn_commit</a>. For each one, an argument of 0 should be appended to
+the current arguments.
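+<p>For example (the transaction variable is a placeholder), a call written as
+<b>txn_commit(tid)</b> would be changed to:
+<p><blockquote><pre>if ((ret = txn_commit(tid, 0)) != 0)
+	return (ret);</pre></blockquote>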
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/txn_begin.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/txn_stat.html b/bdb/docs/ref/upgrade.3.0/txn_stat.html
new file mode 100644
index 00000000000..d965494d5ef
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/txn_stat.html
@@ -0,0 +1,23 @@
+<!--$Id: txn_stat.so,v 11.3 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: txn_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/rmw.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: txn_stat</h1>
+<p>The <b>st_refcnt</b> field returned from the <a href="../../api_c/txn_stat.html">txn_stat</a> interface
+has been removed, and this information is no longer available.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/rmw.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/value_set.html b/bdb/docs/ref/upgrade.3.0/value_set.html
new file mode 100644
index 00000000000..66070b09fd6
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/value_set.html
@@ -0,0 +1,41 @@
+<!--$Id: value_set.so,v 11.6 2000/03/18 21:43:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: db_value_set</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/jump_set.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbenv_cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: db_value_set</h1>
+<p>The db_value_set interface has been removed from the Berkeley DB 3.0 release,
+replaced by method calls on the DB_ENV handle.
+<p>The following table lists the db_value_set arguments previously used by
+applications and the methods that should now be used instead.
+<p><table border=1 align=center>
+<tr><th>db_value_set argument</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>DB_MUTEX_LOCKS</td><td><a href="../../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a></td></tr>
+<tr><td>DB_REGION_ANON</td><td>The DB_REGION_ANON functionality has
+been replaced by the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> and <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flags
+to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function. A direct translation is not
+available; please review the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> manual page for more
+information.</td></tr>
+<tr><td>DB_REGION_INIT</td><td><a href="../../api_c/env_set_region_init.html">db_env_set_region_init</a></td></tr>
+<tr><td>DB_REGION_NAME</td><td>The DB_REGION_NAME functionality has
+been replaced by the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> and <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flags
+to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function. A direct translation is not
+available; please review the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> manual page for more
+information.</td></tr>
+<tr><td>DB_TSL_SPINS</td><td><a href="../../api_c/env_set_tas_spins.html">db_env_set_tas_spins</a></td></tr>
+</table>
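+<p>For example (a sketch of the replacement calls only; the values shown are
+arbitrary), settings previously made through db_value_set for the
+DB_REGION_INIT and DB_TSL_SPINS arguments might now be written as:
+<p><blockquote><pre>if ((ret = db_env_set_region_init(1)) != 0)
+	return (ret);
+if ((ret = db_env_set_tas_spins(50)) != 0)
+	return (ret);</pre></blockquote>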
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/jump_set.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbenv_cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.0/xa.html b/bdb/docs/ref/upgrade.3.0/xa.html
new file mode 100644
index 00000000000..41f5a993d23
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.0/xa.html
@@ -0,0 +1,33 @@
+<!--$Id: xa.so,v 11.7 2000/03/18 21:43:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: db_xa_open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/db.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: db_xa_open</h1>
+<p>The following change applies only to applications using Berkeley DB as an XA
+Resource Manager. If your application is not using Berkeley DB in this way,
+you can ignore this change.
+<p>The db_xa_open function has been replaced with the <a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a>
+flag to the <a href="../../api_c/db_create.html">db_create</a> function. All calls to db_xa_open should
+be replaced with calls to <a href="../../api_c/db_create.html">db_create</a> with the <a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a>
+flag set, followed by a call to the <a href="../../api_c/db_open.html">DB-&gt;open</a> function.
+<p>A similar change has been made for the C++ API, where the
+<a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a> flag should be specified to the Db constructor. All
+calls to the Db::xa_open method should be replaced with the
+<a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a> flag to the Db constructor, followed by a call to
+the Db::open method.
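+<p>For example (a sketch only; see the <a href="../../api_c/db_create.html">db_create</a> documentation for the
+environment argument to use in the XA case), the C API replacement is:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, NULL, DB_XA_CREATE)) != 0)
+	return (ret);
+/* ... then open the database with DB-&gt;open as described earlier ... */</pre></blockquote>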
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/db.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/btstat.html b/bdb/docs/ref/upgrade.3.1/btstat.html
new file mode 100644
index 00000000000..e5d7c4bb5d5
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/btstat.html
@@ -0,0 +1,50 @@
+<!--$Id: btstat.so,v 1.11 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB-&gt;stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/dup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/sysmem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB-&gt;stat</h1>
+<p>For Btree database statistics, the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> interface field
+<b>bt_nrecs</b> has been removed, replaced by two fields:
+<b>bt_nkeys</b> and <b>bt_ndata</b>. The <b>bt_nkeys</b> field returns
+a count of the unique keys in the database. The <b>bt_ndata</b> field
+returns a count of the key/data pairs in the database. Neither exactly
+matches the previous value of the <b>bt_nrecs</b> field, which returned
+a count of keys in the database, but, in the case of Btree databases,
+could overcount as it sometimes counted duplicate data items as unique
+keys. The application should be searched for any uses of the
+<b>bt_nrecs</b> field and the field should be changed to be either
+<b>bt_nkeys</b> or <b>bt_ndata</b>, whichever is more appropriate.
+<p>For Hash database statistics, the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> interface field
+<b>hash_nrecs</b> has been removed, replaced by two fields:
+<b>hash_nkeys</b> and <b>hash_ndata</b>. The <b>hash_nkeys</b> field
+returns a count of the unique keys in the database. The
+<b>hash_ndata</b> field returns a count of the key/data pairs in the
+database. The new <b>hash_nkeys</b> field exactly matches the previous
+value of the <b>hash_nrecs</b> field. The application should be searched
+for any uses of the <b>hash_nrecs</b> field, and the field should be
+changed to be <b>hash_nkeys</b>.
+<p>For Queue database statistics, the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> interface field
+<b>qs_nrecs</b> has been removed, replaced by two fields:
+<b>qs_nkeys</b> and <b>qs_ndata</b>. The <b>qs_nkeys</b> field returns
+a count of the unique keys in the database. The <b>qs_ndata</b> field
+returns a count of the key/data pairs in the database. The new
+<b>qs_nkeys</b> field exactly matches the previous value of the
+<b>qs_nrecs</b> field. The application should be searched for any uses
+of the <b>qs_nrecs</b> field, and the field should be changed to be
+<b>qs_nkeys</b>.
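+<p>For example (a sketch; error handling is abbreviated and the call follows
+the 3.1 <a href="../../api_c/db_stat.html">DB-&gt;stat</a> documentation), Btree statistics reporting might now
+be written as:
+<p><blockquote><pre>DB_BTREE_STAT *sp;
+<p>
+if ((ret = dbp-&gt;stat(dbp, &sp, NULL, 0)) != 0)
+	return (ret);
+printf("%lu keys, %lu key/data pairs\n",
+    (u_long)sp-&gt;bt_nkeys, (u_long)sp-&gt;bt_ndata);</pre></blockquote>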
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/dup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/sysmem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/config.html b/bdb/docs/ref/upgrade.3.1/config.html
new file mode 100644
index 00000000000..29a53363eaf
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/config.html
@@ -0,0 +1,35 @@
+<!--$Id: config.so,v 1.3 2000/07/25 16:59:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DBENV-&gt;open, DBENV-&gt;remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_tx_recover.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DBENV-&gt;open, DBENV-&gt;remove</h1>
+<p>In the Berkeley DB 3.1 release, the <b>config</b> argument to the
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a>, <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> methods has been removed,
+replaced by additional methods on the DB_ENV handle. If your
+application calls <a href="../../api_c/env_open.html">DBENV-&gt;open</a> or <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> with a NULL
+<b>config</b> argument, find those functions and remove the config
+argument from the call. If your application has a non-NULL <b>config</b>
+argument, the string values in that argument should be replaced with calls to
+DB_ENV methods as follows:
+<p><table border=1 align=center>
+<tr><th>Previous config string</th><th>Berkeley DB 3.1 version method</th></tr>
+<tr><td>DB_DATA_DIR</td><td><a href="../../api_c/env_set_data_dir.html">DBENV-&gt;set_data_dir</a></td></tr>
+<tr><td>DB_LOG_DIR</td><td><a href="../../api_c/env_set_lg_dir.html">DBENV-&gt;set_lg_dir</a></td></tr>
+<tr><td>DB_TMP_DIR</td><td><a href="../../api_c/env_set_tmp_dir.html">DBENV-&gt;set_tmp_dir</a></td></tr>
+</table>
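+<p>For example (a sketch only; the directory paths are arbitrary), a
+configuration previously passed through the <b>config</b> argument might now
+be expressed as:
+<p><blockquote><pre>if ((ret = dbenv-&gt;set_data_dir(dbenv, "/database/files")) != 0)
+	return (ret);
+if ((ret = dbenv-&gt;set_tmp_dir(dbenv, "/database/tmp")) != 0)
+	return (ret);
+if ((ret = dbenv-&gt;open(dbenv, home, DB_CREATE | DB_INIT_MPOOL, 0)) != 0)
+	return (ret);</pre></blockquote>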
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_tx_recover.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/disk.html b/bdb/docs/ref/upgrade.3.1/disk.html
new file mode 100644
index 00000000000..cbaa3342b5f
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/disk.html
@@ -0,0 +1,34 @@
+<!--$Id: disk.so,v 1.9 2000/12/21 18:37:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/logalloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: upgrade requirements</h1>
+<p>Log file formats and the Btree, Queue, Recno and Hash Access Method
+database formats changed in the Berkeley DB 3.1 release. (The on-disk
+Btree/Recno format changed from version 7 to version 8. The on-disk
+Hash format changed from version 6 to version 7. The on-disk Queue
+format changed from version 1 to version 2.) Until the underlying
+databases are upgraded, the <a href="../../api_c/db_open.html">DB-&gt;open</a> function will return a
+<a href="../../api_c/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a> error.
+<p>An additional flag, <a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a>, has been added to the
+<a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a> function for this upgrade. Please review the
+<a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a> documentation for further information.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB
+installations</a>.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/logalloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/dup.html b/bdb/docs/ref/upgrade.3.1/dup.html
new file mode 100644
index 00000000000..33f71ebb418
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/dup.html
@@ -0,0 +1,31 @@
+<!--$Id: dup.so,v 1.1 2000/05/31 18:53:28 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: identical duplicate data items</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/btstat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: identical duplicate data items</h1>
+<p>In previous releases of Berkeley DB, it was not an error to store identical
+duplicate data items, or, for those that just like the way it sounds,
+duplicate duplicates. However, there were implementation bugs where
+storing duplicate duplicates could cause database corruption.
+<p>In this release, applications may store identical duplicate data items
+as long as the data items are unsorted. It is an error to attempt to
+store identical duplicate data items when duplicates are being stored
+in a sorted order. This restriction is expected to be lifted in a future
+release. See <a href="../../ref/am_conf/dup.html">Duplicate data items</a>
+for more information.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/btstat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/env.html b/bdb/docs/ref/upgrade.3.1/env.html
new file mode 100644
index 00000000000..6e1b8ccde53
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/env.html
@@ -0,0 +1,53 @@
+<!--$Id: env.so,v 1.1 2000/05/31 15:10:03 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: environment configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/txn_check.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/tcl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: environment configuration</h1>
+<p>A set of DB_ENV configuration methods that were not environment-specific,
+but instead affected the entire application space, have
+been removed from the DB_ENV object and replaced by static
+functions. The following table lists the DB_ENV methods previously
+available to applications and the static functions that should now be used
+instead.
+<p><table border=1 align=center>
+<tr><th>DB_ENV method</th><th>Berkeley DB 3.1 function</th></tr>
+<tr><td>DBENV-&gt;set_func_close</td><td><a href="../../api_c/set_func_close.html">db_env_set_func_close</a></td></tr>
+<tr><td>DBENV-&gt;set_func_dirfree</td><td><a href="../../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a></td></tr>
+<tr><td>DBENV-&gt;set_func_dirlist</td><td><a href="../../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a></td></tr>
+<tr><td>DBENV-&gt;set_func_exists</td><td><a href="../../api_c/set_func_exists.html">db_env_set_func_exists</a></td></tr>
+<tr><td>DBENV-&gt;set_func_free</td><td><a href="../../api_c/set_func_free.html">db_env_set_func_free</a></td></tr>
+<tr><td>DBENV-&gt;set_func_fsync</td><td><a href="../../api_c/set_func_fsync.html">db_env_set_func_fsync</a></td></tr>
+<tr><td>DBENV-&gt;set_func_ioinfo</td><td><a href="../../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a></td></tr>
+<tr><td>DBENV-&gt;set_func_malloc</td><td><a href="../../api_c/set_func_malloc.html">db_env_set_func_malloc</a></td></tr>
+<tr><td>DBENV-&gt;set_func_map</td><td><a href="../../api_c/set_func_map.html">db_env_set_func_map</a></td></tr>
+<tr><td>DBENV-&gt;set_func_open</td><td><a href="../../api_c/set_func_open.html">db_env_set_func_open</a></td></tr>
+<tr><td>DBENV-&gt;set_func_read</td><td><a href="../../api_c/set_func_read.html">db_env_set_func_read</a></td></tr>
+<tr><td>DBENV-&gt;set_func_realloc</td><td><a href="../../api_c/set_func_realloc.html">db_env_set_func_realloc</a></td></tr>
+<tr><td>DBENV-&gt;set_func_rename</td><td><a href="../../api_c/set_func_rename.html">db_env_set_func_rename</a></td></tr>
+<tr><td>DBENV-&gt;set_func_seek</td><td><a href="../../api_c/set_func_seek.html">db_env_set_func_seek</a></td></tr>
+<tr><td>DBENV-&gt;set_func_sleep</td><td><a href="../../api_c/set_func_sleep.html">db_env_set_func_sleep</a></td></tr>
+<tr><td>DBENV-&gt;set_func_unlink</td><td><a href="../../api_c/set_func_unlink.html">db_env_set_func_unlink</a></td></tr>
+<tr><td>DBENV-&gt;set_func_unmap</td><td><a href="../../api_c/set_func_unmap.html">db_env_set_func_unmap</a></td></tr>
+<tr><td>DBENV-&gt;set_func_write</td><td><a href="../../api_c/set_func_write.html">db_env_set_func_write</a></td></tr>
+<tr><td>DBENV-&gt;set_func_yield</td><td><a href="../../api_c/set_func_yield.html">db_env_set_func_yield</a></td></tr>
+<tr><td>DBENV-&gt;set_pageyield</td><td><a href="../../api_c/env_set_pageyield.html">db_env_set_pageyield</a></td></tr>
+<tr><td>DBENV-&gt;set_region_init</td><td><a href="../../api_c/env_set_region_init.html">db_env_set_region_init</a></td></tr>
+<tr><td>DBENV-&gt;set_mutexlocks</td><td><a href="../../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a></td></tr>
+<tr><td>DBENV-&gt;set_tas_spins</td><td><a href="../../api_c/env_set_tas_spins.html">db_env_set_tas_spins</a></td></tr>
+</table>
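+<p>For example (a sketch; my_malloc and my_free are application-supplied
+functions), a replacement for the removed DBENV-&gt;set_func_malloc and
+DBENV-&gt;set_func_free methods would be written as:
+<p><blockquote><pre>extern void *my_malloc(size_t);
+extern void my_free(void *);
+<p>
+if ((ret = db_env_set_func_malloc(my_malloc)) != 0)
+	return (ret);
+if ((ret = db_env_set_func_free(my_free)) != 0)
+	return (ret);</pre></blockquote>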
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/txn_check.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/tcl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/intro.html b/bdb/docs/ref/upgrade.3.1/intro.html
new file mode 100644
index 00000000000..9c5d9529158
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/intro.html
@@ -0,0 +1,26 @@
+<!--$Id: intro.so,v 1.4 2000/03/18 21:43:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 3.0 release interfaces to the Berkeley DB 3.1 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/log_register.html b/bdb/docs/ref/upgrade.3.1/log_register.html
new file mode 100644
index 00000000000..8823d643953
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/log_register.html
@@ -0,0 +1,28 @@
+<!--$Id: log_register.so,v 1.3 2000/07/25 16:59:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: log_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/sysmem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/memp_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: log_register</h1>
+<p>The arguments to the <a href="../../api_c/log_register.html">log_register</a> and <a href="../../api_c/log_unregister.html">log_unregister</a>
+interfaces have changed. Instead of returning (and passing in) a logging
+file ID, a reference to the DB structure being registered (or
+unregistered) is passed. The application should be searched for any
+occurrences of <a href="../../api_c/log_register.html">log_register</a> and <a href="../../api_c/log_unregister.html">log_unregister</a>. For each
+one, change the arguments to be a reference to the DB structure
+being registered or unregistered.
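+<p>For example (an illustrative sketch only; all variable names are
+placeholders), calls of the general form:
+<p><blockquote><pre>ret = log_register(dbenv, dbp, "a.db", &fid);
+...
+ret = log_unregister(dbenv, fid);</pre></blockquote>
+<p>would be changed to:
+<p><blockquote><pre>ret = log_register(dbenv, dbp, "a.db");
+...
+ret = log_unregister(dbenv, dbp);</pre></blockquote>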
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/sysmem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/memp_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/logalloc.html b/bdb/docs/ref/upgrade.3.1/logalloc.html
new file mode 100644
index 00000000000..acafbf6ee0a
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/logalloc.html
@@ -0,0 +1,27 @@
+<!--$Id: logalloc.so,v 1.1 2000/06/02 23:32:48 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: log file pre-allocation</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/tmp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: log file pre-allocation</h1>
+<p>This change only affects Win/32 applications.
+<p>On Win/32 platforms, Berkeley DB no longer pre-allocates log files. The problem
+was a noticeable performance spike as each log file was created. To turn
+this feature back on, search for the flag DB_OSO_LOG in the source file
+<b>log/log_put.c</b> and make the change described there, or contact
+Sleepycat Software for assistance.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/tmp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/memp_register.html b/bdb/docs/ref/upgrade.3.1/memp_register.html
new file mode 100644
index 00000000000..e8a667031e6
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/memp_register.html
@@ -0,0 +1,30 @@
+<!--$Id: memp_register.so,v 1.3 2000/07/25 16:59:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: memp_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/txn_check.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: memp_register</h1>
+<p>An additional argument has been added to the <b>pgin</b> and
+<b>pgout</b> functions provided to the <a href="../../api_c/memp_register.html">memp_register</a> interface.
+The application should be searched for any occurrences of
+<a href="../../api_c/memp_register.html">memp_register</a>. For each one, if <b>pgin</b> or <b>pgout</b>
+functions are specified, the <b>pgin</b> and <b>pgout</b> functions
+should be modified to take an initial argument of a <b>DB_ENV *</b>.
+This argument is intended to support better error reporting for
+applications, and may be entirely ignored by the <b>pgin</b> and
+<b>pgout</b> functions themselves.
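+<p>For example (a sketch; my_pgin is an application-supplied function and the
+remaining arguments follow the <a href="../../api_c/memp_register.html">memp_register</a> documentation), a
+<b>pgin</b> function previously declared as:
+<p><blockquote><pre>int my_pgin(db_pgno_t pgno, void *pgaddr, DBT *pgcookie);</pre></blockquote>
+<p>would be changed to:
+<p><blockquote><pre>int my_pgin(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);</pre></blockquote>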
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/txn_check.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/put.html b/bdb/docs/ref/upgrade.3.1/put.html
new file mode 100644
index 00000000000..5252b3ac00a
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/put.html
@@ -0,0 +1,63 @@
+<!--$Id: put.so,v 1.8 2000/07/25 16:59:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB-&gt;put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/set_paniccall.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/dup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB-&gt;put</h1>
+<p>For the Queue and Recno access methods, when the <a href="../../api_c/db_put.html#DB_APPEND">DB_APPEND</a> flag
+is specified to the <a href="../../api_c/db_put.html">DB-&gt;put</a> interface, the allocated record number
+is returned to the application in the <b>key</b> <a href="../../api_c/dbt.html">DBT</a> argument.
+In previous releases of Berkeley DB, this <a href="../../api_c/dbt.html">DBT</a> structure did not follow
+the usual <a href="../../api_c/dbt.html">DBT</a> conventions, e.g., it was not possible to cause
+Berkeley DB to allocate space for the returned record number. Rather, it was
+always assumed that the <b>data</b> field of the <b>key</b> structure
+referenced memory that could be used as storage for a db_recno_t type.
+<p>As of the Berkeley DB 3.1.0 release, the <b>key</b> structure behaves as
+described in the <a href="../../api_c/dbt.html">DBT</a> C++/Java class or C structure documentation.
+<p>Applications which are using the <a href="../../api_c/db_put.html#DB_APPEND">DB_APPEND</a> flag for Queue and
+Recno access method databases will require a change to upgrade to the
+Berkeley DB 3.1 releases. The simplest change is likely to be to add the
+<a href="../../api_c/dbt.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flag to the <b>key</b> structure. For example,
+code that appears as follows:
+<p><blockquote><pre>DBT key;
+db_recno_t recno;
+<p>
+memset(&key, 0, sizeof(DBT));
+key.data = &recno;
+key.size = sizeof(recno);
+DB-&gt;put(DB, NULL, &key, &data, DB_APPEND);
+printf("new record number is %lu\n", (u_long)recno);</pre></blockquote>
+<p>would be changed to:
+<p><blockquote><pre>DBT key;
+db_recno_t recno;
+<p>
+memset(&key, 0, sizeof(DBT));
+key.data = &recno;
+key.ulen = sizeof(recno);
+key.flags = DB_DBT_USERMEM;
+DB-&gt;put(DB, NULL, &key, &data, DB_APPEND);
+printf("new record number is %lu\n", (u_long)recno);</pre></blockquote>
+<p>Note that the <b>ulen</b> field is now set as well as the flag value.
+An alternative change would be:
+<p><blockquote><pre>DBT key;
+db_recno_t recno;
+<p>
+memset(&key, 0, sizeof(DBT));
+DB-&gt;put(DB, NULL, &key, &data, DB_APPEND);
+recno = *(db_recno_t *)key.data;
+printf("new record number is %lu\n", (u_long)recno);</pre></blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/set_paniccall.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/dup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/set_feedback.html b/bdb/docs/ref/upgrade.3.1/set_feedback.html
new file mode 100644
index 00000000000..c7b7864b9d2
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/set_feedback.html
@@ -0,0 +1,27 @@
+<!--$Id: set_feedback.so,v 1.3 2000/07/25 16:59:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DBENV-&gt;set_feedback, DB-&gt;set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/set_tx_recover.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_paniccall.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DBENV-&gt;set_feedback, DB-&gt;set_feedback</h1>
+<p>Starting with the 3.1 release of Berkeley DB, the <a href="../../api_c/env_set_feedback.html">DBENV-&gt;set_feedback</a>
+and <a href="../../api_c/db_set_feedback.html">DB-&gt;set_feedback</a> functions may return an error value, that is, they
+are no longer declared as returning no value, instead they return an int
+or throw an exception as appropriate when an error occurs.
+<p>If your application calls these functions, you may want to check for a
+possible error on return.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/set_tx_recover.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_paniccall.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/set_paniccall.html b/bdb/docs/ref/upgrade.3.1/set_paniccall.html
new file mode 100644
index 00000000000..8aa554cf067
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/set_paniccall.html
@@ -0,0 +1,27 @@
+<!--$Id: set_paniccall.so,v 1.4 2000/07/25 16:59:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DBENV-&gt;set_paniccall, DB-&gt;set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/set_feedback.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DBENV-&gt;set_paniccall, DB-&gt;set_paniccall</h1>
+<p>Starting with the 3.1 release of Berkeley DB, the <a href="../../api_c/env_set_paniccall.html">DBENV-&gt;set_paniccall</a>
+and <a href="../../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a> functions may return an error value; that is, they
+are no longer declared as returning no value. Instead, they return an int
+or throw an exception, as appropriate, when an error occurs.
+<p>If your application calls these functions, you may want to check for a
+possible error on return.
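+<p>For example (a minimal sketch; my_paniccall is a placeholder for the
+application's own panic callback):
+<p><blockquote><pre>int ret;
+
+if ((ret = dbenv-&gt;set_paniccall(dbenv, my_paniccall)) != 0)
+	dbenv-&gt;err(dbenv, ret, "DBENV-&gt;set_paniccall");</pre></blockquote>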
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/set_feedback.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/set_tx_recover.html b/bdb/docs/ref/upgrade.3.1/set_tx_recover.html
new file mode 100644
index 00000000000..9943845e864
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/set_tx_recover.html
@@ -0,0 +1,36 @@
+<!--$Id: set_tx_recover.so,v 1.9 2000/07/25 16:59:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DBENV-&gt;set_tx_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_feedback.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DBENV-&gt;set_tx_recover</h1>
+<p>The redo parameter of the function passed to <a href="../../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>
+used to be an integer set to any one of a number of #defined values. In
+the 3.1 release of Berkeley DB, the redo parameter has been replaced by the op
+parameter, which is of the enumerated type db_recops.
+<p>If your application calls <a href="../../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>, then find the
+function referenced in the call. Replace the flag values in that function
+as follows:
+<p><table border=1 align=center>
+<tr><th>Previous flag</th><th>Berkeley DB 3.1 version flag</th></tr>
+<tr><td>TXN_BACKWARD_ROLL</td><td>DB_TXN_BACKWARD_ROLL</td></tr>
+<tr><td>TXN_FORWARD_ROLL</td><td>DB_TXN_FORWARD_ROLL</td></tr>
+<tr><td>TXN_OPENFILES</td><td>DB_TXN_OPENFILES</td></tr>
+<tr><td>TXN_REDO</td><td>DB_TXN_FORWARD_ROLL</td></tr>
+<tr><td>TXN_UNDO</td><td>DB_TXN_ABORT</td></tr>
+</table>
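+<p>For example, a recovery function previously written in terms of the
+TXN_* flags might be converted roughly as follows (a sketch only; the
+case bodies are placeholders for the application's existing code):
+<p><blockquote><pre>switch (op) {
+case DB_TXN_BACKWARD_ROLL:	/* Previously TXN_BACKWARD_ROLL. */
+	/* Existing backward-roll handling. */
+	break;
+case DB_TXN_FORWARD_ROLL:	/* Previously TXN_FORWARD_ROLL, TXN_REDO. */
+	/* Existing redo handling. */
+	break;
+case DB_TXN_ABORT:		/* Previously TXN_UNDO. */
+	/* Existing undo handling. */
+	break;
+case DB_TXN_OPENFILES:		/* Previously TXN_OPENFILES. */
+	/* Existing open-files handling. */
+	break;
+}</pre></blockquote>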
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_feedback.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/sysmem.html b/bdb/docs/ref/upgrade.3.1/sysmem.html
new file mode 100644
index 00000000000..7e21a565e97
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/sysmem.html
@@ -0,0 +1,25 @@
+<!--$Id: sysmem.so,v 1.3 2000/07/25 16:59:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB_SYSTEM_MEM</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/btstat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB_SYSTEM_MEM</h1>
+<p>Using the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> option on UNIX systems now requires the
+specification of a base system memory segment ID, using the
+<a href="../../api_c/env_set_shm_key.html">DBENV-&gt;set_shm_key</a> function. Any valid segment ID may be specified, for
+example, one returned by the UNIX <b>ftok</b>(3) interface.
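+<p>For example (a sketch only; the pathname and project ID passed to
+<b>ftok</b>(3) are arbitrary placeholders):
+<p><blockquote><pre>/* ftok(3) is declared in &lt;sys/ipc.h&gt;. */
+ret = dbenv-&gt;set_shm_key(dbenv, ftok("/var/myapp/dbhome", 1));</pre></blockquote>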
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/btstat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/tcl.html b/bdb/docs/ref/upgrade.3.1/tcl.html
new file mode 100644
index 00000000000..0f964abb31e
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/tcl.html
@@ -0,0 +1,30 @@
+<!--$Id: tcl.so,v 1.5 2000/06/02 14:50:20 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: Tcl API</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/env.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/tmp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: Tcl API</h1>
+<p>The Berkeley DB Tcl API has been modified so that the <b>-mpool</b> option to
+the <b>berkdb env</b> command is now the default behavior. The Tcl API
+has also been modified so that the <b>-txn</b> option to the
+<b>berkdb env</b> command implies the <b>-lock</b> and <b>-log</b>
+options. Tcl scripts should be updated to remove the <b>-mpool</b>,
+<b>-lock</b> and <b>-log</b> options.
+<p>The Berkeley DB Tcl API has been modified to follow the Tcl standard rules for
+integer conversion, e.g., if the first two characters of a record number
+are "0x", the record number is expected to be in hexadecimal form.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/env.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/tmp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/tmp.html b/bdb/docs/ref/upgrade.3.1/tmp.html
new file mode 100644
index 00000000000..72034803b1b
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/tmp.html
@@ -0,0 +1,34 @@
+<!--$Id: tmp.so,v 1.7 2000/05/22 20:26:35 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB_TMP_DIR</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/tcl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/logalloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB_TMP_DIR</h1>
+<p>This change only affects Win/32 applications that create in-memory
+databases.
+<p>On Win/32 platforms an additional test has been added when searching for
+the appropriate directory in which to create the temporary files that are
+used to back in-memory databases. Berkeley DB now uses any return value from
+the GetTempPath interface as the temporary file directory name before
+resorting to the static list of compiled-in pathnames.
+<p>If the system registry does not return the same directory as Berkeley DB has
+been using previously, this change could cause temporary backing files to
+move to a new directory when applications are upgraded to the 3.1 release.
+In extreme cases, this could create (or fix) security problems if the file
+protection modes for the system registry directory are different from
+those on the directory previously used by Berkeley DB.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/tcl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/logalloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/toc.html b/bdb/docs/ref/upgrade.3.1/toc.html
new file mode 100644
index 00000000000..091318810da
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/toc.html
@@ -0,0 +1,33 @@
+<!--$Id: toc.so,v 1.2 2000/12/05 20:36:27 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Upgrading Berkeley DB 3.0.X applications to Berkeley DB 3.1</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Upgrading Berkeley DB 3.0.X applications to Berkeley DB 3.1</h1>
+<ol>
+<li><a href="intro.html">Release 3.1: introduction</a>
+<li><a href="config.html">Release 3.1: DBENV-&gt;open, DBENV-&gt;remove</a>
+<li><a href="set_tx_recover.html">Release 3.1: DBENV-&gt;set_tx_recover</a>
+<li><a href="set_feedback.html">Release 3.1: DBENV-&gt;set_feedback, DB-&gt;set_feedback</a>
+<li><a href="set_paniccall.html">Release 3.1: DBENV-&gt;set_paniccall, DB-&gt;set_paniccall</a>
+<li><a href="put.html">Release 3.1: DB-&gt;put</a>
+<li><a href="dup.html">Release 3.1: identical duplicate data items</a>
+<li><a href="btstat.html">Release 3.1: DB-&gt;stat</a>
+<li><a href="sysmem.html">Release 3.1: DB_SYSTEM_MEM</a>
+<li><a href="log_register.html">Release 3.1: log_register</a>
+<li><a href="memp_register.html">Release 3.1: memp_register</a>
+<li><a href="txn_check.html">Release 3.1: txn_checkpoint</a>
+<li><a href="env.html">Release 3.1: environment configuration</a>
+<li><a href="tcl.html">Release 3.1: Tcl API</a>
+<li><a href="tmp.html">Release 3.1: DB_TMP_DIR</a>
+<li><a href="logalloc.html">Release 3.1: log file pre-allocation</a>
+<li><a href="disk.html">Release 3.1: upgrade requirements</a>
+</ol>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.1/txn_check.html b/bdb/docs/ref/upgrade.3.1/txn_check.html
new file mode 100644
index 00000000000..27dc3851f7e
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.1/txn_check.html
@@ -0,0 +1,26 @@
+<!--$Id: txn_check.so,v 1.6 2000/07/25 16:59:37 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: txn_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/memp_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/env.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: txn_checkpoint</h1>
+<p>An additional argument has been added to the <a href="../../api_c/txn_checkpoint.html">txn_checkpoint</a>
+interface.
+<p>The application should be searched for any occurrences of
+<a href="../../api_c/txn_checkpoint.html">txn_checkpoint</a>. For each one, an argument of 0 should be appended
+to the current arguments.
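+<p>For example, a call that was previously written along the lines of
+<p><blockquote><pre>ret = txn_checkpoint(dbenv, 0, 0);</pre></blockquote>
+<p>would now append a final argument of 0 (a sketch only; the existing
+arguments are carried over unchanged):
+<p><blockquote><pre>ret = txn_checkpoint(dbenv, 0, 0, 0);</pre></blockquote>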
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/memp_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/env.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/callback.html b/bdb/docs/ref/upgrade.3.2/callback.html
new file mode 100644
index 00000000000..f60a81d5c56
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/callback.html
@@ -0,0 +1,39 @@
+<!--$Id: callback.so,v 1.5 2000/10/26 15:20:40 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DB callback functions, app_private field</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/set_flags.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/renumber.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DB callback functions, app_private field</h1>
+<p>In the Berkeley DB 3.2 release, four application callback functions (the
+callback functions set by <a href="../../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>, <a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> and
+<a href="../../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>) were modified to take a reference to a
+DB object as their first argument. This change allows the Berkeley DB
+Java API to reasonably support these interfaces. There is currently no
+need for the callback functions to do anything with this additional
+argument.
+<p>C and C++ applications that specify their own Btree key comparison,
+Btree prefix comparison, duplicate data item comparison or Hash
+functions should modify these functions to take a reference to a
+DB structure as their first argument. No further change is
+required.
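+<p>For example, a Btree key comparison function would now be declared
+roughly as follows (a sketch only; the function name and the simple
+lexicographic comparison are placeholders for the application's own
+logic):
+<p><blockquote><pre>int
+my_bt_compare(DB *dbp, const DBT *a, const DBT *b)
+{
+	size_t len;
+	int cmp;
+
+	/* The new dbp argument may simply be ignored. */
+	len = a-&gt;size &lt; b-&gt;size ? a-&gt;size : b-&gt;size;
+	if ((cmp = memcmp(a-&gt;data, b-&gt;data, len)) != 0)
+		return (cmp);
+	return ((int)a-&gt;size - (int)b-&gt;size);
+}</pre></blockquote>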
+<p>The app_private field of the <a href="../../api_c/dbt.html">DBT</a> structure (accessible only from
+the Berkeley DB C API) has been removed in the 3.2 release. It was replaced
+with app_private fields in the DB_ENV and DB handles.
+Applications using this field will have to convert to using one of the
+replacement fields.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/set_flags.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/renumber.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/db_dump.html b/bdb/docs/ref/upgrade.3.2/db_dump.html
new file mode 100644
index 00000000000..87d909086b3
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/db_dump.html
@@ -0,0 +1,29 @@
+<!--$Id: db_dump.so,v 1.3 2000/11/28 21:27:49 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: db_dump</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/notfound.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: db_dump</h1>
+<p>In previous releases of Berkeley DB, the <a href="../../utility/db_dump.html">db_dump</a> utility dumped Recno
+access method database keys as numeric strings. For consistency, the
+<a href="../../utility/db_dump.html">db_dump</a> utility has been changed in the 3.2 release to dump
+record numbers as hex pairs when the data items are being dumped as hex
+pairs. (See the <b>-k</b> and <b>-p</b> options to the
+<a href="../../utility/db_dump.html">db_dump</a> utility for more information.) Any applications or
+scripts post-processing the <a href="../../utility/db_dump.html">db_dump</a> output of Recno databases
+under these conditions may require modification.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/notfound.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/disk.html b/bdb/docs/ref/upgrade.3.2/disk.html
new file mode 100644
index 00000000000..8cebb9319ec
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/disk.html
@@ -0,0 +1,28 @@
+<!--$Id: disk.so,v 1.4 2000/12/21 18:37:09 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/db_dump.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/run.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: upgrade requirements</h1>
+<p>Log file formats and the Queue Access Method database formats changed
+in the Berkeley DB 3.2 release. (The on-disk Queue format changed from
+version 2 to version 3.) Until the underlying databases are upgraded,
+the <a href="../../api_c/db_open.html">DB-&gt;open</a> function will return a <a href="../../api_c/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a> error.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB
+installations</a>.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/db_dump.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/run.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/handle.html b/bdb/docs/ref/upgrade.3.2/handle.html
new file mode 100644
index 00000000000..86f86a03a93
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/handle.html
@@ -0,0 +1,27 @@
+<!--$Id: handle.so,v 1.2 2000/11/17 19:56:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: Java and C++ object re-use</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/mutexlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/notfound.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: Java and C++ object re-use</h1>
+<p>In previous releases of Berkeley DB, Java <a href="../../api_java/dbenv_class.html">DbEnv</a> and <a href="../../api_java/db_class.html">Db</a>
+objects, and C++ <a href="../../api_cxx/dbenv_class.html">DbEnv</a> and <a href="../../api_cxx/db_class.html">Db</a> objects could be
+re-used after they were closed, by calling open on them again. This is
+no longer permitted, and these objects no longer allow any operations
+after a close. Applications re-using these objects should be modified
+to create new objects instead.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/mutexlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/notfound.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/incomplete.html b/bdb/docs/ref/upgrade.3.2/incomplete.html
new file mode 100644
index 00000000000..5aeb7755952
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/incomplete.html
@@ -0,0 +1,39 @@
+<!--$Id: incomplete.so,v 1.4 2000/12/07 15:59:23 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DB_INCOMPLETE</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/renumber.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/tx_recover.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DB_INCOMPLETE</h1>
+<p>There are a number of functions that flush pages from the Berkeley DB shared
+memory buffer pool to disk. Most of those functions can potentially
+fail because a page that needs to be flushed is not currently available.
+However, this is not a hard failure and is rarely cause for concern.
+In the Berkeley DB 3.2 release, the C++ API (if that API is configured to
+throw exceptions) and the Java API have been changed so that this
+failure does not throw an exception, but rather returns a non-zero error
+code of <a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a>.
+<p>The following C++ methods will return <a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> rather than throw
+an exception: <a href="../../api_cxx/db_close.html">Db::close</a>, <a href="../../api_cxx/db_sync.html">Db::sync</a>, <a href="../../api_cxx/memp_sync.html">DbEnv::memp_sync</a>,
+<a href="../../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>, <a href="../../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>.
+<p>The following Java methods are now declared "public int" rather than
+"public void", and will return <a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">Db.DB_INCOMPLETE</a> rather than
+throw an exception: <a href="../../api_java/db_close.html">Db.close</a>, <a href="../../api_java/db_sync.html">Db.sync</a>,
+<a href="../../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>.
+<p>It is likely that the only change required in any application will be
+to code currently checking for a <a href="../../api_c/memp_fsync.html#DB_INCOMPLETE">DB_INCOMPLETE</a> return that has
+been encapsulated in an exception.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/renumber.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/tx_recover.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/intro.html b/bdb/docs/ref/upgrade.3.2/intro.html
new file mode 100644
index 00000000000..df4d573a087
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/intro.html
@@ -0,0 +1,26 @@
+<!--$Id: intro.so,v 1.3 2000/10/03 17:17:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.1/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/set_flags.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 3.1 release interfaces to the Berkeley DB 3.2 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.1/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/set_flags.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/mutexlock.html b/bdb/docs/ref/upgrade.3.2/mutexlock.html
new file mode 100644
index 00000000000..fb1b87ca9ed
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/mutexlock.html
@@ -0,0 +1,28 @@
+<!--$Id: mutexlock.so,v 1.1 2000/11/17 19:56:16 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DBENV-&gt;set_mutexlocks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/tx_recover.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/handle.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DBENV-&gt;set_mutexlocks</h1>
+<p>Previous Berkeley DB releases included the db_env_set_mutexlocks interface,
+intended for debugging, which allowed applications to always obtain
+requested mutual exclusion mutexes without regard for their
+availability. This interface has been replaced with
+<a href="../../api_c/env_set_mutexlocks.html">DBENV-&gt;set_mutexlocks</a>, which provides the same functionality on
+a per-database environment basis. Applications using the old interface
+should be updated to use the new one.
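+<p>For example (a sketch only; like the interface itself, this is
+intended for debugging):
+<p><blockquote><pre>/* Grant all mutex requests in this environment without locking. */
+ret = dbenv-&gt;set_mutexlocks(dbenv, 0);</pre></blockquote>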
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/tx_recover.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/handle.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/notfound.html b/bdb/docs/ref/upgrade.3.2/notfound.html
new file mode 100644
index 00000000000..cb40beaae22
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/notfound.html
@@ -0,0 +1,25 @@
+<!--$Id: notfound.so,v 1.1 2000/10/25 14:27:30 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: Java java.io.FileNotFoundException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/handle.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/db_dump.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: Java java.io.FileNotFoundException</h1>
+<p>The Java <a href="../../api_java/env_remove.html">DbEnv.remove</a>, <a href="../../api_java/db_remove.html">Db.remove</a> and
+<a href="../../api_java/db_rename.html">Db.rename</a> methods now throw java.io.FileNotFoundException
+in the case where the named file does not exist. Applications should
+be modified to catch this exception where appropriate.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/handle.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/db_dump.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/renumber.html b/bdb/docs/ref/upgrade.3.2/renumber.html
new file mode 100644
index 00000000000..619fa07ff0e
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/renumber.html
@@ -0,0 +1,39 @@
+<!--$Id: renumber.so,v 1.3 2000/12/01 18:33:57 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: Logically renumbering records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/callback.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/incomplete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: Logically renumbering records</h1>
+<p>In the Berkeley DB 3.2 release, cursor adjustment semantics changed for Recno
+databases with mutable record numbers. Before the 3.2 release, cursors
+were adjusted to point to the previous or next record at the time the
+record referenced by the cursor was deleted. This could lead to
+unexpected behaviors. For example, two cursors referencing sequential
+records that were both deleted would lose their relationship to each
+other and would reference the same position in the database instead of
+their original sequential relationship. There were also command
+sequences that would have unexpected results. For example, DB_AFTER
+and DB_BEFORE cursor put operations, using a cursor previously used to
+delete an item, would perform the put relative to the cursor's adjusted
+position and not its original position.
+<p>In the Berkeley DB 3.2 release, cursors maintain their position in the tree
+regardless of deletion operations using the cursor. Applications that
+perform database operations, using cursors previously used to delete
+entries in Recno databases with mutable record numbers, should be
+evaluated to ensure that the new semantics do not cause application
+failure.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/callback.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/incomplete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/set_flags.html b/bdb/docs/ref/upgrade.3.2/set_flags.html
new file mode 100644
index 00000000000..b1bbe906b2d
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/set_flags.html
@@ -0,0 +1,35 @@
+<!--$Id: set_flags.so,v 1.1 2000/10/03 17:17:36 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DBENV-&gt;set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/callback.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DBENV-&gt;set_flags</h1>
+<p>A new method has been added to the Berkeley DB environment handle,
+<a href="../../api_c/env_set_flags.html">DBENV-&gt;set_flags</a>. This interface currently takes three flags:
+<a href="../../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a>, <a href="../../api_c/env_open.html#DB_NOMMAP">DB_NOMMAP</a> and <a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a>. The
+first of these flags, <a href="../../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a>, provides new functionality,
+allowing Berkeley DB Concurrent Data Store applications to do locking across multiple databases.
+<p>The other two flags, <a href="../../api_c/env_open.html#DB_NOMMAP">DB_NOMMAP</a> and <a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a>, were
+specified to the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> method in previous releases. In
+the 3.2 release, they have been moved to the <a href="../../api_c/env_set_flags.html">DBENV-&gt;set_flags</a> function
+because this allows the flags to be toggled during the life of the
+application, and because it is a more appropriate place for them.
+Applications specifying either the
+<a href="../../api_c/env_open.html#DB_NOMMAP">DB_NOMMAP</a> or <a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flags to the
+<a href="../../api_c/env_open.html">DBENV-&gt;open</a> function should replace those flags with calls to the
+<a href="../../api_c/env_set_flags.html">DBENV-&gt;set_flags</a> function.
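+<p>For example, an application that previously passed DB_TXN_NOSYNC to
+the DBENV-&gt;open function might now make a call along the following
+lines (a sketch only):
+<p><blockquote><pre>/* Turn the DB_TXN_NOSYNC behavior on for this environment. */
+ret = dbenv-&gt;set_flags(dbenv, DB_TXN_NOSYNC, 1);</pre></blockquote>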
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/callback.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/toc.html b/bdb/docs/ref/upgrade.3.2/toc.html
new file mode 100644
index 00000000000..8a466d1b4d3
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/toc.html
@@ -0,0 +1,27 @@
+<!--$Id: toc.so,v 1.7 2000/12/07 15:59:23 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Upgrading Berkeley DB 3.1.X applications to Berkeley DB 3.2</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Upgrading Berkeley DB 3.1.X applications to Berkeley DB 3.2</h1>
+<ol>
+<li><a href="intro.html">Release 3.2: introduction</a>
+<li><a href="set_flags.html">Release 3.2: DBENV-&gt;set_flags</a>
+<li><a href="callback.html">Release 3.2: DB callback functions, app_private field</a>
+<li><a href="renumber.html">Release 3.2: logically renumbering records</a>
+<li><a href="incomplete.html">Release 3.2: DB_INCOMPLETE</a>
+<li><a href="tx_recover.html">Release 3.2: DBENV-&gt;set_tx_recover</a>
+<li><a href="mutexlock.html">Release 3.2: DBENV-&gt;set_mutexlocks</a>
+<li><a href="handle.html">Release 3.2: Java and C++ object re-use</a>
+<li><a href="notfound.html">Release 3.2: Java java.io.FileNotFoundException</a>
+<li><a href="db_dump.html">Release 3.2: db_dump</a>
+<li><a href="disk.html">Release 3.2: upgrade requirements</a>
+</ol>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade.3.2/tx_recover.html b/bdb/docs/ref/upgrade.3.2/tx_recover.html
new file mode 100644
index 00000000000..c5cf18ebcfb
--- /dev/null
+++ b/bdb/docs/ref/upgrade.3.2/tx_recover.html
@@ -0,0 +1,32 @@
+<!--$Id: tx_recover.so,v 1.11 2000/12/07 15:59:23 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DBENV-&gt;set_tx_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/upgrade.3.2/incomplete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/mutexlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DBENV-&gt;set_tx_recover</h1>
+<p>The <b>info</b> parameter of the function passed to
+<a href="../../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a> is no longer needed. If your application
+calls <a href="../../api_c/env_set_tx_recover.html">DBENV-&gt;set_tx_recover</a>, find the callback function referenced
+in that call and remove the <b>info</b> parameter.
+<p>In addition, the called function no longer needs to handle Berkeley DB log
+records; Berkeley DB will handle them internally as well as call the
+application-specified function. Any handling of Berkeley DB log records in the
+application's callback function may be removed.
+<p>Finally, the callback function will no longer be called with the
+<a href="../../api_c/env_set_tx_recover.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a> flag specified unless the transaction
+enclosing the operation successfully committed.
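+<p>After these changes, the function passed to DBENV-&gt;set_tx_recover
+would be declared roughly as follows (a sketch only; the function name
+is a placeholder):
+<p><blockquote><pre>int
+my_recover(DB_ENV *dbenv, DBT *log_rec, DB_LSN *lsn, db_recops op);</pre></blockquote>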
+<table><tr><td><br></td><td width="1%"><a href="../../ref/upgrade.3.2/incomplete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/mutexlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/upgrade/process.html b/bdb/docs/ref/upgrade/process.html
new file mode 100644
index 00000000000..40be3c8e898
--- /dev/null
+++ b/bdb/docs/ref/upgrade/process.html
@@ -0,0 +1,108 @@
+<!--$Id: process.so,v 1.1 2000/12/05 20:39:10 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB installations</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td width="1%"><a href="../../ref/build_vxworks/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB installations</h1>
+<p>The following information describes the general process of upgrading
+Berkeley DB installations. There are three issues to be considered when
+upgrading Berkeley DB applications and database environments. They are the
+application API, the underlying database formats, and, in the case of
+transactional database environments, the log files.
+<p>An application must always be re-compiled to use a new Berkeley DB release.
+Internal Berkeley DB interfaces may change at any time and in any release,
+without warning. This means the application and library must be entirely
+recompiled and reinstalled when upgrading to new releases of the
+library, as there is no guarantee that modules from one version of the
+library will interact correctly with modules from another release.
+<p>A Berkeley DB patch release will never modify the Berkeley DB API, log file or
+database formats in non-backward compatible ways. Berkeley DB minor and major
+releases may optionally include changes to the Berkeley DB application API,
+log files and database formats that are not backward compatible. Note
+that there are several underlying Berkeley DB database formats. Because they
+do not all necessarily change at the same time, a change to one database
+format in a release may not affect any particular application.
+<p>Each Berkeley DB minor or major release has an upgrading section in this
+chapter of the Berkeley DB Reference Guide. The section describes any API
+changes that were made in the release. Application maintainers must
+review the API changes, update their applications as necessary, and then
+re-compile using the new release. In addition, each section includes
+a page specifying whether the log file format or database formats changed in
+non-backward compatible ways as part of the release.
+<p>If the application does not have a Berkeley DB transactional environment, the
+re-compiled application may be installed in the field using the
+following steps:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<p><li>Remove any Berkeley DB environment, using the <a href="../../api_c/env_remove.html">DBENV-&gt;remove</a> function or an
+appropriate system utility.
+<p><li>Install the new version of the application.
+<p><li>If the database format has changed, upgrade the application's databases.
+See <a href="../../ref/am/upgrade.html">Upgrading databases</a> for more
+information.
+<p><li>Re-start the application.
+</ol>
+<p>If the application has a Berkeley DB transactional environment, but neither
+the log file nor the database formats have changed, the re-compiled
+application may be installed in the field using the following steps:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<p><li>Run recovery on the database environment, using the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function
+or the <a href="../../utility/db_recover.html">db_recover</a> utility.
+<p><li>Install the new version of the application.
+<p><li>Re-start the application.
+</ol>
+<p>If the application has a Berkeley DB transactional environment, and the log
+file format has changed but the database formats have not, the
+re-compiled application may be installed in the field using the
+following steps:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<p><li>Run recovery on the database environment, using the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function
+or the <a href="../../utility/db_recover.html">db_recover</a> utility.
+<p><li>Archive the database environment for catastrophic recovery. See
+<a href="../../ref/transapp/archival.html">Archival procedures</a> for more
+information.
+<p><li>Install the new version of the application.
+<p><li>Re-start the application.
+</ol>
+<p>If the application has a Berkeley DB transactional environment and the
+database format has changed, the re-compiled application may be
+installed in the field using the following steps:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<p><li>Run recovery on the database environment, using the <a href="../../api_c/env_open.html">DBENV-&gt;open</a> function
+or the <a href="../../utility/db_recover.html">db_recover</a> utility.
+<p><li>Archive the database environment for catastrophic recovery. See
+<a href="../../ref/transapp/archival.html">Archival procedures</a> for more
+information.
+<p><li>Install the new version of the application.
+<p><li>Upgrade the application's databases. See
+<a href="../../ref/am/upgrade.html">Upgrading databases</a> for more
+information.
+<p><li>Archive the database for catastrophic recovery again (using different
+media than before, of course).
+<p>This archival is not strictly necessary. However, if you have to perform
+catastrophic recovery after restarting your applications, that recovery
+must be done based on the last archive you have made. If you make this
+archive, you can use it as the basis of your catastrophic recovery. If
+you do not make this archive, you will have to use the archive you made
+in step #3 as the basis of your recovery, and you will have to upgrade it
+as described in step #5 before you can apply your log files to it.
+<p><li>Re-start the application.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/build_vxworks/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/xa/config.html b/bdb/docs/ref/xa/config.html
new file mode 100644
index 00000000000..cfe31f372f4
--- /dev/null
+++ b/bdb/docs/ref/xa/config.html
@@ -0,0 +1,79 @@
+<!--$Id: config.so,v 10.18 2000/03/22 22:02:15 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring Berkeley DB with the Tuxedo System</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>XA Resource Manager</dl></h3></td>
+<td width="1%"><a href="../../ref/xa/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring Berkeley DB with the Tuxedo System</h1>
+<p>This information assumes that you have already installed the Berkeley DB
+library.
+<p>First, you must update the resource manager file in Tuxedo. For the
+purposes of this discussion, assume the Tuxedo home directory is in:
+<p><blockquote><pre>/home/tuxedo</pre></blockquote>
+In that case, the resource manager file will be located in:
+<p><blockquote><pre>/home/tuxedo/udataobj/RM</pre></blockquote>
+Edit the resource manager file, adding the following line:
+<p><blockquote><pre>BERKELEY-DB:db_xa_switch:-L${DB_INSTALL}/lib -ldb \
+ -lsocket -ldl -lm</pre></blockquote>
+<p>where ${DB_INSTALL} is the directory into which you installed the Berkeley DB
+library.
+<p><b>Note that the above load options are for a Sun Microsystems Solaris
+5.6 Sparc installation of Tuxedo and may not be correct for your system.</b>
+<p>Next, you must build the transaction manager server. To do this, use the
+Tuxedo <b>buildtms</b>(1) utility. The buildtms utility will create
+the Berkeley-DB resource manager in the directory from which it was run.
+The parameters to buildtms should be:
+<p><blockquote><pre>buildtms -v -o DBRM -r BERKELEY-DB</pre></blockquote>
+<p>This will create an executable transaction manager server, DBRM, that is
+called by Tuxedo to process begins, commits, and aborts.
+<p>Finally, you must make sure that your TUXCONFIG environment variable
+identifies a ubbconfig file that properly identifies your resource
+managers. In the GROUPS section of the ubb file, you should identify the
+group's LMID and GRPNO as well as the transaction manager server name
+"TMSNAME=DBRM." You must also specify the OPENINFO parameter, setting it
+equal to the string:
+<p><blockquote><pre>rm_name:dir</pre></blockquote>
+<p>where rm_name is the resource name specified in the RM file (i.e.,
+BERKELEY-DB) and dir is the directory for the Berkeley DB home environment
+(see <a href="../../api_c/env_open.html">DBENV-&gt;open</a> for a discussion of Berkeley DB environments).
+<p>As Tuxedo resource manager startup accepts only a single string for
+configuration, any environment customization that might have been done
+via the config parameter to <a href="../../api_c/env_open.html">DBENV-&gt;open</a> must instead be done by
+placing a <a href="../../ref/env/naming.html#DB_CONFIG">DB_CONFIG</a> file in the Berkeley DB environment directory. See
+<a href="../../ref/env/naming.html">Berkeley DB File Naming</a> for further
+information.
+<p>Consider the following configuration. We have built a transaction
+manager server as described above. We want the Berkeley DB environment
+to be <b>/home/dbhome</b>, our database files to be maintained
+in <b>/home/datafiles</b>, our log files to be maintained in
+<b>/home/log</b>, and we want a duplexed server.
+<p>The GROUPS section of the ubb file might look like:
+<p><blockquote><pre>group_tm LMID=myname GRPNO=1 TMSNAME=DBRM TMSCOUNT=2 \
+ OPENINFO="BERKELEY-DB:/home/dbhome"</pre></blockquote>
+<p>There would be a <a href="../../ref/env/naming.html#DB_CONFIG">DB_CONFIG</a> configuration file in the directory
+<b>/home/dbhome</b> that contained the following two lines:
+<p><blockquote><pre>DB_DATA_DIR /home/datafiles
+DB_LOG_DIR /home/log
+</pre></blockquote>
+<p>Finally, the ubb file must be translated into a binary version, using
+Tuxedo's <b>tmloadcf</b>(1) utility, and then the pathname of that
+binary file must be specified as your TUXCONFIG environment variable.
+<p>At this point, your system is properly initialized to use the Berkeley DB
+resource manager.
+<p>See <a href="../../api_c/db_create.html">db_create</a> for further information on accessing data files
+using XA.
+<table><tr><td><br></td><td width="1%"><a href="../../ref/xa/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/xa/faq.html b/bdb/docs/ref/xa/faq.html
new file mode 100644
index 00000000000..db1e26a0b6b
--- /dev/null
+++ b/bdb/docs/ref/xa/faq.html
@@ -0,0 +1,55 @@
+<!--$Id: faq.so,v 10.11 2000/03/18 21:43:21 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Frequently Asked Questions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>XA Resource Manager</dl></h3></td>
+<td width="1%"><a href="../../ref/xa/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/appsignals.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Frequently Asked Questions</h1>
+<p><ol>
+<p><li><b>Does converting an application to run within XA change any of
+the already existing C/C++ API calls it does?</b>
+<p>When converting an application to run under XA, the application's Berkeley DB
+calls are unchanged, with two exceptions:
+<p><ol>
+<p><li>The application must specify the <a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a> flag
+to the <a href="../../api_c/db_create.html">db_create</a> interface.
+<p><li>The application should never explicitly call <a href="../../api_c/txn_commit.html">txn_commit</a>,
+<a href="../../api_c/txn_abort.html">txn_abort</a> or <a href="../../api_c/txn_begin.html">txn_begin</a>, as those calls are replaced by
+calls into the Tuxedo transaction manager. For the same reason, the
+application will always specify a transaction argument of NULL to the
+Berkeley DB functions that take transaction arguments (e.g., <a href="../../api_c/db_put.html">DB-&gt;put</a> or
+<a href="../../api_c/db_cursor.html">DB-&gt;cursor</a>).
+</ol>
+<p>Otherwise, your application should be unchanged.
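+<p>For illustration, the two changes might look like the following C fragment.
+This is a sketch only; the database name and helper functions are
+illustrative and are not part of the Berkeley DB interfaces.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/*
+ * Open a database for use under XA: the only difference from a
+ * non-XA application is the DB_XA_CREATE flag to db_create.
+ */
+int
+open_xa_db(DB **dbpp)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&amp;dbp, NULL, DB_XA_CREATE)) != 0)
+        return (ret);
+    if ((ret = dbp-&gt;open(dbp,
+        "accounts.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+    }
+    *dbpp = dbp;
+    return (0);
+}
+
+/*
+ * No txn_begin or txn_commit calls appear anywhere; the transaction
+ * argument is always NULL because the Tuxedo transaction manager
+ * owns the transaction.
+ */
+int
+store_item(DB *dbp, DBT *key, DBT *data)
+{
+    return (dbp-&gt;put(dbp, NULL, key, data, 0));
+}</pre></blockquote>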
+<hr size=1 noshade>
+<p><li><b>Is it possible to mix XA and non-XA transactions?</b>
+<p>Yes. It is also possible for XA and non-XA transactions to co-exist in
+the same Berkeley DB environment. To do this, specify the same environment to
+the non-XA <a href="../../api_c/env_open.html">DBENV-&gt;open</a> calls as was specified in the Tuxedo
+configuration file.
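+<p>For example, a non-XA thread of control might join the environment as
+follows. This is a sketch only; the home directory should be the one named
+in the Tuxedo OPENINFO string, and the flag list should be adjusted to the
+subsystems the application actually needs.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/*
+ * Join the environment named in the Tuxedo OPENINFO string (for
+ * example, "BERKELEY-DB:/home/dbhome") and run an ordinary,
+ * non-XA transaction in it.
+ */
+int
+run_non_xa_txn(const char *home)
+{
+    DB_ENV *dbenv;
+    DB_TXN *txn;
+    int ret;
+
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;open(dbenv, home,
+        DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN,
+        0)) != 0)
+        goto err;
+
+    /* Non-XA transactions are begun and resolved explicitly. */
+    if ((ret = txn_begin(dbenv, NULL, &amp;txn, 0)) != 0)
+        goto err;
+    /* ... database operations, passing txn explicitly ... */
+    ret = txn_commit(txn, 0);
+
+err: (void)dbenv-&gt;close(dbenv, 0);
+    return (ret);
+}</pre></blockquote>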
+<hr size=1 noshade>
+<p><li><b>How does Berkeley DB recovery interact with recovery by the transaction
+manager?</b>
+<p>When the Tuxedo recovery calls the Berkeley DB recovery functions, the standard
+Berkeley DB recovery procedures occur, for all operations that are represented
+in the Berkeley DB log files. This includes any non-XA transactions that were
+performed in the environment. Of course, this means that you can't use
+the standard Berkeley DB utilities (e.g., <a href="../../utility/db_recover.html">db_recover</a>) to perform
+recovery.
+<p>Also, standard log file archival and catastrophic recovery procedures
+should occur independently of XA operation.
+</ol>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/xa/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/appsignals.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/ref/xa/intro.html b/bdb/docs/ref/xa/intro.html
new file mode 100644
index 00000000000..7643ee420c6
--- /dev/null
+++ b/bdb/docs/ref/xa/intro.html
@@ -0,0 +1,61 @@
+<!--$Id: intro.so,v 10.19 2000/12/04 18:05:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>XA Resource Manager</dl></h3></td>
+<td width="1%"><a href="../../ref/transapp/throughput.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>Berkeley DB can be used as an XA-compliant resource manager. The XA
+implementation is known to work with the Tuxedo(tm) transaction
+manager.
+<p>The XA support is encapsulated in the resource manager switch
+db_xa_switch, which defines the following functions:
+<p><blockquote><pre>__db_xa_close Close the resource manager.
+__db_xa_commit Commit the specified transaction.
+__db_xa_complete Wait for asynchronous operations to
+ complete.
+__db_xa_end Disassociate the application from a
+ transaction.
+__db_xa_forget Forget about a transaction that was heuristically
+ completed. (Berkeley DB does not support heuristic
+ completion.)
+__db_xa_open Open the resource manager.
+__db_xa_prepare Prepare the specified transaction.
+__db_xa_recover Return a list of prepared, but not yet
+ committed transactions.
+__db_xa_rollback Abort the specified transaction.
+__db_xa_start Associate the application with a
+ transaction.
+</pre></blockquote>
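+<p>The switch itself is an ordinary X/Open <b>xa_switch_t</b> structure
+exported by the Berkeley DB library. A declaration for it looks like the
+following sketch; the header that defines xa_switch_t is system-dependent
+(Tuxedo supplies one).
+<p><blockquote><pre>#include &lt;xa.h&gt;    /* X/Open XA interface definitions. */
+
+/* The Berkeley DB resource manager switch described above. */
+extern struct xa_switch_t db_xa_switch;</pre></blockquote>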
+<p>The Berkeley DB resource manager does not support the following optional
+XA features:
+<ul type=disc>
+<li>Asynchronous operations.
+<li>Transaction migration.
+</ul>
+<p>The Tuxedo System is available from <a href="http://www.beasys.com">BEA Systems, Inc.</a>
+<p>For additional information on Tuxedo, see:
+<p><blockquote><i>Building Client/Server Applications Using Tuxedo</i>,
+by Hall, John Wiley &amp; Sons, Inc. Publishers.</blockquote>
+<p>For additional information on XA Resource Managers, see:
+<p><blockquote>X/Open CAE Specification
+<i>Distributed Transaction Processing: The XA Specification</i>,
+X/Open Document Number: XO/CAE/91/300.</blockquote>
+<p>For additional information on The Tuxedo System, see:
+<p><blockquote><i>The Tuxedo System</i>,
+by Andrade, Carges, Dwyer and Felts, Addison Wesley Longman Publishers.</blockquote>
+<table><tr><td><br></td><td width="1%"><a href="../../ref/transapp/throughput.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../ref/toc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/sleepycat/contact.html b/bdb/docs/sleepycat/contact.html
new file mode 100644
index 00000000000..c2d1e7f0ac7
--- /dev/null
+++ b/bdb/docs/sleepycat/contact.html
@@ -0,0 +1,107 @@
+<!--$Id: contact.html,v 10.8 2000/02/19 20:57:58 bostic Exp $-->
+<html>
+<head>
+<title>The Sleepycat Software Contact Page</title>
+<meta name="description" content="DB: A database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+
+<p align=center>
+<img src="../images/sleepycat.gif" alt="Sleepycat Software Inc.">
+<p>
+
+<table>
+<tr>
+
+<td valign=top>
+<b>
+General:
+</b>
+</td><td>
+<a href="mailto:db@sleepycat.com">db@sleepycat.com</a>
+</td>
+
+</tr><tr>
+<td><br></td>
+</tr><tr>
+
+<td valign=top>
+<b>
+Sales and Marketing:
+</b>
+</td><td>
+<a href="mailto:sales@sleepycat.com">sales@sleepycat.com</a><br>
++1-510-526-3972<br>
++1-877-SLEEPYCAT (USA only, toll-free)<br>
+</td>
+
+</tr><tr>
+<td><br></td>
+</tr><tr>
+
+<td valign=top>
+<b>
+Technical Support:
+</b>
+</td><td>
+<a href="mailto:support@sleepycat.com">support@sleepycat.com</a>
+</td>
+
+</tr><tr>
+<td><br></td>
+</tr><tr>
+
+<td valign=top>
+<b>
+Web Site:
+</b>
+</td><td>
+<a href="mailto:webmaster@sleepycat.com">webmaster@sleepycat.com</a>
+</td>
+
+</tr><tr>
+<td><br></td>
+</tr><tr>
+
+<td valign=top>
+<b>
+Press Inquiries:
+</b>
+</td><td>
+Michael Olson, VP Marketing<br>
+Sleepycat Software, Inc.<br>
+<a href="mailto:mao@sleepycat.com">mao@sleepycat.com</a>
+</td>
+
+</tr><tr>
+<td><br></td>
+</tr><tr>
+
+<td valign=top>
+<b>
+Postal Mail:
+</b>
+</td><td bgcolor="#EEEEEE">
+<b>Massachusetts Corporate Office</b><br>
+<br>
+Sleepycat Software Inc.<br>
+394 E. Riding Dr.<br>
+Carlisle, MA 01741-1601<br>
+</td>
+
+</tr><tr>
+
+<td valign=top>
+</td><td bgcolor="#EEEEEE">
+<b>West Coast Sales Office</b><br>
+<br>
+Sleepycat Software Inc.<br>
+1509 McGee St.<br>
+Berkeley CA 94703<br>
+</td>
+</tr>
+</table>
+
+</body>
+</html>
diff --git a/bdb/docs/sleepycat/legal.html b/bdb/docs/sleepycat/legal.html
new file mode 100644
index 00000000000..1945b3976d0
--- /dev/null
+++ b/bdb/docs/sleepycat/legal.html
@@ -0,0 +1,56 @@
+<!--$Id: legal.so,v 10.7 2000/03/20 15:02:43 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Sleepycat Software Legal Notices</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Sleepycat Software Legal Notices</h1>
+<p>Copyright (c) 1990-2000 Sleepycat Software, Inc., 394 E. Riding Dr.,
+Carlisle, MA 01741-1601 U.S.A. All Rights Reserved.
+<p>This product and publication is protected by copyright and distributed
+under licenses restricting its use, copying and distribution. Permission
+to use this publication or portions of this publication is granted by
+Sleepycat Software provided that the above copyright notice appears in
+all copies and that use of such publications is for non-commercial use
+only and no modifications of the publication are made.
+<p>RESTRICTED RIGHTS: Use, duplication, or disclosure by the U.S. Government
+is subject to restrictions of FAR 52.227-14(g)(2)(6/87) and FAR
+52.227-19(6/87), or DFAR 252.227-7015(b)(6/95) and DFAR 227.7202-3(a).
+<p>Sleepycat and the names of Sleepycat Software products referenced herein
+are either trademarks and/or service marks or registered trademarks and/or
+service marks of Sleepycat Software Inc.
+<p>Sun Microsystems, SunOS and Solaris are trademarks or registered
+trademarks of Sun Microsystems, Inc.
+<p>Hewlett-Packard and HP-UX are trademarks or registered trademarks of
+Hewlett-Packard Company.
+<p>DIGITAL and ULTRIX are trademarks or registered trademarks of Digital
+Equipment Corporation.
+<p>Microsoft, Windows and Windows NT are trademarks or registered trademarks
+of Microsoft Corporation.
+<p>TUXEDO is a trademark or registered trademark of BEA Systems, Inc.
+<p>All other brand, company and product names referenced in this publication
+may be trademarks, registered trademarks or service marks of their
+respective holders and are used here for informational purposes only.
+<p>WARNING: There is a non-zero chance that, through a process known as
+"tunneling," this product may spontaneously disappear from its present
+location and reappear at any random place in the universe. Sleepycat
+Software will not be responsible for damages or inconvenience that may
+result.
+<p><b>THIS PRODUCT IS PROVIDED BY SLEEPYCAT SOFTWARE "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT,
+ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.</b>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/sleepycat/license.html b/bdb/docs/sleepycat/license.html
new file mode 100644
index 00000000000..1407eed05ad
--- /dev/null
+++ b/bdb/docs/sleepycat/license.html
@@ -0,0 +1,109 @@
+<!--$Id: license.so,v 10.3 2000/12/04 18:05:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Sleepycat Software Product License</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Sleepycat Software Product License</h1>
+<p>The following is the license that applies to this copy of the Berkeley DB
+software. For a license to use the Berkeley DB software under conditions
+other than those described here, or to purchase support for this software,
+please <a href="contact.html">contact Sleepycat Software</a>.
+<p><blockquote><pre>/*
+ * Copyright (c) 1990-2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Redistributions in any form must be accompanied by information on
+ * how to obtain complete source code for the DB software and any
+ * accompanying software that uses the DB software. The source code
+ * must either be included in the distribution or be available for no
+ * more than the cost of distribution plus a nominal fee, and must be
+ * freely redistributable under reasonable conditions. For an
+ * executable file, complete source code means the source code for all
+ * modules it contains. It does not include source code for modules or
+ * files that typically accompany the major components of the operating
+ * system on which the executable file runs.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ * NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */</pre></blockquote>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/berkeley_db_svc.html b/bdb/docs/utility/berkeley_db_svc.html
new file mode 100644
index 00000000000..9e9c7bb4e45
--- /dev/null
+++ b/bdb/docs/utility/berkeley_db_svc.html
@@ -0,0 +1,88 @@
+<!--$Id: berkeley_db_svc.so,v 10.11 2000/04/15 16:57:38 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: berkeley_db_svc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>berkeley_db_svc</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkeley_db_svc [<b>-Vv</b>] [<b>-h home</b>]
+ [<b>-I seconds</b>] [<b>-L file</b>] [<b>-t seconds</b>] [<b>-T seconds</b>]</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The berkeley_db_svc utility is the Berkeley DB RPC server.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-h</b><dd>Add the specified home directory to the list of allowed home directories
+that can be specified by the client. The home directory should be an
+absolute pathname. The last component of each home directory specified
+must be unique, as that is how clients specify which database environment
+they wish to join.
+<p>Recovery will be run on each specified environment before the server
+begins accepting requests from clients. For this reason, only one copy
+of the server program should ever be run at any time, as recovery must
+always be single-threaded.
+<p><dt><b>-I</b><dd>Set the default idle timeout for client environments to the specified
+number of seconds. The default timeout is 24 hours.
+<p><dt><b>-L</b><dd>Log the execution of the berkeley_db_svc utility to the specified file in the
+following format, where <i>###</i> is the process ID, and the date
+is the time the utility was started.
+<p><blockquote><pre>berkeley_db_svc: ### Wed Jun 15 01:23:45 EDT 1995</pre></blockquote>
+This file will be removed if the berkeley_db_svc utility exits gracefully.
+<p><dt><b>-t</b><dd>Set the default timeout for client resources (idle transactions and
+cursors) to the
+specified number of seconds. When the timeout expires, if the
+resource is a transaction, it is aborted; if the resource is
+a cursor, it is closed.
+The default timeout is 5 minutes.
+<p><dt><b>-T</b><dd>Set the maximum timeout allowed for client resources.
+The default timeout is 20 minutes.
+If a client application requests a server timeout
+greater than the maximum timeout set for this server, then the client's
+timeout will be capped at the maximum timeout value.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+<p><dt><b>-v</b><dd>Run in verbose mode.
+</dl>
+<p>The berkeley_db_svc utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, berkeley_db_svc should always be given the chance to detach from
+the environment and exit gracefully. To cause berkeley_db_svc to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The berkeley_db_svc utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+berkeley_db_svc,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_archive.html b/bdb/docs/utility/db_archive.html
new file mode 100644
index 00000000000..5cc56a428b1
--- /dev/null
+++ b/bdb/docs/utility/db_archive.html
@@ -0,0 +1,85 @@
+<!--$Id: db_archive.so,v 10.19 2000/04/03 20:10:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_archive</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_archive</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_archive [<b>-alsVv</b>] [<b>-h home</b>]</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The db_archive utility writes the pathnames of log files that are
+no longer in use (e.g., no longer involved in active transactions), to
+the standard output, one pathname per line. These log files should be
+written to backup media to provide for recovery in the case of
+catastrophic failure (which also requires a snapshot of the database
+files), but they may then be deleted from the system to reclaim disk
+space.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-a</b><dd>Write all pathnames as absolute pathnames,
+instead of relative to the database home directories.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-l</b><dd>Write out the pathnames of all of the database log files,
+whether or not they are involved in active transactions.
+<p><dt><b>-s</b><dd>Write the pathnames of all of the database files that need to be archived
+in order to recover the database from catastrophic failure.
+If any of the database files have not been accessed during the lifetime of
+the current log files, db_archive will
+not include them in this
+output.
+<p>It is possible that some of the files referenced in the log have since
+been deleted from the system.
+In this case, db_archive will ignore them.
+When <a href="../utility/db_recover.html">db_recover</a> is run, any files referenced in the log that
+are not present during recovery are assumed to have been deleted and will
+not be recovered.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+<p><dt><b>-v</b><dd>Run in verbose mode, listing the checkpoints in the log files as they
+are reviewed.
+</dl>
+<p>The db_archive utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_archive should always be given the chance to detach from
+the environment and exit gracefully. To cause db_archive to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_archive utility exits 0 on success, and &gt;0 if an error occurs.
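+<p>The same list of log file pathnames is available to applications through
+the log_archive interface. The following sketch assumes the version 3.x
+calling convention, in which log_archive takes an optional allocation
+function as its final argument; the DB_ARCH_ABS flag corresponds to the
+<b>-a</b> option.
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+/*
+ * Print the absolute pathnames of log files that are no longer
+ * in use, much as "db_archive -a" does.  The environment handle
+ * is assumed to be already open.
+ */
+int
+print_unused_logs(DB_ENV *dbenv)
+{
+    char **list, **p;
+    int ret;
+
+    if ((ret = log_archive(dbenv, &amp;list, DB_ARCH_ABS, NULL)) != 0)
+        return (ret);
+    if (list != NULL) {
+        for (p = list; *p != NULL; ++p)
+            printf("%s\n", *p);
+        free(list);    /* The list is a single allocation. */
+    }
+    return (0);
+}</pre></blockquote>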
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+db_archive,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_checkpoint.html b/bdb/docs/utility/db_checkpoint.html
new file mode 100644
index 00000000000..dc49d03d8f5
--- /dev/null
+++ b/bdb/docs/utility/db_checkpoint.html
@@ -0,0 +1,82 @@
+<!--$Id: db_checkpoint.so,v 10.22 2000/04/03 20:10:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_checkpoint</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_checkpoint [<b>-1Vv</b>]
+ [<b>-h home</b>] [<b>-k kbytes</b>] [<b>-L file</b>] [<b>-p min</b>]</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The db_checkpoint utility is a daemon process that monitors the
+database log and periodically calls <a href="../api_c/txn_checkpoint.html">txn_checkpoint</a> to checkpoint it.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-1</b><dd>Checkpoint the log once, regardless of whether or not there has
+been activity since the last checkpoint, and then exit.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-k</b><dd>Checkpoint the database at least as often as every <b>kbytes</b> of log
+file are written.
+<p><dt><b>-L</b><dd>Log the execution of the db_checkpoint utility to the specified file in the
+following format, where <i>###</i> is the process ID, and the date
+is the time the utility was started.
+<p><blockquote><pre>db_checkpoint: ### Wed Jun 15 01:23:45 EDT 1995</pre></blockquote>
+This file will be removed if the db_checkpoint utility exits gracefully.
+<p><dt><b>-p</b><dd>Checkpoint the database at least every <b>min</b> minutes if
+there has been any activity since the last checkpoint.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+<p><dt><b>-v</b><dd>Write the time of each checkpoint attempt to the standard
+output.
+</dl>
+<p>At least one of the <b>-1</b>, <b>-k</b> and <b>-p</b> options must
+be specified.
+<p>The db_checkpoint utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_checkpoint should always be given the chance to detach from
+the environment and exit gracefully. To cause db_checkpoint to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_checkpoint utility does not attempt to create the Berkeley DB
+shared memory regions if they do not already exist. The application
+which creates the region should be started first, and then, once the
+region is created, the db_checkpoint utility should be started.
+<p>The db_checkpoint utility exits 0 on success, and &gt;0 if an error occurs.
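+<p>Applications that prefer not to run a separate daemon can call
+<a href="../api_c/txn_checkpoint.html">txn_checkpoint</a> directly. The
+following is a minimal sketch; the kilobyte and minute thresholds are
+illustrative, roughly matching "db_checkpoint -k 64 -p 5".
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/*
+ * Checkpoint the log if 64KB of log has been written or 5 minutes
+ * have passed since the last checkpoint.  A DB_INCOMPLETE return
+ * means some buffers could not be written; the checkpoint can
+ * simply be retried later.
+ */
+int
+checkpoint_env(DB_ENV *dbenv)
+{
+    int ret;
+
+    ret = txn_checkpoint(dbenv, 64, 5, 0);
+    return (ret == DB_INCOMPLETE ? 0 : ret);
+}</pre></blockquote>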
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+db_checkpoint,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_deadlock.html b/bdb/docs/utility/db_deadlock.html
new file mode 100644
index 00000000000..dfd23a903be
--- /dev/null
+++ b/bdb/docs/utility/db_deadlock.html
@@ -0,0 +1,85 @@
+<!--$Id: db_deadlock.so,v 10.22 2000/11/29 16:52:38 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_deadlock</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_deadlock</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_deadlock [<b>-Vvw</b>]
+ [<b>-a o | y</b>] [<b>-h home</b>] [<b>-L file</b>] [<b>-t sec</b>]</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The db_deadlock utility traverses the database lock structures
+and aborts a lock request each time it detects a deadlock. By default,
+a random lock request is chosen to be aborted. This utility should be
+run as a background daemon, or the underlying Berkeley DB deadlock detection
+interfaces should be called in some other way, whenever there are
+multiple threads or processes accessing a database and at least one of
+them is modifying it.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-a</b><dd>When a deadlock is detected, abort the oldest (o) lock request or the
+youngest (y) lock request.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-L</b><dd>Log the execution of the db_deadlock utility to the specified file in the
+following format, where <i>###</i> is the process ID, and the date
+is the time the utility was started.
+<p><blockquote><pre>db_deadlock: ### Wed Jun 15 01:23:45 EDT 1995</pre></blockquote>
+This file will be removed if the db_deadlock utility exits gracefully.
+<p><dt><b>-t</b><dd>Initiate a pass over the database locks at least every <b>sec</b>
+seconds.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+<p><dt><b>-v</b><dd>Run in verbose mode, generating messages each time the detector runs.
+<p><dt><b>-w</b><dd>Make a single pass over the database locks every time a process is
+forced to wait for a lock.
+</dl>
+<p>At least one of the <b>-t</b> and <b>-w</b> options must be specified.
+<p>The db_deadlock utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_deadlock should always be given the chance to detach from
+the environment and exit gracefully. To cause db_deadlock to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_deadlock utility does not attempt to create the Berkeley DB
+shared memory regions if they do not already exist. The application
+which creates the region should be started first, and then, once the
+region is created, the db_deadlock utility should be started.
+<p>The db_deadlock utility exits 0 on success, and &gt;0 if an error occurs.
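+<p>Alternatively, the deadlock detector can be driven from within the
+application by configuring the environment, before it is opened, to run
+the detector whenever a lock conflict occurs. The following is a sketch
+only.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/*
+ * Run the deadlock detector automatically whenever a lock conflict
+ * occurs, instead of (or in addition to) running the db_deadlock
+ * daemon.  Must be called before DBENV-&gt;open.
+ */
+int
+enable_deadlock_detection(DB_ENV *dbenv)
+{
+    /* DB_LOCK_DEFAULT selects the default abort policy. */
+    return (dbenv-&gt;set_lk_detect(dbenv, DB_LOCK_DEFAULT));
+}</pre></blockquote>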
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+db_deadlock,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_dump.html b/bdb/docs/utility/db_dump.html
new file mode 100644
index 00000000000..bd97b307c7b
--- /dev/null
+++ b/bdb/docs/utility/db_dump.html
@@ -0,0 +1,128 @@
+<!--$Id: db_dump.so,v 10.26 2000/09/04 16:00:50 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_dump</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_dump</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_dump [<b>-klNpRrV</b>] [<b>-d ahr</b>]
+ [<b>-f output</b>] [<b>-h home</b>] [<b>-s database</b>] file
+db_dump185 [<b>-p</b>] [<b>-f output</b>] file</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The db_dump utility reads the database file <b>file</b> and
+writes it to the standard output using a portable flat-text format
+understood by the <a href="../utility/db_load.html">db_load</a> utility. The argument <b>file</b>
+must be a file produced using the Berkeley DB library functions.
+<p>The <a href="../utility/db_dump.html">db_dump185</a> utility is similar to the db_dump utility
+except that it reads databases in the format used by Berkeley DB versions 1.85
+and 1.86.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-d</b><dd>Dump the specified database in a format helpful for debugging the Berkeley DB
+library routines.
+<p><dl compact>
+<p><dt>a<dd>Display all information.
+<dt>h<dd>Display only page headers.
+<dt>r<dd>Do not display the free-list or pages on the free list. This
+mode is used by the recovery tests.
+</dl>
+<p><b>The output format of the <b>-d</b> option is not standard and may change,
+without notice, between releases of the Berkeley DB library.</b>
+<p><dt><b>-f</b><dd>Write to the specified <b>file</b> instead of to the standard output.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-k</b><dd>Dump record numbers from Queue and Recno databases as keys.
+<p><dt><b>-l</b><dd>List the databases stored in the file.
+<p><dt><b>-N</b><dd>Do not acquire shared region locks while running. Other problems such as
+potentially fatal errors in Berkeley DB will be ignored as well. This option
+is intended only for debugging errors and should not be used under any
+other circumstances.
+<p><dt><b>-p</b><dd>If characters in either the key or data items are printing characters (as
+defined by <b>isprint</b>(3)), use printing characters in <b>file</b>
+to represent them. This option permits users to use standard text editors
+and tools to modify the contents of databases.
+<p>Note, different systems may have different notions as to what characters
+are considered <i>printing characters</i>, and databases dumped in
+this manner may be less portable to external systems.
+<p><dt><b>-R</b><dd>Aggressively salvage data from a possibly corrupt file. The <b>-R</b>
+flag differs from the <b>-r</b> option in that it will return all
+possible data from the file at the risk of also returning already deleted
+or otherwise nonsensical items. Data dumped in this fashion will almost
+certainly have to be edited by hand or other means before the data is
+ready for re-load into another database.
+<p><dt><b>-r</b><dd>Salvage data from a possibly corrupt file. When used on an uncorrupted
+database, this option should return equivalent data to a normal dump, but
+most likely in a different order.
+<p><dt><b>-s</b><dd>Specify a single database to dump. If no database is specified, all
+databases in the database file are dumped.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+</dl>
+<p>Dumping and reloading Hash databases that use user-defined hash functions
+will result in new databases that use the default hash function.
+While using the default hash function may not be optimal for the new database,
+it will continue to work correctly.
+<p>Dumping and reloading Btree databases that use user-defined prefix or
+comparison functions will result in new databases that use the default
+prefix and comparison functions.
+<b>In this case, it is quite likely that the database will be damaged
+beyond repair, permitting neither record storage nor retrieval.</b>
+<p>The only available workaround for either case is to modify the sources
+for the <a href="../utility/db_load.html">db_load</a> utility to load the database using the correct
+hash, prefix and comparison functions.
+<p>The <a href="../utility/db_dump.html">db_dump185</a> utility may not be available on your system as it
+is not always built when the Berkeley DB libraries and utilities are installed.
+If you are unable to find it, see your system administrator for further
+information.
+<p>The db_dump and <a href="../utility/db_dump.html">db_dump185</a> utility output formats are
+documented in the <a href="../ref/dumpload/format.html">Dump Output
+Formats</a> section of the Reference Guide.
+<p>The db_dump utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_dump should always be given the chance to detach from
+the environment and exit gracefully. To cause db_dump to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>When using a Berkeley DB database environment, the db_dump utility
+does not configure for any kind of database locking and so should not
+be used with active Berkeley DB environments. If db_dump is used in
+an active database environment, corruption may result.
+<p>The db_dump utility exits 0 on success, and &gt;0 if an error occurs.
+<p>The <a href="../utility/db_dump.html">db_dump185</a> utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+db_dump,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_load.html b/bdb/docs/utility/db_load.html
new file mode 100644
index 00000000000..41084f09cd0
--- /dev/null
+++ b/bdb/docs/utility/db_load.html
@@ -0,0 +1,151 @@
+<!--$Id: db_load.so,v 10.29 2000/12/04 18:05:45 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_load</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_load</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_load [<b>-nTV</b>] [<b>-c name=value</b>] [<b>-f file</b>]
+ [<b>-h home</b>] [<b>-t btree | hash | queue | recno</b>] file</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The db_load utility reads from the standard input and loads it
+into the database <b>file</b>. The database <b>file</b> is created if
+it does not already exist.
+<p>The input to db_load must be in the output format specified by the
+<a href="../utility/db_dump.html">db_dump</a> utility, utilities, or as specified for the <b>-T</b>
+below.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-c</b><dd>Specify configuration options ignoring any value they may have based on
+the input. The command-line format is <b>name=value</b>. See
+<a href="db_load.html#Supported Keywords">Supported Keywords</a> for a
+list of supported keywords for the <b>-c</b> option.
+<p><dt><b>-f</b><dd>Read from the specified <b>input</b> file instead of from the standard
+input.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment.
+<p>If a home directory is specified, the database environment is opened using
+the <a href="../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>, <a href="../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a>, <a href="../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>,
+<a href="../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> and <a href="../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> flags to
+<a href="../api_c/env_open.html">DBENV-&gt;open</a>. (This means that db_load can be used to load
+data into databases while they are in use by other processes.) If the
+<a href="../api_c/env_open.html">DBENV-&gt;open</a> call fails, or if no home directory is specified, the
+database is still updated, but the environment is ignored, e.g., no
+locking is done.
+<p><dt><b>-n</b><dd>Do not overwrite existing keys in the database when loading into an
+already existing database. If a key/data pair cannot be loaded into the
+database for this reason, a warning message is displayed on the standard
+error output and the key/data pair are skipped.
+<p><dt><b>-T</b><dd>The <b>-T</b> option allows non-Berkeley DB applications to easily load text
+files into databases.
+<p>If the database to be created is of type Btree or Hash, or the keyword
+<b>keys</b> is specified as set, the input must be paired lines of text,
+where the first line of the pair is the key item, and the second line of
+the pair is its corresponding data item. If the database to be created
+is of type Queue or Recno and the keyword <b>keys</b> is not set, the
+input must be lines of text, where each line is a new data item for the
+database.
+<p>A simple escape mechanism, where newline and backslash (\)
+characters are special, is applied to the text input. Newline characters
+are interpreted as record separators. Backslash characters in the text
+will be interpreted in one of two ways: if the backslash character
+precedes another backslash character, the pair will be interpreted as a
+literal backslash. If the backslash character precedes any other
+character, the two characters following the backslash will be interpreted
+as hexadecimal specification of a single character, e.g., \0a
+is a newline character in the ASCII character set.
+<p>For this reason, any backslash or newline characters that naturally
+occur in the text input must be escaped to avoid misinterpretation by
+db_load.
+<p>If the <b>-T</b> option is specified, the underlying access method type
+must be specified using the <b>-t</b> option.
+<p><dt><b>-t</b><dd>Specify the underlying access method. If no <b>-t</b> option is
+specified, the database will be loaded into a database of the same type
+as was dumped, e.g., a Hash database will be created if a Hash database
+was dumped.
+<p>Btree and Hash databases may be converted from one to the other. Queue
+and Recno databases may be converted from one to the other. If the
+<b>-k</b> option was specified on the call to <a href="../utility/db_dump.html">db_dump</a>, then Queue
+and Recno databases may be converted to Btree or Hash, with the key being
+the integer record number.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+</dl>
+<p>The db_load utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_load should always be given the chance to detach from
+the environment and exit gracefully. To cause db_load to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_load utility exits 0 on success, 1 if one or more key/data
+pairs were not loaded into the database because the key already existed,
+and &gt;1 if an error occurs.
+<h3>Examples</h3>
+<p>The db_load utility can be used to load text files into databases.
+For example, the following command loads the standard UNIX
+<i>/etc/passwd</i> file into a database, with the login name as the
+key item and the entire password entry as the data item:
+<p><blockquote><pre>awk -F: '{print $1; print $0}' &lt; /etc/passwd |
+ sed 's/\\/\\\\/g' | db_load -T -t hash passwd.db</pre></blockquote>
+<p>Note that backslash characters naturally occurring in the text are escaped
+to avoid interpretation as escape characters by db_load.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h3>Supported Keywords</h3>
+The following keywords are supported for the <b>-c</b> command-line
+option to the db_load utility. See <a href="../api_c/db_open.html">DB-&gt;open</a> for further
+discussion of these keywords and what values should be specified.
+<p>The parenthetical listing specifies how the value part of the
+<b>name=value</b> pair is interpreted. Items listed as (boolean) expect
+value to be <b>1</b> (set) or <b>0</b> (unset). Items listed as
+(number) convert value to a number. Items listed as (string) use the
+string value without modification.
+<p><dl compact>
+<dt>bt_minkey (number)<dd>The minimum number of keys per page.
+<dt>database (string)<dd>The database to load.
+<dt>db_lorder (number)<dd>The byte order for integers in the stored database metadata.
+<dt>db_pagesize (number)<dd>The size of pages used for nodes in the tree, in bytes.
+<dt>duplicates (boolean)<dd>The value of the <a href="../api_c/db_set_flags.html#DB_DUP">DB_DUP</a> flag.
+<dt>dupsort (boolean)<dd>The value of the <a href="../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag.
+<dt>h_ffactor (number)<dd>The density within the Hash database.
+<dt>h_nelem (number)<dd>The size of the Hash database.
+<dt>keys (boolean)<dd>Specify if keys are present for Queue or Recno databases.
+<dt>re_len (number)<dd>Specify fixed-length records of the specified length.
+<dt>re_pad (string)<dd>Specify the fixed-length record pad character.
+<dt>recnum (boolean)<dd>The value of the <a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> flag.
+<dt>renumber (boolean)<dd>The value of the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+db_load,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_printlog.html b/bdb/docs/utility/db_printlog.html
new file mode 100644
index 00000000000..10033e09483
--- /dev/null
+++ b/bdb/docs/utility/db_printlog.html
@@ -0,0 +1,69 @@
+<!--$Id: db_printlog.so,v 10.10 2000/04/03 20:10:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_printlog</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_printlog</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_printlog [<b>-NV</b>] [<b>-h home</b>]</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The db_printlog utility is a debugging utility that dumps Berkeley DB
+log files in a human-readable format.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-N</b><dd>Do not acquire shared region locks while running. Other problems such as
+potentially fatal errors in Berkeley DB will be ignored as well. This option
+is intended only for debugging errors and should not be used under any
+other circumstances.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+</dl>
+<p>For more information on the db_printlog output and using it to
+debug applications, see <a href="../ref/debug/printlog.html">Reviewing
+Berkeley DB log files</a>.
+<p>The db_printlog utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_printlog should always be given the chance to detach from
+the environment and exit gracefully. To cause db_printlog to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_printlog utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_recover.html b/bdb/docs/utility/db_recover.html
new file mode 100644
index 00000000000..80341597cd7
--- /dev/null
+++ b/bdb/docs/utility/db_recover.html
@@ -0,0 +1,97 @@
+<!--$Id: db_recover.so,v 10.22 2000/05/10 17:56:02 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_recover</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_recover [<b>-cVv</b>] [<b>-h home</b>] [<b>-t [[CC]YY]MMDDhhmm[.SS]</b>]</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The db_recover utility must be run after an unexpected application,
+Berkeley DB, or system failure to restore the database to a consistent state.
+All committed transactions are guaranteed to appear after db_recover
+has run, and all uncommitted transactions will be completely undone.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-c</b><dd>Perform catastrophic recovery instead of normal recovery.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-t</b><dd>Recover to the time specified rather than to the most current possible
+date. The timestamp argument should be in the form
+[[CC]YY]MMDDhhmm[.SS] where each pair of
+letters represents the following:
+<p><dl compact>
+<p><dt>CC<dd>The first two digits of the year (the century).
+<dt>YY<dd>The second two digits of the year. If "YY" is specified, but "CC" is not,
+a value for "YY" between 69 and 99 results in a "CC" value of 19. Otherwise,
+a "CC" value of 20 is used.
+<dt>MM<dd>The month of the year, from 1 to 12.
+<dt>DD<dd>The day of the month, from 1 to 31.
+<dt>hh<dd>The hour of the day, from 0 to 23.
+<dt>mm<dd>The minute of the hour, from 0 to 59.
+<dt>SS<dd>The second of the minute, from 0 to 61.
+</dl>
+<p>If the "CC" and "YY" letter pairs are not specified, the values default
+to the current year. If the "SS" letter pair is not specified, the value
+defaults to 0. (See the example following this list of options.)
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+<p><dt><b>-v</b><dd>Run in verbose mode.
+</dl>
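For illustration, the following standalone C sketch (not part of the db_recover source) builds a timestamp in the [[CC]YY]MMDDhhmm[.SS] form accepted by <b>-t</b>; the strftime format string is an assumption derived from the field layout described above, and the resulting value simply names the recovery point.

```c
#include <stdio.h>
#include <time.h>

int
main(void)
{
	char stamp[32];
	time_t now;

	now = time(NULL);
	/* %Y%m%d%H%M.%S yields CCYYMMDDhhmm.SS, e.g. 200012311530.00. */
	if (strftime(stamp, sizeof(stamp),
	    "%Y%m%d%H%M.%S", localtime(&now)) == 0)
		return (1);
	printf("db_recover -t %s\n", stamp);
	return (0);
}
```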
+<p>In the case of catastrophic recovery, an archival copy, or
+<i>snapshot</i> of all database files must be restored along with all
+of the log files written since the database file snapshot was made. (If
+disk space is a problem, log files may be referenced by symbolic links).
+For further information on creating a database snapshot, see
+<a href="../ref/transapp/archival.html">Archival Procedures</a>.
+For further information on performing recovery, see
+<a href="../ref/transapp/recovery.html">Recovery Procedures</a>.
+<p>If the failure was not catastrophic, the files present on the system at the
+time of failure are sufficient to perform recovery.
+<p>If log files are missing, db_recover will identify the missing
+log file(s) and fail, in which case the missing log files need to be
+restored and recovery performed again.
+<p>The db_recover utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_recover should always be given the chance to detach from
+the environment and exit gracefully. To cause db_recover to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_recover utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+db_recover,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_stat.html b/bdb/docs/utility/db_stat.html
new file mode 100644
index 00000000000..ba9263e3221
--- /dev/null
+++ b/bdb/docs/utility/db_stat.html
@@ -0,0 +1,104 @@
+<!--$Id: db_stat.so,v 10.27 2000/04/03 20:10:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_stat</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_stat [<b>-celmNtV</b>]
+ [<b>-C Acfhlmo</b>] [<b>-d file [<b>-s database</b>]</b>] [<b>-h home</b>] [<b>-M Ahlm</b>]</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a>
+<p>The db_stat utility displays statistics for Berkeley DB environments.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-C</b><dd>Display internal information about the lock region. (The output from this
+option is often both voluminous and meaningless, and is intended only for
+debugging.)
+<p><dl compact>
+<p><dt>A<dd>Display all information.
+<dt>c<dd>Display lock conflict matrix.
+<dt>f<dd>Display lock and object free lists.
+<dt>l<dd>Display lockers within hash chains.
+<dt>m<dd>Display region memory information.
+<dt>o<dd>Display objects within hash chains.
+</dl>
+<p><dt><b>-c</b><dd>Display lock region statistics as described in <a href="../api_c/lock_stat.html">lock_stat</a>.
+<p><dt><b>-d</b><dd>Display database statistics for the specified file, as described in
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>.
+<p>If the file contains multiple databases and the <b>-s</b> flag is
+not specified, the statistics are for the internal database that describes
+the other databases the file contains, and not for the file as a whole.
+<p><dt><b>-e</b><dd>Display current environment statistics.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-l</b><dd>Display log region statistics as described in <a href="../api_c/log_stat.html">log_stat</a>.
+<p><dt><b>-M</b><dd>Display internal information about the shared memory buffer pool. (The
+output from this option is often both voluminous and meaningless, and is
+intended only for debugging.)
+<p><dl compact>
+<p><dt>A<dd>Display all information.
+<dt>h<dd>Display buffers within hash chains.
+<dt>l<dd>Display buffers within LRU chains.
+<dt>m<dd>Display region memory information.
+</dl>
+<p><dt><b>-m</b><dd>Display shared memory buffer pool statistics as described in
+<a href="../api_c/memp_stat.html">memp_stat</a>.
+<p><dt><b>-N</b><dd>Do not acquire shared region locks while running. Other problems such as
+potentially fatal errors in Berkeley DB will be ignored as well. This option
+is intended only for debugging errors and should not be used under any
+other circumstances.
+<p><dt><b>-s</b><dd>Display statistics for the specified database contained in the file
+specified with the <b>-d</b> flag.
+<p><dt><b>-t</b><dd>Display transaction region statistics as described in <a href="../api_c/txn_stat.html">txn_stat</a>.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+</dl>
+<p>Only one set of statistics is displayed for each run, and the last option
+specifying a set of statistics takes precedence.
+<p>Values smaller than 10 million are generally displayed without any special
+notation. Values larger than 10 million are normally displayed as
+<b>&lt;number&gt;M</b>.
+<p>The db_stat utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_stat should always be given the chance to detach from
+the environment and exit gracefully. To cause db_stat to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_stat utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+db_stat,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_upgrade.html b/bdb/docs/utility/db_upgrade.html
new file mode 100644
index 00000000000..6375f380ed9
--- /dev/null
+++ b/bdb/docs/utility/db_upgrade.html
@@ -0,0 +1,93 @@
+<!--$Id: db_upgrade.so,v 1.4 2000/08/10 17:54:50 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_upgrade</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_upgrade [<b>-NsV</b>] [<b>-h home</b>] file ...</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a> <a name="4"><!--meow--></a>
+<p>The db_upgrade utility upgrades the Berkeley DB version of one or more
+files and the databases they contain to the current release version.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-N</b><dd>Do not acquire shared region locks while running. Other problems such as
+potentially fatal errors in Berkeley DB will be ignored as well. This option
+is intended only for debugging errors and should not be used under any
+other circumstances.
+<p><dt><b>-s</b><dd>This flag is only meaningful when upgrading databases from releases before the
+Berkeley DB 3.1 release.
+<p>As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release, the
+on-disk format of duplicate data items changed. To upgrade the format
+correctly, applications must specify whether the duplicate data items in
+the database are sorted. Specifying the <b>-s</b> flag means that the
+duplicates are sorted; otherwise, they are assumed to be unsorted.
+Incorrectly specifying the value of this flag may lead to database
+corruption.
+<p>Because the db_upgrade utility upgrades a physical file (including
+all of the databases it contains), it is not possible to use
+db_upgrade to upgrade files in which some of the databases have sorted
+duplicate data items and others have unsorted duplicate data items.
+If the file does not have
+more than a single database, or the databases do not support duplicate
+data items, or all of the databases that support duplicate data items
+support the same style of duplicates (either sorted or unsorted),
+db_upgrade will work correctly as long as the <b>-s</b> flag is
+correctly specified. Otherwise, the file cannot be upgraded using
+db_upgrade, and must be upgraded manually using the <a href="../utility/db_dump.html">db_dump</a>
+and <a href="../utility/db_load.html">db_load</a> utilities.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+</dl>
+<p><b>It is important to realize that Berkeley DB database upgrades are done
+in place, and so are potentially destructive.</b> This means that if the
+system crashes during the upgrade procedure, or if the upgrade procedure
+runs out of disk space, the databases may be left in an inconsistent and
+unrecoverable state. See <a href="../ref/am/upgrade.html">Upgrading
+databases</a> for more information.
+<p>The db_upgrade utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_upgrade should always be given the chance to detach from
+the environment and exit gracefully. To cause db_upgrade to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_upgrade utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+db_upgrade,
+and
+<a href="../utility/db_verify.html">db_verify</a>.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/db_verify.html b/bdb/docs/utility/db_verify.html
new file mode 100644
index 00000000000..610e857a9da
--- /dev/null
+++ b/bdb/docs/utility/db_verify.html
@@ -0,0 +1,73 @@
+<!--$Id: db_verify.so,v 10.3 2000/07/13 16:41:50 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: db_verify</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+ <a name="2"><!--meow--></a>
+<table><tr valign=top>
+<td>
+<h1>db_verify</h1>
+</td>
+<td width="1%">
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../ref/toc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_verify [<b>-NqV</b>] [<b>-h home</b>] file ...</pre></h3>
+<h1>Description</h1>
+ <a name="3"><!--meow--></a> <a name="4"><!--meow--></a>
+<p>The db_verify utility verifies the structure of one or more
+files and the databases they contain.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-N</b><dd>Do not acquire shared region locks while running. Other problems such as
+potentially fatal errors in Berkeley DB will be ignored as well. This option
+is intended only for debugging errors and should not be used under any
+other circumstances.
+<p><dt><b>-q</b><dd>Suppress the printing of any error descriptions; simply exit with success or
+failure.
+<p><dt><b>-V</b><dd>Write the version number to the standard output and exit.
+</dl>
+<p>If the file being verified contains databases using non-default
+comparison or hashing functions, the db_verify utility may
+not be used for verification, as it will likely always return failure.
+Such files must be verified explicitly, using the <a href="../api_c/db_verify.html">DB-&gt;verify</a> function,
+after setting the correct comparison or hashing functions.
+<p>The db_verify utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a Berkeley DB
+environment, db_verify should always be given the chance to detach from
+the environment and exit gracefully. To cause db_verify to release all
+environment resources and exit cleanly, send it an interrupt signal
+(SIGINT).
+<p>The db_verify utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DBENV-&gt;open</a>.
+</dl>
+<h1>See Also</h1>
+<a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a>,
+<a href="../utility/db_archive.html">db_archive</a>,
+<a href="../utility/db_checkpoint.html">db_checkpoint</a>,
+<a href="../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../utility/db_dump.html">db_dump</a>,
+<a href="../utility/db_load.html">db_load</a>,
+<a href="../utility/db_recover.html">db_recover</a>,
+<a href="../utility/db_stat.html">db_stat</a>,
+<a href="../utility/db_upgrade.html">db_upgrade</a>,
+and
+db_verify.
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/docs/utility/index.html b/bdb/docs/utility/index.html
new file mode 100644
index 00000000000..3f0c430ab0f
--- /dev/null
+++ b/bdb/docs/utility/index.html
@@ -0,0 +1,28 @@
+<!--$Id: index.so,v 10.10 2000/04/03 20:10:39 bostic Exp $-->
+<!--Copyright 1997, 1998, 1999, 2000 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Berkeley DB Supporting Utilities</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB Supporting Utilities</h1>
+<p><table border=1 align=center>
+<tr><th>Utility</th><th>Description</th></tr>
+<tr><td><a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a></td><td>RPC server utility</td></tr>
+<tr><td><a href="../utility/db_archive.html">db_archive</a></td><td>Archival utility</td></tr>
+<tr><td><a href="../utility/db_checkpoint.html">db_checkpoint</a></td><td>Transaction checkpoint utility</td></tr>
+<tr><td><a href="../utility/db_deadlock.html">db_deadlock</a></td><td>Deadlock detection utility</td></tr>
+<tr><td><a href="../utility/db_dump.html">db_dump</a></td><td>Database dump utility</td></tr>
+<tr><td><a href="../utility/db_load.html">db_load</a></td><td>Database load utility</td></tr>
+<tr><td><a href="../utility/db_printlog.html">db_printlog</a></td><td>Transaction log display utility</td></tr>
+<tr><td><a href="../utility/db_recover.html">db_recover</a></td><td>Recovery utility</td></tr>
+<tr><td><a href="../utility/db_stat.html">db_stat</a></td><td>Statistics utility</td></tr>
+<tr><td><a href="../utility/db_upgrade.html">db_upgrade</a></td><td>Database upgrade utility</td></tr>
+<tr><td><a href="../utility/db_verify.html">db_verify</a></td><td>Verification utility</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/bdb/env/db_salloc.c b/bdb/env/db_salloc.c
new file mode 100644
index 00000000000..4780107c593
--- /dev/null
+++ b/bdb/env/db_salloc.c
@@ -0,0 +1,360 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_salloc.c,v 11.10 2000/12/06 19:55:44 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * Implement shared memory region allocation, using simple first-fit algorithm.
+ * The model is that we take a "chunk" of shared memory store and begin carving
+ * it up into areas, similarly to how malloc works. We do coalescing on free.
+ *
+ * The "len" field in the __data struct contains the length of the free region
+ * (less the size_t bytes that hold the length).  We use the address provided
+ * by the caller to find this length, which allows us to free a chunk without
+ * requiring that the caller pass in the length of the chunk they're freeing.
+ */
+SH_LIST_HEAD(__head);
+struct __data {
+ size_t len;
+ SH_LIST_ENTRY links;
+};
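As a rough standalone sketch of the model described above (independent of Berkeley DB, with the splitting, alignment, and coalescing handled by __db_shalloc and __db_shalloc_free deliberately omitted), a first-fit allocator over a single region that keeps a per-chunk length header might look like the following; all toy_* names are hypothetical:

```c
#include <stddef.h>
#include <stdio.h>

struct toy_chunk {
	size_t len;			/* usable bytes after this header */
	struct toy_chunk *next;		/* next free chunk, NULL at the end */
};

static struct toy_chunk *toy_free_list;

static void
toy_init(void *area, size_t size)
{
	struct toy_chunk *c;

	c = area;
	c->len = size - sizeof(*c);
	c->next = NULL;
	toy_free_list = c;
}

static void *
toy_alloc(size_t len)
{
	struct toy_chunk **cp, *c;

	/* First fit: hand back the first free chunk that is big enough. */
	for (cp = &toy_free_list; (c = *cp) != NULL; cp = &c->next)
		if (c->len >= len) {
			*cp = c->next;	/* unlink; no splitting in this sketch */
			return ((char *)c + sizeof(*c));
		}
	return (NULL);
}

int
main(void)
{
	static union {
		struct toy_chunk align;
		char bytes[1024];
	} region;
	void *p;

	toy_init(&region, sizeof(region));
	p = toy_alloc(100);
	printf("returned %p, length header lives at %p\n",
	    p, (void *)((char *)p - sizeof(struct toy_chunk)));
	return (0);
}
```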
+
+/*
+ * __db_shalloc_init --
+ * Initialize the area as one large chunk.
+ *
+ * PUBLIC: void __db_shalloc_init __P((void *, size_t));
+ */
+void
+__db_shalloc_init(area, size)
+ void *area;
+ size_t size;
+{
+ struct __data *elp;
+ struct __head *hp;
+
+ hp = area;
+ SH_LIST_INIT(hp);
+
+ elp = (struct __data *)(hp + 1);
+ elp->len = size - sizeof(struct __head) - sizeof(elp->len);
+ SH_LIST_INSERT_HEAD(hp, elp, links, __data);
+}
+
+/*
+ * __db_shalloc --
+ * Allocate some space from the shared region.
+ *
+ * PUBLIC: int __db_shalloc_size __P((size_t, size_t));
+ */
+int
+__db_shalloc_size(len, align)
+ size_t len, align;
+{
+ /* Never allocate less than the size of a struct __data. */
+ if (len < sizeof(struct __data))
+ len = sizeof(struct __data);
+
+#ifdef DIAGNOSTIC
+ /* Add room for a guard byte. */
+ ++len;
+#endif
+
+ /* Never align to less than a db_align_t boundary. */
+ if (align <= sizeof(db_align_t))
+ align = sizeof(db_align_t);
+
+ return (ALIGN(len, align) + sizeof (struct __data));
+}
+
+/*
+ * __db_shalloc --
+ * Allocate some space from the shared region.
+ *
+ * PUBLIC: int __db_shalloc __P((void *, size_t, size_t, void *));
+ */
+int
+__db_shalloc(p, len, align, retp)
+ void *p, *retp;
+ size_t len, align;
+{
+ struct __data *elp;
+ size_t *sp;
+ void *rp;
+
+ /* Never allocate less than the size of a struct __data. */
+ if (len < sizeof(struct __data))
+ len = sizeof(struct __data);
+
+#ifdef DIAGNOSTIC
+ /* Add room for a guard byte. */
+ ++len;
+#endif
+
+ /* Never align to less than a db_align_t boundary. */
+ if (align <= sizeof(db_align_t))
+ align = sizeof(db_align_t);
+
+ /* Walk the list, looking for a slot. */
+ for (elp = SH_LIST_FIRST((struct __head *)p, __data);
+ elp != NULL;
+ elp = SH_LIST_NEXT(elp, links, __data)) {
+ /*
+ * Calculate the value of the returned pointer if we were to
+ * use this chunk.
+ * + Find the end of the chunk.
+ * + Subtract the memory the user wants.
+ * + Find the closest previous correctly-aligned address.
+ */
+ rp = (u_int8_t *)elp + sizeof(size_t) + elp->len;
+ rp = (u_int8_t *)rp - len;
+ rp = (u_int8_t *)((db_alignp_t)rp & ~(align - 1));
+
+ /*
+ * Rp may now point before elp->links, in which case the chunk
+ * was too small, and we have to try again.
+ */
+ if ((u_int8_t *)rp < (u_int8_t *)&elp->links)
+ continue;
+
+ *(void **)retp = rp;
+#ifdef DIAGNOSTIC
+ /*
+ * At this point, whether or not we still need to split up a
+ * chunk, retp is the address of the region we are returning,
+ * and (u_int8_t *)elp + sizeof(size_t) + elp->len gives us
+ * the address of the first byte after the end of the chunk.
+ * Make the byte immediately before that the guard byte.
+ */
+ *((u_int8_t *)elp + sizeof(size_t) + elp->len - 1) = GUARD_BYTE;
+#endif
+
+#define SHALLOC_FRAGMENT 32
+ /*
+ * If there are at least SHALLOC_FRAGMENT additional bytes of
+ * memory, divide the chunk into two chunks.
+ */
+ if ((u_int8_t *)rp >=
+ (u_int8_t *)&elp->links + SHALLOC_FRAGMENT) {
+ sp = rp;
+ *--sp = elp->len -
+ ((u_int8_t *)rp - (u_int8_t *)&elp->links);
+ elp->len -= *sp + sizeof(size_t);
+ return (0);
+ }
+
+ /*
+ * Otherwise, we return the entire chunk, wasting some amount
+ * of space to keep the list compact. However, because the
+ * address we're returning to the user may not be the address
+		 * of the start of the region for alignment reasons, set each
+		 * size_t-sized word between the chunk's real length field and
+		 * the returned address to a flag value, so that free can step
+		 * back over them and find the real length.
+ */
+#define ILLEGAL_SIZE 1
+ SH_LIST_REMOVE(elp, links, __data);
+ for (sp = rp; (u_int8_t *)--sp >= (u_int8_t *)&elp->links;)
+ *sp = ILLEGAL_SIZE;
+ return (0);
+ }
+
+ return (ENOMEM);
+}
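The address computation above finds the end of a candidate chunk, steps back by the request size, and then rounds down to the previous alignment boundary by masking off the low bits. A small hedged illustration of that round-down step, using uintptr_t in place of the internal db_alignp_t type:

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t addr, align, rounded;

	addr = 0x1007;
	align = 8;			/* must be a power of two */

	/* Clearing the low bits rounds down to the previous boundary. */
	rounded = addr & ~(align - 1);
	printf("%#lx rounded down to %#lx\n",	/* 0x1007 -> 0x1000 */
	    (unsigned long)addr, (unsigned long)rounded);
	return (0);
}
```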
+
+/*
+ * __db_shalloc_free --
+ * Free a shared memory allocation.
+ *
+ * PUBLIC: void __db_shalloc_free __P((void *, void *));
+ */
+void
+__db_shalloc_free(regionp, ptr)
+ void *regionp, *ptr;
+{
+ struct __data *elp, *lastp, *newp;
+ struct __head *hp;
+ size_t free_size, *sp;
+ int merged;
+
+ /*
+ * Step back over flagged length fields to find the beginning of
+ * the object and its real size.
+ */
+ for (sp = (size_t *)ptr; sp[-1] == ILLEGAL_SIZE; --sp)
+ ;
+ ptr = sp;
+
+ newp = (struct __data *)((u_int8_t *)ptr - sizeof(size_t));
+ free_size = newp->len;
+
+#ifdef DIAGNOSTIC
+ /*
+ * The "real size" includes the guard byte; it's just the last
+ * byte in the chunk, and the caller never knew it existed.
+ *
+ * Check it to make sure it hasn't been stomped.
+ */
+ if (*((u_int8_t *)ptr + free_size - 1) != GUARD_BYTE) {
+ /*
+ * Eventually, once we push a DB_ENV handle down to these
+ * routines, we should use the standard output channels.
+ */
+ fprintf(stderr,
+ "Guard byte incorrect during shared memory free.\n");
+ abort();
+ /* NOTREACHED */
+ }
+
+ /* Trash the returned memory (including guard byte). */
+ memset(ptr, CLEAR_BYTE, free_size);
+#endif
+
+ /*
+ * Walk the list, looking for where this entry goes.
+ *
+ * We keep the free list sorted by address so that coalescing is
+ * trivial.
+ *
+ * XXX
+ * Probably worth profiling this to see how expensive it is.
+ */
+ hp = (struct __head *)regionp;
+ for (elp = SH_LIST_FIRST(hp, __data), lastp = NULL;
+ elp != NULL && (void *)elp < (void *)ptr;
+ lastp = elp, elp = SH_LIST_NEXT(elp, links, __data))
+ ;
+
+ /*
+ * Elp is either NULL (we reached the end of the list), or the slot
+ * after the one that's being returned. Lastp is either NULL (we're
+ * returning the first element of the list) or the element before the
+ * one being returned.
+ *
+ * Check for coalescing with the next element.
+ */
+ merged = 0;
+ if ((u_int8_t *)ptr + free_size == (u_int8_t *)elp) {
+ newp->len += elp->len + sizeof(size_t);
+ SH_LIST_REMOVE(elp, links, __data);
+ if (lastp != NULL)
+ SH_LIST_INSERT_AFTER(lastp, newp, links, __data);
+ else
+ SH_LIST_INSERT_HEAD(hp, newp, links, __data);
+ merged = 1;
+ }
+
+ /* Check for coalescing with the previous element. */
+ if (lastp != NULL && (u_int8_t *)lastp +
+ lastp->len + sizeof(size_t) == (u_int8_t *)newp) {
+ lastp->len += newp->len + sizeof(size_t);
+
+ /*
+ * If we have already put the new element into the list take
+ * it back off again because it's just been merged with the
+ * previous element.
+ */
+ if (merged)
+ SH_LIST_REMOVE(newp, links, __data);
+ merged = 1;
+ }
+
+ if (!merged) {
+ if (lastp == NULL)
+ SH_LIST_INSERT_HEAD(hp, newp, links, __data);
+ else
+ SH_LIST_INSERT_AFTER(lastp, newp, links, __data);
+ }
+}
+
+/*
+ * __db_shalloc_count --
+ * Return the amount of memory on the free list.
+ *
+ * PUBLIC: size_t __db_shalloc_count __P((void *));
+ */
+size_t
+__db_shalloc_count(addr)
+ void *addr;
+{
+ struct __data *elp;
+ size_t count;
+
+ count = 0;
+ for (elp = SH_LIST_FIRST((struct __head *)addr, __data);
+ elp != NULL;
+ elp = SH_LIST_NEXT(elp, links, __data))
+ count += elp->len;
+
+ return (count);
+}
+
+/*
+ * __db_shsizeof --
+ * Return the size of a shalloc'd piece of memory.
+ *
+ * !!!
+ * Note that this is from an internal standpoint -- it includes not only
+ * the size of the memory being used, but also the extra alignment bytes
+ * in front and, #ifdef DIAGNOSTIC, the guard byte at the end.
+ *
+ * PUBLIC: size_t __db_shsizeof __P((void *));
+ */
+size_t
+__db_shsizeof(ptr)
+ void *ptr;
+{
+ struct __data *elp;
+ size_t *sp;
+
+ /*
+ * Step back over flagged length fields to find the beginning of
+ * the object and its real size.
+ */
+ for (sp = (size_t *)ptr; sp[-1] == ILLEGAL_SIZE; --sp)
+ ;
+
+ elp = (struct __data *)((u_int8_t *)sp - sizeof(size_t));
+ return (elp->len);
+}
+
+/*
+ * __db_shalloc_dump --
+ *
+ * PUBLIC: void __db_shalloc_dump __P((void *, FILE *));
+ */
+void
+__db_shalloc_dump(addr, fp)
+ void *addr;
+ FILE *fp;
+{
+ struct __data *elp;
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ fprintf(fp, "%s\nMemory free list\n", DB_LINE);
+
+ for (elp = SH_LIST_FIRST((struct __head *)addr, __data);
+ elp != NULL;
+ elp = SH_LIST_NEXT(elp, links, __data))
+ fprintf(fp, "%#lx: %lu\t", (u_long)elp, (u_long)elp->len);
+ fprintf(fp, "\n");
+}
diff --git a/bdb/env/db_shash.c b/bdb/env/db_shash.c
new file mode 100644
index 00000000000..1c33b383098
--- /dev/null
+++ b/bdb/env/db_shash.c
@@ -0,0 +1,124 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_shash.c,v 11.3 2000/02/14 02:59:49 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * Table of good hash values. Up to ~250,000 buckets, we use powers of 2.
+ * After that, we slow the rate of increase by half. For each choice, we
+ * then use a nearby prime number as the hash value.
+ *
+ * If a terabyte is the maximum cache we'll see, and we assume there are
+ * 10 1K buckets on each hash chain, then 107374182 is the maximum number
+ * of buckets we'll ever need.
+ */
+static const struct {
+ u_int32_t power;
+ u_int32_t prime;
+} list[] = {
+ { 64, 67}, /* 2^6 */
+ { 128, 131}, /* 2^7 */
+ { 256, 257}, /* 2^8 */
+ { 512, 521}, /* 2^9 */
+ { 1024, 1031}, /* 2^10 */
+ { 2048, 2053}, /* 2^11 */
+ { 4096, 4099}, /* 2^12 */
+ { 8192, 8191}, /* 2^13 */
+ { 16384, 16381}, /* 2^14 */
+ { 32768, 32771}, /* 2^15 */
+ { 65536, 65537}, /* 2^16 */
+ { 131072, 131071}, /* 2^17 */
+ { 262144, 262147}, /* 2^18 */
+ { 393216, 393209}, /* 2^18 + 2^18/2 */
+ { 524288, 524287}, /* 2^19 */
+ { 786432, 786431}, /* 2^19 + 2^19/2 */
+ { 1048576, 1048573}, /* 2^20 */
+ { 1572864, 1572869}, /* 2^20 + 2^20/2 */
+ { 2097152, 2097169}, /* 2^21 */
+ { 3145728, 3145721}, /* 2^21 + 2^21/2 */
+ { 4194304, 4194301}, /* 2^22 */
+ { 6291456, 6291449}, /* 2^22 + 2^22/2 */
+ { 8388608, 8388617}, /* 2^23 */
+ { 12582912, 12582917}, /* 2^23 + 2^23/2 */
+ { 16777216, 16777213}, /* 2^24 */
+ { 25165824, 25165813}, /* 2^24 + 2^24/2 */
+ { 33554432, 33554393}, /* 2^25 */
+ { 50331648, 50331653}, /* 2^25 + 2^25/2 */
+ { 67108864, 67108859}, /* 2^26 */
+ { 100663296, 100663291}, /* 2^26 + 2^26/2 */
+ { 134217728, 134217757}, /* 2^27 */
+ { 201326592, 201326611}, /* 2^27 + 2^27/2 */
+ { 268435456, 268435459}, /* 2^28 */
+ { 402653184, 402653189}, /* 2^28 + 2^28/2 */
+ { 536870912, 536870909}, /* 2^29 */
+ { 805306368, 805306357}, /* 2^29 + 2^29/2 */
+ {1073741824, 1073741827}, /* 2^30 */
+ {0, 0}
+};
+
+/*
+ * __db_tablesize --
+ * Choose a size for the hash table.
+ *
+ * PUBLIC: int __db_tablesize __P((u_int32_t));
+ */
+int
+__db_tablesize(n_buckets)
+ u_int32_t n_buckets;
+{
+ int i;
+
+ /*
+ * We try to be clever about how big we make the hash tables. Use a
+ * prime number close to the "suggested" number of elements that will
+ * be in the hash table. Use 64 as the minimum hash table size.
+ *
+ * Ref: Sedgewick, Algorithms in C, "Hash Functions"
+ */
+ if (n_buckets < 64)
+ n_buckets = 64;
+
+ for (i = 0;; ++i) {
+ if (list[i].power == 0) {
+ --i;
+ break;
+ }
+ if (list[i].power >= n_buckets)
+ break;
+ }
+ return (list[i].prime);
+}
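As a hedged illustration of how this lookup behaves, the sketch below copies only the first few table entries (so it covers small inputs only) and shows that a suggested size of 100 maps to the prime 131 (via 128) and 1,000 maps to 1,031 (via 1,024):

```c
#include <stdio.h>

static const struct {
	unsigned int power;
	unsigned int prime;
} toy_list[] = {
	{   64,   67 },
	{  128,  131 },
	{  256,  257 },
	{  512,  521 },
	{ 1024, 1031 },
	{    0,    0 }
};

static unsigned int
toy_tablesize(unsigned int n_buckets)
{
	int i;

	if (n_buckets < 64)
		n_buckets = 64;

	for (i = 0;; ++i) {
		if (toy_list[i].power == 0) {
			--i;			/* cap at the last entry */
			break;
		}
		if (toy_list[i].power >= n_buckets)
			break;
	}
	return (toy_list[i].prime);
}

int
main(void)
{
	printf("100 -> %u, 1000 -> %u\n",
	    toy_tablesize(100), toy_tablesize(1000));
	return (0);
}
```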
+
+/*
+ * __db_hashinit --
+ * Initialize a hash table that resides in shared memory.
+ *
+ * PUBLIC: void __db_hashinit __P((void *, u_int32_t));
+ */
+void
+__db_hashinit(begin, nelements)
+ void *begin;
+ u_int32_t nelements;
+{
+ u_int32_t i;
+ SH_TAILQ_HEAD(hash_head) *headp;
+
+ headp = (struct hash_head *)begin;
+
+ for (i = 0; i < nelements; i++, headp++)
+ SH_TAILQ_INIT(headp);
+}
diff --git a/bdb/env/env_method.c b/bdb/env/env_method.c
new file mode 100644
index 00000000000..c5f45df7124
--- /dev/null
+++ b/bdb/env/env_method.c
@@ -0,0 +1,461 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_method.c,v 11.31 2000/11/30 00:58:35 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+/*
+ * This is the file that initializes the global array. Do it this way because
+ * people keep changing one without changing the other. Having declaration and
+ * initialization in one file will hopefully fix that.
+ */
+#define DB_INITIALIZE_DB_GLOBALS 1
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "lock.h"
+#include "log.h"
+#include "mp.h"
+#include "txn.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static void __dbenv_err __P((const DB_ENV *, int, const char *, ...));
+static void __dbenv_errx __P((const DB_ENV *, const char *, ...));
+static int __dbenv_set_data_dir __P((DB_ENV *, const char *));
+static void __dbenv_set_errcall __P((DB_ENV *, void (*)(const char *, char *)));
+static void __dbenv_set_errfile __P((DB_ENV *, FILE *));
+static void __dbenv_set_errpfx __P((DB_ENV *, const char *));
+static int __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+static int __dbenv_set_flags __P((DB_ENV *, u_int32_t, int));
+static int __dbenv_set_mutexlocks __P((DB_ENV *, int));
+static int __dbenv_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+static int __dbenv_set_recovery_init __P((DB_ENV *, int (*)(DB_ENV *)));
+static int __dbenv_set_server_noclnt
+ __P((DB_ENV *, char *, long, long, u_int32_t));
+static int __dbenv_set_shm_key __P((DB_ENV *, long));
+static int __dbenv_set_tmp_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_verbose __P((DB_ENV *, u_int32_t, int));
+
+/*
+ * db_env_create --
+ * DB_ENV constructor.
+ */
+int
+db_env_create(dbenvpp, flags)
+ DB_ENV **dbenvpp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * !!!
+ * We can't call the flags-checking routines, we don't have an
+ * environment yet.
+ */
+ if (flags != 0 && flags != DB_CLIENT)
+ return (EINVAL);
+
+ if ((ret = __os_calloc(NULL, 1, sizeof(*dbenv), &dbenv)) != 0)
+ return (ret);
+
+#ifdef HAVE_RPC
+ if (LF_ISSET(DB_CLIENT))
+ F_SET(dbenv, DB_ENV_RPCCLIENT);
+#endif
+ ret = __dbenv_init(dbenv);
+
+ if (ret != 0) {
+ __os_free(dbenv, sizeof(*dbenv));
+ return (ret);
+ }
+
+ *dbenvpp = dbenv;
+ return (0);
+}
+
+/*
+ * __dbenv_init --
+ * Initialize a DB_ENV structure.
+ *
+ * PUBLIC: int __dbenv_init __P((DB_ENV *));
+ */
+int
+__dbenv_init(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+	 * Set up the methods that are the same in both the normal and RPC cases.
+ */
+ dbenv->err = __dbenv_err;
+ dbenv->errx = __dbenv_errx;
+ dbenv->set_errcall = __dbenv_set_errcall;
+ dbenv->set_errfile = __dbenv_set_errfile;
+ dbenv->set_errpfx = __dbenv_set_errpfx;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->close = __dbcl_env_close;
+ dbenv->open = __dbcl_env_open;
+ dbenv->remove = __dbcl_env_remove;
+ dbenv->set_data_dir = __dbcl_set_data_dir;
+ dbenv->set_feedback = __dbcl_env_set_feedback;
+ dbenv->set_flags = __dbcl_env_flags;
+ dbenv->set_mutexlocks = __dbcl_set_mutex_locks;
+ dbenv->set_paniccall = __dbcl_env_paniccall;
+ dbenv->set_recovery_init = __dbcl_set_recovery_init;
+ dbenv->set_server = __dbcl_envserver;
+ dbenv->set_shm_key = __dbcl_set_shm_key;
+ dbenv->set_tmp_dir = __dbcl_set_tmp_dir;
+ dbenv->set_verbose = __dbcl_set_verbose;
+ } else {
+#endif
+ dbenv->close = __dbenv_close;
+ dbenv->open = __dbenv_open;
+ dbenv->remove = __dbenv_remove;
+ dbenv->set_data_dir = __dbenv_set_data_dir;
+ dbenv->set_feedback = __dbenv_set_feedback;
+ dbenv->set_flags = __dbenv_set_flags;
+ dbenv->set_mutexlocks = __dbenv_set_mutexlocks;
+ dbenv->set_paniccall = __dbenv_set_paniccall;
+ dbenv->set_recovery_init = __dbenv_set_recovery_init;
+ dbenv->set_server = __dbenv_set_server_noclnt;
+ dbenv->set_shm_key = __dbenv_set_shm_key;
+ dbenv->set_tmp_dir = __dbenv_set_tmp_dir;
+ dbenv->set_verbose = __dbenv_set_verbose;
+#ifdef HAVE_RPC
+ }
+#endif
+ dbenv->shm_key = INVALID_REGION_SEGID;
+ dbenv->db_mutexlocks = 1;
+
+ __log_dbenv_create(dbenv); /* Subsystem specific. */
+ __lock_dbenv_create(dbenv);
+ __memp_dbenv_create(dbenv);
+ __txn_dbenv_create(dbenv);
+
+ return (0);
+}
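A hedged sketch of how an application would drive the handle once __dbenv_init has installed the method table; it assumes the public <db.h> header from this tree, and the home directory and error prefix are placeholders (the directory must already exist):

```c
#include <stdio.h>
#include <db.h>

int
main(void)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0) {
		fprintf(stderr, "db_env_create failed: %d\n", ret);
		return (1);
	}

	/* These slots were filled in by __dbenv_init. */
	dbenv->set_errfile(dbenv, stderr);
	dbenv->set_errpfx(dbenv, "example");

	if ((ret = dbenv->open(dbenv,
	    "/tmp/dbhome", DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
		dbenv->err(dbenv, ret, "DBENV->open");
		(void)dbenv->close(dbenv, 0);
		return (1);
	}
	return (dbenv->close(dbenv, 0) == 0 ? 0 : 1);
}
```

The same handle also accepts the configuration methods defined later in this file (for example, set_flags with DB_TXN_NOSYNC), provided they are called at the point in the handle's lifetime that the particular method allows.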
+
+/*
+ * __dbenv_err --
+ * Error message, including the standard error string.
+ */
+static void
+#ifdef __STDC__
+__dbenv_err(const DB_ENV *dbenv, int error, const char *fmt, ...)
+#else
+__dbenv_err(dbenv, error, fmt, va_alist)
+ const DB_ENV *dbenv;
+ int error;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ va_list ap;
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ __db_real_err(dbenv, error, 1, 1, fmt, ap);
+
+ va_end(ap);
+}
+
+/*
+ * __dbenv_errx --
+ * Error message.
+ */
+static void
+#ifdef __STDC__
+__dbenv_errx(const DB_ENV *dbenv, const char *fmt, ...)
+#else
+__dbenv_errx(dbenv, fmt, va_alist)
+ const DB_ENV *dbenv;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ va_list ap;
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ __db_real_err(dbenv, 0, 0, 1, fmt, ap);
+
+ va_end(ap);
+}
+
+static int
+__dbenv_set_flags(dbenv, flags, onoff)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int onoff;
+{
+#define OK_FLAGS (DB_CDB_ALLDB | DB_NOMMAP | DB_TXN_NOSYNC)
+
+ if (LF_ISSET(~OK_FLAGS))
+ return (__db_ferr(dbenv, "DBENV->set_flags", 0));
+
+ if (LF_ISSET(DB_CDB_ALLDB)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_CDB_ALLDB");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_CDB_ALLDB);
+ else
+ F_CLR(dbenv, DB_ENV_CDB_ALLDB);
+ }
+ if (LF_ISSET(DB_NOMMAP)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOMMAP);
+ else
+ F_CLR(dbenv, DB_ENV_NOMMAP);
+ }
+ if (LF_ISSET(DB_TXN_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_NOSYNC);
+ }
+ return (0);
+}
+
+static int
+__dbenv_set_data_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ int ret;
+
+#define DATA_INIT_CNT 20 /* Start with 20 data slots. */
+ if (dbenv->db_data_dir == NULL) {
+ if ((ret = __os_calloc(dbenv, DATA_INIT_CNT,
+ sizeof(char **), &dbenv->db_data_dir)) != 0)
+ return (ret);
+ dbenv->data_cnt = DATA_INIT_CNT;
+ } else if (dbenv->data_next == dbenv->data_cnt - 1) {
+ dbenv->data_cnt *= 2;
+ if ((ret = __os_realloc(dbenv,
+ dbenv->data_cnt * sizeof(char **),
+ NULL, &dbenv->db_data_dir)) != 0)
+ return (ret);
+ }
+ return (__os_strdup(dbenv,
+ dir, &dbenv->db_data_dir[dbenv->data_next++]));
+}
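A hedged standalone sketch of the same grow-by-doubling pattern, using malloc/realloc in place of the internal __os_* wrappers; the add_dir name and the initial slot count are illustrative only:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INIT_CNT 20			/* start with 20 slots, as above */

static char **dirs;
static int dir_cnt, dir_next;

static int
add_dir(const char *dir)
{
	char **np;

	if (dirs == NULL) {
		if ((dirs = calloc(INIT_CNT, sizeof(char *))) == NULL)
			return (ENOMEM);
		dir_cnt = INIT_CNT;
	} else if (dir_next == dir_cnt - 1) {	/* keep a NULL terminator */
		if ((np = realloc(dirs,
		    dir_cnt * 2 * sizeof(char *))) == NULL)
			return (ENOMEM);
		memset(np + dir_cnt, 0, dir_cnt * sizeof(char *));
		dirs = np;
		dir_cnt *= 2;
	}
	return ((dirs[dir_next++] = strdup(dir)) == NULL ? ENOMEM : 0);
}

int
main(void)
{
	(void)add_dir("/a/data1");
	(void)add_dir("/b/data2");
	printf("%d directories stored\n", dir_next);
	return (0);
}
```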
+
+static void
+__dbenv_set_errcall(dbenv, errcall)
+ DB_ENV *dbenv;
+ void (*errcall) __P((const char *, char *));
+{
+ dbenv->db_errcall = errcall;
+}
+
+static void
+__dbenv_set_errfile(dbenv, errfile)
+ DB_ENV *dbenv;
+ FILE *errfile;
+{
+ dbenv->db_errfile = errfile;
+}
+
+static void
+__dbenv_set_errpfx(dbenv, errpfx)
+ DB_ENV *dbenv;
+ const char *errpfx;
+{
+ dbenv->db_errpfx = errpfx;
+}
+
+static int
+__dbenv_set_feedback(dbenv, feedback)
+ DB_ENV *dbenv;
+ void (*feedback) __P((DB_ENV *, int, int));
+{
+ dbenv->db_feedback = feedback;
+ return (0);
+}
+
+static int
+__dbenv_set_mutexlocks(dbenv, onoff)
+ DB_ENV *dbenv;
+ int onoff;
+{
+ dbenv->db_mutexlocks = onoff;
+ return (0);
+}
+
+static int
+__dbenv_set_paniccall(dbenv, paniccall)
+ DB_ENV *dbenv;
+ void (*paniccall) __P((DB_ENV *, int));
+{
+ dbenv->db_paniccall = paniccall;
+ return (0);
+}
+
+static int
+__dbenv_set_recovery_init(dbenv, recovery_init)
+ DB_ENV *dbenv;
+ int (*recovery_init) __P((DB_ENV *));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_recovery_init");
+
+ dbenv->db_recovery_init = recovery_init;
+
+ return (0);
+}
+
+static int
+__dbenv_set_shm_key(dbenv, shm_key)
+ DB_ENV *dbenv;
+ long shm_key; /* !!!: really a key_t. */
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_shm_key");
+
+ dbenv->shm_key = shm_key;
+ return (0);
+}
+
+static int
+__dbenv_set_tmp_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ if (dbenv->db_tmp_dir != NULL)
+ __os_freestr(dbenv->db_tmp_dir);
+ return (__os_strdup(dbenv, dir, &dbenv->db_tmp_dir));
+}
+
+static int
+__dbenv_set_verbose(dbenv, which, onoff)
+ DB_ENV *dbenv;
+ u_int32_t which;
+ int onoff;
+{
+ switch (which) {
+ case DB_VERB_CHKPOINT:
+ case DB_VERB_DEADLOCK:
+ case DB_VERB_RECOVERY:
+ case DB_VERB_WAITSFOR:
+ if (onoff)
+ FLD_SET(dbenv->verbose, which);
+ else
+ FLD_CLR(dbenv->verbose, which);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * __db_mi_env --
+ * Method illegally called with public environment.
+ *
+ * PUBLIC: int __db_mi_env __P((DB_ENV *, const char *));
+ */
+int
+__db_mi_env(dbenv, name)
+ DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv, "%s: method meaningless in shared environment", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_mi_open --
+ * Method illegally called after open.
+ *
+ * PUBLIC: int __db_mi_open __P((DB_ENV *, const char *, int));
+ */
+int
+__db_mi_open(dbenv, name, after)
+ DB_ENV *dbenv;
+ const char *name;
+ int after;
+{
+ __db_err(dbenv,
+ "%s: method meaningless %s open", name, after ? "after" : "before");
+ return (EINVAL);
+}
+
+/*
+ * __db_env_config --
+ * Method or function called without subsystem being configured.
+ *
+ * PUBLIC: int __db_env_config __P((DB_ENV *, int));
+ */
+int
+__db_env_config(dbenv, subsystem)
+ DB_ENV *dbenv;
+ int subsystem;
+{
+ const char *name;
+
+ switch (subsystem) {
+ case DB_INIT_LOCK:
+ name = "lock";
+ break;
+ case DB_INIT_LOG:
+ name = "log";
+ break;
+ case DB_INIT_MPOOL:
+ name = "mpool";
+ break;
+ case DB_INIT_TXN:
+ name = "txn";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ __db_err(dbenv,
+ "%s interface called with environment not configured for that subsystem",
+ name);
+ return (EINVAL);
+}
+
+static int
+__dbenv_set_server_noclnt(dbenv, host, tsec, ssec, flags)
+ DB_ENV *dbenv;
+ char *host;
+ long tsec, ssec;
+ u_int32_t flags;
+{
+ COMPQUIET(host, NULL);
+ COMPQUIET(tsec, 0);
+ COMPQUIET(ssec, 0);
+ COMPQUIET(flags, 0);
+
+	__db_err(dbenv, "set_server method meaningless in non-RPC environment");
+ return (__db_eopnotsup(dbenv));
+}
diff --git a/bdb/env/env_open.c b/bdb/env/env_open.c
new file mode 100644
index 00000000000..2007b4266c0
--- /dev/null
+++ b/bdb/env/env_open.c
@@ -0,0 +1,1064 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_open.c,v 11.34 2000/12/21 19:20:00 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+#include "lock.h"
+#include "log.h"
+#include "mp.h"
+#include "txn.h"
+#include "clib_ext.h"
+
+static int __dbenv_config __P((DB_ENV *, const char *, u_int32_t));
+static int __dbenv_refresh __P((DB_ENV *));
+static int __db_home __P((DB_ENV *, const char *, u_int32_t));
+static int __db_parse __P((DB_ENV *, char *));
+static int __db_tmp_open __P((DB_ENV *, u_int32_t, char *, DB_FH *));
+
+/*
+ * db_version --
+ * Return version information.
+ */
+char *
+db_version(majverp, minverp, patchp)
+ int *majverp, *minverp, *patchp;
+{
+ if (majverp != NULL)
+ *majverp = DB_VERSION_MAJOR;
+ if (minverp != NULL)
+ *minverp = DB_VERSION_MINOR;
+ if (patchp != NULL)
+ *patchp = DB_VERSION_PATCH;
+ return ((char *)DB_VERSION_STRING);
+}
+
+/*
+ * __dbenv_open --
+ * Initialize an environment.
+ *
+ * PUBLIC: int __dbenv_open __P((DB_ENV *, const char *, u_int32_t, int));
+ */
+int
+__dbenv_open(dbenv, db_home, flags, mode)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+ int mode;
+{
+ DB_ENV *rm_dbenv;
+ int ret;
+ u_int32_t init_flags;
+
+#undef OKFLAGS
+#define OKFLAGS \
+ DB_CREATE | DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG | \
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_JOINENV | DB_LOCKDOWN | \
+ DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL | DB_SYSTEM_MEM | \
+ DB_THREAD | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+#undef OKFLAGS_CDB
+#define OKFLAGS_CDB \
+ DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL | DB_LOCKDOWN | \
+ DB_PRIVATE | DB_SYSTEM_MEM | DB_THREAD | \
+ DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+
+ /*
+ * Flags saved in the init_flags field of the environment, representing
+ * flags to DBENV->set_flags and DBENV->open that need to be set.
+ */
+#define DB_INITENV_CDB 0x0001 /* DB_INIT_CDB */
+#define DB_INITENV_CDB_ALLDB 0x0002 /* DB_INIT_CDB_ALLDB */
+#define DB_INITENV_LOCK 0x0004 /* DB_INIT_LOCK */
+#define DB_INITENV_LOG 0x0008 /* DB_INIT_LOG */
+#define DB_INITENV_MPOOL 0x0010 /* DB_INIT_MPOOL */
+#define DB_INITENV_TXN 0x0020 /* DB_INIT_TXN */
+
+ if ((ret = __db_fchk(dbenv, "DBENV->open", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_INIT_CDB) &&
+ (ret = __db_fchk(dbenv, "DBENV->open", flags, OKFLAGS_CDB)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv,
+ "DBENV->open", flags, DB_PRIVATE, DB_SYSTEM_MEM)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "DBENV->open", flags, DB_JOINENV,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_PRIVATE)) != 0)
+ return (ret);
+
+ /*
+ * If we're doing recovery, destroy the environment so that we create
+ * all the regions from scratch. I'd like to reuse already created
+ * regions, but that's hard. We would have to create the environment
+ * region from scratch, at least, as we have no way of knowing if its
+ * linked lists are corrupted.
+ *
+ * I suppose we could set flags while modifying those links, but that
+ * is going to be difficult to get right. The major concern I have
+ * is if the application stomps the environment with a rogue pointer.
+ * We have no way of detecting that, and we could be forced into a
+ * situation where we start up and then crash, repeatedly.
+ *
+ * Note that we do not check any flags like DB_PRIVATE before calling
+ * remove. We don't care if the current environment was private or
+ * not, we just want to nail any files that are left-over for whatever
+ * reason, from whatever session.
+ */
+ if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL)) {
+ if ((ret = db_env_create(&rm_dbenv, 0)) != 0)
+ return (ret);
+ if ((ret = dbenv->remove(rm_dbenv, db_home, DB_FORCE)) != 0)
+ return (ret);
+ }
+
+ /* Initialize the DB_ENV structure. */
+ if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
+ goto err;
+
+ /* Convert the DBENV->open flags to internal flags. */
+ if (LF_ISSET(DB_CREATE))
+ F_SET(dbenv, DB_ENV_CREATE);
+ if (LF_ISSET(DB_LOCKDOWN))
+ F_SET(dbenv, DB_ENV_LOCKDOWN);
+ if (LF_ISSET(DB_PRIVATE))
+ F_SET(dbenv, DB_ENV_PRIVATE);
+ if (LF_ISSET(DB_SYSTEM_MEM))
+ F_SET(dbenv, DB_ENV_SYSTEM_MEM);
+ if (LF_ISSET(DB_THREAD))
+ F_SET(dbenv, DB_ENV_THREAD);
+
+ /* Default permissions are read-write for both owner and group. */
+ dbenv->db_mode = mode == 0 ? __db_omode("rwrw--") : mode;
+
+ /*
+	 * Create/join the environment.  We pass in the flags that will be
+	 * of interest to an environment joining later; if we're not the ones
+	 * doing the create, we instead pull out whatever was stored when the
+	 * environment was created.
+ */
+ init_flags = 0;
+ init_flags |= (LF_ISSET(DB_INIT_CDB) ? DB_INITENV_CDB : 0);
+ init_flags |= (LF_ISSET(DB_INIT_LOCK) ? DB_INITENV_LOCK : 0);
+ init_flags |= (LF_ISSET(DB_INIT_LOG) ? DB_INITENV_LOG : 0);
+ init_flags |= (LF_ISSET(DB_INIT_MPOOL) ? DB_INITENV_MPOOL : 0);
+ init_flags |= (LF_ISSET(DB_INIT_TXN) ? DB_INITENV_TXN : 0);
+ init_flags |=
+ (F_ISSET(dbenv, DB_ENV_CDB_ALLDB) ? DB_INITENV_CDB_ALLDB : 0);
+
+ if ((ret = __db_e_attach(dbenv, &init_flags)) != 0)
+ goto err;
+
+ /*
+ * __db_e_attach will return the saved init_flags field, which
+ * contains the DB_INIT_* flags used when we were created.
+ */
+ if (LF_ISSET(DB_JOINENV)) {
+ LF_CLR(DB_JOINENV);
+
+ LF_SET((init_flags & DB_INITENV_CDB) ? DB_INIT_CDB : 0);
+ LF_SET((init_flags & DB_INITENV_LOCK) ? DB_INIT_LOCK : 0);
+ LF_SET((init_flags & DB_INITENV_LOG) ? DB_INIT_LOG : 0);
+ LF_SET((init_flags & DB_INITENV_MPOOL) ? DB_INIT_MPOOL : 0);
+ LF_SET((init_flags & DB_INITENV_TXN) ? DB_INIT_TXN : 0);
+
+ if (LF_ISSET(DB_INITENV_CDB_ALLDB) &&
+ (ret = dbenv->set_flags(dbenv, DB_CDB_ALLDB, 1)) != 0)
+ goto err;
+ }
+
+ /* Initialize for CDB product. */
+ if (LF_ISSET(DB_INIT_CDB)) {
+ LF_SET(DB_INIT_LOCK);
+ F_SET(dbenv, DB_ENV_CDB);
+ }
+
+ /* Initialize the DB list, and its mutex if appropriate. */
+ LIST_INIT(&dbenv->dblist);
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ if ((ret = __db_mutex_alloc(dbenv,
+ dbenv->reginfo, (MUTEX **)&dbenv->dblist_mutexp)) != 0)
+ return (ret);
+ if ((ret = __db_mutex_init(dbenv,
+ dbenv->dblist_mutexp, 0, MUTEX_THREAD)) != 0) {
+ __db_mutex_free(dbenv, dbenv->reginfo,
+ dbenv->dblist_mutexp);
+ return (ret);
+ }
+ }
+
+ /*
+ * Initialize the subsystems. Transactions imply logging but do not
+ * imply locking. While almost all applications want both locking
+ * and logging, it would not be unreasonable for a single threaded
+ * process to want transactions for atomicity guarantees, but not
+ * necessarily need concurrency.
+ */
+ if (LF_ISSET(DB_INIT_MPOOL))
+ if ((ret = __memp_open(dbenv)) != 0)
+ goto err;
+ if (LF_ISSET(DB_INIT_LOG | DB_INIT_TXN))
+ if ((ret = __log_open(dbenv)) != 0)
+ goto err;
+ if (LF_ISSET(DB_INIT_LOCK))
+ if ((ret = __lock_open(dbenv)) != 0)
+ goto err;
+ if (LF_ISSET(DB_INIT_TXN)) {
+ if ((ret = __txn_open(dbenv)) != 0)
+ goto err;
+
+ /*
+ * If the application is running with transactions, initialize
+ * the function tables.
+ */
+ if ((ret = __bam_init_recover(dbenv)) != 0)
+ goto err;
+ if ((ret = __crdel_init_recover(dbenv)) != 0)
+ goto err;
+ if ((ret = __db_init_recover(dbenv)) != 0)
+ goto err;
+ if ((ret = __ham_init_recover(dbenv)) != 0)
+ goto err;
+ if ((ret = __log_init_recover(dbenv)) != 0)
+ goto err;
+ if ((ret = __qam_init_recover(dbenv)) != 0)
+ goto err;
+ if ((ret = __txn_init_recover(dbenv)) != 0)
+ goto err;
+
+ /*
+ * If the application specified their own recovery
+ * initialization function, call it.
+ */
+ if (dbenv->db_recovery_init != NULL &&
+ (ret = dbenv->db_recovery_init(dbenv)) != 0)
+ goto err;
+
+ /* Perform recovery for any previous run. */
+ if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) &&
+ (ret = __db_apprec(dbenv,
+ LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))) != 0)
+ goto err;
+ }
+ return (0);
+
+err: (void)__dbenv_refresh(dbenv);
+ return (ret);
+}
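A hedged sketch of a typical transactional open exercising the path above: DB_INIT_TXN pulls in logging, DB_INIT_LOCK is requested explicitly, and DB_RECOVER asks for normal recovery at startup. The function name is illustrative and the home path is supplied by the caller and must already exist.

```c
#include <db.h>

int
open_txn_env(const char *home, DB_ENV **dbenvp)
{
	DB_ENV *dbenv;
	u_int32_t flags;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);

	flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
	    DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER;
	if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
		dbenv->err(dbenv, ret, "DBENV->open: %s", home);
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}

	*dbenvp = dbenv;
	return (0);
}
```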
+
+/*
+ * __dbenv_remove --
+ * Discard an environment.
+ *
+ * PUBLIC: int __dbenv_remove __P((DB_ENV *, const char *, u_int32_t));
+ */
+int
+__dbenv_remove(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ int ret, t_ret;
+
+#undef OKFLAGS
+#define OKFLAGS \
+ DB_FORCE | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DBENV->remove", flags, OKFLAGS)) != 0)
+ goto err;
+
+ /*
+ * A hard-to-debug error is calling DBENV->remove after open. That's
+ * not legal. You have to close the original, already opened handle
+ * and then allocate a new DBENV handle to use for DBENV->remove.
+ */
+ if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ __db_err(dbenv,
+ "DBENV handle opened, not usable for remove method.");
+ return (EINVAL);
+ }
+
+ /* Initialize the DB_ENV structure. */
+ if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
+ goto err;
+
+ /* Remove the environment. */
+ ret = __db_e_remove(dbenv, LF_ISSET(DB_FORCE) ? 1 : 0);
+
+ /* Discard any resources we've acquired. */
+err: if ((t_ret = __dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ memset(dbenv, CLEAR_BYTE, sizeof(DB_ENV));
+ __os_free(dbenv, sizeof(DB_ENV));
+
+ return (ret);
+}
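As the comment above notes, DBENV->remove must be given a handle that has never been opened, and the handle is consumed by the call. A hedged sketch of that usage, with the home path supplied by the caller:

```c
#include <db.h>

int
remove_env(const char *home)
{
	DB_ENV *dbenv;
	int ret;

	/* A fresh handle, used only for the removal. */
	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);

	/* The handle is discarded by remove, even on failure. */
	return (dbenv->remove(dbenv, home, 0));
}
```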
+
+/*
+ * __dbenv_config --
+ * Initialize the DB_ENV structure.
+ */
+static int
+__dbenv_config(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ FILE *fp;
+ int ret;
+ char *lp, buf[MAXPATHLEN * 2];
+
+ /* Set the database home. */
+ if ((ret = __db_home(dbenv, db_home, flags)) != 0)
+ return (ret);
+
+ /*
+ * Parse the config file.
+ *
+ * !!!
+ * Don't use sprintf(3)/snprintf(3) -- the former is dangerous, and
+	 * the latter isn't standard, and we're manipulating strings handed
+	 * to us by the application.
+ */
+ if (dbenv->db_home != NULL) {
+#define CONFIG_NAME "/DB_CONFIG"
+ if (strlen(dbenv->db_home) +
+ strlen(CONFIG_NAME) + 1 > sizeof(buf)) {
+ ret = ENAMETOOLONG;
+ return (ret);
+ }
+ (void)strcpy(buf, dbenv->db_home);
+ (void)strcat(buf, CONFIG_NAME);
+ if ((fp = fopen(buf, "r")) != NULL) {
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if ((lp = strchr(buf, '\n')) == NULL) {
+ __db_err(dbenv,
+ "%s: line too long", CONFIG_NAME);
+ (void)fclose(fp);
+ ret = EINVAL;
+ return (ret);
+ }
+ *lp = '\0';
+ if (buf[0] == '\0' ||
+ buf[0] == '#' || isspace((int)buf[0]))
+ continue;
+
+ if ((ret = __db_parse(dbenv, buf)) != 0) {
+ (void)fclose(fp);
+ return (ret);
+ }
+ }
+ (void)fclose(fp);
+ }
+ }
+
+ /* Set up the tmp directory path. */
+ if (dbenv->db_tmp_dir == NULL && (ret = __os_tmpdir(dbenv, flags)) != 0)
+ return (ret);
+
+ /*
+ * The locking file descriptor is rarely on. Set the fd to -1, not
+ * because it's ever tested, but to make sure we catch mistakes.
+ */
+ if ((ret =
+ __os_calloc(dbenv,
+ 1, sizeof(*dbenv->lockfhp), &dbenv->lockfhp)) != 0)
+ return (ret);
+ dbenv->lockfhp->fd = -1;
+
+ /*
+ * Flag that the DB_ENV structure has been initialized. Note, this
+ * must be set before calling into the subsystems as it's used during
+ * file naming.
+ */
+ F_SET(dbenv, DB_ENV_OPEN_CALLED);
+
+ return (0);
+}
+
+/*
+ * __dbenv_close --
+ * DB_ENV destructor.
+ *
+ * PUBLIC: int __dbenv_close __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbenv_close(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ PANIC_CHECK(dbenv);
+
+ ret = __dbenv_refresh(dbenv);
+
+ /* Discard the structure if we allocated it. */
+ if (!F_ISSET(dbenv, DB_ENV_USER_ALLOC)) {
+ memset(dbenv, CLEAR_BYTE, sizeof(DB_ENV));
+ __os_free(dbenv, sizeof(DB_ENV));
+ }
+
+ return (ret);
+}
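
A companion sketch of the normal handle life cycle around the constructor and
destructor above: create, open, close. The home directory and flag set are only
examples, and the assumption that a failed open still leaves a closable handle
is the sketch's, not the source's.

    #include <stdio.h>
    #include <db.h>

    /* Sketch: create a DB_ENV handle, open it, and close it exactly once. */
    int
    env_lifecycle(void)
    {
    	DB_ENV *dbenv;
    	int ret, t_ret;

    	if ((ret = db_env_create(&dbenv, 0)) != 0)
    		return (ret);
    	if ((ret = dbenv->open(dbenv, "/var/dbhome",
    	    DB_CREATE | DB_INIT_MPOOL, 0)) != 0)
    		fprintf(stderr, "DBENV->open: %s\n", db_strerror(ret));

    	/* Close in either case; the handle may not be reused after open. */
    	if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
    		ret = t_ret;
    	return (ret);
    }
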
+
+/*
+ * __dbenv_refresh --
+ * Refresh the DB_ENV structure, releasing any allocated resources.
+ */
+static int
+__dbenv_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret, t_ret;
+ char **p;
+
+ ret = 0;
+
+ /*
+ * Close subsystems, in the reverse order they were opened (txn
+ * must be first, it may want to discard locks and flush the log).
+ */
+ if (TXN_ON(dbenv)) {
+ if ((t_ret = __txn_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ if (LOCKING_ON(dbenv)) {
+ if ((t_ret = __lock_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+ __lock_dbenv_close(dbenv);
+
+ if (LOGGING_ON(dbenv)) {
+ if ((t_ret = __log_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ if (MPOOL_ON(dbenv)) {
+ if ((t_ret = __memp_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* Discard DB list and its mutex. */
+ LIST_INIT(&dbenv->dblist);
+ if (dbenv->dblist_mutexp != NULL)
+ __db_mutex_free(dbenv, dbenv->reginfo, dbenv->dblist_mutexp);
+
+ /* Detach from the region. */
+ if (dbenv->reginfo != NULL) {
+ if ((t_ret = __db_e_detach(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * !!!
+ * Don't free dbenv->reginfo or set the reference to NULL,
+ * that was done by __db_e_detach().
+ */
+ }
+
+ /* Clean up the structure. */
+ dbenv->db_panic = 0;
+
+ if (dbenv->db_home != NULL) {
+ __os_freestr(dbenv->db_home);
+ dbenv->db_home = NULL;
+ }
+ if (dbenv->db_log_dir != NULL) {
+ __os_freestr(dbenv->db_log_dir);
+ dbenv->db_log_dir = NULL;
+ }
+ if (dbenv->db_tmp_dir != NULL) {
+ __os_freestr(dbenv->db_tmp_dir);
+ dbenv->db_tmp_dir = NULL;
+ }
+ if (dbenv->db_data_dir != NULL) {
+ for (p = dbenv->db_data_dir; *p != NULL; ++p)
+ __os_freestr(*p);
+ __os_free(dbenv->db_data_dir,
+ dbenv->data_cnt * sizeof(char **));
+ dbenv->db_data_dir = NULL;
+ }
+ dbenv->data_cnt = dbenv->data_next = 0;
+
+ dbenv->db_mode = 0;
+
+ if (dbenv->lockfhp != NULL) {
+ __os_free(dbenv->lockfhp, sizeof(*dbenv->lockfhp));
+ dbenv->lockfhp = NULL;
+ }
+
+ if (dbenv->dtab != NULL) {
+ __os_free(dbenv->dtab,
+ dbenv->dtab_size * sizeof(dbenv->dtab[0]));
+ dbenv->dtab = NULL;
+ dbenv->dtab_size = 0;
+ }
+
+ dbenv->mp_mmapsize = 0;
+ dbenv->links.tqe_next = NULL;
+ dbenv->links.tqe_prev = NULL;
+ dbenv->xa_rmid = 0;
+ dbenv->xa_txn = 0;
+
+ F_CLR(dbenv, ~(DB_ENV_STANDALONE | DB_ENV_USER_ALLOC));
+
+ return (ret);
+}
+
+#define DB_ADDSTR(add) { \
+ if ((add) != NULL) { \
+ /* If leading slash, start over. */ \
+ if (__os_abspath(add)) { \
+ p = str; \
+ slash = 0; \
+ } \
+ /* Append to the current string. */ \
+ len = strlen(add); \
+ if (slash) \
+ *p++ = PATH_SEPARATOR[0]; \
+ memcpy(p, add, len); \
+ p += len; \
+ slash = strchr(PATH_SEPARATOR, p[-1]) == NULL; \
+ } \
+}
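
The DB_ADDSTR macro above concatenates path components, restarting the buffer
whenever a component is itself an absolute path. Below is a standalone sketch
of the same idea in plain C; is_abspath() is a hypothetical stand-in for
__os_abspath(), and the single-character '/' checks ignore the multi-character
PATH_SEPARATOR handling in the real macro.

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for __os_abspath(): treat "/..." as absolute. */
    static int
    is_abspath(const char *p)
    {
    	return (p[0] == '/');
    }

    /* Append components; an absolute component resets the buffer. */
    static void
    build_path(char *buf, const char **parts, int nparts)
    {
    	int i;
    	char *p;

    	p = buf;
    	for (i = 0; i < nparts; ++i) {
    		if (parts[i] == NULL)
    			continue;
    		if (is_abspath(parts[i]))
    			p = buf;		/* Leading slash: start over. */
    		if (p != buf && p[-1] != '/')
    			*p++ = '/';		/* Separate components. */
    		(void)strcpy(p, parts[i]);
    		p += strlen(parts[i]);
    	}
    	*p = '\0';
    }

    int
    main()
    {
    	const char *parts[] = { "home", "data", "/tmp", "file.db" };
    	char buf[256];

    	build_path(buf, parts, 4);
    	printf("%s\n", buf);		/* Prints "/tmp/file.db". */
    	return (0);
    }
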
+
+/*
+ * __db_appname --
+ * Given an optional DB environment, directory and file name and type
+ * of call, build a path based on the DBENV->open rules, and return
+ * it in allocated space.
+ *
+ * PUBLIC: int __db_appname __P((DB_ENV *, APPNAME,
+ * PUBLIC: const char *, const char *, u_int32_t, DB_FH *, char **));
+ */
+int
+__db_appname(dbenv, appname, dir, file, tmp_oflags, fhp, namep)
+ DB_ENV *dbenv;
+ APPNAME appname;
+ const char *dir, *file;
+ u_int32_t tmp_oflags;
+ DB_FH *fhp;
+ char **namep;
+{
+ DB_ENV etmp;
+ size_t len, str_len;
+ int data_entry, ret, slash, tmp_create, tmp_free;
+ const char *a, *b, *c;
+ char *p, *str;
+
+ a = b = c = NULL;
+ data_entry = -1;
+ tmp_create = tmp_free = 0;
+
+ /*
+ * We don't return a name when creating temporary files, just a
+ * file handle. Default to an error now.
+ */
+ if (fhp != NULL)
+ F_CLR(fhp, DB_FH_VALID);
+ if (namep != NULL)
+ *namep = NULL;
+
+ /*
+ * Absolute path names are never modified. If the file is an absolute
+ * path, we're done. If the directory is, simply append the file and
+ * return.
+ */
+ if (file != NULL && __os_abspath(file))
+ return (__os_strdup(dbenv, file, namep));
+ if (dir != NULL && __os_abspath(dir)) {
+ a = dir;
+ goto done;
+ }
+
+ /*
+ * DB_ENV DIR APPNAME RESULT
+ * -------------------------------------------
+ * null null none <tmp>/file
+ * null set none DIR/file
+ * set null none DB_HOME/file
+ * set set none DB_HOME/DIR/file
+ *
+ * DB_ENV FILE APPNAME RESULT
+ * -------------------------------------------
+ * null null DB_APP_DATA <tmp>/<create>
+ * null set DB_APP_DATA ./file
+ * set null DB_APP_DATA <tmp>/<create>
+ * set set DB_APP_DATA DB_HOME/DB_DATA_DIR/file
+ *
+ * DB_ENV DIR APPNAME RESULT
+ * -------------------------------------------
+ * null null DB_APP_LOG <tmp>/file
+ * null set DB_APP_LOG DIR/file
+ * set null DB_APP_LOG DB_HOME/DB_LOG_DIR/file
+ * set set DB_APP_LOG DB_HOME/DB_LOG_DIR/DIR/file
+ *
+ * DB_ENV APPNAME RESULT
+ * -------------------------------------------
+ * null DB_APP_TMP* <tmp>/<create>
+ * set DB_APP_TMP* DB_HOME/DB_TMP_DIR/<create>
+ */
+retry: switch (appname) {
+ case DB_APP_NONE:
+ if (dbenv == NULL || !F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ if (dir == NULL)
+ goto tmp;
+ a = dir;
+ } else {
+ a = dbenv->db_home;
+ b = dir;
+ }
+ break;
+ case DB_APP_DATA:
+ if (dir != NULL) {
+ __db_err(dbenv,
+ "DB_APP_DATA: illegal directory specification");
+ return (EINVAL);
+ }
+
+ if (file == NULL) {
+ tmp_create = 1;
+ goto tmp;
+ }
+ if (dbenv != NULL && F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ a = dbenv->db_home;
+ if (dbenv->db_data_dir != NULL &&
+ (b = dbenv->db_data_dir[++data_entry]) == NULL) {
+ data_entry = -1;
+ b = dbenv->db_data_dir[0];
+ }
+ }
+ break;
+ case DB_APP_LOG:
+ if (dbenv == NULL || !F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ if (dir == NULL)
+ goto tmp;
+ a = dir;
+ } else {
+ a = dbenv->db_home;
+ b = dbenv->db_log_dir;
+ c = dir;
+ }
+ break;
+ case DB_APP_TMP:
+ if (dir != NULL || file != NULL) {
+ __db_err(dbenv,
+ "DB_APP_TMP: illegal directory or file specification");
+ return (EINVAL);
+ }
+
+ tmp_create = 1;
+ if (dbenv == NULL || !F_ISSET(dbenv, DB_ENV_OPEN_CALLED))
+ goto tmp;
+ else {
+ a = dbenv->db_home;
+ b = dbenv->db_tmp_dir;
+ }
+ break;
+ }
+
+ /* Reference a file from the appropriate temporary directory. */
+ if (0) {
+tmp: if (dbenv == NULL || !F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ memset(&etmp, 0, sizeof(etmp));
+ if ((ret = __os_tmpdir(&etmp, DB_USE_ENVIRON)) != 0)
+ return (ret);
+ tmp_free = 1;
+ a = etmp.db_tmp_dir;
+ } else
+ a = dbenv->db_tmp_dir;
+ }
+
+done: len =
+ (a == NULL ? 0 : strlen(a) + 1) +
+ (b == NULL ? 0 : strlen(b) + 1) +
+ (c == NULL ? 0 : strlen(c) + 1) +
+ (file == NULL ? 0 : strlen(file) + 1);
+
+ /*
+ * Allocate space to hold the current path information, as well as any
+ * temporary space that we're going to need to create a temporary file
+ * name.
+ */
+#define DB_TRAIL "BDBXXXXXX"
+ str_len = len + sizeof(DB_TRAIL) + 10;
+ if ((ret = __os_malloc(dbenv, str_len, NULL, &str)) != 0) {
+ if (tmp_free)
+ __os_freestr(etmp.db_tmp_dir);
+ return (ret);
+ }
+
+ slash = 0;
+ p = str;
+ DB_ADDSTR(a);
+ DB_ADDSTR(b);
+ DB_ADDSTR(file);
+ *p = '\0';
+
+ /* Discard any space allocated to find the temp directory. */
+ if (tmp_free) {
+ __os_freestr(etmp.db_tmp_dir);
+ tmp_free = 0;
+ }
+
+ /*
+ * If we're opening a data file, see if it exists. If it does,
+	 * return it; otherwise, try to find another one to open.
+ */
+ if (data_entry != -1 && __os_exists(str, NULL) != 0) {
+ __os_free(str, str_len);
+ a = b = c = NULL;
+ goto retry;
+ }
+
+ /* Create the file if so requested. */
+ if (tmp_create &&
+ (ret = __db_tmp_open(dbenv, tmp_oflags, str, fhp)) != 0) {
+ __os_free(str, str_len);
+ return (ret);
+ }
+
+ if (namep == NULL)
+ __os_free(str, str_len);
+ else
+ *namep = str;
+ return (0);
+}
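
From the application side, the naming tables above are driven by the
environment home plus the per-type directory settings. A hedged sketch using
the public configuration methods referenced elsewhere in this change
(set_data_dir, set_lg_dir); all paths are illustrative.

    #include <db.h>

    /*
     * Sketch: with this configuration, a database opened through the
     * environment as "stocks.db" resolves per the DB_APP_DATA table above
     * to /var/dbhome/data1/stocks.db, and log files land in
     * /var/dbhome/logs.  Paths here are examples only.
     */
    int
    configure_env(DB_ENV *dbenv)
    {
    	int ret;

    	if ((ret = dbenv->set_data_dir(dbenv, "data1")) != 0)
    		return (ret);
    	if ((ret = dbenv->set_lg_dir(dbenv, "logs")) != 0)
    		return (ret);
    	return (dbenv->open(dbenv, "/var/dbhome",
    	    DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
    	    DB_INIT_LOG | DB_INIT_TXN, 0));
    }
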
+
+/*
+ * __db_home --
+ * Find the database home.
+ */
+static int
+__db_home(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ const char *p;
+
+ /*
+	 * Use db_home by default; this allows utilities to reasonably
+ * override the environment either explicitly or by using a -h
+ * option. Otherwise, use the environment if it's permitted
+ * and initialized.
+ */
+ if ((p = db_home) == NULL &&
+ (LF_ISSET(DB_USE_ENVIRON) ||
+ (LF_ISSET(DB_USE_ENVIRON_ROOT) && __os_isroot())) &&
+ (p = getenv("DB_HOME")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal DB_HOME environment variable");
+ return (EINVAL);
+ }
+
+ return (p == NULL ? 0 : __os_strdup(dbenv, p, &dbenv->db_home));
+}
+
+/*
+ * __db_parse --
+ * Parse a single NAME VALUE pair.
+ */
+static int
+__db_parse(dbenv, s)
+ DB_ENV *dbenv;
+ char *s;
+{
+ u_long v1, v2, v3;
+ u_int32_t flags;
+ char *name, *p, *value, v4;
+
+ /*
+ * !!!
+	 * The value of 40 is hard-coded into format arguments to sscanf
+	 * below; it can't be changed here without changing it there, too.
+	 * The buffer needs one extra byte for the terminating nul.
+	 */
+	char arg[40 + 1];
+
+ /*
+ * Name/value pairs are parsed as two white-space separated strings.
+ * Leading and trailing white-space is trimmed from the value, but
+ * it may contain embedded white-space. Note: we use the isspace(3)
+ * macro because it's more portable, but that means that you can use
+ * characters like form-feed to separate the strings.
+ */
+ name = s;
+ for (p = name; *p != '\0' && !isspace((int)*p); ++p)
+ ;
+ if (*p == '\0' || p == name)
+ goto illegal;
+ *p = '\0';
+ for (++p; isspace((int)*p); ++p)
+ ;
+ if (*p == '\0')
+ goto illegal;
+ value = p;
+ for (++p; *p != '\0'; ++p)
+ ;
+ for (--p; isspace((int)*p); --p)
+ ;
+ ++p;
+ if (p == value) {
+illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
+ return (EINVAL);
+ }
+ *p = '\0';
+
+ if (!strcasecmp(name, "set_cachesize")) {
+ if (sscanf(value, "%lu %lu %lu %c", &v1, &v2, &v3, &v4) != 3)
+ goto badarg;
+ return (dbenv->set_cachesize(dbenv, v1, v2, v3));
+ }
+
+ if (!strcasecmp(name, "set_data_dir") ||
+ !strcasecmp(name, "db_data_dir")) /* Compatibility. */
+ return (dbenv->set_data_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_flags")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+
+ if (!strcasecmp(value, "db_cdb_alldb"))
+ return (dbenv->set_flags(dbenv, DB_CDB_ALLDB, 1));
+ if (!strcasecmp(value, "db_nommap"))
+ return (dbenv->set_flags(dbenv, DB_NOMMAP, 1));
+ if (!strcasecmp(value, "db_txn_nosync"))
+ return (dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1));
+ goto badarg;
+ }
+
+ if (!strcasecmp(name, "set_lg_bsize")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_lg_bsize(dbenv, v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_lg_max(dbenv, v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_dir") ||
+ !strcasecmp(name, "db_log_dir")) /* Compatibility. */
+ return (dbenv->set_lg_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_lk_detect")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+ if (!strcasecmp(value, "db_lock_default"))
+ flags = DB_LOCK_DEFAULT;
+ else if (!strcasecmp(value, "db_lock_oldest"))
+ flags = DB_LOCK_OLDEST;
+ else if (!strcasecmp(value, "db_lock_random"))
+ flags = DB_LOCK_RANDOM;
+ else if (!strcasecmp(value, "db_lock_youngest"))
+ flags = DB_LOCK_YOUNGEST;
+ else
+ goto badarg;
+ return (dbenv->set_lk_detect(dbenv, flags));
+ }
+
+ if (!strcasecmp(name, "set_lk_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_lk_max(dbenv, v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_locks")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_lk_max_locks(dbenv, v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_lockers")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_lk_max_lockers(dbenv, v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_objects")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_lk_max_objects(dbenv, v1));
+ }
+
+ if (!strcasecmp(name, "set_mp_mmapsize")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_mp_mmapsize(dbenv, v1));
+ }
+
+ if (!strcasecmp(name, "set_region_init")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1 || v1 != 1)
+ goto badarg;
+ return (db_env_set_region_init(v1));
+ }
+
+ if (!strcasecmp(name, "set_shm_key")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_shm_key(dbenv, (long)v1));
+ }
+
+ if (!strcasecmp(name, "set_tas_spins")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (db_env_set_tas_spins(v1));
+ }
+
+ if (!strcasecmp(name, "set_tmp_dir") ||
+	    !strcasecmp(name, "db_tmp_dir"))	/* Compatibility. */
+ return (dbenv->set_tmp_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_tx_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_tx_max(dbenv, v1));
+ }
+
+ if (!strcasecmp(name, "set_verbose")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+
+ if (!strcasecmp(value, "db_verb_chkpoint"))
+ flags = DB_VERB_CHKPOINT;
+ else if (!strcasecmp(value, "db_verb_deadlock"))
+ flags = DB_VERB_DEADLOCK;
+ else if (!strcasecmp(value, "db_verb_recovery"))
+ flags = DB_VERB_RECOVERY;
+ else if (!strcasecmp(value, "db_verb_waitsfor"))
+ flags = DB_VERB_WAITSFOR;
+ else
+ goto badarg;
+ return (dbenv->set_verbose(dbenv, flags, 1));
+ }
+
+ __db_err(dbenv, "unrecognized name-value pair: %s", s);
+ return (EINVAL);
+
+badarg: __db_err(dbenv, "incorrect arguments for name-value pair: %s", s);
+ return (EINVAL);
+}
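
The parser above validates numeric values with a trailing %c conversion: a
correct value converts exactly one field, so any trailing garbage bumps the
conversion count and the value is rejected. A standalone demonstration of that
sscanf idiom in plain ISO C:

    #include <stdio.h>

    /* Return 0 if "value" is a single unsigned number with nothing trailing. */
    static int
    parse_ulong(const char *value, unsigned long *resultp)
    {
    	unsigned long v;
    	char trailing;

    	/*
    	 * A successful parse converts exactly one field; a second
    	 * conversion means there was trailing garbage.
    	 */
    	if (sscanf(value, "%lu %c", &v, &trailing) != 1)
    		return (-1);
    	*resultp = v;
    	return (0);
    }

    int
    main()
    {
    	unsigned long v;

    	printf("%d\n", parse_ulong("8388608", &v));	/* 0: accepted. */
    	printf("%d\n", parse_ulong("8388608x", &v));	/* -1: rejected. */
    	return (0);
    }
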
+
+/*
+ * __db_tmp_open --
+ * Create a temporary file.
+ */
+static int
+__db_tmp_open(dbenv, tmp_oflags, path, fhp)
+ DB_ENV *dbenv;
+ u_int32_t tmp_oflags;
+ char *path;
+ DB_FH *fhp;
+{
+ u_long pid;
+ int mode, isdir, ret;
+ const char *p;
+ char *trv;
+
+ /*
+ * Check the target directory; if you have six X's and it doesn't
+ * exist, this runs for a *very* long time.
+ */
+ if ((ret = __os_exists(path, &isdir)) != 0) {
+ __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+ return (ret);
+ }
+ if (!isdir) {
+ __db_err(dbenv, "%s: %s", path, db_strerror(EINVAL));
+ return (EINVAL);
+ }
+
+ /* Build the path. */
+ for (trv = path; *trv != '\0'; ++trv)
+ ;
+ *trv = PATH_SEPARATOR[0];
+ for (p = DB_TRAIL; (*++trv = *p) != '\0'; ++p)
+ ;
+
+ /*
+ * Replace the X's with the process ID. Pid should be a pid_t,
+ * but we use unsigned long for portability.
+ */
+ for (pid = getpid(); *--trv == 'X'; pid /= 10)
+ switch (pid % 10) {
+ case 0: *trv = '0'; break;
+ case 1: *trv = '1'; break;
+ case 2: *trv = '2'; break;
+ case 3: *trv = '3'; break;
+ case 4: *trv = '4'; break;
+ case 5: *trv = '5'; break;
+ case 6: *trv = '6'; break;
+ case 7: *trv = '7'; break;
+ case 8: *trv = '8'; break;
+ case 9: *trv = '9'; break;
+ }
+ ++trv;
+
+ /* Set up open flags and mode. */
+ mode = __db_omode("rw----");
+
+ /* Loop, trying to open a file. */
+ for (;;) {
+ if ((ret = __os_open(dbenv, path,
+ tmp_oflags | DB_OSO_CREATE | DB_OSO_EXCL, mode, fhp)) == 0)
+ return (0);
+
+ /*
+ * !!!:
+ * If we don't get an EEXIST error, then there's something
+ * seriously wrong. Unfortunately, if the implementation
+ * doesn't return EEXIST for O_CREAT and O_EXCL regardless
+ * of other possible errors, we've lost.
+ */
+ if (ret != EEXIST) {
+ __db_err(dbenv,
+ "tmp_open: %s: %s", path, db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Tricky little algorithm for backward compatibility.
+ * Assumes sequential ordering of lower-case characters.
+ */
+ for (;;) {
+ if (*trv == '\0')
+ return (EINVAL);
+ if (*trv == 'z')
+ *trv++ = 'a';
+ else {
+ if (isdigit((int)*trv))
+ *trv = 'a';
+ else
+ ++*trv;
+ break;
+ }
+ }
+ }
+ /* NOTREACHED */
+}
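
The temporary-file naming above stamps the process ID into the DB_TRAIL
template and then varies the trailing character on EEXIST collisions. A
self-contained sketch of just the stamping step; it uses '0' + digit in place
of the switch the real code uses for character-set portability, and it creates
no file.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * Sketch: replace the X's in a "BDBXXXXXX"-style template with the
     * decimal digits of the process ID, right to left.
     */
    static void
    stamp_pid(char *tmpl)
    {
    	unsigned long pid;
    	char *trv;

    	trv = tmpl + strlen(tmpl);
    	for (pid = (unsigned long)getpid(); *--trv == 'X'; pid /= 10)
    		*trv = (char)('0' + pid % 10);
    }

    int
    main()
    {
    	char name[] = "/tmp/BDBXXXXXX";

    	stamp_pid(name);
    	printf("%s\n", name);	/* e.g. /tmp/BDB012345 for pid 12345. */
    	return (0);
    }
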
diff --git a/bdb/env/env_recover.c b/bdb/env/env_recover.c
new file mode 100644
index 00000000000..bc5e4760584
--- /dev/null
+++ b/bdb/env/env_recover.c
@@ -0,0 +1,449 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: env_recover.c,v 11.33 2001/01/04 22:38:42 ubell Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "log.h"
+#include "txn.h"
+
+static float __lsn_diff __P((DB_LSN *, DB_LSN *, DB_LSN *, u_int32_t, int));
+static int __log_earliest __P((DB_ENV *, int32_t *, DB_LSN *));
+
+/*
+ * __db_apprec --
+ * Perform recovery.
+ *
+ * PUBLIC: int __db_apprec __P((DB_ENV *, u_int32_t));
+ */
+int
+__db_apprec(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DBT data;
+ DB_LSN ckp_lsn, first_lsn, last_lsn, lowlsn, lsn, open_lsn;
+ DB_TXNREGION *region;
+ __txn_ckp_args *ckp_args;
+ time_t now, tlow;
+ float nfiles;
+ int32_t low;
+ int is_thread, progress, ret;
+ void *txninfo;
+
+ COMPQUIET(nfiles, (float)0);
+
+ /*
+ * Save the state of the thread flag -- we don't need it on at the
+ * moment because we're single-threaded until recovery is complete.
+ */
+ is_thread = F_ISSET(dbenv, DB_ENV_THREAD) ? 1 : 0;
+ F_CLR(dbenv, DB_ENV_THREAD);
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ /*
+	 * If the user is specifying recovery to a particular point in time,
+ * verify that the logs present are sufficient to do this.
+ */
+ ZERO_LSN(lowlsn);
+ if (dbenv->tx_timestamp != 0) {
+ if ((ret = __log_earliest(dbenv, &low, &lowlsn)) != 0)
+ return (ret);
+ if ((int32_t)dbenv->tx_timestamp < low) {
+ char t1[30], t2[30];
+
+ strcpy(t1, ctime(&dbenv->tx_timestamp));
+ tlow = (time_t)low;
+ strcpy(t2, ctime(&tlow));
+ __db_err(dbenv,
+ "Invalid recovery timestamp %.*s; earliest time is %.*s",
+ 24, t1, 24, t2);
+ return (EINVAL);
+ }
+ }
+
+ /* Initialize the transaction list. */
+ if ((ret = __db_txnlist_init(dbenv, &txninfo)) != 0)
+ return (ret);
+
+ /*
+ * Recovery is done in three passes:
+ * Pass #0:
+	 *	We need to find the position from which we will open files.
+	 *	We need to open files beginning with the next-to-last
+	 *	checkpoint, because we might have crashed after writing the
+	 *	last checkpoint record but before having written out all
+	 *	the open file information.
+ *
+ * Pass #1:
+ * Read forward through the log from the second to last checkpoint
+ * opening and closing files so that at the end of the log we have
+ * the "current" set of files open.
+ *
+ * Pass #2:
+ * Read backward through the log undoing any uncompleted TXNs.
+ * There are three cases:
+ * 1. If doing catastrophic recovery, we read to the beginning
+ * of the log
+	 *		2. If we are doing normal recovery, then we have to roll
+ * back to the most recent checkpoint that occurs
+ * before the most recent checkpoint LSN, which is
+ * returned by __log_findckp().
+ * 3. If we are recovering to a point in time, then we have
+ * to roll back to the checkpoint whose ckp_lsn is earlier
+ * than the specified time. __log_earliest will figure
+ * this out for us.
+	 *	In case 3, "uncompleted TXNs" include all those that committed
+ * after the user's specified timestamp.
+ *
+ * Pass #3:
+ * Read forward through the log from the LSN found in pass #2,
+	 *	redoing any committed TXNs (which committed after any user-
+ * specified rollback point). During this pass, checkpoint
+ * file information is ignored, and file openings and closings
+ * are redone.
+ */
+
+ /*
+ * Find out the last lsn, so that we can estimate how far along we
+ * are in recovery. This will help us determine how much log there
+ * is between the first LSN that we're going to be working with and
+ * the last one. We assume that each of the three phases takes the
+ * same amount of time (a false assumption) and then use the %-age
+ * of the amount of log traversed to figure out how much of the
+ * pass we've accomplished.
+ */
+ memset(&data, 0, sizeof(data));
+ if (dbenv->db_feedback != NULL &&
+ (ret = log_get(dbenv, &last_lsn, &data, DB_LAST)) != 0)
+ goto out;
+
+ /*
+ * Pass #0
+ * Find the second to last checkpoint in the log. This is the point
+ * from which we want to begin pass #1 (the open files pass).
+ */
+ ckp_args = NULL;
+
+ if (LF_ISSET(DB_RECOVER_FATAL)) {
+ if ((ret = log_get(dbenv, &ckp_lsn, &data, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "First log record not found");
+ goto out;
+ }
+ open_lsn = ckp_lsn;
+ } else if ((ret =
+ log_get(dbenv, &ckp_lsn, &data, DB_CHECKPOINT)) != 0) {
+ /*
+ * If we don't find a checkpoint, start from the beginning.
+ * If that fails, we're done. Note, we do not require that
+ * there be log records if we're performing recovery.
+ */
+first: if ((ret = log_get(dbenv, &ckp_lsn, &data, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "First log record not found");
+ goto out;
+ }
+ open_lsn = ckp_lsn;
+ } else if ((ret = __txn_ckp_read(dbenv, data.data, &ckp_args)) != 0) {
+		__db_err(dbenv, "Invalid checkpoint record at [%lu][%lu]",
+ (u_long)ckp_lsn.file, (u_long)ckp_lsn.offset);
+ goto out;
+ } else if (IS_ZERO_LSN(ckp_args->last_ckp) ||
+ (ret = log_get(dbenv, &ckp_args->last_ckp, &data, DB_SET)) != 0)
+ goto first;
+ else
+ open_lsn = ckp_args->last_ckp;
+
+ if (dbenv->db_feedback != NULL) {
+ if (last_lsn.file == open_lsn.file)
+ nfiles = (float)(last_lsn.offset - open_lsn.offset) /
+ dbenv->lg_max;
+ else
+ nfiles = (float)(last_lsn.file - open_lsn.file) +
+ (float)(dbenv->lg_max - open_lsn.offset +
+ last_lsn.offset) / dbenv->lg_max;
+ /* We are going to divide by nfiles; make sure it isn't 0. */
+ if (nfiles == 0)
+ nfiles = (float)0.001;
+ }
+
+ /*
+ * Pass #1
+ * Now, ckp_lsn is either the lsn of the last checkpoint
+ * or the lsn of the first record in the log. Open_lsn is
+	 * the second to last checkpoint or the beginning of the log;
+ * begin the open files pass from that lsn, and proceed to
+ * the end of the log.
+ */
+ lsn = open_lsn;
+ for (;;) {
+ if (dbenv->db_feedback != NULL) {
+ progress = (int)(33 * (__lsn_diff(&open_lsn,
+ &last_lsn, &lsn, dbenv->lg_max, 1) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ ret = __db_dispatch(dbenv,
+ &data, &lsn, DB_TXN_OPENFILES, txninfo);
+ if (ret != 0 && ret != DB_TXN_CKP)
+ goto msgerr;
+ if ((ret = log_get(dbenv, &lsn, &data, DB_NEXT)) != 0) {
+ if (ret == DB_NOTFOUND)
+ break;
+ goto out;
+ }
+ }
+
+ /*
+ * Pass #2.
+ *
+ * Before we can begin pass #2, backward roll phase, we determine how
+ * far back in the log to recover. If we are doing catastrophic
+ * recovery, then we go as far back as we have files. If we are
+	 * doing normal recovery, we go back to the most recent checkpoint
+	 * that occurs before the most recent checkpoint LSN. If we are
+	 * recovering to a point in time, then roll back to the checkpoint whose
+ * ckp_lsn precedes the first log record (and then roll forward to
+ * the appropriate timestamp in Pass #3).
+ */
+ if (LF_ISSET(DB_RECOVER_FATAL)) {
+ ZERO_LSN(first_lsn);
+ } else if (dbenv->tx_timestamp != 0)
+ first_lsn = lowlsn;
+ else
+ if ((ret = __log_findckp(dbenv, &first_lsn)) == DB_NOTFOUND) {
+ /*
+ * We don't require that log files exist if recovery
+ * was specified.
+ */
+ ret = 0;
+ goto out;
+ }
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY))
+ __db_err(dbenv, "Recovery starting from [%lu][%lu]",
+ (u_long)first_lsn.file, (u_long)first_lsn.offset);
+
+ for (ret = log_get(dbenv, &lsn, &data, DB_LAST);
+ ret == 0 && log_compare(&lsn, &first_lsn) > 0;
+ ret = log_get(dbenv, &lsn, &data, DB_PREV)) {
+ if (dbenv->db_feedback != NULL) {
+ progress = 34 + (int)(33 * (__lsn_diff(&open_lsn,
+ &last_lsn, &lsn, dbenv->lg_max, 0) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ ret = __db_dispatch(dbenv,
+ &data, &lsn, DB_TXN_BACKWARD_ROLL, txninfo);
+ if (ret != 0) {
+ if (ret != DB_TXN_CKP)
+ goto msgerr;
+ else
+ ret = 0;
+ }
+ }
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto out;
+
+ /*
+ * Pass #3.
+ */
+ for (ret = log_get(dbenv, &lsn, &data, DB_NEXT);
+ ret == 0; ret = log_get(dbenv, &lsn, &data, DB_NEXT)) {
+ if (dbenv->db_feedback != NULL) {
+ progress = 67 + (int)(33 * (__lsn_diff(&open_lsn,
+ &last_lsn, &lsn, dbenv->lg_max, 1) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ ret = __db_dispatch(dbenv,
+ &data, &lsn, DB_TXN_FORWARD_ROLL, txninfo);
+ if (ret != 0) {
+ if (ret != DB_TXN_CKP)
+ goto msgerr;
+ else
+ ret = 0;
+ }
+ }
+ if (ret != DB_NOTFOUND)
+ goto out;
+
+ /*
+ * Process any pages that were on the limbo list
+ * and move them to the free list. Do this
+ * before checkpointing the database.
+ */
+ if ((ret = __db_do_the_limbo(dbenv, txninfo)) != 0)
+ goto out;
+
+ /*
+ * Now set the last checkpoint lsn and the current time,
+ * take a checkpoint, and reset the txnid.
+ */
+ (void)time(&now);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ region->last_txnid = ((DB_TXNHEAD *)txninfo)->maxid;
+ region->last_ckp = ckp_lsn;
+ region->time_ckp = (u_int32_t)now;
+
+ /*
+ * Take two checkpoints so that we don't re-recover any of the
+ * work we've already done.
+ */
+ if ((ret = txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0)
+ goto out;
+
+ /* Now close all the db files that are open. */
+ __log_close_files(dbenv);
+
+ if ((ret = txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0)
+ goto out;
+ region->last_txnid = TXN_MINIMUM;
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY)) {
+ __db_err(dbenv, "Recovery complete at %.24s", ctime(&now));
+ __db_err(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction ID",
+ ((DB_TXNHEAD *)txninfo)->maxid,
+ "Recovery checkpoint",
+ (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+msgerr: __db_err(dbenv, "Recovery function for LSN %lu %lu failed",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ }
+
+out: if (is_thread)
+ F_SET(dbenv, DB_ENV_THREAD);
+ __db_txnlist_end(dbenv, txninfo);
+ if (ckp_args != NULL)
+ __os_free(ckp_args, sizeof(*ckp_args));
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ dbenv->tx_timestamp = 0;
+ return (ret);
+}
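
Applications normally reach __db_apprec through DBENV->open with DB_RECOVER or
DB_RECOVER_FATAL, as the open code earlier in this change shows. A hedged
sketch; the flag set follows the usual requirement that recovery be paired with
DB_CREATE and DB_INIT_TXN, and the home path is supplied by the caller.

    #include <stdio.h>
    #include <db.h>

    /*
     * Sketch: run normal recovery as part of opening the environment.  Use
     * DB_RECOVER_FATAL instead of DB_RECOVER for catastrophic recovery,
     * which replays from the first log record rather than a recent
     * checkpoint.
     */
    int
    open_with_recovery(DB_ENV *dbenv, const char *home)
    {
    	int ret;

    	if ((ret = dbenv->open(dbenv, home,
    	    DB_CREATE | DB_RECOVER | DB_INIT_TXN |
    	    DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_MPOOL, 0)) != 0)
    		fprintf(stderr, "DBENV->open: %s\n", db_strerror(ret));
    	return (ret);
    }
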
+
+/*
+ * Figure out how many logfiles we have processed. If we are moving
+ * forward (is_forward != 0), then we're computing current - low. If
+ * we are moving backward, we are computing high - current. max is
+ * the number of bytes per logfile.
+ */
+static float
+__lsn_diff(low, high, current, max, is_forward)
+ DB_LSN *low, *high, *current;
+ u_int32_t max;
+ int is_forward;
+{
+ float nf;
+
+ /*
+ * There are three cases in each direction. If you are in the
+	 * same file, then all you need to worry about is the difference in
+	 * offsets. If you are in different files, then your offsets put you
+	 * either more or less than the integral difference in the number
+	 * of files -- we need to handle both of these cases.
+ */
+ if (is_forward) {
+ if (current->file == low->file)
+ nf = (float)(current->offset - low->offset) / max;
+ else if (current->offset < low->offset)
+ nf = (float)(current->file - low->file - 1) +
+ (float)(max - low->offset + current->offset) / max;
+ else
+ nf = (float)(current->file - low->file) +
+ (float)(current->offset - low->offset) / max;
+ } else {
+ if (current->file == high->file)
+ nf = (float)(high->offset - current->offset) / max;
+ else if (current->offset > high->offset)
+ nf = (float)(high->file - current->file - 1) +
+ (float)(max - current->offset + high->offset) / max;
+ else
+ nf = (float)(high->file - current->file) +
+ (float)(high->offset - current->offset) / max;
+ }
+ return (nf);
+}
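
The progress estimate treats an LSN as a log-file number plus a fractional
offset within a file of lg_max bytes. A worked example in plain C with a
simplified stand-in for DB_LSN; once the subtraction is done in floating point,
the two-branch arithmetic above collapses to a single expression.

    #include <stdio.h>

    /* Simplified stand-in for DB_LSN: a log file number and a byte offset. */
    struct lsn {
    	unsigned long file;
    	unsigned long offset;
    };

    /* Log traversed between "low" and "current", with "max" bytes per file. */
    static double
    log_progress(struct lsn *low, struct lsn *current, unsigned long max)
    {
    	if (current->file == low->file)
    		return ((double)(current->offset - low->offset) / max);
    	return ((double)(current->file - low->file) +
    	    ((double)current->offset - (double)low->offset) / max);
    }

    int
    main()
    {
    	struct lsn low = { 1, 0 }, cur = { 3, 500000 };

    	/* With 10MB log files, this prints roughly 2.05 files traversed. */
    	printf("%.2f\n", log_progress(&low, &cur, 10 * 1024 * 1024));
    	return (0);
    }
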
+
+/*
+ * __log_earliest --
+ *
+ * Return the earliest recovery point for the log files present. The
+ * earliest recovery time is the time stamp of the first checkpoint record
+ * whose checkpoint LSN is greater than the first LSN we process.
+ */
+static int
+__log_earliest(dbenv, lowtime, lowlsn)
+ DB_ENV *dbenv;
+ int32_t *lowtime;
+ DB_LSN *lowlsn;
+{
+ DB_LSN first_lsn, lsn;
+ DBT data;
+ __txn_ckp_args *ckpargs;
+ u_int32_t rectype;
+ int cmp, ret;
+
+ memset(&data, 0, sizeof(data));
+ /*
+ * Read forward through the log looking for the first checkpoint
+ * record whose ckp_lsn is greater than first_lsn.
+ */
+
+ for (ret = log_get(dbenv, &first_lsn, &data, DB_FIRST);
+ ret == 0; ret = log_get(dbenv, &lsn, &data, DB_NEXT)) {
+ memcpy(&rectype, data.data, sizeof(rectype));
+ if (rectype != DB_txn_ckp)
+ continue;
+ if ((ret = __txn_ckp_read(dbenv, data.data, &ckpargs)) == 0) {
+ cmp = log_compare(&ckpargs->ckp_lsn, &first_lsn);
+ *lowlsn = ckpargs->ckp_lsn;
+ *lowtime = ckpargs->timestamp;
+
+ __os_free(ckpargs, 0);
+ if (cmp >= 0)
+ break;
+ }
+ }
+
+ return (ret);
+}
diff --git a/bdb/env/env_region.c b/bdb/env/env_region.c
new file mode 100644
index 00000000000..f3df4bac184
--- /dev/null
+++ b/bdb/env/env_region.c
@@ -0,0 +1,1205 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_region.c,v 11.28 2000/12/12 17:36:10 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "lock_ext.h"
+#include "log.h"
+#include "log_ext.h"
+#include "mp.h"
+#include "mp_ext.h"
+#include "txn.h"
+#include "txn_ext.h"
+
+static int __db_des_destroy __P((DB_ENV *, REGION *));
+static int __db_des_get __P((DB_ENV *, REGINFO *, REGINFO *, REGION **));
+static int __db_e_remfile __P((DB_ENV *));
+static int __db_faultmem __P((void *, size_t, int));
+static void __db_region_destroy __P((DB_ENV *, REGINFO *));
+
+/*
+ * __db_e_attach
+ * Join/create the environment
+ *
+ * PUBLIC: int __db_e_attach __P((DB_ENV *, u_int32_t *));
+ */
+int
+__db_e_attach(dbenv, init_flagsp)
+ DB_ENV *dbenv;
+ u_int32_t *init_flagsp;
+{
+ REGENV *renv;
+ REGENV_REF ref;
+ REGINFO *infop;
+ REGION *rp, tregion;
+ size_t size;
+ size_t nrw;
+ u_int32_t mbytes, bytes;
+ int retry_cnt, ret, segid;
+ char buf[sizeof(DB_REGION_FMT) + 20];
+
+#if !defined(HAVE_MUTEX_THREADS)
+ /*
+ * !!!
+ * If we don't have spinlocks, we need a file descriptor for fcntl(2)
+ * locking. We use the file handle from the REGENV file for this
+ * purpose.
+ *
+ * Since we may be using shared memory regions, e.g., shmget(2), and
+ * not a mapped-in regular file, the backing file may be only a few
+ * bytes in length. So, this depends on the ability to call fcntl to
+ * lock file offsets much larger than the actual physical file. I
+ * think that's safe -- besides, very few systems actually need this
+ * kind of support, SunOS is the only one still in wide use of which
+	 * kind of support; SunOS is the only one still in wide use of which
+ *
+ * The error case is if an application lacks spinlocks and wants to be
+ * threaded. That doesn't work because fcntl may lock the underlying
+ * process, including all its threads.
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ __db_err(dbenv,
+"architecture lacks fast mutexes: applications cannot be threaded");
+ return (EINVAL);
+ }
+#endif
+
+ /* Initialization */
+ retry_cnt = 0;
+
+ /* Repeated initialization. */
+loop: renv = NULL;
+
+ /* Set up the DB_ENV's REG_INFO structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(REGINFO), &infop)) != 0)
+ return (ret);
+ infop->type = REGION_TYPE_ENV;
+ infop->id = REGION_ID_ENV;
+ infop->mode = dbenv->db_mode;
+ infop->flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(infop, REGION_CREATE_OK);
+
+ /*
+ * We have to single-thread the creation of the REGENV region. Once
+ * it exists, we can do locking using locks in the region, but until
+ * then we have to be the only player in the game.
+ *
+ * If this is a private environment, we are only called once and there
+ * are no possible race conditions.
+ *
+ * If this is a public environment, we use the filesystem to ensure
+ * the creation of the environment file is single-threaded.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE))
+ goto creation;
+
+ /* Build the region name. */
+ (void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_NONE, NULL, buf, 0, NULL, &infop->name)) != 0)
+ goto err;
+
+ /*
+ * Try to create the file, if we have the authority. We have to ensure
+ * that multiple threads/processes attempting to simultaneously create
+ * the file are properly ordered. Open using the O_CREAT and O_EXCL
+ * flags so that multiple attempts to create the region will return
+ * failure in all but one. POSIX 1003.1 requires that EEXIST be the
+ * errno return value -- I sure hope they're right.
+ */
+ if (F_ISSET(dbenv, DB_ENV_CREATE)) {
+ if ((ret = __os_open(dbenv,
+ infop->name, DB_OSO_REGION | DB_OSO_CREATE | DB_OSO_EXCL,
+ dbenv->db_mode, dbenv->lockfhp)) == 0)
+ goto creation;
+ if (ret != EEXIST) {
+ __db_err(dbenv,
+ "%s: %s", infop->name, db_strerror(ret));
+ goto err;
+ }
+ }
+
+ /*
+ * If we couldn't create the file, try and open it. (If that fails,
+ * we're done.)
+ */
+ if ((ret = __os_open(dbenv, infop->name,
+ DB_OSO_REGION, dbenv->db_mode, dbenv->lockfhp)) != 0)
+ goto err;
+
+ /*
+ * !!!
+ * The region may be in system memory not backed by the filesystem
+ * (more specifically, not backed by this file), and we're joining
+ * it. In that case, the process that created it will have written
+ * out a REGENV_REF structure as its only contents. We read that
+ * structure before we do anything further, e.g., we can't just map
+ * that file in and then figure out what's going on.
+ *
+ * All of this noise is because some systems don't have a coherent VM
+ * and buffer cache, and what's worse, when you mix operations on the
+ * VM and buffer cache, half the time you hang the system.
+ *
+ * If the file is the size of an REGENV_REF structure, then we know
+ * the real region is in some other memory. (The only way you get a
+ * file that size is to deliberately write it, as it's smaller than
+ * any possible disk sector created by writing a file or mapping the
+ * file into memory.) In which case, retrieve the structure from the
+ * file and use it to acquire the referenced memory.
+ *
+ * If the structure is larger than a REGENV_REF structure, then this
+ * file is backing the shared memory region, and we just map it into
+ * memory.
+ *
+ * And yes, this makes me want to take somebody and kill them. (I
+ * digress -- but you have no freakin' idea. This is unbelievably
+ * stupid and gross, and I've probably spent six months of my life,
+ * now, trying to make different versions of it work.)
+ */
+ if ((ret = __os_ioinfo(dbenv, infop->name,
+ dbenv->lockfhp, &mbytes, &bytes, NULL)) != 0) {
+ __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * !!!
+ * A size_t is OK -- regions get mapped into memory, and so can't
+ * be larger than a size_t.
+ */
+ size = mbytes * MEGABYTE + bytes;
+
+ /*
+ * If the size is less than the size of a REGENV_REF structure, the
+ * region (or, possibly, the REGENV_REF structure) has not yet been
+ * completely written. Wait awhile and try again.
+ *
+ * Otherwise, if the size is the size of a REGENV_REF structure,
+ * read it into memory and use it as a reference to the real region.
+ */
+ if (size <= sizeof(ref)) {
+ if (size != sizeof(ref))
+ goto retry;
+
+ if ((ret = __os_read(dbenv, dbenv->lockfhp, &ref,
+ sizeof(ref), &nrw)) != 0 || nrw < (size_t)sizeof(ref)) {
+ if (ret == 0)
+ ret = EIO;
+ __db_err(dbenv,
+ "%s: unable to read system-memory information from: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ }
+ size = ref.size;
+ segid = ref.segid;
+
+ F_SET(dbenv, DB_ENV_SYSTEM_MEM);
+ } else if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+ ret = EINVAL;
+ __db_err(dbenv,
+ "%s: existing environment not created in system memory: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ } else
+ segid = INVALID_REGION_SEGID;
+
+ /*
+ * If not doing thread locking, we need to save the file handle for
+ * fcntl(2) locking. Otherwise, discard the handle, we no longer
+ * need it, and the less contact between the buffer cache and the VM,
+ * the better.
+ */
+#ifdef HAVE_MUTEX_THREADS
+ __os_closehandle(dbenv->lockfhp);
+#endif
+
+ /* Call the region join routine to acquire the region. */
+ memset(&tregion, 0, sizeof(tregion));
+ tregion.size = size;
+ tregion.segid = segid;
+ if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
+ goto err;
+
+ /*
+ * The environment's REGENV structure has to live at offset 0 instead
+ * of the usual shalloc information. Set the primary reference and
+ * correct the "addr" value to reference the shalloc region. Note,
+ * this means that all of our offsets (R_ADDR/R_OFFSET) get shifted
+ * as well, but that should be fine.
+ */
+ infop->primary = R_ADDR(infop, 0);
+ infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV);
+
+ /*
+ * Check if the environment has had a catastrophic failure.
+ *
+ * Check the magic number to ensure the region is initialized. If the
+ * magic number isn't set, the lock may not have been initialized, and
+ * an attempt to use it could lead to random behavior.
+ *
+ * The panic and magic values aren't protected by any lock, so we never
+ * use them in any check that's more complex than set/not-set.
+ *
+ * !!!
+ * I'd rather play permissions games using the underlying file, but I
+ * can't because Windows/NT filesystems won't open files mode 0.
+ */
+ renv = infop->primary;
+ if (renv->panic) {
+ ret = __db_panic_msg(dbenv);
+ goto err;
+ }
+ if (renv->magic != DB_REGION_MAGIC)
+ goto retry;
+
+ /* Make sure the region matches our build. */
+ if (renv->majver != DB_VERSION_MAJOR ||
+ renv->minver != DB_VERSION_MINOR ||
+ renv->patch != DB_VERSION_PATCH) {
+ __db_err(dbenv,
+ "Program version %d.%d.%d doesn't match environment version %d.%d.%d",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ renv->majver, renv->minver, renv->patch);
+#ifndef DIAGNOSTIC
+ ret = EINVAL;
+ goto err;
+#endif
+ }
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+
+ /*
+ * Finally! We own the environment now. Repeat the panic check, it's
+ * possible that it was set while we waited for the lock.
+ */
+ if (renv->panic) {
+ ret = __db_panic_msg(dbenv);
+ goto err_unlock;
+ }
+
+ /*
+ * Get a reference to the underlying REGION information for this
+ * environment.
+ */
+ if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0 || rp == NULL) {
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ goto find_err;
+ }
+ infop->rp = rp;
+
+ /*
+ * There's still a possibility for inconsistent data. When we acquired
+ * the size of the region and attached to it, it might have still been
+ * growing as part of its creation. We can detect this by checking the
+ * size we originally found against the region's current size. (The
+ * region's current size has to be final, the creator finished growing
+	 * region's current size has to be final; the creator finished growing
+ */
+ if (rp->size != size) {
+err_unlock: MUTEX_UNLOCK(dbenv, &renv->mutex);
+ goto retry;
+ }
+
+ /* Increment the reference count. */
+ ++renv->refcnt;
+
+ /*
+ * If our caller wants them, return the flags this environment was
+ * initialized with.
+ */
+ if (init_flagsp != NULL)
+ *init_flagsp = renv->init_flags;
+
+ /* Discard our lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /*
+ * Fault the pages into memory. Note, do this AFTER releasing the
+ * lock, because we're only reading the pages, not writing them.
+ */
+ (void)__db_faultmem(infop->primary, rp->size, 0);
+
+ /* Everything looks good, we're done. */
+ dbenv->reginfo = infop;
+ return (0);
+
+creation:
+ /* Create the environment region. */
+ F_SET(infop, REGION_CREATE);
+
+ /*
+ * Allocate room for 50 REGION structures plus overhead (we're going
+ * to use this space for last-ditch allocation requests), although we
+ * should never need anything close to that.
+ */
+ memset(&tregion, 0, sizeof(tregion));
+ tregion.size = 50 * sizeof(REGION) + 50 * sizeof(MUTEX) + 2048;
+ tregion.segid = INVALID_REGION_SEGID;
+ if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
+ goto err;
+
+ /*
+ * Fault the pages into memory. Note, do this BEFORE we initialize
+ * anything, because we're writing the pages, not just reading them.
+ */
+ (void)__db_faultmem(infop->addr, tregion.size, 1);
+
+ /*
+ * The first object in the region is the REGENV structure. This is
+	 * different from the other regions, and from everything else in
+ * this region, where all objects are allocated from the pool, i.e.,
+ * there aren't any fixed locations. The remaining space is made
+ * available for later allocation.
+ *
+ * The allocation space must be size_t aligned, because that's what
+ * the initialization routine is going to store there. To make sure
+ * that happens, the REGENV structure was padded with a final size_t.
+ * No other region needs to worry about it because all of them treat
+ * the entire region as allocation space.
+ *
+ * Set the primary reference and correct the "addr" value to reference
+ * the shalloc region. Note, this requires that we "uncorrect" it at
+ * region detach, and that all of our offsets (R_ADDR/R_OFFSET) will be
+ * shifted as well, but that should be fine.
+ */
+ infop->primary = R_ADDR(infop, 0);
+ infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV);
+ __db_shalloc_init(infop->addr, tregion.size - sizeof(REGENV));
+
+ /*
+ * Initialize the rest of the REGENV structure, except for the magic
+ * number which validates the file/environment.
+ */
+ renv = infop->primary;
+ renv->panic = 0;
+ db_version(&renv->majver, &renv->minver, &renv->patch);
+ SH_LIST_INIT(&renv->regionq);
+ renv->refcnt = 1;
+
+ /*
+ * Initialize init_flags to store the flags that any other environment
+ * handle that uses DB_JOINENV to join this environment will need.
+ */
+ renv->init_flags = (init_flagsp == NULL) ? 0 : *init_flagsp;
+
+ /*
+ * Lock the environment.
+ *
+ * Check the lock call return. This is the first lock we initialize
+ * and acquire, and we have to know if it fails. (It CAN fail, e.g.,
+ * SunOS, when using fcntl(2) for locking and using an in-memory
+ * filesystem as the database home. But you knew that, I'm sure -- it
+ * probably wasn't even worth mentioning.)
+ */
+ if ((ret =
+ __db_mutex_init(dbenv, &renv->mutex, DB_FCNTL_OFF_GEN, 0)) != 0) {
+ __db_err(dbenv, "%s: unable to initialize environment lock: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ }
+
+ if (!F_ISSET(&renv->mutex, MUTEX_IGNORE) &&
+ (ret = __db_mutex_lock(dbenv, &renv->mutex, dbenv->lockfhp)) != 0) {
+ __db_err(dbenv, "%s: unable to acquire environment lock: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * Get the underlying REGION structure for this environment. Note,
+ * we created the underlying OS region before we acquired the REGION
+ * structure, which is backwards from the normal procedure. Update
+ * the REGION structure.
+ */
+ if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0) {
+find_err: __db_err(dbenv,
+ "%s: unable to find environment", infop->name);
+ if (ret == 0)
+ ret = EINVAL;
+ goto err;
+ }
+ infop->rp = rp;
+ rp->size = tregion.size;
+ rp->segid = tregion.segid;
+
+ /*
+ * !!!
+ * If we create an environment where regions are public and in system
+ * memory, we have to inform processes joining the environment how to
+ * attach to the shared memory segment. So, we write the shared memory
+ * identifier into the file, to be read by those other processes.
+ *
+ * XXX
+ * This is really OS-layer information, but I can't see any easy way
+ * to move it down there without passing down information that it has
+ * no right to know, e.g., that this is the one-and-only REGENV region
+ * and not some other random region.
+ */
+ if (tregion.segid != INVALID_REGION_SEGID) {
+ ref.size = tregion.size;
+ ref.segid = tregion.segid;
+ if ((ret = __os_write(dbenv, dbenv->lockfhp,
+ &ref, sizeof(ref), &nrw)) != 0 || nrw != sizeof(ref)) {
+ __db_err(dbenv,
+ "%s: unable to write out public environment ID: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ }
+ }
+
+ /*
+ * If not doing thread locking, we need to save the file handle for
+ * fcntl(2) locking. Otherwise, discard the handle, we no longer
+ * need it, and the less contact between the buffer cache and the VM,
+ * the better.
+ */
+#if defined(HAVE_MUTEX_THREADS)
+ if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+ __os_closehandle(dbenv->lockfhp);
+#endif
+
+ /* Validate the file. */
+ renv->magic = DB_REGION_MAGIC;
+
+ /* Discard our lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Everything looks good, we're done. */
+ dbenv->reginfo = infop;
+ return (0);
+
+err:
+retry: /* Close any open file handle. */
+ if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv->lockfhp);
+
+ /*
+ * If we joined or created the region, detach from it. If we created
+ * it, destroy it. Note, there's a path in the above code where we're
+ * using a temporary REGION structure because we haven't yet allocated
+ * the real one. In that case the region address (addr) will be filled
+ * in, but the REGION pointer (rp) won't. Fix it.
+ */
+ if (infop->addr != NULL) {
+ if (infop->rp == NULL)
+ infop->rp = &tregion;
+
+ /* Reset the addr value that we "corrected" above. */
+ infop->addr = infop->primary;
+ (void)__os_r_detach(dbenv,
+ infop, F_ISSET(infop, REGION_CREATE));
+ }
+
+ /* Free the allocated name and/or REGINFO structure. */
+ if (infop->name != NULL)
+ __os_freestr(infop->name);
+ __os_free(infop, sizeof(REGINFO));
+
+ /* If we had a temporary error, wait awhile and try again. */
+ if (ret == 0) {
+ if (++retry_cnt > 3) {
+ __db_err(dbenv, "unable to join the environment");
+ ret = EAGAIN;
+ } else {
+ __os_sleep(dbenv, retry_cnt * 3, 0);
+ goto loop;
+ }
+ }
+
+ return (ret);
+}
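
The single-threaded creation above is the classic O_CREAT|O_EXCL pattern:
exactly one opener wins creation and initializes the shared resource, everyone
else gets EEXIST and joins. A minimal POSIX sketch of that pattern on its own,
independent of the region code; the demo path is arbitrary.

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Sketch: return 1 if this process created the file (and so must
     * initialize the shared resource), 0 if it already existed, -1 on any
     * other error.
     */
    static int
    create_or_join(const char *path)
    {
    	int fd;

    	if ((fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644)) != -1) {
    		(void)close(fd);
    		return (1);		/* We won the creation race. */
    	}
    	if (errno == EEXIST)
    		return (0);		/* Someone else created it; join. */
    	perror(path);
    	return (-1);
    }

    int
    main()
    {
    	printf("%d\n", create_or_join("/tmp/__db_region_demo"));
    	return (0);
    }
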
+
+/*
+ * __db_e_detach --
+ * Detach from the environment.
+ *
+ * PUBLIC: int __db_e_detach __P((DB_ENV *, int));
+ */
+int
+__db_e_detach(dbenv, destroy)
+ DB_ENV *dbenv;
+ int destroy;
+{
+ REGENV *renv;
+ REGINFO *infop;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+
+ /* Decrement the reference count. */
+ if (renv->refcnt == 0) {
+ __db_err(dbenv,
+ "region %lu (environment): reference count went negative",
+		    (u_long)infop->rp->id);
+ } else
+ --renv->refcnt;
+
+ /* Release the lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Close the locking file handle. */
+ if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv->lockfhp);
+
+ /* Reset the addr value that we "corrected" above. */
+ infop->addr = infop->primary;
+
+ /*
+ * If we are destroying the environment, we need to
+ * destroy any system resources backing the mutex.
+ * Do that now before we free the memory in __os_r_detach.
+ */
+ if (destroy)
+ __db_mutex_destroy(&renv->mutex);
+
+ /*
+ * Release the region, and kill our reference.
+ *
+ * We set the DBENV->reginfo field to NULL here and discard its memory.
+ * DBENV->remove calls __dbenv_remove to do the region remove, and
+ * __dbenv_remove attached and then detaches from the region. We don't
+ * want to return to DBENV->remove with a non-NULL DBENV->reginfo field
+ * because it will attempt to detach again as part of its cleanup.
+ */
+ (void)__os_r_detach(dbenv, infop, destroy);
+
+ if (infop->name != NULL)
+ __os_free(infop->name, 0);
+ __os_free(dbenv->reginfo, sizeof(REGINFO));
+ dbenv->reginfo = NULL;
+
+ return (0);
+}
+
+/*
+ * __db_e_remove --
+ * Discard an environment if it's not in use.
+ *
+ * PUBLIC: int __db_e_remove __P((DB_ENV *, int));
+ */
+int
+__db_e_remove(dbenv, force)
+ DB_ENV *dbenv;
+ int force;
+{
+ REGENV *renv;
+ REGINFO *infop, reginfo;
+ REGION *rp;
+ int ret;
+
+ /*
+ * This routine has to walk a nasty line between not looking into
+ * the environment (which may be corrupted after an app or system
+ * crash), and removing everything that needs removing. What we
+ * do is:
+ * 1. Connect to the environment (so it better be OK).
+ * 2. If the environment is in use (reference count is non-zero),
+ * return EBUSY.
+ * 3. Overwrite the magic number so that any threads of control
+	 *	attempting to connect will back off and retry.
+ * 4. Walk the list of regions. Connect to each region and then
+ * disconnect with the destroy flag set. This shouldn't cause
+ * any problems, even if the region is corrupted, because we
+ * should never be looking inside the region.
+ * 5. Walk the list of files in the directory, unlinking any
+ * files that match a region name. Unlink the environment
+ * file last.
+ *
+ * If the force flag is set, we do not acquire any locks during this
+ * process.
+ */
+ if (force)
+ dbenv->db_mutexlocks = 0;
+
+ /* Join the environment. */
+ if ((ret = __db_e_attach(dbenv, NULL)) != 0) {
+ /*
+ * If we can't join it, we assume that's because it doesn't
+ * exist. It would be better to know why we failed, but it
+ * probably isn't important.
+ */
+ ret = 0;
+ if (force)
+ goto remfiles;
+ goto err;
+ }
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+
+ /* If it's in use, we're done. */
+ if (renv->refcnt == 1 || force) {
+ /*
+ * Set the panic flag and overwrite the magic number.
+ *
+ * !!!
+ * From this point on, there's no going back, we pretty
+ * much ignore errors, and just whack on whatever we can.
+ */
+ renv->panic = 1;
+ renv->magic = 0;
+
+ /*
+ * Unlock the environment. We should no longer need the lock
+ * because we've poisoned the pool, but we can't continue to
+ * hold it either, because other routines may want it.
+ */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /*
+ * Attach to each sub-region and destroy it.
+ *
+ * !!!
+ * The REGION_CREATE_OK flag is set for Windows/95 -- regions
+ * are zero'd out when the last reference to the region goes
+ * away, in which case the underlying OS region code requires
+ * callers be prepared to create the region in order to join it.
+ */
+ memset(&reginfo, 0, sizeof(reginfo));
+restart: for (rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+ rp != NULL; rp = SH_LIST_NEXT(rp, q, __db_region)) {
+ if (rp->type == REGION_TYPE_ENV)
+ continue;
+
+ reginfo.id = rp->id;
+ reginfo.flags = REGION_CREATE_OK;
+ if ((ret = __db_r_attach(dbenv, &reginfo, 0)) != 0) {
+ __db_err(dbenv,
+				    "region %lu attach: %s",
+				    (u_long)reginfo.id, db_strerror(ret));
+ continue;
+ }
+ R_UNLOCK(dbenv, &reginfo);
+ if ((ret = __db_r_detach(dbenv, &reginfo, 1)) != 0) {
+ __db_err(dbenv,
+ "region detach: %s", db_strerror(ret));
+ continue;
+ }
+ /*
+ * If we have an error, we continue so we eventually
+ * reach the end of the list. If we succeed, restart
+ * the list because it was relinked when we destroyed
+ * the entry.
+ */
+ goto restart;
+ }
+
+ /* Destroy the environment's region. */
+ (void)__db_e_detach(dbenv, 1);
+
+ /* Discard the physical files. */
+remfiles: (void)__db_e_remfile(dbenv);
+ } else {
+ /* Unlock the environment. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Discard the environment. */
+ (void)__db_e_detach(dbenv, 0);
+
+ ret = EBUSY;
+ }
+
+err:
+ return (ret);
+}
+
+/*
+ * __db_e_remfile --
+ * Discard any region files in the filesystem.
+ */
+static int
+__db_e_remfile(dbenv)
+ DB_ENV *dbenv;
+{
+ static char *old_region_names[] = {
+ "__db_lock.share",
+ "__db_log.share",
+ "__db_mpool.share",
+ "__db_txn.share",
+ NULL,
+ };
+ int cnt, fcnt, lastrm, ret;
+ u_int8_t saved_byte;
+ const char *dir;
+ char *p, **names, *path, buf[sizeof(DB_REGION_FMT) + 20];
+
+ /* Get the full path of a file in the environment. */
+ (void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV);
+ if ((ret =
+ __db_appname(dbenv, DB_APP_NONE, NULL, buf, 0, NULL, &path)) != 0)
+ return (ret);
+
+ /* Get the parent directory for the environment. */
+ if ((p = __db_rpath(path)) == NULL) {
+ p = path;
+ saved_byte = *p;
+
+ dir = PATH_DOT;
+ } else {
+ saved_byte = *p;
+ *p = '\0';
+
+ dir = path;
+ }
+
+ /* Get the list of file names. */
+ ret = __os_dirlist(dbenv, dir, &names, &fcnt);
+
+ /* Restore the path, and free it. */
+ *p = saved_byte;
+ __os_freestr(path);
+
+ if (ret != 0) {
+ __db_err(dbenv, "%s: %s", dir, db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Search for valid region names, and remove them. We remove the
+ * environment region last, because it's the key to this whole mess.
+ */
+ for (lastrm = -1, cnt = fcnt; --cnt >= 0;) {
+ if (strlen(names[cnt]) != DB_REGION_NAME_LENGTH ||
+ memcmp(names[cnt], DB_REGION_FMT, DB_REGION_NAME_NUM) != 0)
+ continue;
+ if (strcmp(names[cnt], DB_REGION_ENV) == 0) {
+ lastrm = cnt;
+ continue;
+ }
+ for (p = names[cnt] + DB_REGION_NAME_NUM;
+ *p != '\0' && isdigit((int)*p); ++p)
+ ;
+ if (*p != '\0')
+ continue;
+
+ if (__db_appname(dbenv,
+ DB_APP_NONE, NULL, names[cnt], 0, NULL, &path) == 0) {
+ (void)__os_unlink(dbenv, path);
+ __os_freestr(path);
+ }
+ }
+
+ if (lastrm != -1)
+ if (__db_appname(dbenv,
+ DB_APP_NONE, NULL, names[lastrm], 0, NULL, &path) == 0) {
+ (void)__os_unlink(dbenv, path);
+ __os_freestr(path);
+ }
+ __os_dirfree(names, fcnt);
+
+ /*
+ * !!!
+ * Backward compatibility -- remove region files from releases
+ * before 2.8.XX.
+ */
+ for (names = (char **)old_region_names; *names != NULL; ++names)
+ if (__db_appname(dbenv,
+ DB_APP_NONE, NULL, *names, 0, NULL, &path) == 0) {
+ (void)__os_unlink(dbenv, path);
+ __os_freestr(path);
+ }
+
+ return (0);
+}
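
Region files are discovered above by listing the environment directory and
matching names against the region-name format. A plain POSIX sketch of the same
scan-and-match idea; the "__db." prefix is an assumption standing in for
DB_REGION_FMT, and nothing is actually unlinked.

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Sketch: list a directory and print the entries that look like region
     * files, using an assumed "__db." name prefix.
     */
    int
    main(int argc, char *argv[])
    {
    	DIR *dirp;
    	struct dirent *dp;
    	const char *dir;

    	dir = argc > 1 ? argv[1] : ".";
    	if ((dirp = opendir(dir)) == NULL) {
    		perror(dir);
    		return (1);
    	}
    	while ((dp = readdir(dirp)) != NULL)
    		if (strncmp(dp->d_name, "__db.", 5) == 0)
    			printf("would remove: %s/%s\n", dir, dp->d_name);
    	(void)closedir(dirp);
    	return (0);
    }
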
+
+/*
+ * __db_e_stat
+ * Statistics for the environment.
+ *
+ * PUBLIC: int __db_e_stat __P((DB_ENV *, REGENV *, REGION *, int *));
+ */
+int
+__db_e_stat(dbenv, arg_renv, arg_regions, arg_regions_cnt)
+ DB_ENV *dbenv;
+ REGENV *arg_renv;
+ REGION *arg_regions;
+ int *arg_regions_cnt;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ REGION *rp;
+ int n;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ rp = infop->rp;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &rp->mutex, dbenv->lockfhp);
+
+ *arg_renv = *renv;
+
+ for (n = 0, rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+ n < *arg_regions_cnt && rp != NULL;
+ ++n, rp = SH_LIST_NEXT(rp, q, __db_region))
+ arg_regions[n] = *rp;
+
+ /* Release the lock. */
+ rp = infop->rp;
+ MUTEX_UNLOCK(dbenv, &rp->mutex);
+
+ *arg_regions_cnt = n == 0 ? n : n - 1;
+
+ return (0);
+}
+
+/*
+ * __db_r_attach
+ * Join/create a region.
+ *
+ * PUBLIC: int __db_r_attach __P((DB_ENV *, REGINFO *, size_t));
+ */
+int
+__db_r_attach(dbenv, infop, size)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ size_t size;
+{
+ REGENV *renv;
+ REGION *rp;
+ int ret;
+ char buf[sizeof(DB_REGION_FMT) + 20];
+
+ renv = ((REGINFO *)dbenv->reginfo)->primary;
+ F_CLR(infop, REGION_CREATE);
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+
+ /* Find or create a REGION structure for this region. */
+ if ((ret = __db_des_get(dbenv, dbenv->reginfo, infop, &rp)) != 0) {
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+ }
+ infop->rp = rp;
+ infop->type = rp->type;
+ infop->id = rp->id;
+
+ /* If we're creating the region, set the desired size. */
+ if (F_ISSET(infop, REGION_CREATE))
+ rp->size = size;
+
+ /* Join/create the underlying region. */
+ (void)snprintf(buf, sizeof(buf), DB_REGION_FMT, infop->id);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_NONE, NULL, buf, 0, NULL, &infop->name)) != 0)
+ goto err;
+ if ((ret = __os_r_attach(dbenv, infop, rp)) != 0)
+ goto err;
+
+ /*
+ * Fault the pages into memory. Note, do this BEFORE we initialize
+ * anything because we're writing pages in created regions, not just
+ * reading them.
+ */
+ (void)__db_faultmem(infop->addr,
+ rp->size, F_ISSET(infop, REGION_CREATE));
+
+ /*
+ * !!!
+ * The underlying layer may have just decided that we are going
+ * to create the region. There are various system issues that
+ * can result in a useless region that requires re-initialization.
+ *
+ * If we created the region, initialize it for allocation.
+ */
+ if (F_ISSET(infop, REGION_CREATE)) {
+ ((REGION *)(infop->addr))->magic = DB_REGION_MAGIC;
+
+ (void)__db_shalloc_init(infop->addr, rp->size);
+ }
+
+ /*
+ * If the underlying REGION isn't the environment, acquire a lock
+ * for it and release our lock on the environment.
+ */
+ if (infop->type != REGION_TYPE_ENV) {
+ MUTEX_LOCK(dbenv, &rp->mutex, dbenv->lockfhp);
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ }
+
+ return (0);
+
+ /* Discard the underlying region. */
+err: if (infop->addr != NULL)
+ (void)__os_r_detach(dbenv,
+ infop, F_ISSET(infop, REGION_CREATE));
+ infop->rp = NULL;
+ infop->id = INVALID_REGION_ID;
+
+ /* Discard the REGION structure if we created it. */
+ if (F_ISSET(infop, REGION_CREATE))
+ (void)__db_des_destroy(dbenv, rp);
+
+ /* Release the environment lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ return (ret);
+}
+
+/*
+ * __db_r_detach --
+ * Detach from a region.
+ *
+ * PUBLIC: int __db_r_detach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__db_r_detach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGENV *renv;
+ REGION *rp;
+ int ret, t_ret;
+
+ renv = ((REGINFO *)dbenv->reginfo)->primary;
+ rp = infop->rp;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+
+ /* Acquire the lock for the REGION. */
+ MUTEX_LOCK(dbenv, &rp->mutex, dbenv->lockfhp);
+
+ /*
+ * We need to call destroy on per-subsystem info before
+ * we free the memory associated with the region.
+ */
+ if (destroy)
+ __db_region_destroy(dbenv, infop);
+
+ /* Detach from the underlying OS region. */
+ ret = __os_r_detach(dbenv, infop, destroy);
+
+ /* Release the REGION lock. */
+ MUTEX_UNLOCK(dbenv, &rp->mutex);
+
+ /* If we destroyed the region, discard the REGION structure. */
+ if (destroy &&
+ ((t_ret = __db_des_destroy(dbenv, rp)) != 0) && ret == 0)
+ ret = t_ret;
+
+ /* Release the environment lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Destroy the structure. */
+ if (infop->name != NULL)
+ __os_freestr(infop->name);
+
+ return (ret);
+}
+
+/*
+ * __db_des_get --
+ * Return a reference to the shared information for a REGION,
+ * optionally creating a new entry.
+ */
+static int
+__db_des_get(dbenv, env_infop, infop, rpp)
+ DB_ENV *dbenv;
+ REGINFO *env_infop, *infop;
+ REGION **rpp;
+{
+ REGENV *renv;
+ REGION *rp, *first_type;
+ u_int32_t maxid;
+ int ret;
+
+ /*
+ * !!!
+ * Called with the environment already locked.
+ */
+ *rpp = NULL;
+ renv = env_infop->primary;
+
+ /*
+ * If the caller wants to join a region, walk through the existing
+ * regions looking for a matching ID (if ID specified) or matching
+ * type (if type specified). If we return based on a matching type
+ * return the "primary" region, that is, the first region that was
+ * created of this type.
+ *
+	 * Track the maximum region ID so we can allocate a new region;
+ * note that we have to start at 1 because the primary environment
+ * uses ID == 1.
+ */
+ maxid = REGION_ID_ENV;
+ for (first_type = NULL,
+ rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+ rp != NULL; rp = SH_LIST_NEXT(rp, q, __db_region)) {
+ if (infop->id != INVALID_REGION_ID) {
+ if (infop->id == rp->id)
+ break;
+ continue;
+ }
+ if (infop->type == rp->type &&
+ F_ISSET(infop, REGION_JOIN_OK) &&
+ (first_type == NULL || first_type->id > rp->id))
+ first_type = rp;
+
+ if (rp->id > maxid)
+ maxid = rp->id;
+ }
+ if (rp == NULL)
+ rp = first_type;
+
+ /*
+ * If we didn't find a region and we can't create the region, fail.
+ * The caller generates any error message.
+ */
+ if (rp == NULL && !F_ISSET(infop, REGION_CREATE_OK))
+ return (ENOENT);
+
+ /*
+ * If we didn't find a region, create and initialize a REGION structure
+ * for the caller. If id was set, use that value, otherwise we use the
+ * next available ID.
+ */
+ if (rp == NULL) {
+ if ((ret = __db_shalloc(env_infop->addr,
+ sizeof(REGION), MUTEX_ALIGN, &rp)) != 0)
+ return (ret);
+
+ /* Initialize the region. */
+ memset(rp, 0, sizeof(*rp));
+ if ((ret = __db_mutex_init(dbenv, &rp->mutex,
+ R_OFFSET(env_infop, &rp->mutex) + DB_FCNTL_OFF_GEN,
+ 0)) != 0) {
+ __db_shalloc_free(env_infop->addr, rp);
+ return (ret);
+ }
+ rp->segid = INVALID_REGION_SEGID;
+
+ /*
+ * Set the type and ID; if no region ID was specified,
+ * allocate one.
+ */
+ rp->type = infop->type;
+ rp->id = infop->id == INVALID_REGION_ID ? maxid + 1 : infop->id;
+
+ SH_LIST_INSERT_HEAD(&renv->regionq, rp, q, __db_region);
+ F_SET(infop, REGION_CREATE);
+ }
+
+ *rpp = rp;
+ return (0);
+}
+
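The scan in __db_des_get is terse; the standalone sketch below (editorial, not
Berkeley DB code) restates the same find-or-create policy over a plain singly
linked list: match by explicit id when one is given, otherwise prefer the
lowest-id region of the requested type, and track the maximum id so a newly
created region can be numbered.

struct reg {				/* Simplified stand-in for REGION. */
	int id, type;
	struct reg *next;
};

static struct reg *
find_or_next_id(head, want_id, want_type, next_idp)
	struct reg *head;
	int want_id, want_type, *next_idp;
{
	struct reg *first_type, *rp;
	int maxid;

	first_type = NULL;
	maxid = 1;			/* ID 1 is the primary environment. */
	for (rp = head; rp != NULL; rp = rp->next) {
		if (want_id != -1) {	/* -1 stands in for INVALID_REGION_ID. */
			if (rp->id == want_id)
				return (rp);
			continue;
		}
		if (rp->type == want_type &&
		    (first_type == NULL || first_type->id > rp->id))
			first_type = rp;
		if (rp->id > maxid)
			maxid = rp->id;
	}
	*next_idp = maxid + 1;		/* Used only if the caller creates. */
	return (first_type);		/* NULL means "not found". */
}
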
+/*
+ * __db_des_destroy --
+ * Destroy a reference to a REGION.
+ */
+static int
+__db_des_destroy(dbenv, rp)
+ DB_ENV *dbenv;
+ REGION *rp;
+{
+ REGINFO *infop;
+
+ /*
+ * !!!
+ * Called with the environment already locked.
+ */
+ infop = dbenv->reginfo;
+
+ SH_LIST_REMOVE(rp, q, __db_region);
+ __db_mutex_destroy(&rp->mutex);
+ __db_shalloc_free(infop->addr, rp);
+
+ return (0);
+}
+
+/*
+ * __db_faultmem --
+ * Fault the region into memory.
+ */
+static int
+__db_faultmem(addr, size, created)
+ void *addr;
+ size_t size;
+ int created;
+{
+ int ret;
+ u_int8_t *p, *t;
+
+ /*
+ * It's sometimes significantly faster to page-fault in all of the
+ * region's pages before we run the application, as we see nasty
+ * side-effects when we page-fault while holding various locks, i.e.,
+ * the lock takes a long time to acquire because of the underlying
+ * page fault, and the other threads convoy behind the lock holder.
+ *
+ * If we created the region, we write a non-zero value so that the
+ * system can't cheat. If we're just joining the region, we can
+ * only read the value and try to confuse the compiler sufficiently
+ * that it doesn't figure out that we're never really using it.
+ */
+ ret = 0;
+ if (DB_GLOBAL(db_region_init)) {
+ if (created)
+ for (p = addr, t = (u_int8_t *)addr + size;
+ p < t; p += OS_VMPAGESIZE)
+ p[0] = 0xdb;
+ else
+ for (p = addr, t = (u_int8_t *)addr + size;
+ p < t; p += OS_VMPAGESIZE)
+ ret |= p[0];
+ }
+
+ return (ret);
+}
+
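Outside Berkeley DB, the same pre-faulting trick can be written as below. This
is an editorial sketch that assumes a POSIX system: sysconf(_SC_PAGESIZE)
substitutes for OS_VMPAGESIZE, and a volatile pointer replaces the OR
accumulator as the way to keep the reads from being optimized away.

#include <stddef.h>
#include <unistd.h>

static void
prefault(addr, size, created)
	void *addr;
	size_t size;
	int created;
{
	volatile unsigned char *p;
	size_t i, pagesize;

	pagesize = (size_t)sysconf(_SC_PAGESIZE);
	p = addr;
	for (i = 0; i < size; i += pagesize)
		if (created)
			p[i] = 0xdb;	/* Write: dirty the page up front. */
		else
			(void)p[i];	/* Read: volatile keeps the access. */
}
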
+/*
+ * __db_region_destroy --
+ * Destroy per-subsystem region information.
+ * Called with the region already locked.
+ */
+static void
+__db_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ __lock_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_MPOOL:
+ __mpool_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_ENV:
+ case REGION_TYPE_LOG:
+ case REGION_TYPE_MUTEX:
+ case REGION_TYPE_TXN:
+ break;
+ default:
+ DB_ASSERT(0);
+ break;
+ }
+}
diff --git a/bdb/examples_c/README b/bdb/examples_c/README
new file mode 100644
index 00000000000..f59ae00a608
--- /dev/null
+++ b/bdb/examples_c/README
@@ -0,0 +1,23 @@
+# $Id: README,v 11.3 2000/12/13 06:32:29 krinsky Exp $
+
+ex_access.c Using just the DB access methods.
+
+ex_btrec.c Using the BTREE access method with record numbers.
+
+ex_env.c Setting up the DB environment.
+
+ex_lock.c Locking.
+
+ex_mpool.c Shared memory buffer pools.
+
+ex_tpcb.c TPC/B.
+ Ex_tpcb sets up a framework in which to run a TPC/B test.
+ Database initialization (the -i flag) and running the
+ benchmark (-n flag) must take place separately (i.e.,
+ first create the database, then run 1 or more copies of
+ the benchmark). Furthermore, when running more than one
+ TPCB process, it is necessary to run the deadlock detector
+ (db_deadlock), since it is possible for concurrent tpcb
+	processes to deadlock. For performance measurement, it is
+	also beneficial to run the db_checkpoint process.
diff --git a/bdb/examples_c/ex_access.c b/bdb/examples_c/ex_access.c
new file mode 100644
index 00000000000..3448daf43a3
--- /dev/null
+++ b/bdb/examples_c/ex_access.c
@@ -0,0 +1,171 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_access.c,v 11.7 2000/05/22 15:17:03 sue Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+#ifdef HAVE_VXWORKS
+#include "stdio.h"
+#define DATABASE "/vxtmp/vxtmp/access.db"
+#define ERROR_RETURN ERROR
+#else
+#define DATABASE "access.db"
+#define ERROR_RETURN 1
+int main __P((int, char *[]));
+void usage __P((char *));
+#endif
+
+int ex_access __P((void));
+
+#ifndef HAVE_VXWORKS
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ int ch;
+
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage(argv[0]);
+ }
+ argc -= optind;
+ argv += optind;
+
+ return (ex_access());
+}
+
+void
+usage(progname)
+ char *progname;
+{
+ (void)fprintf(stderr, "usage: %s\n", progname);
+ exit(1);
+}
+#endif
+
+int
+ex_access()
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ u_int32_t len;
+ int ret;
+ char *p, *t, buf[1024], rbuf[1024];
+ const char *progname = "ex_access"; /* Program name. */
+
+ /* Remove the previous database. */
+ (void)unlink(DATABASE);
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if ((ret = dbp->set_cachesize(dbp, 0, 32 * 1024, 0)) != 0) {
+ dbp->err(dbp, ret, "set_cachesize");
+ goto err1;
+ }
+ if ((ret =
+ dbp->open(dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", DATABASE);
+ goto err1;
+ }
+
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (;;) {
+ printf("input> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ if ((len = strlen(buf)) <= 1)
+ continue;
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ switch (ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ break;
+ }
+ }
+ printf("\n");
+
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ goto err1;
+ }
+
+ /* Initialize the key/data pair so the flags aren't set. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Walk through the database and print out the key/data pairs. */
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+ printf("%.*s : %.*s\n",
+ (int)key.size, (char *)key.data,
+ (int)data.size, (char *)data.data);
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ goto err2;
+ }
+
+ /* Close everything down. */
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ goto err1;
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ return (0);
+
+err2: (void)dbcp->c_close(dbcp);
+err1: (void)dbp->close(dbp, 0);
+ return (ERROR_RETURN);
+}
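The reversal idiom above (repeated in ex_btrec.c and ex_dbclient.c below) is
easy to misread: fgets() leaves the trailing newline in buf, so the copy starts
at buf[len - 2] and the stored DBT size is len - 1, which excludes both the
newline and the terminating NUL. A throwaway check, not part of the diff:

#include <stdio.h>
#include <string.h>

int
main()
{
	char buf[] = "hello\n", rbuf[sizeof(buf)];
	char *p, *t;
	size_t len;

	len = strlen(buf);		/* 6: five letters plus '\n'. */
	for (t = rbuf, p = buf + (len - 2); p >= buf;)	/* Same loop as above. */
		*t++ = *p--;
	*t = '\0';
	/* Prints: hello -> olleh (size 5) */
	printf("hello -> %s (size %d)\n", rbuf, (int)(len - 1));
	return (0);
}
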
diff --git a/bdb/examples_c/ex_btrec.c b/bdb/examples_c/ex_btrec.c
new file mode 100644
index 00000000000..b74f16b83e4
--- /dev/null
+++ b/bdb/examples_c/ex_btrec.c
@@ -0,0 +1,241 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_btrec.c,v 11.8 2000/05/22 15:17:03 sue Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+#ifdef HAVE_VXWORKS
+#define DATABASE "/vxtmp/vxtmp/access.db"
+#define WORDLIST "/vxtmp/vxtmp/wordlist"
+#define ERROR_RETURN ERROR
+#else
+#define DATABASE "access.db"
+#define WORDLIST "../test/wordlist"
+#define ERROR_RETURN 1
+int main __P((int, char *[]));
+void usage __P((char *));
+#endif
+
+int ex_btrec __P((void));
+void show __P((char *, DBT *, DBT *));
+
+#ifndef HAVE_VXWORKS
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ int ch;
+
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage(argv[0]);
+ }
+ argc -= optind;
+ argv += optind;
+
+ return (ex_btrec());
+}
+
+void
+usage(progname)
+ char *progname;
+{
+ (void)fprintf(stderr, "usage: %s\n", progname);
+ exit(1);
+}
+#endif
+
+int
+ex_btrec()
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ DB_BTREE_STAT *statp;
+ FILE *fp;
+ db_recno_t recno;
+ u_int32_t len;
+ int cnt, ret;
+ char *p, *t, buf[1024], rbuf[1024];
+ const char *progname = "ex_btrec"; /* Program name. */
+
+ /* Open the word database. */
+ if ((fp = fopen(WORDLIST, "r")) == NULL) {
+ fprintf(stderr, "%s: open %s: %s\n",
+ progname, WORDLIST, db_strerror(errno));
+ return (ERROR_RETURN);
+ }
+
+ /* Remove the previous database. */
+ (void)unlink(DATABASE);
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname); /* 1K page sizes. */
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ return (ERROR_RETURN);
+ } /* Record numbers. */
+ if ((ret = dbp->set_flags(dbp, DB_RECNUM)) != 0) {
+ dbp->err(dbp, ret, "set_flags: DB_RECNUM");
+ return (ERROR_RETURN);
+ }
+ if ((ret =
+ dbp->open(dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "open: %s", DATABASE);
+ return (ERROR_RETURN);
+ }
+
+ /*
+ * Insert records into the database, where the key is the word
+ * preceded by its record number, and the data is the same, but
+ * in reverse order.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (cnt = 1; cnt <= 1000; ++cnt) {
+ (void)sprintf(buf, "%04d_", cnt);
+ if (fgets(buf + 4, sizeof(buf) - 4, fp) == NULL)
+ break;
+ len = strlen(buf);
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ if ((ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) != 0) {
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ }
+ }
+
+ /* Close the word database. */
+ (void)fclose(fp);
+
+ /* Print out the number of records in the database. */
+ if ((ret = dbp->stat(dbp, &statp, NULL, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err1;
+ }
+ printf("%s: database contains %lu records\n",
+ progname, (u_long)statp->bt_ndata);
+ free(statp);
+
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ goto err1;
+ }
+
+ /*
+ * Prompt the user for a record number, then retrieve and display
+ * that record.
+ */
+ for (;;) {
+ /* Get a record number. */
+ printf("recno #> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ recno = atoi(buf);
+
+ /*
+		 * Reset the key each time; the DBcursor->c_get() routine
+		 * returns both the key and the data, not just the key!
+ */
+ key.data = &recno;
+ key.size = sizeof(recno);
+ if ((ret = dbcp->c_get(dbcp, &key, &data, DB_SET_RECNO)) != 0)
+ goto get_err;
+
+ /* Display the key and data. */
+ show("k/d\t", &key, &data);
+
+ /* Move the cursor a record forward. */
+ if ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) != 0)
+ goto get_err;
+
+ /* Display the key and data. */
+ show("next\t", &key, &data);
+
+ /*
+ * Retrieve the record number for the following record into
+ * local memory.
+ */
+ data.data = &recno;
+ data.size = sizeof(recno);
+ data.ulen = sizeof(recno);
+ data.flags |= DB_DBT_USERMEM;
+ if ((ret = dbcp->c_get(dbcp, &key, &data, DB_GET_RECNO)) != 0) {
+get_err: dbp->err(dbp, ret, "DBcursor->get");
+ if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+ goto err2;
+ } else
+ printf("retrieved recno: %lu\n", (u_long)recno);
+
+ /* Reset the data DBT. */
+ memset(&data, 0, sizeof(data));
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ goto err1;
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+
+ return (0);
+
+err2: (void)dbcp->c_close(dbcp);
+err1: (void)dbp->close(dbp, 0);
+ return (ret);
+
+}
+
+/*
+ * show --
+ * Display a key/data pair.
+ */
+void
+show(msg, key, data)
+ DBT *key, *data;
+ char *msg;
+{
+ printf("%s%.*s : %.*s\n", msg,
+ (int)key->size, (char *)key->data,
+ (int)data->size, (char *)data->data);
+}
diff --git a/bdb/examples_c/ex_dbclient.c b/bdb/examples_c/ex_dbclient.c
new file mode 100644
index 00000000000..27461a8923f
--- /dev/null
+++ b/bdb/examples_c/ex_dbclient.c
@@ -0,0 +1,248 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_dbclient.c,v 1.12 2000/10/26 14:13:05 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+#define DATABASE_HOME "database"
+
+#define DATABASE "access.db"
+
+int db_clientrun __P((DB_ENV *, char *));
+int ex_dbclient_run __P((char *, FILE *, char *, char *));
+#ifdef HAVE_VXWORKS
+int ex_dbclient __P((char *));
+#define ERROR_RETURN ERROR
+#define VXSHM_KEY 10
+#else
+int main __P((int, char *[]));
+#define ERROR_RETURN 1
+#endif
+
+/*
+ * An example of a program creating/configuring a Berkeley DB environment.
+ */
+#ifndef HAVE_VXWORKS
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ char *home;
+ int ret;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s hostname\n",argv[0]);
+ exit(1);
+ }
+ /*
+ * All of the shared database files live in DATABASE_HOME, but
+ * data files will live in CONFIG_DATA_DIR.
+ */
+ home = DATABASE_HOME;
+
+ if ((ret = ex_dbclient_run(home, stderr, argv[1], argv[0])) != 0)
+ return (ret);
+
+ return (0);
+}
+#endif
+
+int
+ex_dbclient(host)
+ char *host;
+{
+ char *home;
+ char *progname = "ex_dbclient"; /* Program name. */
+ int ret;
+
+ /*
+ * All of the shared database files live in DATABASE_HOME, but
+ * data files will live in CONFIG_DATA_DIR.
+ */
+ home = DATABASE_HOME;
+
+ if ((ret = ex_dbclient_run(home, stderr, host, progname)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+int
+ex_dbclient_run(home, errfp, host, progname)
+ char *home, *host, *progname;
+ FILE *errfp;
+{
+ DB_ENV *dbenv;
+ int ret, retry;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, DB_CLIENT)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+#ifdef HAVE_VXWORKS
+ if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+#endif
+ retry = 0;
+retry:
+ while (retry < 5) {
+ /*
+ * Set the server host we are talking to.
+ */
+ if ((ret =
+ dbenv->set_server(dbenv, host, 10000, 10000, 0)) != 0) {
+ fprintf(stderr, "Try %d: DBENV->set_server: %s\n",
+ retry, db_strerror(ret));
+ retry++;
+ if ((ret = __os_sleep(dbenv, 15, 0)) != 0)
+ return (ret);
+ } else
+ break;
+ }
+
+ if (retry >= 5) {
+ fprintf(stderr, "DBENV->set_server: %s\n", db_strerror(ret));
+ dbenv->close(dbenv, 0);
+ return (ERROR_RETURN);
+ }
+ /*
+ * We want to specify the shared memory buffer pool cachesize,
+ * but everything else is the default.
+ */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, 64 * 1024, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ dbenv->close(dbenv, 0);
+ return (ERROR_RETURN);
+ }
+ /*
+ * We have multiple processes reading/writing these files, so
+	 * we need concurrency control, a shared buffer pool, and
+	 * transactions (note the DB_INIT_TXN flag below), but not logging.
+ */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+ dbenv->err(dbenv, ret, "environment open: %s", home);
+ dbenv->close(dbenv, 0);
+ if (ret == DB_NOSERVER)
+ goto retry;
+ return (ERROR_RETURN);
+ }
+
+ ret = db_clientrun(dbenv, progname);
+ printf("db_clientrun returned %d\n", ret);
+ if (ret == DB_NOSERVER)
+ goto retry;
+
+ /* Close the handle. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "DBENV->close: %s\n", db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ return (0);
+}
+
+int
+db_clientrun(dbenv, progname)
+ DB_ENV *dbenv;
+ char *progname;
+{
+ DB *dbp;
+ DBT key, data;
+ u_int32_t len;
+ int ret;
+ char *p, *t, buf[1024], rbuf[1024];
+
+ /* Remove the previous database. */
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (ret);
+ }
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if ((ret =
+ dbp->open(dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", DATABASE);
+ goto err1;
+ }
+
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (;;) {
+ printf("input> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ if ((len = strlen(buf)) <= 1)
+ continue;
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ switch (ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ break;
+ }
+ memset(&data, 0, sizeof(DBT));
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ printf("%.*s : %.*s\n",
+ (int)key.size, (char *)key.data,
+ (int)data.size, (char *)data.data);
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->get");
+ break;
+ }
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ return (0);
+
+err1: (void)dbp->close(dbp, 0);
+ return (ret);
+}
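ex_dbclient.c retries DBENV->set_server up to five times with a 15-second pause,
and jumps back to the same loop whenever DB_NOSERVER comes back from a later
call. Pulled out of context, that bounded-retry shape is just the helper below;
this is an editorial sketch (retry_op is a made-up name, and the plain sleep()
stands in for __os_sleep), with the try count and delay left to the caller
rather than fixed by Berkeley DB.

#include <unistd.h>

static int
retry_op(op, arg, tries, seconds)
	int (*op)(void *);
	void *arg;
	int tries;
	unsigned int seconds;
{
	int ret;

	ret = -1;
	while (tries-- > 0) {
		if ((ret = op(arg)) == 0)
			return (0);
		(void)sleep(seconds);	/* Back off before the next attempt. */
	}
	return (ret);			/* Last failure wins. */
}
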
diff --git a/bdb/examples_c/ex_env.c b/bdb/examples_c/ex_env.c
new file mode 100644
index 00000000000..5490723a31c
--- /dev/null
+++ b/bdb/examples_c/ex_env.c
@@ -0,0 +1,170 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_env.c,v 11.18 2000/10/27 20:32:00 dda Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#endif
+
+#include <db.h>
+
+#ifdef macintosh
+#define DATABASE_HOME ":database"
+#define CONFIG_DATA_DIR ":database"
+#else
+#ifdef DB_WIN32
+#define DATABASE_HOME "\\tmp\\database"
+#define CONFIG_DATA_DIR "\\database\\files"
+#else
+#ifdef HAVE_VXWORKS
+#define DATABASE_HOME "/ata0/vxtmp/database"
+#define CONFIG_DATA_DIR "/vxtmp/vxtmp/database/files"
+#else
+#define DATABASE_HOME "/tmp/database"
+#define CONFIG_DATA_DIR "/database/files"
+#endif
+#endif
+#endif
+
+int db_setup __P((char *, char *, FILE *, char *));
+int db_teardown __P((char *, char *, FILE *, char *));
+#ifdef HAVE_VXWORKS
+int ex_env __P((void));
+#define ERROR_RETURN ERROR
+#define VXSHM_KEY 11
+#else
+int main __P((void));
+#define ERROR_RETURN 1
+#endif
+
+/*
+ * An example of a program creating/configuring a Berkeley DB environment.
+ */
+int
+#ifdef HAVE_VXWORKS
+ex_env()
+#else
+main()
+#endif
+{
+ int ret;
+ char *data_dir, *home;
+ char *progname = "ex_env"; /* Program name. */
+
+ /*
+ * All of the shared database files live in DATABASE_HOME, but
+ * data files will live in CONFIG_DATA_DIR.
+ */
+ home = DATABASE_HOME;
+ data_dir = CONFIG_DATA_DIR;
+
+ printf("Setup env\n");
+ if ((ret = db_setup(home, data_dir, stderr, progname)) != 0)
+ return (ret);
+
+ printf("Teardown env\n");
+ if ((ret = db_teardown(home, data_dir, stderr, progname)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+int
+db_setup(home, data_dir, errfp, progname)
+ char *home, *data_dir, *progname;
+ FILE *errfp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ dbenv->set_errfile(dbenv, errfp);
+ dbenv->set_errpfx(dbenv, progname);
+
+#ifdef HAVE_VXWORKS
+ /* VxWorks needs to specify a base segment ID. */
+ if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+#endif
+
+ /*
+ * We want to specify the shared memory buffer pool cachesize,
+ * but everything else is the default.
+ */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, 64 * 1024, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ dbenv->close(dbenv, 0);
+ return (ERROR_RETURN);
+ }
+
+ /* Databases are in a subdirectory. */
+ (void)dbenv->set_data_dir(dbenv, data_dir);
+
+ /* Open the environment with full transactional support. */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN,
+ 0)) != 0) {
+ dbenv->err(dbenv, ret, "environment open: %s", home);
+ dbenv->close(dbenv, 0);
+ return (ERROR_RETURN);
+ }
+
+ /* Do something interesting... */
+
+ /* Close the handle. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "DBENV->close: %s\n", db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ return (0);
+}
+
+int
+db_teardown(home, data_dir, errfp, progname)
+ char *home, *data_dir, *progname;
+ FILE *errfp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /* Remove the shared database regions. */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ dbenv->set_errfile(dbenv, errfp);
+ dbenv->set_errpfx(dbenv, progname);
+#ifdef HAVE_VXWORKS
+ if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+#endif
+
+ (void)dbenv->set_data_dir(dbenv, data_dir);
+ if ((ret = dbenv->remove(dbenv, home, 0)) != 0) {
+ fprintf(stderr, "DBENV->remove: %s\n", db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ return (0);
+}
diff --git a/bdb/examples_c/ex_lock.c b/bdb/examples_c/ex_lock.c
new file mode 100644
index 00000000000..e858be6b330
--- /dev/null
+++ b/bdb/examples_c/ex_lock.c
@@ -0,0 +1,235 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_lock.c,v 11.6 2001/01/04 14:23:29 dda Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+void db_init __P((char *, u_int32_t, int));
+int main __P((int, char *[]));
+void usage __P((void));
+
+DB_ENV *dbenv;
+const char
+ *progname = "ex_lock"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DBT lock_dbt;
+ DB_LOCK lock;
+ DB_LOCK *locks;
+ db_lockmode_t lock_type;
+ long held;
+ u_int32_t len, locker, maxlocks;
+ int ch, do_unlink, did_get, i, lockid, lockcount, ret;
+ char *home, opbuf[16], objbuf[1024], lockbuf[16];
+
+ home = "TESTDIR";
+ maxlocks = 0;
+ do_unlink = 0;
+ while ((ch = getopt(argc, argv, "h:m:u")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'm':
+ if ((i = atoi(optarg)) <= 0)
+ usage();
+ maxlocks = (u_int32_t)i; /* XXX: possible overflow. */
+ break;
+ case 'u':
+ do_unlink = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ usage();
+
+ /* Initialize the database environment. */
+ db_init(home, maxlocks, do_unlink);
+
+ locks = 0;
+ lockcount = 0;
+
+ /*
+ * Accept lock requests.
+ */
+ if ((ret = lock_id(dbenv, &locker)) != 0) {
+ dbenv->err(dbenv, ret, "unable to get locker id");
+ (void)dbenv->close(dbenv, 0);
+ exit (1);
+ }
+ lockid = -1;
+
+ memset(&lock_dbt, 0, sizeof(lock_dbt));
+ for (held = 0, did_get = 0;;) {
+ printf("Operation get/release [get]> ");
+ fflush(stdout);
+ if (fgets(opbuf, sizeof(opbuf), stdin) == NULL)
+ break;
+ if ((len = strlen(opbuf)) <= 1 || strcmp(opbuf, "get\n") == 0) {
+ /* Acquire a lock. */
+ printf("input object (text string) to lock> ");
+ fflush(stdout);
+ if (fgets(objbuf, sizeof(objbuf), stdin) == NULL)
+ break;
+ if ((len = strlen(objbuf)) <= 1)
+ continue;
+
+ do {
+ printf("lock type read/write [read]> ");
+ fflush(stdout);
+ if (fgets(lockbuf,
+ sizeof(lockbuf), stdin) == NULL)
+ break;
+ len = strlen(lockbuf);
+ } while (len > 1 &&
+ strcmp(lockbuf, "read\n") != 0 &&
+ strcmp(lockbuf, "write\n") != 0);
+ if (len == 1 || strcmp(lockbuf, "read\n") == 0)
+ lock_type = DB_LOCK_READ;
+ else
+ lock_type = DB_LOCK_WRITE;
+
+ lock_dbt.data = objbuf;
+ lock_dbt.size = strlen(objbuf);
+ ret = lock_get(dbenv, locker,
+ DB_LOCK_NOWAIT, &lock_dbt, lock_type, &lock);
+ if (ret == 0) {
+ did_get = 1;
+ lockid = lockcount++;
+ if (locks == NULL)
+ locks =
+ (DB_LOCK *)malloc(sizeof(DB_LOCK));
+ else
+ locks = (DB_LOCK *)realloc(locks,
+ lockcount * sizeof(DB_LOCK));
+ locks[lockid] = lock;
+ }
+ } else {
+ /* Release a lock. */
+ do {
+ printf("input lock to release> ");
+ fflush(stdout);
+ if (fgets(objbuf,
+ sizeof(objbuf), stdin) == NULL)
+ break;
+ } while ((len = strlen(objbuf)) <= 1);
+ lockid = strtol(objbuf, NULL, 16);
+ if (lockid < 0 || lockid >= lockcount) {
+ printf("Lock #%d out of range\n", lockid);
+ continue;
+ }
+ lock = locks[lockid];
+ ret = lock_put(dbenv, &lock);
+ did_get = 0;
+ }
+ switch (ret) {
+ case 0:
+ printf("Lock #%d %s\n", lockid,
+ did_get ? "granted" : "released");
+ held += did_get ? 1 : -1;
+ break;
+ case DB_LOCK_NOTGRANTED:
+ dbenv->err(dbenv, ret, NULL);
+ break;
+ case DB_LOCK_DEADLOCK:
+ dbenv->err(dbenv, ret,
+ "lock_%s", did_get ? "get" : "put");
+ break;
+ default:
+ dbenv->err(dbenv, ret,
+ "lock_%s", did_get ? "get" : "put");
+ (void)dbenv->close(dbenv, 0);
+ exit (1);
+ }
+ }
+
+ printf("\nClosing lock region %ld locks held\n", held);
+
+ if (locks != NULL)
+ free(locks);
+
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+void
+db_init(home, maxlocks, do_unlink)
+ char *home;
+ u_int32_t maxlocks;
+ int do_unlink;
+{
+ int ret;
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ exit (1);
+ }
+
+ if (do_unlink) {
+ if ((ret = dbenv->remove(dbenv, home, DB_FORCE)) != 0) {
+ fprintf(stderr, "%s: dbenv->remove: %s\n",
+ progname, db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ exit (1);
+ }
+ }
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (maxlocks != 0)
+ dbenv->set_lk_max_locks(dbenv, maxlocks);
+
+ if ((ret =
+ dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK, 0)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ (void)dbenv->close(dbenv, 0);
+ exit(1);
+ }
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: %s [-u] [-h home] [-m maxlocks]\n", progname);
+ exit(1);
+}
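One nit worth flagging in ex_lock.c: the realloc() that grows the locks array
assigns its result straight back to locks, so an allocation failure would lose
the only pointer to the locks already held. A more defensive variant, shown as
an editorial sketch (grow_locks is a hypothetical helper, not a proposed change
to the diff):

#include <errno.h>
#include <stdlib.h>

#include <db.h>

static int
grow_locks(locksp, newcount)
	DB_LOCK **locksp;
	int newcount;
{
	DB_LOCK *tmp;

	/* realloc(NULL, n) acts like malloc(n), so this covers both cases. */
	if ((tmp = realloc(*locksp, newcount * sizeof(DB_LOCK))) == NULL)
		return (ENOMEM);
	*locksp = tmp;
	return (0);
}
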
diff --git a/bdb/examples_c/ex_mpool.c b/bdb/examples_c/ex_mpool.c
new file mode 100644
index 00000000000..376c6647895
--- /dev/null
+++ b/bdb/examples_c/ex_mpool.c
@@ -0,0 +1,280 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_mpool.c,v 11.13 2000/10/27 20:32:00 dda Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+int init __P((char *, int, int, char *));
+int run __P((int, int, int, int, char *));
+int run_mpool __P((int, int, int, int, char *));
+#ifdef HAVE_VXWORKS
+int ex_mpool __P((void));
+#define MPOOL "/vxtmp/vxtmp/mpool" /* File. */
+#define ERROR_RETURN ERROR
+#define VXSHM_KEY 12
+#else
+int main __P((int, char *[]));
+void usage __P((char *));
+#define MPOOL "mpool" /* File. */
+#define ERROR_RETURN 1
+#endif
+
+#ifndef HAVE_VXWORKS
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ int cachesize, ch, hits, npages, pagesize;
+ char *progname;
+
+ cachesize = 20 * 1024;
+ hits = 1000;
+ npages = 50;
+ pagesize = 1024;
+ progname = argv[0];
+ while ((ch = getopt(argc, argv, "c:h:n:p:")) != EOF)
+ switch (ch) {
+ case 'c':
+ if ((cachesize = atoi(optarg)) < 20 * 1024)
+ usage(progname);
+ break;
+ case 'h':
+ if ((hits = atoi(optarg)) <= 0)
+ usage(progname);
+ break;
+ case 'n':
+ if ((npages = atoi(optarg)) <= 0)
+ usage(progname);
+ break;
+ case 'p':
+ if ((pagesize = atoi(optarg)) <= 0)
+ usage(progname);
+ break;
+ case '?':
+ default:
+ usage(progname);
+ }
+ argc -= optind;
+ argv += optind;
+
+ return (run_mpool(pagesize, cachesize, hits, npages, progname));
+}
+
+void
+usage(progname)
+ char *progname;
+{
+ (void)fprintf(stderr,
+ "usage: %s [-c cachesize] [-h hits] [-n npages] [-p pagesize]\n",
+ progname);
+ exit(1);
+}
+#else
+int
+ex_mpool()
+{
+ char *progname = "ex_mpool"; /* Program name. */
+ int cachesize, ch, hits, npages, pagesize;
+
+ cachesize = 20 * 1024;
+ hits = 1000;
+ npages = 50;
+ pagesize = 1024;
+
+ return (run_mpool(pagesize, cachesize, hits, npages, progname));
+}
+#endif
+
+int
+run_mpool(pagesize, cachesize, hits, npages, progname)
+ int pagesize, cachesize, hits, npages;
+ char *progname;
+{
+ int ret;
+
+ /* Initialize the file. */
+ if ((ret = init(MPOOL, pagesize, npages, progname)) != 0)
+ return (ret);
+
+ /* Get the pages. */
+ if ((ret = run(hits, cachesize, pagesize, npages, progname)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * init --
+ * Create a backing file.
+ */
+int
+init(file, pagesize, npages, progname)
+ char *file, *progname;
+ int pagesize, npages;
+{
+ int cnt, flags, fd;
+ char *p;
+
+ /*
+ * Create a file with the right number of pages, and store a page
+ * number on each page.
+ */
+ flags = O_CREAT | O_RDWR | O_TRUNC;
+#ifdef DB_WIN32
+ flags |= O_BINARY;
+#endif
+ if ((fd = open(file, flags, 0666)) < 0) {
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, file, strerror(errno));
+ return (ERROR_RETURN);
+ }
+ if ((p = (char *)malloc(pagesize)) == NULL) {
+ fprintf(stderr, "%s: %s\n", progname, strerror(ENOMEM));
+ return (ERROR_RETURN);
+ }
+
+ /* The pages are numbered from 0. */
+ for (cnt = 0; cnt <= npages; ++cnt) {
+ *(int *)p = cnt;
+ if (write(fd, p, pagesize) != pagesize) {
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, file, strerror(errno));
+ return (ERROR_RETURN);
+ }
+ }
+
+ (void)close(fd);
+ free(p);
+ return (0);
+}
+
+/*
+ * run --
+ * Get a set of pages.
+ */
+int
+run(hits, cachesize, pagesize, npages, progname)
+ int hits, cachesize, pagesize, npages;
+ char *progname;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t pageno;
+ int cnt, ret;
+ void *p;
+
+ printf("%s: cachesize: %d; pagesize: %d; N pages: %d\n",
+ progname, cachesize, pagesize, npages);
+
+ /*
+ * Open a memory pool, specify a cachesize, output error messages
+ * to stderr.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+#ifdef HAVE_VXWORKS
+ if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
+ dbenv->err(dbenv, ret, "set_shm_key");
+ return (ERROR_RETURN);
+ }
+#endif
+
+ /* Set the cachesize. */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cachesize, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ goto err1;
+ }
+
+ /* Open the environment. */
+ if ((ret = dbenv->open(
+ dbenv, NULL, DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto err1;
+ }
+
+ /* Open the file in the environment. */
+ if ((ret =
+ memp_fopen(dbenv, MPOOL, 0, 0, pagesize, NULL, &dbmfp)) != 0) {
+ dbenv->err(dbenv, ret, "memp_fopen: %s", MPOOL);
+ goto err1;
+ }
+
+ printf("retrieve %d random pages... ", hits);
+
+ srand((u_int)time(NULL));
+ for (cnt = 0; cnt < hits; ++cnt) {
+ pageno = (rand() % npages) + 1;
+ if ((ret = memp_fget(dbmfp, &pageno, 0, &p)) != 0) {
+ dbenv->err(dbenv, ret,
+ "unable to retrieve page %lu", (u_long)pageno);
+ goto err2;
+ }
+ if (*(db_pgno_t *)p != pageno) {
+ dbenv->errx(dbenv,
+ "wrong page retrieved (%lu != %d)",
+ (u_long)pageno, *(int *)p);
+ goto err2;
+ }
+ if ((ret = memp_fput(dbmfp, p, 0)) != 0) {
+ dbenv->err(dbenv, ret,
+ "unable to return page %lu", (u_long)pageno);
+ goto err2;
+ }
+ }
+
+ printf("successful.\n");
+
+ /* Close the file. */
+ if ((ret = memp_fclose(dbmfp)) != 0) {
+ dbenv->err(dbenv, ret, "memp_fclose");
+ goto err1;
+ }
+
+ /* Close the pool. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ return (0);
+
+err2: (void)memp_fclose(dbmfp);
+err1: (void)dbenv->close(dbenv, 0);
+ return (ERROR_RETURN);
+}
diff --git a/bdb/examples_c/ex_thread.c b/bdb/examples_c/ex_thread.c
new file mode 100644
index 00000000000..93812ade764
--- /dev/null
+++ b/bdb/examples_c/ex_thread.c
@@ -0,0 +1,604 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_thread.c,v 11.9 2000/05/31 15:10:04 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+/*
+ * NB: This application is written using POSIX 1003.1b-1993 pthreads
+ * interfaces, which may not be portable to your system.
+ */
+extern int sched_yield __P((void)); /* Pthread yield function. */
+
+DB_ENV *db_init __P((char *));
+void *deadlock __P((void *));
+void fatal __P((char *, int, int));
+int main __P((int, char *[]));
+int reader __P((int));
+void stats __P((void));
+void *trickle __P((void *));
+void *tstart __P((void *));
+void usage __P((void));
+void word __P((void));
+int writer __P((int));
+
+struct _statistics {
+ int aborted; /* Write. */
+ int aborts; /* Read/write. */
+ int adds; /* Write. */
+ int deletes; /* Write. */
+ int txns; /* Write. */
+ int found; /* Read. */
+ int notfound; /* Read. */
+} *perf;
+
+const char
+ *progname = "ex_thread"; /* Program name. */
+
+#define DATABASE "access.db" /* Database name. */
+#define WORDLIST "../test/wordlist" /* Dictionary. */
+
+/*
+ * We can seriously increase the number of collisions and transaction
+ * aborts by yielding the scheduler after every DB call. Specify the
+ * -p option to do this.
+ */
+int punish; /* -p */
+int nlist; /* -n */
+int nreaders; /* -r */
+int verbose; /* -v */
+int nwriters; /* -w */
+
+DB *dbp; /* Database handle. */
+DB_ENV *dbenv; /* Database environment. */
+int nthreads; /* Total threads. */
+char **list; /* Word list. */
+
+/*
+ * ex_thread --
+ *	Run a simple threaded application with some number of readers and
+ * writers competing for a set of words.
+ *
+ * Example UNIX shell script to run this program:
+ * % rm -rf TESTDIR
+ * % mkdir TESTDIR
+ * % ex_thread -h TESTDIR
+ */
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int errno, optind;
+ pthread_t *tids;
+ int ch, i, ret;
+ char *home;
+ void *retp;
+
+ nlist = 1000;
+ nreaders = nwriters = 4;
+ home = "TESTDIR";
+ while ((ch = getopt(argc, argv, "h:pn:r:vw:")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'p':
+ punish = 1;
+ break;
+ case 'n':
+ nlist = atoi(optarg);
+ break;
+ case 'r':
+ nreaders = atoi(optarg);
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case 'w':
+ nwriters = atoi(optarg);
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ /* Initialize the random number generator. */
+ srand(getpid() | time(NULL));
+
+ /* Build the key list. */
+ word();
+
+ /* Remove the previous database. */
+ (void)unlink(DATABASE);
+
+ /* Initialize the database environment. */
+ dbenv = db_init(home);
+
+ /* Initialize the database. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ (void)dbenv->close(dbenv, 0);
+ return (1);
+ }
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err;
+ }
+ if ((ret = dbp->open(dbp,
+ DATABASE, NULL, DB_BTREE, DB_CREATE | DB_THREAD, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", DATABASE);
+ goto err;
+ }
+
+ nthreads = nreaders + nwriters + 2;
+ printf("Running: readers %d, writers %d\n", nreaders, nwriters);
+ fflush(stdout);
+
+ /* Create statistics structures, offset by 1. */
+ if ((perf = calloc(nreaders + nwriters + 1, sizeof(*perf))) == NULL)
+ fatal(NULL, errno, 1);
+
+ /* Create thread ID structures. */
+ if ((tids = malloc(nthreads * sizeof(pthread_t))) == NULL)
+ fatal(NULL, errno, 1);
+
+ /* Create reader/writer threads. */
+ for (i = 0; i < nreaders + nwriters; ++i)
+ if (pthread_create(&tids[i], NULL, tstart, (void *)i))
+ fatal("pthread_create", errno, 1);
+
+ /* Create buffer pool trickle thread. */
+ if (pthread_create(&tids[i], NULL, trickle, &i))
+ fatal("pthread_create", errno, 1);
+ ++i;
+
+ /* Create deadlock detector thread. */
+ if (pthread_create(&tids[i], NULL, deadlock, &i))
+ fatal("pthread_create", errno, 1);
+
+ /* Wait for the threads. */
+ for (i = 0; i < nthreads; ++i)
+ (void)pthread_join(tids[i], &retp);
+
+err: (void)dbp->close(dbp, 0);
+ (void)dbenv->close(dbenv, 0);
+
+ return (0);
+}
+
+int
+reader(id)
+ int id;
+{
+ DBT key, data;
+ int n, ret;
+ char buf[64];
+
+ /*
+ * DBT's must use local memory or malloc'd memory if the DB handle
+ * is accessed in a threaded fashion.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ data.flags = DB_DBT_MALLOC;
+
+ /*
+ * Read-only threads do not require transaction protection, unless
+ * there's a need for repeatable reads.
+ */
+ for (;;) {
+ /* Pick a key at random, and look it up. */
+ n = rand() % nlist;
+ key.data = list[n];
+ key.size = strlen(key.data);
+
+ if (verbose) {
+ sprintf(buf, "reader: %d: list entry %d\n", id, n);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case DB_LOCK_DEADLOCK: /* Deadlock. */
+ ++perf[id].aborts;
+ break;
+ case 0: /* Success. */
+ ++perf[id].found;
+ free(data.data);
+ break;
+ case DB_NOTFOUND: /* Not found. */
+ ++perf[id].notfound;
+ break;
+ default:
+ sprintf(buf,
+ "reader %d: dbp->get: %s", id, (char *)key.data);
+ fatal(buf, ret, 0);
+ }
+ }
+ return (0);
+}
+
+int
+writer(id)
+ int id;
+{
+ DBT key, data;
+ DB_TXN *tid;
+ time_t now, then;
+ int n, ret;
+ char buf[256], dbuf[10000];
+
+ time(&now);
+ then = now;
+
+ /*
+ * DBT's must use local memory or malloc'd memory if the DB handle
+ * is accessed in a threaded fashion.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ data.data = dbuf;
+ data.ulen = sizeof(dbuf);
+ data.flags = DB_DBT_USERMEM;
+
+ for (;;) {
+ /* Pick a random key. */
+ n = rand() % nlist;
+ key.data = list[n];
+ key.size = strlen(key.data);
+
+ if (verbose) {
+ sprintf(buf, "writer: %d: list entry %d\n", id, n);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+
+ /* Abort and retry. */
+ if (0) {
+retry: if ((ret = txn_abort(tid)) != 0)
+ fatal("txn_abort", ret, 1);
+ ++perf[id].aborts;
+ ++perf[id].aborted;
+ }
+
+ /* Thread #1 prints out the stats every 20 seconds. */
+ if (id == 1) {
+ time(&now);
+ if (now - then >= 20) {
+ stats();
+ then = now;
+ }
+ }
+
+ /* Begin the transaction. */
+ if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
+ fatal("txn_begin", ret, 1);
+
+ /*
+ * Get the key. If it doesn't exist, add it. If it does
+ * exist, delete it.
+ */
+ switch (ret = dbp->get(dbp, tid, &key, &data, 0)) {
+ case DB_LOCK_DEADLOCK:
+ goto retry;
+ case 0:
+ goto delete;
+ case DB_NOTFOUND:
+ goto add;
+ }
+
+ sprintf(buf, "writer: %d: dbp->get", id);
+ fatal(buf, ret, 1);
+ /* NOTREACHED */
+
+delete: /* Delete the key. */
+ switch (ret = dbp->del(dbp, tid, &key, 0)) {
+ case DB_LOCK_DEADLOCK:
+ goto retry;
+ case 0:
+ ++perf[id].deletes;
+ goto commit;
+ }
+
+ sprintf(buf, "writer: %d: dbp->del", id);
+ fatal(buf, ret, 1);
+ /* NOTREACHED */
+
+add: /* Add the key. 1 data item in 30 is an overflow item. */
+ data.size = 20 + rand() % 128;
+ if (rand() % 30 == 0)
+ data.size += 8192;
+
+ switch (ret = dbp->put(dbp, tid, &key, &data, 0)) {
+ case DB_LOCK_DEADLOCK:
+ goto retry;
+ case 0:
+ ++perf[id].adds;
+ goto commit;
+ default:
+ sprintf(buf, "writer: %d: dbp->put", id);
+ fatal(buf, ret, 1);
+ }
+
+commit: /* The transaction finished, commit it. */
+ if ((ret = txn_commit(tid, 0)) != 0)
+ fatal("txn_commit", ret, 1);
+
+ /*
+ * Every time the thread completes 20 transactions, show
+ * our progress.
+ */
+ if (++perf[id].txns % 20 == 0) {
+ sprintf(buf,
+"writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d\n",
+ id, perf[id].adds, perf[id].deletes,
+ perf[id].aborts, perf[id].txns);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+
+ /*
+ * If this thread was aborted more than 5 times before
+ * the transaction finished, complain.
+ */
+ if (perf[id].aborted > 5) {
+ sprintf(buf,
+"writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d: ABORTED: %2d\n",
+ id, perf[id].adds, perf[id].deletes,
+ perf[id].aborts, perf[id].txns, perf[id].aborted);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+ perf[id].aborted = 0;
+ }
+ return (0);
+}
+
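Stripped of the statistics and progress reporting, the transaction control flow
in writer() above is the standard abort-and-retry loop. A condensed editorial
sketch, where do_work stands in for the get/put/del calls and txn_retry_loop is
not a function in the example:

#include <db.h>

static int
txn_retry_loop(dbenv, do_work)
	DB_ENV *dbenv;
	int (*do_work)(DB_TXN *);
{
	DB_TXN *tid;
	int ret;

	for (;;) {
		if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
			return (ret);
		if ((ret = do_work(tid)) == 0)
			return (txn_commit(tid, 0));
		(void)txn_abort(tid);
		if (ret != DB_LOCK_DEADLOCK)
			return (ret);	/* Only deadlocks are retried. */
	}
	/* NOTREACHED */
}
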
+/*
+ * stats --
+ * Display reader/writer thread statistics. To display the statistics
+ * for the mpool trickle or deadlock threads, use db_stat(1).
+ */
+void
+stats()
+{
+ int id;
+ char *p, buf[8192];
+
+ p = buf + sprintf(buf, "-------------\n");
+ for (id = 0; id < nreaders + nwriters;)
+ if (id++ < nwriters)
+ p += sprintf(p,
+ "writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d\n",
+ id, perf[id].adds,
+ perf[id].deletes, perf[id].aborts, perf[id].txns);
+ else
+ p += sprintf(p,
+ "reader: %2d: found: %5d: notfound: %5d: aborts: %4d\n",
+ id, perf[id].found,
+ perf[id].notfound, perf[id].aborts);
+ p += sprintf(p, "-------------\n");
+
+ write(STDOUT_FILENO, buf, p - buf);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+DB_ENV *
+db_init(home)
+ char *home;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ if (punish) {
+ (void)db_env_set_pageyield(1);
+ (void)db_env_set_func_yield(sched_yield);
+ }
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ exit (1);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ (void)dbenv->set_cachesize(dbenv, 0, 100 * 1024, 0);
+ (void)dbenv->set_lg_max(dbenv, 200000);
+
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD, 0)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ (void)dbenv->close(dbenv, 0);
+ exit (1);
+ }
+ return (dbenv);
+}
+
+/*
+ * tstart --
+ * Thread start function for readers and writers.
+ */
+void *
+tstart(arg)
+ void *arg;
+{
+ pthread_t tid;
+ u_int id;
+
+ id = (u_int)arg + 1;
+
+ tid = pthread_self();
+
+ if (id <= (u_int)nwriters) {
+ printf("write thread %d starting: tid: %lu\n", id, (u_long)tid);
+ fflush(stdout);
+ writer(id);
+ } else {
+ printf("read thread %d starting: tid: %lu\n", id, (u_long)tid);
+ fflush(stdout);
+ reader(id);
+ }
+
+ /* NOTREACHED */
+ return (NULL);
+}
+
+/*
+ * deadlock --
+ * Thread start function for lock_detect().
+ */
+void *
+deadlock(arg)
+ void *arg;
+{
+ struct timeval t;
+ pthread_t tid;
+
+ arg = arg; /* XXX: shut the compiler up. */
+ tid = pthread_self();
+
+ printf("deadlock thread starting: tid: %lu\n", (u_long)tid);
+ fflush(stdout);
+
+ t.tv_sec = 0;
+ t.tv_usec = 100000;
+ for (;;) {
+ (void)lock_detect(dbenv,
+ DB_LOCK_CONFLICT, DB_LOCK_YOUNGEST, NULL);
+
+ /* Check every 100ms. */
+ (void)select(0, NULL, NULL, NULL, &t);
+ }
+
+ /* NOTREACHED */
+ return (NULL);
+}
+
+/*
+ * trickle --
+ * Thread start function for memp_trickle().
+ */
+void *
+trickle(arg)
+ void *arg;
+{
+ pthread_t tid;
+ int wrote;
+ char buf[64];
+
+ arg = arg; /* XXX: shut the compiler up. */
+ tid = pthread_self();
+
+ printf("trickle thread starting: tid: %lu\n", (u_long)tid);
+ fflush(stdout);
+
+ for (;;) {
+ (void)memp_trickle(dbenv, 10, &wrote);
+ if (verbose) {
+ sprintf(buf, "trickle: wrote %d\n", wrote);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+ if (wrote == 0) {
+ sleep(1);
+ sched_yield();
+ }
+ }
+
+ /* NOTREACHED */
+ return (NULL);
+}
+
+/*
+ * word --
+ * Build the dictionary word list.
+ */
+void
+word()
+{
+ FILE *fp;
+ int cnt;
+ char buf[256];
+
+ if ((fp = fopen(WORDLIST, "r")) == NULL)
+ fatal(WORDLIST, errno, 1);
+
+ if ((list = malloc(nlist * sizeof(char *))) == NULL)
+ fatal(NULL, errno, 1);
+
+ for (cnt = 0; cnt < nlist; ++cnt) {
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ break;
+ if ((list[cnt] = strdup(buf)) == NULL)
+ fatal(NULL, errno, 1);
+ }
+	nlist = cnt;		/* In case the word list was shorter than nlist. */
+}
+
+/*
+ * fatal --
+ * Report a fatal error and quit.
+ */
+void
+fatal(msg, err, syserr)
+ char *msg;
+ int err, syserr;
+{
+ fprintf(stderr, "%s: ", progname);
+ if (msg != NULL) {
+ fprintf(stderr, "%s", msg);
+ if (syserr)
+ fprintf(stderr, ": ");
+ }
+ if (syserr)
+ fprintf(stderr, "%s", strerror(err));
+ fprintf(stderr, "\n");
+ exit (1);
+
+ /* NOTREACHED */
+}
+
+/*
+ * usage --
+ * Usage message.
+ */
+void
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: %s [-pv] [-h home] [-n words] [-r readers] [-w writers]\n",
+ progname);
+ exit(1);
+}
diff --git a/bdb/examples_c/ex_tpcb.c b/bdb/examples_c/ex_tpcb.c
new file mode 100644
index 00000000000..2fd11510af7
--- /dev/null
+++ b/bdb/examples_c/ex_tpcb.c
@@ -0,0 +1,811 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_tpcb.c,v 11.21 2000/10/27 20:32:00 dda Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef DB_WIN32
+#include <sys/types.h>
+#include <sys/timeb.h>
+#endif
+
+#include <db.h>
+
+typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
+
+DB_ENV *db_init __P((char *, char *, int, int, int));
+int hpopulate __P((DB *, int, int, int, int));
+int populate __P((DB *, u_int32_t, u_int32_t, int, char *));
+u_int32_t random_id __P((FTYPE, int, int, int));
+u_int32_t random_int __P((u_int32_t, u_int32_t));
+int tp_populate __P((DB_ENV *, int, int, int, int, int));
+int tp_run __P((DB_ENV *, int, int, int, int, int));
+int tp_txn __P((DB_ENV *, DB *, DB *, DB *, DB *, int, int, int, int));
+
+#ifdef HAVE_VXWORKS
+#define ERROR_RETURN ERROR
+#define HOME "/vxtmp/vxtmp/TESTDIR"
+#define VXSHM_KEY 13
+int ex_tpcb_init __P(());
+int ex_tpcb __P(());
+#else
+#define ERROR_RETURN 1
+void invarg __P((char *, int, char *));
+int main __P((int, char *[]));
+void usage __P((char *));
+#endif
+
+/*
+ * This program implements a basic TPC/B driver program. To create the
+ * TPC/B database, run with the -i (init) flag. The number of records
+ * with which to populate the account, history, branch, and teller tables
+ * is specified by the a, s, b, and t flags respectively. To run a TPC/B
+ * test, use the n flag to indicate a number of transactions to run (note
+ * that you can run many of these processes in parallel to simulate a
+ * multiuser test run).
+ */
+#define TELLERS_PER_BRANCH 10
+#define ACCOUNTS_PER_TELLER 10000
+#define HISTORY_PER_BRANCH 2592000
+
+/*
+ * The default configuration that adheres to TPCB scaling rules requires
+ * nearly 3 GB of space. To avoid requiring that much space for testing,
+ * we set the parameters much lower. If you want to run a valid 10 TPS
+ * configuration, define VALID_SCALING.
+ */
+#ifdef VALID_SCALING
+#define ACCOUNTS 1000000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 25920000
+#endif
+
+#ifdef TINY
+#define ACCOUNTS 1000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 10000
+#endif
+
+#ifdef VERY_TINY
+#define ACCOUNTS 500
+#define BRANCHES 10
+#define TELLERS 50
+#define HISTORY 5000
+#endif
+
+#if !defined(VALID_SCALING) && !defined(TINY) && !defined(VERY_TINY)
+#define ACCOUNTS 100000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 259200
+#endif
+
+#define HISTORY_LEN 100
+#define RECLEN 100
+#define BEGID 1000000
+
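A quick sanity check on the "nearly 3 GB" figure quoted above for the
VALID_SCALING parameters: each table stores fixed RECLEN-byte records, so the
raw record payload alone is (1,000,000 + 10 + 100 + 25,920,000) * 100 bytes,
about 2.7 GB before any page, key, or log overhead. As a throwaway program
(editorial, not part of the diff):

#include <stdio.h>

int
main()
{
	long naccounts, nbranches, nhistory, ntellers, reclen;
	double bytes;

	naccounts = 1000000;		/* VALID_SCALING values from above. */
	nbranches = 10;
	ntellers = 100;
	nhistory = 25920000;
	reclen = 100;			/* RECLEN */
	bytes = (double)(naccounts + nbranches + ntellers + nhistory) * reclen;
	printf("~%.2f GB of record data\n", bytes / 1e9);	/* ~2.69 GB */
	return (0);
}
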
+typedef struct _defrec {
+ u_int32_t id;
+ u_int32_t balance;
+ u_int8_t pad[RECLEN - sizeof(u_int32_t) - sizeof(u_int32_t)];
+} defrec;
+
+typedef struct _histrec {
+ u_int32_t aid;
+ u_int32_t bid;
+ u_int32_t tid;
+ u_int32_t amount;
+ u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
+} histrec;
+
+#ifdef HAVE_VXWORKS
+int
+ex_tpcb_init()
+{
+ DB_ENV *dbenv;
+ int accounts, branches, ret, seed, t_ret, tellers, history, verbose;
+ char *home;
+ char *progname = "ex_tpcb_init"; /* Program name. */
+
+ verbose = 1;
+ if ((dbenv = db_init(HOME, progname, 0, 1, 0)) == NULL)
+ return (ERROR_RETURN);
+
+ accounts = ACCOUNTS;
+ branches = BRANCHES;
+ tellers = TELLERS;
+ history = HISTORY;
+
+ if ((ret = tp_populate(dbenv, accounts, branches, history, tellers,
+ verbose)) != OK)
+ fprintf(stderr, "%s: %s\n", progname, db_strerror(ret));
+ if ((t_ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+
+ return (ret == 0 ? t_ret : ret);
+}
+
+int
+ex_tpcb()
+{
+ DB_ENV *dbenv;
+ int accounts, branches, seed, tellers, history;
+ int ch, mpool, ntxns, ret, t_ret, txn_no_sync, verbose;
+ char *progname = "ex_tpcb"; /* Program name. */
+
+ accounts = ACCOUNTS;
+ branches = BRANCHES;
+ tellers = TELLERS;
+ history = HISTORY;
+
+ txn_no_sync = 0;
+ mpool = 0;
+ ntxns = 20;
+ verbose = 1;
+ seed = (int)((u_int)getpid() | time(NULL));
+
+ srand((u_int)seed);
+
+ /* Initialize the database environment. */
+ if ((dbenv = db_init(HOME, progname, mpool, 0,
+ txn_no_sync ? DB_TXN_NOSYNC : 0)) == NULL)
+ return (ERROR_RETURN);
+
+ if (verbose)
+ printf("%ld Accounts, %ld Branches, %ld Tellers, %ld History\n",
+ (long)accounts, (long)branches,
+ (long)tellers, (long)history);
+
+ if ((ret = tp_run(dbenv, ntxns, accounts, branches, tellers, verbose))
+ != OK)
+ fprintf(stderr, "tp_run failed\n");
+
+ if ((t_ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ return (ret == 0 ? t_ret : ret);
+}
+#else
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ int accounts, branches, seed, tellers, history;
+ int ch, iflag, mpool, ntxns, ret, txn_no_sync, verbose;
+ char *home, *progname;
+
+ home = "TESTDIR";
+ progname = "ex_tpcb";
+ accounts = branches = history = tellers = 0;
+ txn_no_sync = 0;
+ mpool = ntxns = 0;
+ verbose = 0;
+ iflag = 0;
+ seed = (int)((u_int)getpid() | time(NULL));
+ while ((ch = getopt(argc, argv, "a:b:c:fh:in:S:s:t:v")) != EOF)
+ switch (ch) {
+ case 'a': /* Number of account records */
+ if ((accounts = atoi(optarg)) <= 0)
+ invarg(progname, ch, optarg);
+ break;
+ case 'b': /* Number of branch records */
+ if ((branches = atoi(optarg)) <= 0)
+ invarg(progname, ch, optarg);
+ break;
+ case 'c': /* Cachesize in bytes */
+ if ((mpool = atoi(optarg)) <= 0)
+ invarg(progname, ch, optarg);
+ break;
+ case 'f': /* Fast mode: no txn sync. */
+ txn_no_sync = 1;
+ break;
+ case 'h': /* DB home. */
+ home = optarg;
+ break;
+ case 'i': /* Initialize the test. */
+ iflag = 1;
+ break;
+ case 'n': /* Number of transactions */
+ if ((ntxns = atoi(optarg)) <= 0)
+ invarg(progname, ch, optarg);
+ break;
+ case 'S': /* Random number seed. */
+ if ((seed = atoi(optarg)) <= 0)
+ invarg(progname, ch, optarg);
+ break;
+ case 's': /* Number of history records */
+ if ((history = atoi(optarg)) <= 0)
+ invarg(progname, ch, optarg);
+ break;
+ case 't': /* Number of teller records */
+ if ((tellers = atoi(optarg)) <= 0)
+ invarg(progname, ch, optarg);
+ break;
+ case 'v': /* Verbose option. */
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ usage(progname);
+ }
+ argc -= optind;
+ argv += optind;
+
+ srand((u_int)seed);
+
+ /* Initialize the database environment. */
+ if ((dbenv = db_init(home,
+ progname, mpool, iflag, txn_no_sync ? DB_TXN_NOSYNC : 0)) == NULL)
+ return (1);
+
+ accounts = accounts == 0 ? ACCOUNTS : accounts;
+ branches = branches == 0 ? BRANCHES : branches;
+ tellers = tellers == 0 ? TELLERS : tellers;
+ history = history == 0 ? HISTORY : history;
+
+ if (verbose)
+ printf("%ld Accounts, %ld Branches, %ld Tellers, %ld History\n",
+ (long)accounts, (long)branches,
+ (long)tellers, (long)history);
+
+ if (iflag) {
+ if (ntxns != 0)
+ usage(progname);
+ tp_populate(dbenv,
+ accounts, branches, history, tellers, verbose);
+ } else {
+ if (ntxns == 0)
+ usage(progname);
+ tp_run(dbenv, ntxns, accounts, branches, tellers, verbose);
+ }
+
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: dbenv->close failed: %s\n",
+ progname, db_strerror(ret));
+ return (1);
+ }
+
+ return (0);
+}
+
+void
+invarg(progname, arg, str)
+ char *progname;
+ int arg;
+ char *str;
+{
+ (void)fprintf(stderr,
+ "%s: invalid argument for -%c: %s\n", progname, arg, str);
+ exit (1);
+}
+
+void
+usage(progname)
+ char *progname;
+{
+ char *a1, *a2;
+
+ a1 = "[-fv] [-a accounts] [-b branches]\n";
+ a2 = "\t[-c cache_size] [-h home] [-S seed] [-s history] [-t tellers]";
+ (void)fprintf(stderr, "usage: %s -i %s %s\n", progname, a1, a2);
+ (void)fprintf(stderr,
+ " %s -n transactions %s %s\n", progname, a1, a2);
+ exit(1);
+}
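+
+/*
+ * Example invocations (added commentary, not in the original source; the
+ * home directory and transaction count are arbitrary):
+ *
+ *	ex_tpcb -i -v -h TESTDIR	(initialize the tables)
+ *	ex_tpcb -n 1000 -v -h TESTDIR	(run 1000 transactions)
+ */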
+#endif
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+DB_ENV *
+db_init(home, prefix, cachesize, initializing, flags)
+ char *home, *prefix;
+ int cachesize, initializing, flags;
+{
+ DB_ENV *dbenv;
+ u_int32_t local_flags;
+ int ret;
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ prefix, db_strerror(ret));
+ return (NULL);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, prefix);
+#ifdef HAVE_VXWORKS
+ if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
+ dbenv->err(dbenv, ret, "set_shm_key");
+ return (NULL);
+ }
+#endif
+ (void)dbenv->set_cachesize(dbenv, 0,
+ cachesize == 0 ? 4 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+
+ local_flags = flags | DB_CREATE | (initializing ? DB_INIT_MPOOL :
+ DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL);
+ if ((ret = dbenv->open(dbenv, home, local_flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DBENV->open: %s", home);
+ (void)dbenv->close(dbenv, 0);
+ return (NULL);
+ }
+ return (dbenv);
+}
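+
+/*
+ * Illustration (added commentary, not in the original source): db_init()
+ * opens a memory-pool-only environment while loading and a fully
+ * transactional one while running, so the -i and -n invocations of this
+ * program end up opening the environment with, respectively:
+ *
+ *	DB_CREATE | DB_INIT_MPOOL
+ *	DB_CREATE | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL
+ *
+ * (plus DB_TXN_NOSYNC when -f is given).
+ */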
+
+/*
+ * Initialize the database to the specified number of accounts, branches,
+ * history records, and tellers.
+ */
+int
+tp_populate(env, accounts, branches, history, tellers, verbose)
+ DB_ENV *env;
+ int accounts, branches, history, tellers, verbose;
+{
+ DB *dbp;
+ char dbname[100];
+ u_int32_t balance, idnum, oflags;
+ u_int32_t end_anum, end_bnum, end_tnum;
+ u_int32_t start_anum, start_bnum, start_tnum;
+ int ret;
+
+ idnum = BEGID;
+ balance = 500000;
+#ifdef HAVE_VXWORKS
+ oflags = DB_CREATE;
+#else
+ oflags = DB_CREATE | DB_TRUNCATE;
+#endif
+
+ if ((ret = db_create(&dbp, env, 0)) != 0) {
+ env->err(env, ret, "db_create");
+ return (ERROR_RETURN);
+ }
+ (void)dbp->set_h_nelem(dbp, (u_int32_t)accounts);
+
+ snprintf(dbname, sizeof(dbname), "account");
+ if ((ret = dbp->open(dbp, dbname, NULL,
+ DB_HASH, oflags, 0644)) != 0) {
+ env->err(env, ret, "DB->open: account");
+ return (ERROR_RETURN);
+ }
+
+ start_anum = idnum;
+ populate(dbp, idnum, balance, accounts, "account");
+ idnum += accounts;
+ end_anum = idnum - 1;
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ env->err(env, ret, "DB->close: account");
+ return (ERROR_RETURN);
+ }
+ if (verbose)
+ printf("Populated accounts: %ld - %ld\n",
+ (long)start_anum, (long)end_anum);
+
+ /*
+ * Since the number of branches is very small, we want to use very
+ * small pages and only 1 key per page, i.e., key-locking instead
+ * of page locking.
+ */
+ if ((ret = db_create(&dbp, env, 0)) != 0) {
+ env->err(env, ret, "db_create");
+ return (ERROR_RETURN);
+ }
+ (void)dbp->set_h_ffactor(dbp, 1);
+ (void)dbp->set_h_nelem(dbp, (u_int32_t)branches);
+ (void)dbp->set_pagesize(dbp, 512);
+ snprintf(dbname, sizeof(dbname), "branch");
+ if ((ret = dbp->open(dbp, dbname, NULL,
+ DB_HASH, oflags, 0644)) != 0) {
+ env->err(env, ret, "DB->open: branch");
+ return (ERROR_RETURN);
+ }
+ start_bnum = idnum;
+ populate(dbp, idnum, balance, branches, "branch");
+ idnum += branches;
+ end_bnum = idnum - 1;
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ env->err(env, ret, "DB->close: branch");
+ return (ERROR_RETURN);
+ }
+ if (verbose)
+ printf("Populated branches: %ld - %ld\n",
+ (long)start_bnum, (long)end_bnum);
+
+ /*
+ * In the case of tellers, we also want small pages, but we'll let
+ * the fill factor dynamically adjust itself.
+ */
+ if ((ret = db_create(&dbp, env, 0)) != 0) {
+ env->err(env, ret, "db_create");
+ return (ERROR_RETURN);
+ }
+ (void)dbp->set_h_ffactor(dbp, 0);
+ (void)dbp->set_h_nelem(dbp, (u_int32_t)tellers);
+ (void)dbp->set_pagesize(dbp, 512);
+ snprintf(dbname, sizeof(dbname), "teller");
+ if ((ret = dbp->open(dbp, dbname, NULL,
+ DB_HASH, oflags, 0644)) != 0) {
+ env->err(env, ret, "DB->open: teller");
+ return (ERROR_RETURN);
+ }
+
+ start_tnum = idnum;
+ populate(dbp, idnum, balance, tellers, "teller");
+ idnum += tellers;
+ end_tnum = idnum - 1;
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ env->err(env, ret, "DB->close: teller");
+ return (ERROR_RETURN);
+ }
+ if (verbose)
+ printf("Populated tellers: %ld - %ld\n",
+ (long)start_tnum, (long)end_tnum);
+
+ if ((ret = db_create(&dbp, env, 0)) != 0) {
+ env->err(env, ret, "db_create");
+ return (ERROR_RETURN);
+ }
+ (void)dbp->set_re_len(dbp, HISTORY_LEN);
+ snprintf(dbname, sizeof(dbname), "history");
+ if ((ret = dbp->open(dbp, dbname, NULL,
+ DB_RECNO, oflags, 0644)) != 0) {
+ env->err(env, ret, "DB->open: history");
+ return (ERROR_RETURN);
+ }
+
+ hpopulate(dbp, history, accounts, branches, tellers);
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ env->err(env, ret, "DB->close: history");
+ return (ERROR_RETURN);
+ }
+ return (0);
+}
+
+int
+populate(dbp, start_id, balance, nrecs, msg)
+ DB *dbp;
+ u_int32_t start_id, balance;
+ int nrecs;
+ char *msg;
+{
+ DBT kdbt, ddbt;
+ defrec drec;
+ int i, ret;
+
+ kdbt.flags = 0;
+ kdbt.data = &drec.id;
+ kdbt.size = sizeof(u_int32_t);
+ ddbt.flags = 0;
+ ddbt.data = &drec;
+ ddbt.size = sizeof(drec);
+ memset(&drec.pad[0], 1, sizeof(drec.pad));
+
+ for (i = 0; i < nrecs; i++) {
+ drec.id = start_id + (u_int32_t)i;
+ drec.balance = balance;
+ if ((ret =
+ (dbp->put)(dbp, NULL, &kdbt, &ddbt, DB_NOOVERWRITE)) != 0) {
+ dbp->err(dbp,
+ ret, "Failure initializing %s file\n", msg);
+ return (ERROR_RETURN);
+ }
+ }
+ return (0);
+}
+
+int
+hpopulate(dbp, history, accounts, branches, tellers)
+ DB *dbp;
+ int history, accounts, branches, tellers;
+{
+ DBT kdbt, ddbt;
+ histrec hrec;
+ db_recno_t key;
+ int i, ret;
+
+ memset(&kdbt, 0, sizeof(kdbt));
+ memset(&ddbt, 0, sizeof(ddbt));
+ ddbt.data = &hrec;
+ ddbt.size = sizeof(hrec);
+ kdbt.data = &key;
+ kdbt.size = sizeof(key);
+ memset(&hrec.pad[0], 1, sizeof(hrec.pad));
+ hrec.amount = 10;
+
+ for (i = 1; i <= history; i++) {
+ hrec.aid = random_id(ACCOUNT, accounts, branches, tellers);
+ hrec.bid = random_id(BRANCH, accounts, branches, tellers);
+ hrec.tid = random_id(TELLER, accounts, branches, tellers);
+ if ((ret = dbp->put(dbp, NULL, &kdbt, &ddbt, DB_APPEND)) != 0) {
+ dbp->err(dbp, ret, "dbp->put");
+ return (ERROR_RETURN);
+ }
+ }
+ return (0);
+}
+
+u_int32_t
+random_int(lo, hi)
+ u_int32_t lo, hi;
+{
+ u_int32_t ret;
+ int t;
+
+#ifndef RAND_MAX
+#define RAND_MAX 0x7fffffff
+#endif
+ t = rand();
+ ret = (u_int32_t)(((double)t / ((double)(RAND_MAX) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ return (ret);
+}
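+
+/*
+ * Illustration (added commentary, not in the original source): rand()
+ * returns a value t in [0, RAND_MAX], so t / (RAND_MAX + 1) is a fraction
+ * in [0, 1) and the result above is (approximately) uniformly distributed
+ * over the hi - lo + 1 integers in [lo, hi].
+ */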
+
+u_int32_t
+random_id(type, accounts, branches, tellers)
+ FTYPE type;
+ int accounts, branches, tellers;
+{
+ u_int32_t min, max, num;
+
+ max = min = BEGID;
+ num = accounts;
+ switch(type) {
+ case TELLER:
+ min += branches;
+ num = tellers;
+ /* FALLTHROUGH */
+ case BRANCH:
+ if (type == BRANCH)
+ num = branches;
+ min += accounts;
+ /* FALLTHROUGH */
+ case ACCOUNT:
+ max = min + num - 1;
+ }
+ return (random_int(min, max));
+}
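+
+/*
+ * Worked example (added commentary, not in the original source): ids are
+ * assigned in one contiguous space starting at BEGID, accounts first, then
+ * branches, then tellers, matching the order used by tp_populate() above.
+ * With the default sizes (100000 accounts, 10 branches, 100 tellers):
+ *
+ *	ACCOUNT	1000000 - 1099999
+ *	BRANCH	1100000 - 1100009
+ *	TELLER	1100010 - 1100109
+ */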
+
+int
+tp_run(dbenv, n, accounts, branches, tellers, verbose)
+ DB_ENV *dbenv;
+ int n, accounts, branches, tellers, verbose;
+{
+ DB *adb, *bdb, *hdb, *tdb;
+ char dbname[100];
+ double gtps, itps;
+ int failed, ifailed, ret, txns;
+ time_t starttime, curtime, lasttime;
+#ifndef DB_WIN32
+ pid_t pid;
+
+ pid = getpid();
+#else
+ int pid;
+
+ pid = 0;
+#endif
+
+ /*
+ * Open the database files.
+ */
+ if ((ret = db_create(&adb, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ERROR_RETURN);
+ }
+ snprintf(dbname, sizeof(dbname), "account");
+ if ((ret = adb->open(adb, dbname, NULL, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: account");
+ return (ERROR_RETURN);
+ }
+
+ if ((ret = db_create(&bdb, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ERROR_RETURN);
+ }
+ snprintf(dbname, sizeof(dbname), "branch");
+ if ((ret = bdb->open(bdb, dbname, NULL, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: branch");
+ return (ERROR_RETURN);
+ }
+
+ if ((ret = db_create(&tdb, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ERROR_RETURN);
+ }
+ snprintf(dbname, sizeof(dbname), "teller");
+ if ((ret = tdb->open(tdb, dbname, NULL, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: teller");
+ return (ERROR_RETURN);
+ }
+
+ if ((ret = db_create(&hdb, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ERROR_RETURN);
+ }
+ snprintf(dbname, sizeof(dbname), "history");
+ if ((ret = hdb->open(hdb, dbname, NULL, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: history");
+ return (ERROR_RETURN);
+ }
+
+ txns = failed = ifailed = 0;
+ starttime = time(NULL);
+ lasttime = starttime;
+ while (n-- > 0) {
+ txns++;
+ ret = tp_txn(dbenv, adb, bdb, tdb, hdb,
+ accounts, branches, tellers, verbose);
+ if (ret != 0) {
+ failed++;
+ ifailed++;
+ }
+ if (n % 5000 == 0) {
+ curtime = time(NULL);
+ gtps = (double)(txns - failed) / (curtime - starttime);
+ itps = (double)(5000 - ifailed) / (curtime - lasttime);
+ printf("[%d] %d txns %d failed ", (int)pid,
+ txns, failed);
+ printf("%6.2f TPS (gross) %6.2f TPS (interval)\n",
+ gtps, itps);
+ lasttime = curtime;
+ ifailed = 0;
+ }
+ }
+
+ (void)adb->close(adb, 0);
+ (void)bdb->close(bdb, 0);
+ (void)tdb->close(tdb, 0);
+ (void)hdb->close(hdb, 0);
+
+ printf("%ld transactions begun %ld failed\n", (long)txns, (long)failed);
+ return (0);
+}
+
+/*
+ * XXX Figure out the appropriate way to pick out IDs.
+ */
+int
+tp_txn(dbenv, adb, bdb, tdb, hdb, accounts, branches, tellers, verbose)
+ DB_ENV *dbenv;
+ DB *adb, *bdb, *tdb, *hdb;
+ int accounts, branches, tellers, verbose;
+{
+ DBC *acurs, *bcurs, *tcurs;
+ DBT d_dbt, d_histdbt, k_dbt, k_histdbt;
+ DB_TXN *t;
+ db_recno_t key;
+ defrec rec;
+ histrec hrec;
+ int account, branch, teller;
+
+ t = NULL;
+ acurs = bcurs = tcurs = NULL;
+
+ /*
+ * XXX We could move a lot of this into the driver to make this
+ * faster.
+ */
+ account = random_id(ACCOUNT, accounts, branches, tellers);
+ branch = random_id(BRANCH, accounts, branches, tellers);
+ teller = random_id(TELLER, accounts, branches, tellers);
+
+ memset(&d_histdbt, 0, sizeof(d_histdbt));
+
+ memset(&k_histdbt, 0, sizeof(k_histdbt));
+ k_histdbt.data = &key;
+ k_histdbt.size = sizeof(key);
+
+ memset(&k_dbt, 0, sizeof(k_dbt));
+ k_dbt.size = sizeof(int);
+
+ memset(&d_dbt, 0, sizeof(d_dbt));
+ d_dbt.flags = DB_DBT_USERMEM;
+ d_dbt.data = &rec;
+ d_dbt.ulen = sizeof(rec);
+
+ hrec.aid = account;
+ hrec.bid = branch;
+ hrec.tid = teller;
+ hrec.amount = 10;
+ /* Request 0 bytes since we're just positioning. */
+ d_histdbt.flags = DB_DBT_PARTIAL;
+
+ /* START TIMING */
+ if (txn_begin(dbenv, NULL, &t, 0) != 0)
+ goto err;
+
+ if (adb->cursor(adb, t, &acurs, 0) != 0 ||
+ bdb->cursor(bdb, t, &bcurs, 0) != 0 ||
+ tdb->cursor(tdb, t, &tcurs, 0) != 0)
+ goto err;
+
+ /* Account record */
+ k_dbt.data = &account;
+ if (acurs->c_get(acurs, &k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (acurs->c_put(acurs, &k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ /* Branch record */
+ k_dbt.data = &branch;
+ if (bcurs->c_get(bcurs, &k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (bcurs->c_put(bcurs, &k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ /* Teller record */
+ k_dbt.data = &teller;
+ if (tcurs->c_get(tcurs, &k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (tcurs->c_put(tcurs, &k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ /* History record */
+ d_histdbt.flags = 0;
+ d_histdbt.data = &hrec;
+ d_histdbt.ulen = sizeof(hrec);
+ if (hdb->put(hdb, t, &k_histdbt, &d_histdbt, DB_APPEND) != 0)
+ goto err;
+
+ if (acurs->c_close(acurs) != 0 || bcurs->c_close(bcurs) != 0 ||
+ tcurs->c_close(tcurs) != 0)
+ goto err;
+
+ if (txn_commit(t, 0) != 0)
+ goto err;
+
+ /* END TIMING */
+ return (0);
+
+err: if (acurs != NULL)
+ (void)acurs->c_close(acurs);
+ if (bcurs != NULL)
+ (void)bcurs->c_close(bcurs);
+ if (tcurs != NULL)
+ (void)tcurs->c_close(tcurs);
+ if (t != NULL)
+ (void)txn_abort(t);
+
+ if (verbose)
+ printf("Transaction A=%ld B=%ld T=%ld failed\n",
+ (long)account, (long)branch, (long)teller);
+ return (-1);
+}
diff --git a/bdb/examples_c/ex_tpcb.h b/bdb/examples_c/ex_tpcb.h
new file mode 100644
index 00000000000..ef90bc53234
--- /dev/null
+++ b/bdb/examples_c/ex_tpcb.h
@@ -0,0 +1,39 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_tpcb.h,v 11.4 2000/05/17 19:21:02 bostic Exp $
+ */
+
+#ifndef _TPCB_H_
+#define _TPCB_H_
+
+typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
+
+#define TELLERS_PER_BRANCH 100
+#define ACCOUNTS_PER_TELLER 1000
+
+#define ACCOUNTS 1000000
+#define BRANCHES 10
+#define TELLERS 1000
+#define HISTORY 1000000
+#define HISTORY_LEN 100
+#define RECLEN 100
+#define BEGID 1000000
+
+typedef struct _defrec {
+ u_int32_t id;
+ u_int32_t balance;
+ u_int8_t pad[RECLEN - sizeof(u_int32_t) - sizeof(u_int32_t)];
+} defrec;
+
+typedef struct _histrec {
+ u_int32_t aid;
+ u_int32_t bid;
+ u_int32_t tid;
+ u_int32_t amount;
+ u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
+} histrec;
+#endif /* _TPCB_H_ */
diff --git a/bdb/examples_cxx/AccessExample.cpp b/bdb/examples_cxx/AccessExample.cpp
new file mode 100644
index 00000000000..ae885aa8388
--- /dev/null
+++ b/bdb/examples_cxx/AccessExample.cpp
@@ -0,0 +1,151 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: AccessExample.cpp,v 11.7 2000/12/06 18:58:23 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+class AccessExample
+{
+public:
+ AccessExample();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ AccessExample(const AccessExample &);
+ void operator = (const AccessExample &);
+};
+
+static void usage(); // forward
+
+int main(int argc, char *argv[])
+{
+ if (argc > 1) {
+ usage();
+ }
+
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // create the Db with the DB_CXX_NO_EXCEPTIONS flag so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ AccessExample app;
+ app.run();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "AccessExample: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+static void usage()
+{
+ cerr << "usage: AccessExample\n";
+ exit(1);
+}
+
+const char AccessExample::FileName[] = "access.db";
+
+AccessExample::AccessExample()
+{
+}
+
+void AccessExample::run()
+{
+ // Remove the previous database.
+ (void)unlink(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("AccessExample");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
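+ // For example (hypothetical input, added commentary): typing "abc"
+ // at the prompt stores the key "abc" with the data "cba"; both Dbts
+ // below are sized len + 1 so the trailing '\0' is stored too, which
+ // lets the dump loop at the end print them as C strings.
+ //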
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+
+ for (;;) {
+ cout << "input> ";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ }
+ cout << "\n";
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ Dbt key;
+ Dbt data;
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "AccessExample: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
diff --git a/bdb/examples_cxx/BtRecExample.cpp b/bdb/examples_cxx/BtRecExample.cpp
new file mode 100644
index 00000000000..98d9626b969
--- /dev/null
+++ b/bdb/examples_cxx/BtRecExample.cpp
@@ -0,0 +1,247 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: BtRecExample.cpp,v 11.6 2000/02/19 20:57:59 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <errno.h>
+#include <iostream.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+#define DATABASE "access.db"
+#define WORDLIST "../test/wordlist"
+
+void usage();
+extern "C" int getopt(int, char * const *, const char *);
+
+char *progname = "BtRecExample"; // Program name.
+
+class BtRecExample
+{
+public:
+ BtRecExample(FILE *fp);
+ ~BtRecExample();
+ void run();
+ void stats();
+ void show(char *msg, Dbt *key, Dbt *data);
+
+private:
+ Db *dbp;
+ Dbc *dbcp;
+};
+
+BtRecExample::BtRecExample(FILE *fp)
+{
+ char *p, *t, buf[1024], rbuf[1024];
+ int ret;
+
+ // Remove the previous database.
+ (void)unlink(DATABASE);
+
+ dbcp = NULL; // No cursor yet; the destructor checks this member.
+ dbp = new Db(NULL, 0);
+
+ dbp->set_error_stream(&cerr);
+ dbp->set_errpfx(progname);
+ dbp->set_pagesize(1024); // 1K page sizes.
+
+ dbp->set_flags(DB_RECNUM); // Record numbers.
+ dbp->open(DATABASE, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the word
+ // preceded by its record number, and the data is the same, but
+ // in reverse order.
+ //
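+ // For example (hypothetical input, added commentary): if the third
+ // word read from the word list were "apple", the stored key would be
+ // "0003apple" and the stored data "elppa3000" (the fgets() call below
+ // starts at buf + 4, so the '_' written by sprintf() is overwritten
+ // by the word itself).
+ //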
+
+ for (int cnt = 1; cnt <= 1000; ++cnt) {
+ (void)sprintf(buf, "%04d_", cnt);
+ if (fgets(buf + 4, sizeof(buf) - 4, fp) == NULL)
+ break;
+ u_int32_t len = strlen(buf);
+ buf[len - 1] = '\0';
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ // As a convenience for printing, we include the null terminator
+ // in the stored data.
+ //
+ Dbt key(buf, len);
+ Dbt data(rbuf, len);
+
+ if ((ret = dbp->put(NULL, &key, &data, DB_NOOVERWRITE)) != 0) {
+ dbp->err(ret, "Db::put");
+ if (ret != DB_KEYEXIST)
+ throw DbException(ret);
+ }
+ }
+}
+
+BtRecExample::~BtRecExample()
+{
+ if (dbcp != 0)
+ dbcp->close();
+ dbp->close(0);
+ delete dbp;
+}
+
+//
+// Print out the number of records in the database.
+//
+void BtRecExample::stats()
+{
+ DB_BTREE_STAT *statp;
+
+ dbp->stat(&statp, NULL, 0);
+ cout << progname << ": database contains "
+ << (u_long)statp->bt_ndata << " records\n";
+
+ // Note: must use free, not delete.
+ // This struct is allocated by C.
+ //
+ free(statp);
+}
+
+void BtRecExample::run()
+{
+ db_recno_t recno;
+ int ret;
+ char buf[1024];
+
+ // Acquire a cursor for the database.
+ dbp->cursor(NULL, &dbcp, 0);
+
+ //
+ // Prompt the user for a record number, then retrieve and display
+ // that record.
+ //
+ for (;;) {
+ // Get a record number.
+ cout << "recno #> ";
+ cout.flush();
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ recno = atoi(buf);
+
+ //
+ // Start with a fresh key each time;
+ // the dbcp->get() routine returns
+ // the key and data pair, not just the key!
+ //
+ Dbt key(&recno, sizeof(recno));
+ Dbt data;
+
+ if ((ret = dbcp->get(&key, &data, DB_SET_RECNO)) != 0) {
+ dbp->err(ret, "DBcursor->get");
+ throw DbException(ret);
+ }
+
+ // Display the key and data.
+ show("k/d\t", &key, &data);
+
+ // Move the cursor a record forward.
+ if ((ret = dbcp->get(&key, &data, DB_NEXT)) != 0) {
+ dbp->err(ret, "DBcursor->get");
+ throw DbException(ret);
+ }
+
+ // Display the key and data.
+ show("next\t", &key, &data);
+
+ //
+ // Retrieve the record number for the following record into
+ // local memory.
+ //
+ data.set_data(&recno);
+ data.set_size(sizeof(recno));
+ data.set_ulen(sizeof(recno));
+ data.set_flags(data.get_flags() | DB_DBT_USERMEM);
+
+ if ((ret = dbcp->get(&key, &data, DB_GET_RECNO)) != 0) {
+ if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY) {
+ dbp->err(ret, "DBcursor->get");
+ throw DbException(ret);
+ }
+ }
+ else {
+ cout << "retrieved recno: " << (u_long)recno << "\n";
+ }
+ }
+
+ dbcp->close();
+ dbcp = NULL;
+}
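+
+//
+// Note (added commentary, not in the original source): the DB_SET_RECNO
+// and DB_GET_RECNO cursor operations used above work only because the
+// database was created with the DB_RECNUM flag in the constructor; without
+// it a Btree does not maintain logical record numbers and both calls
+// would fail.
+//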
+
+//
+// show --
+// Display a key/data pair.
+//
+void BtRecExample::show(char *msg, Dbt *key, Dbt *data)
+{
+ cout << msg << (char *)key->get_data()
+ << " : " << (char *)data->get_data() << "\n";
+}
+
+int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ FILE *fp;
+ int ch;
+
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ // Open the word database.
+ if ((fp = fopen(WORDLIST, "r")) == NULL) {
+ fprintf(stderr, "%s: open %s: %s\n",
+ progname, WORDLIST, db_strerror(errno));
+ exit (1);
+ }
+
+ try {
+ BtRecExample app(fp);
+
+ // Close the word database.
+ (void)fclose(fp);
+ fp = NULL;
+
+ app.stats();
+ app.run();
+ }
+ catch (DbException &dbe) {
+ cerr << "Exception: " << dbe.what() << "\n";
+ return dbe.get_errno();
+ }
+
+ return (0);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: %s\n", progname);
+ exit(1);
+}
diff --git a/bdb/examples_cxx/EnvExample.cpp b/bdb/examples_cxx/EnvExample.cpp
new file mode 100644
index 00000000000..bef1f3d1ace
--- /dev/null
+++ b/bdb/examples_cxx/EnvExample.cpp
@@ -0,0 +1,122 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: EnvExample.cpp,v 11.12 2000/10/27 20:32:00 dda Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <errno.h>
+#include <iostream.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <db_cxx.h>
+
+#ifdef macintosh
+#define DATABASE_HOME ":database"
+#define CONFIG_DATA_DIR ":database"
+#else
+#ifdef DB_WIN32
+#define DATABASE_HOME "\\tmp\\database"
+#define CONFIG_DATA_DIR "\\database\\files"
+#else
+#define DATABASE_HOME "/tmp/database"
+#define CONFIG_DATA_DIR "/database/files"
+#endif
+#endif
+
+void db_setup(char *, char *, ostream&);
+void db_teardown(char *, char *, ostream&);
+
+char *progname = "EnvExample"; /* Program name. */
+
+//
+// An example of a program creating/configuring a Berkeley DB environment.
+//
+int
+main(int, char **)
+{
+ //
+ // Note: it may be easiest to put all Berkeley DB operations in a
+ // try block, as seen here. Alternatively, you can change the
+ // ErrorModel in the DbEnv so that exceptions are never thrown
+ // and check error returns from all methods.
+ //
+ try {
+ char *data_dir, *home;
+
+ //
+ // All of the shared database files live in DATABASE_HOME,
+ // but data files live in CONFIG_DATA_DIR.
+ //
+ home = DATABASE_HOME;
+ data_dir = CONFIG_DATA_DIR;
+
+ cout << "Setup env\n";
+ db_setup(DATABASE_HOME, data_dir, cerr);
+
+ cout << "Teardown env\n";
+ db_teardown(DATABASE_HOME, data_dir, cerr);
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "AccessExample: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+// Note that any of the db calls can throw DbException
+void
+db_setup(char *home, char *data_dir, ostream& err_stream)
+{
+ //
+ // Create an environment object and initialize it for error
+ // reporting.
+ //
+ DbEnv *dbenv = new DbEnv(0);
+ dbenv->set_error_stream(&err_stream);
+ dbenv->set_errpfx(progname);
+
+ //
+ // We want to specify the shared memory buffer pool cachesize,
+ // but everything else is the default.
+ //
+ dbenv->set_cachesize(0, 64 * 1024, 0);
+
+ // Databases are in a subdirectory.
+ (void)dbenv->set_data_dir(data_dir);
+
+ // Open the environment with full transactional support.
+ dbenv->open(DATABASE_HOME,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0);
+
+ // Do something interesting...
+
+ // Close the handle.
+ dbenv->close(0);
+}
+
+void
+db_teardown(char *home, char *data_dir, ostream& err_stream)
+{
+ // Remove the shared database regions.
+ DbEnv *dbenv = new DbEnv(0);
+
+ dbenv->set_error_stream(&err_stream);
+ dbenv->set_errpfx(progname);
+
+ (void)dbenv->set_data_dir(data_dir);
+ dbenv->remove(home, 0);
+ delete dbenv;
+}
diff --git a/bdb/examples_cxx/LockExample.cpp b/bdb/examples_cxx/LockExample.cpp
new file mode 100644
index 00000000000..cfab2868098
--- /dev/null
+++ b/bdb/examples_cxx/LockExample.cpp
@@ -0,0 +1,236 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: LockExample.cpp,v 11.8 2001/01/04 14:23:30 dda Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <errno.h>
+#include <iostream.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <db_cxx.h>
+
+char *progname = "LockExample"; // Program name.
+
+//
+// An example of a program using DBLock and related classes.
+//
+class LockExample : public DbEnv
+{
+public:
+ void run();
+
+ LockExample(const char *home, u_int32_t maxlocks, int do_unlink);
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ LockExample(const LockExample &);
+ void operator = (const LockExample &);
+};
+
+static void usage(); // forward
+
+int
+main(int argc, char *argv[])
+{
+ const char *home;
+ int do_unlink;
+ u_int32_t maxlocks;
+ int i;
+
+ home = "TESTDIR";
+ maxlocks = 0;
+ do_unlink = 0;
+ for (int argnum = 1; argnum < argc; ++argnum) {
+ if (strcmp(argv[argnum], "-h") == 0) {
+ if (++argnum >= argc)
+ usage();
+ home = argv[argnum];
+ }
+ else if (strcmp(argv[argnum], "-m") == 0) {
+ if (++argnum >= argc)
+ usage();
+ if ((i = atoi(argv[argnum])) <= 0)
+ usage();
+ maxlocks = (u_int32_t)i; /* XXX: possible overflow. */
+ }
+ else if (strcmp(argv[argnum], "-u") == 0) {
+ do_unlink = 1;
+ }
+ else {
+ usage();
+ }
+ }
+
+ try {
+ if (do_unlink) {
+ // Create an environment that immediately
+ // removes all files.
+ LockExample tmp(home, maxlocks, do_unlink);
+ }
+
+ LockExample app(home, maxlocks, do_unlink);
+ app.run();
+ app.close(0);
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "LockExample: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+LockExample::LockExample(const char *home, u_int32_t maxlocks, int do_unlink)
+: DbEnv(0)
+{
+ int ret;
+
+ if (do_unlink) {
+ if ((ret = remove(home, DB_FORCE)) != 0) {
+ cerr << progname << ": DbEnv::remove: "
+ << strerror(errno) << "\n";
+ exit (1);
+ }
+ }
+ else {
+ set_error_stream(&cerr);
+ set_errpfx("LockExample");
+ if (maxlocks != 0)
+ set_lk_max_locks(maxlocks);
+ open(home, DB_CREATE | DB_INIT_LOCK, 0);
+ }
+}
+
+void LockExample::run()
+{
+ long held;
+ u_int32_t len, locker;
+ int did_get, ret;
+ DbLock *locks = 0;
+ int lockcount = 0;
+ char objbuf[1024];
+ int lockid = 0;
+
+ //
+ // Accept lock requests.
+ //
+ lock_id(&locker);
+ for (held = 0;;) {
+ cout << "Operation get/release [get]> ";
+ cout.flush();
+
+ char opbuf[16];
+ cin.getline(opbuf, sizeof(opbuf));
+ if (cin.eof())
+ break;
+ if ((len = strlen(opbuf)) <= 1 || strcmp(opbuf, "get") == 0) {
+ // Acquire a lock.
+ cout << "input object (text string) to lock> ";
+ cout.flush();
+ cin.getline(objbuf, sizeof(objbuf));
+ if (cin.eof())
+ break;
+ if ((len = strlen(objbuf)) <= 0)
+ continue;
+
+ char lockbuf[16];
+ do {
+ cout << "lock type read/write [read]> ";
+ cout.flush();
+ cin.getline(lockbuf, sizeof(lockbuf));
+ if (cin.eof())
+ break;
+ len = strlen(lockbuf);
+ } while (len >= 1 &&
+ strcmp(lockbuf, "read") != 0 &&
+ strcmp(lockbuf, "write") != 0);
+
+ db_lockmode_t lock_type;
+ if (len <= 1 || strcmp(lockbuf, "read") == 0)
+ lock_type = DB_LOCK_READ;
+ else
+ lock_type = DB_LOCK_WRITE;
+
+ Dbt dbt(objbuf, strlen(objbuf));
+
+ DbLock lock;
+ ret = lock_get(locker, DB_LOCK_NOWAIT, &dbt,
+ lock_type, &lock);
+ did_get = 1;
+ lockid = lockcount++;
+ if (locks == NULL) {
+ locks = new DbLock[1];
+ }
+ else {
+ DbLock *newlocks = new DbLock[lockcount];
+ for (int lockno = 0; lockno < lockid; lockno++) {
+ newlocks[lockno] = locks[lockno];
+ }
+ delete [] locks;
+ locks = newlocks;
+ }
+ locks[lockid] = lock;
+ } else {
+ // Release a lock.
+ do {
+ cout << "input lock to release> ";
+ cout.flush();
+ cin.getline(objbuf, sizeof(objbuf));
+ if (cin.eof())
+ break;
+ } while ((len = strlen(objbuf)) <= 0);
+ lockid = strtol(objbuf, NULL, 10);
+ if (lockid < 0 || lockid >= lockcount) {
+ cout << "Lock #" << lockid << " out of range\n";
+ continue;
+ }
+ DbLock lock = locks[lockid];
+ ret = lock.put(this);
+ did_get = 0;
+ }
+
+ switch (ret) {
+ case 0:
+ cout << "Lock #" << lockid << " "
+ << (did_get ? "granted" : "released")
+ << "\n";
+ held += did_get ? 1 : -1;
+ break;
+ case DB_LOCK_NOTGRANTED:
+ cout << "Lock not granted\n";
+ break;
+ case DB_LOCK_DEADLOCK:
+ cerr << "LockExample: lock_"
+ << (did_get ? "get" : "put")
+ << ": " << "returned DEADLOCK";
+ break;
+ default:
+ cerr << "LockExample: lock_get: %s",
+ strerror(errno);
+ }
+ }
+ cout << "\n";
+ cout << "Closing lock region " << held << " locks held\n";
+ if (locks != 0)
+ delete [] locks;
+}
+
+static void
+usage()
+{
+ cerr << "usage: LockExample [-u] [-h home] [-m maxlocks]\n";
+ exit(1);
+}
diff --git a/bdb/examples_cxx/MpoolExample.cpp b/bdb/examples_cxx/MpoolExample.cpp
new file mode 100644
index 00000000000..cf0f5f7e6a4
--- /dev/null
+++ b/bdb/examples_cxx/MpoolExample.cpp
@@ -0,0 +1,210 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: MpoolExample.cpp,v 11.9 2000/10/27 20:32:01 dda Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <iostream.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#endif
+
+#include <db_cxx.h>
+
+#define MPOOL "mpool"
+
+void init(char *, int, int);
+void run(DB_ENV *, int, int, int);
+
+static void usage();
+
+char *progname = "MpoolExample"; // Program name.
+
+class MpoolExample : public DbEnv
+{
+public:
+ MpoolExample();
+ void initdb(const char *home, int cachesize);
+ void run(int hits, int pagesize, int npages);
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ MpoolExample(const MpoolExample &);
+ void operator = (const MpoolExample &);
+};
+
+int main(int argc, char *argv[])
+{
+ int cachesize = 20 * 1024;
+ int hits = 1000;
+ int npages = 50;
+ int pagesize = 1024;
+
+ for (int i = 1; i < argc; ++i) {
+ if (strcmp(argv[i], "-c") == 0) {
+ if ((cachesize = atoi(argv[++i])) < 20 * 1024)
+ usage();
+ }
+ else if (strcmp(argv[i], "-h") == 0) {
+ if ((hits = atoi(argv[++i])) <= 0)
+ usage();
+ }
+ else if (strcmp(argv[i], "-n") == 0) {
+ if ((npages = atoi(argv[++i])) <= 0)
+ usage();
+ }
+ else if (strcmp(argv[i], "-p") == 0) {
+ if ((pagesize = atoi(argv[++i])) <= 0)
+ usage();
+ }
+ else {
+ usage();
+ }
+ }
+
+ // Initialize the file.
+ init(MPOOL, pagesize, npages);
+
+ try {
+ MpoolExample app;
+
+ cout << progname
+ << ": cachesize: " << cachesize
+ << "; pagesize: " << pagesize
+ << "; N pages: " << npages << "\n";
+
+ app.initdb(NULL, cachesize);
+ app.run(hits, pagesize, npages);
+ cout << "MpoolExample: completed\n";
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "MpoolExample: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+//
+// init --
+// Create a backing file.
+//
+void
+init(char *file, int pagesize, int npages)
+{
+ //
+ // Create a file with the right number of pages, and store a page
+ // number on each page.
+ //
+ int fd;
+ int flags = O_CREAT | O_RDWR | O_TRUNC;
+#ifdef DB_WIN32
+ flags |= O_BINARY;
+#endif
+ if ((fd = open(file, flags, 0666)) < 0) {
+ cerr << "MpoolExample: " << file << ": " << strerror(errno) << "\n";
+ exit(1);
+ }
+ char *p = new char[pagesize];
+ memset(p, 0, pagesize);
+
+ // The pages are numbered from 0.
+ for (int cnt = 0; cnt <= npages; ++cnt) {
+ *(db_pgno_t *)p = cnt;
+ if (write(fd, p, pagesize) != pagesize) {
+ cerr << "MpoolExample: " << file
+ << ": " << strerror(errno) << "\n";
+ exit(1);
+ }
+ }
+ delete [] p;
+}
+
+static void
+usage()
+{
+ cerr << "usage: MpoolExample [-c cachesize] "
+ << "[-h hits] [-n npages] [-p pagesize]\n";
+ exit(1);
+}
+
+// Note: by using DB_CXX_NO_EXCEPTIONS, we get explicit error returns
+// from various methods rather than exceptions so we can report more
+// information with each error.
+//
+MpoolExample::MpoolExample()
+: DbEnv(DB_CXX_NO_EXCEPTIONS)
+{
+}
+
+void MpoolExample::initdb(const char *home, int cachesize)
+{
+ set_error_stream(&cerr);
+ set_errpfx("MpoolExample");
+ set_cachesize(0, cachesize, 0);
+
+ open(home, DB_CREATE | DB_INIT_MPOOL, 0);
+}
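+
+// A minimal sketch (added commentary, not in the original source): because
+// this environment was created with DB_CXX_NO_EXCEPTIONS, every method
+// returns an error code instead of throwing, so the open() call above
+// could equally be checked explicitly, for example:
+//
+//	int ret;
+//	if ((ret = open(home, DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
+//		cerr << "MpoolExample: open: " << strerror(ret) << "\n";
+//		exit(1);
+//	}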
+
+//
+// run --
+// Get a set of pages.
+//
+void
+MpoolExample::run(int hits, int pagesize, int npages)
+{
+ db_pgno_t pageno;
+ int cnt;
+ void *p;
+
+ // Open the file in the pool.
+ DbMpoolFile *dbmfp;
+
+ DbMpoolFile::open(this, MPOOL, 0, 0, pagesize, NULL, &dbmfp);
+
+ cout << "retrieve " << hits << " random pages... ";
+
+ srand((unsigned int)time(NULL));
+ for (cnt = 0; cnt < hits; ++cnt) {
+ pageno = (rand() % npages) + 1;
+ if ((errno = dbmfp->get(&pageno, 0, &p)) != 0) {
+ cerr << "MpoolExample: unable to retrieve page "
+ << (unsigned long)pageno << ": "
+ << strerror(errno) << "\n";
+ exit(1);
+ }
+ if (*(db_pgno_t *)p != pageno) {
+ cerr << "MpoolExample: wrong page retrieved ("
+ << (unsigned long)pageno << " != "
+ << *(int *)p << ")\n";
+ exit(1);
+ }
+ if ((errno = dbmfp->put(p, 0)) != 0) {
+ cerr << "MpoolExample: unable to return page "
+ << (unsigned long)pageno << ": "
+ << strerror(errno) << "\n";
+ exit(1);
+ }
+ }
+
+ cout << "successful.\n";
+
+ // Close the pool.
+ if ((errno = close(0)) != 0) {
+ cerr << "MpoolExample: " << strerror(errno) << "\n";
+ exit(1);
+ }
+}
diff --git a/bdb/examples_cxx/TpcbExample.cpp b/bdb/examples_cxx/TpcbExample.cpp
new file mode 100644
index 00000000000..f4ca72df8e3
--- /dev/null
+++ b/bdb/examples_cxx/TpcbExample.cpp
@@ -0,0 +1,666 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TpcbExample.cpp,v 11.14 2000/10/27 20:32:01 dda Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef DB_WIN32
+#include <sys/types.h>
+#include <sys/timeb.h>
+#endif
+
+#include <iostream.h>
+#include <iomanip.h>
+#include <db_cxx.h>
+
+typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
+
+void errExit(int err, const char *); // show err as errno and exit
+
+void invarg(int, char *);
+u_int32_t random_id(FTYPE, u_int32_t, u_int32_t, u_int32_t);
+u_int32_t random_int(u_int32_t, u_int32_t);
+static void usage(void);
+
+int verbose;
+char *progname = "TpcbExample"; // Program name.
+
+class TpcbExample : public DbEnv
+{
+public:
+ void populate(int, int, int, int);
+ void run(int, int, int, int);
+ int txn(Db *, Db *, Db *, Db *,
+ int, int, int);
+ void populateHistory(Db *, int, u_int32_t, u_int32_t, u_int32_t);
+ void populateTable(Db *, u_int32_t, u_int32_t, int, char *);
+
+ // Note: the constructor creates a DbEnv(), which is
+ // not fully initialized until the DbEnv::open() method
+ // is called.
+ //
+ TpcbExample(const char *home, int cachesize,
+ int initializing, int flags);
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TpcbExample(const TpcbExample &);
+ void operator = (const TpcbExample &);
+};
+
+//
+// This program implements a basic TPC/B driver program. To create the
+// TPC/B database, run with the -i (init) flag. The number of records
+// with which to populate the account, history, branch, and teller tables
+// is specified by the a, s, b, and t flags respectively. To run a TPC/B
+// test, use the n flag to indicate a number of transactions to run (note
+// that you can run many of these processes in parallel to simulate a
+// multiuser test run).
+//
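+// Example invocations (added commentary, not in the original source; the
+// home directory and transaction count are arbitrary):
+//
+//	TpcbExample -i -v -h TESTDIR	(initialize the tables)
+//	TpcbExample -n 1000 -v -h TESTDIR	(run 1000 transactions)
+//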
+#define TELLERS_PER_BRANCH 100
+#define ACCOUNTS_PER_TELLER 1000
+#define HISTORY_PER_BRANCH 2592000
+
+/*
+ * The default configuration that adheres to TPCB scaling rules requires
+ * nearly 3 GB of space. To avoid requiring that much space for testing,
+ * we set the parameters much lower. If you want to run a valid 10 TPS
+ * configuration, define VALID_SCALING.
+ */
+#ifdef VALID_SCALING
+#define ACCOUNTS 1000000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 25920000
+#endif
+
+#ifdef TINY
+#define ACCOUNTS 1000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 10000
+#endif
+
+#if !defined(VALID_SCALING) && !defined(TINY)
+#define ACCOUNTS 100000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 259200
+#endif
+
+#define HISTORY_LEN 100
+#define RECLEN 100
+#define BEGID 1000000
+
+struct Defrec {
+ u_int32_t id;
+ u_int32_t balance;
+ u_int8_t pad[RECLEN - sizeof(u_int32_t) - sizeof(u_int32_t)];
+};
+
+struct Histrec {
+ u_int32_t aid;
+ u_int32_t bid;
+ u_int32_t tid;
+ u_int32_t amount;
+ u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
+};
+
+int
+main(int argc, char *argv[])
+{
+ unsigned long seed;
+ int accounts, branches, tellers, history;
+ int iflag, mpool, ntxns, txn_no_sync;
+ char *home, *endarg;
+
+ home = "TESTDIR";
+ accounts = branches = history = tellers = 0;
+ txn_no_sync = 0;
+ mpool = ntxns = 0;
+ verbose = 0;
+ iflag = 0;
+ seed = (unsigned long)getpid();
+
+ for (int i = 1; i < argc; ++i) {
+
+ if (strcmp(argv[i], "-a") == 0) {
+ // Number of account records
+ if ((accounts = atoi(argv[++i])) <= 0)
+ invarg('a', argv[i]);
+ }
+ else if (strcmp(argv[i], "-b") == 0) {
+ // Number of branch records
+ if ((branches = atoi(argv[++i])) <= 0)
+ invarg('b', argv[i]);
+ }
+ else if (strcmp(argv[i], "-c") == 0) {
+ // Cachesize in bytes
+ if ((mpool = atoi(argv[++i])) <= 0)
+ invarg('c', argv[i]);
+ }
+ else if (strcmp(argv[i], "-f") == 0) {
+ // Fast mode: no txn sync.
+ txn_no_sync = 1;
+ }
+ else if (strcmp(argv[i], "-h") == 0) {
+ // DB home.
+ home = argv[++i];
+ }
+ else if (strcmp(argv[i], "-i") == 0) {
+ // Initialize the test.
+ iflag = 1;
+ }
+ else if (strcmp(argv[i], "-n") == 0) {
+ // Number of transactions
+ if ((ntxns = atoi(argv[++i])) <= 0)
+ invarg('n', argv[i]);
+ }
+ else if (strcmp(argv[i], "-S") == 0) {
+ // Random number seed.
+ seed = strtoul(argv[++i], &endarg, 0);
+ if (*endarg != '\0')
+ invarg('S', argv[i]);
+ }
+ else if (strcmp(argv[i], "-s") == 0) {
+ // Number of history records
+ if ((history = atoi(argv[++i])) <= 0)
+ invarg('s', argv[i]);
+ }
+ else if (strcmp(argv[i], "-t") == 0) {
+ // Number of teller records
+ if ((tellers = atoi(argv[++i])) <= 0)
+ invarg('t', argv[i]);
+ }
+ else if (strcmp(argv[i], "-v") == 0) {
+ // Verbose option.
+ verbose = 1;
+ }
+ else {
+ usage();
+ }
+ }
+
+ srand((unsigned int)seed);
+
+ accounts = accounts == 0 ? ACCOUNTS : accounts;
+ branches = branches == 0 ? BRANCHES : branches;
+ tellers = tellers == 0 ? TELLERS : tellers;
+ history = history == 0 ? HISTORY : history;
+
+ if (verbose)
+ cout << (long)accounts << " Accounts "
+ << (long)branches << " Branches "
+ << (long)tellers << " Tellers "
+ << (long)history << " History\n";
+
+ try {
+ // Initialize the database environment.
+ // Must be done within a try block, unless you
+ // change the error model in the environment options.
+ //
+ TpcbExample app(home, mpool, iflag, txn_no_sync ? DB_TXN_NOSYNC : 0);
+
+ if (iflag) {
+ if (ntxns != 0)
+ usage();
+ app.populate(accounts, branches, history, tellers);
+ }
+ else {
+ if (ntxns == 0)
+ usage();
+ app.run(ntxns, accounts, branches, tellers);
+ }
+
+ app.close(0);
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "TpcbExample: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+void
+invarg(int arg, char *str)
+{
+ cerr << "TpcbExample: invalid argument for -"
+ << (char)arg << ": " << str << "\n";
+ exit(1);
+}
+
+static void
+usage()
+{
+ cerr << "usage: TpcbExample [-fiv] [-a accounts] [-b branches]\n"
+ << " [-c cachesize] [-h home] [-n transactions ]\n"
+ << " [-S seed] [-s history] [-t tellers]\n";
+ exit(1);
+}
+
+TpcbExample::TpcbExample(const char *home, int cachesize,
+ int initializing, int flags)
+: DbEnv(0)
+{
+ u_int32_t local_flags;
+
+ set_error_stream(&cerr);
+ set_errpfx("TpcbExample");
+ (void)set_cachesize(0, cachesize == 0 ?
+ 4 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+
+ local_flags = flags | DB_CREATE | DB_INIT_MPOOL;
+ if (!initializing)
+ local_flags |= DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ open(home, local_flags, 0);
+}
+
+//
+// Initialize the database to the specified number of accounts, branches,
+// history records, and tellers.
+//
+void
+TpcbExample::populate(int accounts, int branches, int history, int tellers)
+{
+ Db *dbp;
+
+ int err;
+ u_int32_t balance, idnum;
+ u_int32_t end_anum, end_bnum, end_tnum;
+ u_int32_t start_anum, start_bnum, start_tnum;
+
+ idnum = BEGID;
+ balance = 500000;
+
+ dbp = new Db(this, 0);
+ dbp->set_h_nelem((unsigned int)accounts);
+
+ if ((err = dbp->open("account", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ errExit(err, "Open of account file failed");
+ }
+
+ start_anum = idnum;
+ populateTable(dbp, idnum, balance, accounts, "account");
+ idnum += accounts;
+ end_anum = idnum - 1;
+ if ((err = dbp->close(0)) != 0) {
+ errExit(err, "Account file close failed");
+ }
+ delete dbp;
+ if (verbose)
+ cout << "Populated accounts: "
+ << (long)start_anum << " - " << (long)end_anum << "\n";
+
+ dbp = new Db(this, 0);
+ //
+ // Since the number of branches is very small, we want to use very
+ // small pages and only 1 key per page. This is the poor-man's way
+ // of getting key locking instead of page locking.
+ //
+ dbp->set_h_ffactor(1);
+ dbp->set_h_nelem((unsigned int)branches);
+ dbp->set_pagesize(512);
+
+ if ((err = dbp->open("branch", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ errExit(err, "Branch file create failed");
+ }
+ start_bnum = idnum;
+ populateTable(dbp, idnum, balance, branches, "branch");
+ idnum += branches;
+ end_bnum = idnum - 1;
+ if ((err = dbp->close(0)) != 0) {
+ errExit(err, "Close of branch file failed");
+ }
+ delete dbp;
+
+ if (verbose)
+ cout << "Populated branches: "
+ << (long)start_bnum << " - " << (long)end_bnum << "\n";
+
+ dbp = new Db(this, 0);
+ //
+ // In the case of tellers, we also want small pages, but we'll let
+ // the fill factor dynamically adjust itself.
+ //
+ dbp->set_h_ffactor(0);
+ dbp->set_h_nelem((unsigned int)tellers);
+ dbp->set_pagesize(512);
+
+ if ((err = dbp->open("teller", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ errExit(err, "Teller file create failed");
+ }
+
+ start_tnum = idnum;
+ populateTable(dbp, idnum, balance, tellers, "teller");
+ idnum += tellers;
+ end_tnum = idnum - 1;
+ if ((err = dbp->close(0)) != 0) {
+ errExit(err, "Close of teller file failed");
+ }
+ delete dbp;
+ if (verbose)
+ cout << "Populated tellers: "
+ << (long)start_tnum << " - " << (long)end_tnum << "\n";
+
+ dbp = new Db(this, 0);
+ dbp->set_re_len(HISTORY_LEN);
+ if ((err = dbp->open("history", NULL, DB_RECNO,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ errExit(err, "Create of history file failed");
+ }
+
+ populateHistory(dbp, history, accounts, branches, tellers);
+ if ((err = dbp->close(0)) != 0) {
+ errExit(err, "Close of history file failed");
+ }
+ delete dbp;
+}
+
+void
+TpcbExample::populateTable(Db *dbp,
+ u_int32_t start_id, u_int32_t balance,
+ int nrecs, char *msg)
+{
+ Defrec drec;
+ memset(&drec.pad[0], 1, sizeof(drec.pad));
+
+ Dbt kdbt(&drec.id, sizeof(u_int32_t));
+ Dbt ddbt(&drec, sizeof(drec));
+
+ for (int i = 0; i < nrecs; i++) {
+ drec.id = start_id + (u_int32_t)i;
+ drec.balance = balance;
+ int err;
+ if ((err =
+ dbp->put(NULL, &kdbt, &ddbt, DB_NOOVERWRITE)) != 0) {
+ cerr << "Failure initializing " << msg << " file: "
+ << strerror(err) << "\n";
+ exit(1);
+ }
+ }
+}
+
+void
+TpcbExample::populateHistory(Db *dbp, int nrecs,
+ u_int32_t accounts, u_int32_t branches, u_int32_t tellers)
+{
+ Histrec hrec;
+ memset(&hrec.pad[0], 1, sizeof(hrec.pad));
+ hrec.amount = 10;
+ db_recno_t key;
+
+ Dbt kdbt(&key, sizeof(u_int32_t));
+ Dbt ddbt(&hrec, sizeof(hrec));
+
+ for (int i = 1; i <= nrecs; i++) {
+ hrec.aid = random_id(ACCOUNT, accounts, branches, tellers);
+ hrec.bid = random_id(BRANCH, accounts, branches, tellers);
+ hrec.tid = random_id(TELLER, accounts, branches, tellers);
+
+ int err;
+ key = (db_recno_t)i;
+ if ((err = dbp->put(NULL, &kdbt, &ddbt, DB_APPEND)) != 0) {
+ errExit(err, "Failure initializing history file");
+ }
+ }
+}
+
+u_int32_t
+random_int(u_int32_t lo, u_int32_t hi)
+{
+ u_int32_t ret;
+ int t;
+
+ t = rand();
+ ret = (u_int32_t)(((double)t / ((double)(RAND_MAX) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ return (ret);
+}
+
+u_int32_t
+random_id(FTYPE type, u_int32_t accounts, u_int32_t branches, u_int32_t tellers)
+{
+ u_int32_t min, max, num;
+
+ max = min = BEGID;
+ num = accounts;
+ switch(type) {
+ case TELLER:
+ min += branches;
+ num = tellers;
+ // Fallthrough
+ case BRANCH:
+ if (type == BRANCH)
+ num = branches;
+ min += accounts;
+ // Fallthrough
+ case ACCOUNT:
+ max = min + num - 1;
+ }
+ return (random_int(min, max));
+}
+
+void
+TpcbExample::run(int n, int accounts, int branches, int tellers)
+{
+ Db *adb, *bdb, *hdb, *tdb;
+ double gtps, itps;
+ int failed, ifailed, ret, txns;
+ time_t starttime, curtime, lasttime;
+#ifndef DB_WIN32
+ pid_t pid;
+
+ pid = getpid();
+#else
+ int pid;
+
+ pid = 0;
+#endif
+
+ //
+ // Open the database files.
+ //
+
+ int err;
+ adb = new Db(this, 0);
+ if ((err = adb->open("account", NULL, DB_UNKNOWN, 0, 0)) != 0)
+ errExit(err, "Open of account file failed");
+
+ bdb = new Db(this, 0);
+ if ((err = bdb->open("branch", NULL, DB_UNKNOWN, 0, 0)) != 0)
+ errExit(err, "Open of branch file failed");
+
+ tdb = new Db(this, 0);
+ if ((err = tdb->open("teller", NULL, DB_UNKNOWN, 0, 0)) != 0)
+ errExit(err, "Open of teller file failed");
+
+ hdb = new Db(this, 0);
+ if ((err = hdb->open("history", NULL, DB_UNKNOWN, 0, 0)) != 0)
+ errExit(err, "Open of history file failed");
+
+ txns = failed = ifailed = 0;
+ starttime = time(NULL);
+ lasttime = starttime;
+ while (n-- > 0) {
+ txns++;
+ ret = txn(adb, bdb, tdb, hdb, accounts, branches, tellers);
+ if (ret != 0) {
+ failed++;
+ ifailed++;
+ }
+ if (n % 5000 == 0) {
+ curtime = time(NULL);
+ gtps = (double)(txns - failed) / (curtime - starttime);
+ itps = (double)(5000 - ifailed) / (curtime - lasttime);
+
+ // We use printf because it provides much simpler
+ // formatting than iostreams.
+ //
+ printf("[%d] %d txns %d failed ", (int)pid,
+ txns, failed);
+ printf("%6.2f TPS (gross) %6.2f TPS (interval)\n",
+ gtps, itps);
+ lasttime = curtime;
+ ifailed = 0;
+ }
+ }
+
+ (void)adb->close(0);
+ (void)bdb->close(0);
+ (void)tdb->close(0);
+ (void)hdb->close(0);
+
+ cout << (long)txns << " transactions begun "
+ << (long)failed << " failed\n";
+}
+
+//
+// XXX Figure out the appropriate way to pick out IDs.
+//
+int
+TpcbExample::txn(Db *adb, Db *bdb, Db *tdb, Db *hdb,
+ int accounts, int branches, int tellers)
+{
+ Dbc *acurs = NULL;
+ Dbc *bcurs = NULL;
+ Dbc *tcurs = NULL;
+ DbTxn *t = NULL;
+
+ db_recno_t key;
+ Defrec rec;
+ Histrec hrec;
+ int account, branch, teller;
+
+ Dbt d_dbt;
+ Dbt d_histdbt;
+ Dbt k_dbt;
+ Dbt k_histdbt(&key, sizeof(key));
+
+ //
+ // XXX We could move a lot of this into the driver to make this
+ // faster.
+ //
+ account = random_id(ACCOUNT, accounts, branches, tellers);
+ branch = random_id(BRANCH, accounts, branches, tellers);
+ teller = random_id(TELLER, accounts, branches, tellers);
+
+ k_dbt.set_size(sizeof(int));
+
+ d_dbt.set_flags(DB_DBT_USERMEM);
+ d_dbt.set_data(&rec);
+ d_dbt.set_ulen(sizeof(rec));
+
+ hrec.aid = account;
+ hrec.bid = branch;
+ hrec.tid = teller;
+ hrec.amount = 10;
+ // Request 0 bytes since we're just positioning.
+ d_histdbt.set_flags(DB_DBT_PARTIAL);
+
+ // START TIMING
+ if (txn_begin(NULL, &t, 0) != 0)
+ goto err;
+
+ if (adb->cursor(t, &acurs, 0) != 0 ||
+ bdb->cursor(t, &bcurs, 0) != 0 ||
+ tdb->cursor(t, &tcurs, 0) != 0)
+ goto err;
+
+ // Account record
+ k_dbt.set_data(&account);
+ if (acurs->get(&k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (acurs->put(&k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ // Branch record
+ k_dbt.set_data(&branch);
+ if (bcurs->get(&k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (bcurs->put(&k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ // Teller record
+ k_dbt.set_data(&teller);
+ if (tcurs->get(&k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (tcurs->put(&k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ // History record
+ d_histdbt.set_flags(0);
+ d_histdbt.set_data(&hrec);
+ d_histdbt.set_ulen(sizeof(hrec));
+ if (hdb->put(t, &k_histdbt, &d_histdbt, DB_APPEND) != 0)
+ goto err;
+
+ if (acurs->close() != 0 || bcurs->close() != 0 ||
+ tcurs->close() != 0)
+ goto err;
+
+ if (t->commit(0) != 0)
+ goto err;
+
+ // END TIMING
+ return (0);
+
+err:
+ if (acurs != NULL)
+ (void)acurs->close();
+ if (bcurs != NULL)
+ (void)bcurs->close();
+ if (tcurs != NULL)
+ (void)tcurs->close();
+ if (t != NULL)
+ (void)t->abort();
+
+ if (verbose)
+ cout << "Transaction A=" << (long)account
+ << " B=" << (long)branch
+ << " T=" << (long)teller << " failed\n";
+ return (-1);
+}
+
+void errExit(int err, const char *s)
+{
+ cerr << progname << ": ";
+ if (s != NULL) {
+ cerr << s << ": ";
+ }
+ cerr << strerror(err) << "\n";
+ exit(1);
+}
diff --git a/bdb/hash/hash.c b/bdb/hash/hash.c
new file mode 100644
index 00000000000..e96fd4898f0
--- /dev/null
+++ b/bdb/hash/hash.c
@@ -0,0 +1,2096 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash.c,v 11.94 2001/01/03 16:42:26 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "db_ext.h"
+#include "db_shash.h"
+#include "db_swap.h"
+#include "hash.h"
+#include "btree.h"
+#include "log.h"
+#include "lock.h"
+#include "txn.h"
+
+static int __ham_c_close __P((DBC *, db_pgno_t, int *));
+static int __ham_c_del __P((DBC *));
+static int __ham_c_destroy __P((DBC *));
+static int __ham_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __ham_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __ham_c_writelock __P((DBC *));
+static int __ham_del_dups __P((DBC *, DBT *));
+static int __ham_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+static int __ham_dup_return __P((DBC *, DBT *, u_int32_t));
+static int __ham_expand_table __P((DBC *));
+static int __ham_init_htab __P((DBC *,
+ const char *, db_pgno_t, u_int32_t, u_int32_t));
+static int __ham_lookup __P((DBC *,
+ const DBT *, u_int32_t, db_lockmode_t, db_pgno_t *));
+static int __ham_overwrite __P((DBC *, DBT *, u_int32_t));
+
+/*
+ * __ham_metachk --
+ *
+ * PUBLIC: int __ham_metachk __P((DB *, const char *, HMETA *));
+ */
+int
+__ham_metachk(dbp, name, hashm)
+ DB *dbp;
+ const char *name;
+ HMETA *hashm;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * At this point, all we know is that the magic number is for a Hash.
+ * Check the version, the database may be out of date.
+ */
+ vers = hashm->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 4:
+ case 5:
+ case 6:
+ __db_err(dbenv,
+ "%s: hash version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 7:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported hash version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __ham_mswap((PAGE *)hashm)) != 0)
+ return (ret);
+
+ /* Check the type. */
+ if (dbp->type != DB_HASH && dbp->type != DB_UNKNOWN)
+ return (EINVAL);
+ dbp->type = DB_HASH;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ /*
+ * Check application info against metadata info, and set info, flags,
+ * and type based on metadata info.
+ */
+ if ((ret = __db_fchk(dbenv,
+ "DB->open", hashm->dbmeta.flags,
+ DB_HASH_DUP | DB_HASH_SUBDB | DB_HASH_DUPSORT)) != 0)
+ return (ret);
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ else
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "%s: DB_DUP specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+ else
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "%s: multiple databases specified but not supported in file",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ } else
+ if (dbp->dup_compare != NULL) {
+ __db_err(dbenv,
+ "%s: duplicate sort function specified but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ /* Set the page size. */
+ dbp->pgsize = hashm->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, hashm->dbmeta.uid, DB_FILE_ID_LEN);
+
+ return (0);
+}
+
+/*
+ * __ham_open --
+ *
+ * PUBLIC: int __ham_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+ */
+int
+__ham_open(dbp, name, base_pgno, flags)
+ DB *dbp;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+ int need_sync, ret, t_ret;
+
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+ need_sync = 0;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->del = __ham_delete;
+ dbp->stat = __ham_stat;
+
+ /*
+ * Get a cursor. If DB_CREATE is specified, we may be creating
+ * pages, and to do that safely in CDB we need a write cursor.
+ * In STD_LOCKING mode, we'll synchronize using the meta page
+ * lock instead.
+ */
+ if ((ret = dbp->cursor(dbp,
+ dbp->open_txn, &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbenv) ?
+ DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hashp = dbp->h_internal;
+ hashp->meta_pgno = base_pgno;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err1;
+
+ /*
+ * If this is a new file, initialize it, and put it back dirty.
+ *
+ * Initialize the hdr structure.
+ */
+ if (hcp->hdr->dbmeta.magic == DB_HASHMAGIC) {
+ /* File exists, verify the data in the header. */
+ if (hashp->h_hash == NULL)
+ hashp->h_hash = hcp->hdr->dbmeta.version < 5
+ ? __ham_func4 : __ham_func5;
+ if (!F_ISSET(dbp, DB_RDONLY) &&
+ hashp->h_hash(dbp,
+ CHARKEY, sizeof(CHARKEY)) != hcp->hdr->h_charkey) {
+ __db_err(dbp->dbenv,
+ "hash: incompatible hash function");
+ ret = EINVAL;
+ goto err2;
+ }
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUPSORT))
+ F_SET(dbp, DB_AM_DUPSORT);
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+ } else if (!IS_RECOVERING(dbenv)) {
+ /*
+ * File does not exist, we must initialize the header. If
+ * locking is enabled that means getting a write lock first.
+ * During recovery the meta page will be in the log.
+ */
+ dbc->lock.pgno = base_pgno;
+
+ if (STD_LOCKING(dbc) &&
+ ((ret = lock_put(dbenv, &hcp->hlock)) != 0 ||
+ (ret = lock_get(dbenv, dbc->locker,
+ DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &hcp->hlock)) != 0))
+ goto err2;
+ else if (CDB_LOCKING(dbp->dbenv)) {
+ DB_ASSERT(LF_ISSET(DB_CREATE));
+ if ((ret = lock_get(dbenv, dbc->locker,
+ DB_LOCK_UPGRADE, &dbc->lock_dbt, DB_LOCK_WRITE,
+ &dbc->mylock)) != 0)
+ goto err2;
+ }
+ if ((ret = __ham_init_htab(dbc, name,
+ base_pgno, hashp->h_nelem, hashp->h_ffactor)) != 0)
+ goto err2;
+
+ need_sync = 1;
+ }
+
+err2: /* Release the meta data page */
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+err1: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Sync the file so that we know that the meta data goes to disk. */
+ if (ret == 0 && need_sync)
+ ret = dbp->sync(dbp, 0);
+#if CONFIG_TEST
+ if (ret == 0)
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+#endif
+ if (ret != 0)
+ (void)__ham_db_close(dbp);
+
+ return (ret);
+}
+
+/************************** LOCAL CREATION ROUTINES **********************/
+/*
+ * Returns 0 on success.
+ */
+static int
+__ham_init_htab(dbc, name, pgno, nelem, ffactor)
+ DBC *dbc;
+ const char *name;
+ db_pgno_t pgno;
+ u_int32_t nelem, ffactor;
+{
+ DB *dbp;
+ DB_LOCK metalock;
+ DB_LSN orig_lsn;
+ DBMETA *mmeta;
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+ PAGE *h;
+ db_pgno_t mpgno;
+ int32_t l2, nbuckets;
+ int dirty_mmeta, i, ret, t_ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+ hashp = dbp->h_internal;
+ mmeta = NULL;
+ h = NULL;
+ ret = 0;
+ dirty_mmeta = 0;
+ metalock.off = LOCK_INVALID;
+
+ if (hashp->h_hash == NULL)
+ hashp->h_hash = DB_HASHVERSION < 5 ? __ham_func4 : __ham_func5;
+
+ if (nelem != 0 && ffactor != 0) {
+ nelem = (nelem - 1) / ffactor + 1;
+ l2 = __db_log2(nelem > 2 ? nelem : 2);
+ } else
+ l2 = 1;
+ nbuckets = 1 << l2;
+
+ orig_lsn = hcp->hdr->dbmeta.lsn;
+ memset(hcp->hdr, 0, sizeof(HMETA));
+ ZERO_LSN(hcp->hdr->dbmeta.lsn);
+ hcp->hdr->dbmeta.pgno = pgno;
+ hcp->hdr->dbmeta.magic = DB_HASHMAGIC;
+ hcp->hdr->dbmeta.version = DB_HASHVERSION;
+ hcp->hdr->dbmeta.pagesize = dbp->pgsize;
+ hcp->hdr->dbmeta.type = P_HASHMETA;
+ hcp->hdr->dbmeta.free = PGNO_INVALID;
+ hcp->hdr->max_bucket = hcp->hdr->high_mask = nbuckets - 1;
+ hcp->hdr->low_mask = (nbuckets >> 1) - 1;
+ hcp->hdr->ffactor = ffactor;
+ hcp->hdr->h_charkey = hashp->h_hash(dbp, CHARKEY, sizeof(CHARKEY));
+ memcpy(hcp->hdr->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ if (F_ISSET(dbp, DB_AM_DUP))
+ F_SET(&hcp->hdr->dbmeta, DB_HASH_DUP);
+ if (F_ISSET(dbp, DB_AM_SUBDB))
+ F_SET(&hcp->hdr->dbmeta, DB_HASH_SUBDB);
+ if (dbp->dup_compare != NULL)
+ F_SET(&hcp->hdr->dbmeta, DB_HASH_DUPSORT);
+
+ if ((ret = memp_fset(dbp->mpf, hcp->hdr, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+
+ /*
+ * Create the first and second buckets pages so that we have the
+ * page numbers for them and we can store that page number
+ * in the meta-data header (spares[0]).
+ */
+ hcp->hdr->spares[0] = nbuckets;
+ if ((ret = memp_fget(dbp->mpf,
+ &hcp->hdr->spares[0], DB_MPOOL_NEW_GROUP, &h)) != 0)
+ goto err;
+
+ P_INIT(h, dbp->pgsize, hcp->hdr->spares[0], PGNO_INVALID,
+ PGNO_INVALID, 0, P_HASH);
+
+ /* Fill in the last fields of the meta data page. */
+ hcp->hdr->spares[0] -= (nbuckets - 1);
+ for (i = 1; i <= l2; i++)
+ hcp->hdr->spares[i] = hcp->hdr->spares[0];
+ for (; i < NCACHED; i++)
+ hcp->hdr->spares[i] = PGNO_INVALID;
+
+ /*
+	 * Before we put back any dirty pages, we need to log
+	 * the meta-data page creation.
+ */
+ ret = __db_log_page(dbp, name, &orig_lsn, pgno, (PAGE *)hcp->hdr);
+
+ if (dbp->open_txn != NULL) {
+ mmeta = (DBMETA *) hcp->hdr;
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+
+ /*
+ * If this is a subdatabase, then we need to
+ * get the LSN off the master meta data page
+ * because that's where free pages are linked
+ * and during recovery we need to access
+ * that page and roll it backward/forward
+ * correctly with respect to LSN.
+ */
+ mpgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc,
+ 0, mpgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf,
+ &mpgno, 0, (PAGE **)&mmeta)) != 0)
+ goto err;
+ }
+ if ((t_ret = __ham_groupalloc_log(dbp->dbenv,
+ dbp->open_txn, &LSN(mmeta), 0, dbp->log_fileid,
+ &LSN(mmeta), hcp->hdr->spares[0],
+ hcp->hdr->max_bucket + 1, mmeta->free)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret == 0) {
+ /* need to update real LSN for buffer manager */
+ dirty_mmeta = 1;
+ }
+
+ }
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+err: if (h != NULL &&
+ (t_ret = memp_fput(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (F_ISSET(dbp, DB_AM_SUBDB) && mmeta != NULL)
+ if ((t_ret = memp_fput(dbp->mpf, mmeta,
+ dirty_mmeta ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (metalock.off != LOCK_INVALID)
+ (void)__TLPUT(dbc, metalock);
+
+ return (ret);
+}
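The sizing and spares[] arithmetic above is easier to follow in isolation. The sketch below restates it as a standalone program; log2_ceil() is an illustrative stand-in for __db_log2(), and the page layout assumed here (meta page at 0, the bucket group at pages 1..nbuckets) is only an assumption made for the example.

	#include <stdio.h>

	/* Stand-in for __db_log2(): smallest l2 such that (1 << l2) >= n. */
	static unsigned int
	log2_ceil(unsigned int n)
	{
		unsigned int l2;

		for (l2 = 0; (1u << l2) < n; ++l2)
			;
		return (l2);
	}

	int
	main(void)
	{
		unsigned int nelem = 1000, ffactor = 8, l2, nbuckets;
		unsigned int last_pg, spares0, bucket;

		/* Sizing rule from __ham_init_htab: buckets = ceil(nelem / ffactor),
		 * rounded up to a power of two, with a minimum of two buckets. */
		nelem = (nelem - 1) / ffactor + 1;	/* 125 buckets needed */
		l2 = log2_ceil(nelem > 2 ? nelem : 2);	/* 7 */
		nbuckets = 1u << l2;			/* 128 buckets created */

		/* Assume the group allocation hands back pages 1..nbuckets, so the
		 * last page is nbuckets.  spares[0] becomes that last page minus
		 * (nbuckets - 1), i.e. the page holding bucket 0, and a bucket's
		 * page is then the bucket number plus its doubling's spares value. */
		last_pg = nbuckets;
		spares0 = last_pg - (nbuckets - 1);	/* 1 in this layout */
		for (bucket = 0; bucket < 4; ++bucket)
			printf("bucket %u -> page %u\n", bucket, bucket + spares0);
		return (0);
	}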
+
+static int
+__ham_delete(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ HASH_CURSOR *hcp;
+ db_pgno_t pgno;
+ int ret, t_ret;
+
+ /*
+ * This is the only access method routine called directly from
+ * the dbp, so we have to do error checking.
+ */
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del");
+ DB_CHECK_TXN(dbp, txn);
+
+ if ((ret =
+ __db_delchk(dbp, key, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
+ return (ret);
+
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, txn, "ham_delete", key, NULL, flags);
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+
+ pgno = PGNO_INVALID;
+ if ((ret = __ham_lookup(dbc, key, 0, DB_LOCK_WRITE, &pgno)) == 0) {
+ if (F_ISSET(hcp, H_OK)) {
+ if (pgno == PGNO_INVALID)
+ ret = __ham_del_pair(dbc, 1);
+ else {
+ /* When we close the cursor in __ham_del_dups,
+ * that will make the off-page dup tree go
+				 * away as well as our current entry. When
+ * it updates cursors, ours should get marked
+ * as H_DELETED.
+ */
+ ret = __ham_del_dups(dbc, key);
+ }
+ } else
+ ret = DB_NOTFOUND;
+ }
+
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+out: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/* ****************** CURSORS ********************************** */
+/*
+ * __ham_c_init --
+ * Initialize the hash-specific portion of a cursor.
+ *
+ * PUBLIC: int __ham_c_init __P((DBC *));
+ */
+int
+__ham_c_init(dbc)
+ DBC *dbc;
+{
+ DB_ENV *dbenv;
+ HASH_CURSOR *new_curs;
+ int ret;
+
+ dbenv = dbc->dbp->dbenv;
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(struct cursor_t), &new_curs)) != 0)
+ return (ret);
+ if ((ret = __os_malloc(dbenv,
+ dbc->dbp->pgsize, NULL, &new_curs->split_buf)) != 0) {
+ __os_free(new_curs, sizeof(*new_curs));
+ return (ret);
+ }
+
+ dbc->internal = (DBC_INTERNAL *) new_curs;
+ dbc->c_close = __db_c_close;
+ dbc->c_count = __db_c_count;
+ dbc->c_del = __db_c_del;
+ dbc->c_dup = __db_c_dup;
+ dbc->c_get = __db_c_get;
+ dbc->c_put = __db_c_put;
+ dbc->c_am_close = __ham_c_close;
+ dbc->c_am_del = __ham_c_del;
+ dbc->c_am_destroy = __ham_c_destroy;
+ dbc->c_am_get = __ham_c_get;
+ dbc->c_am_put = __ham_c_put;
+ dbc->c_am_writelock = __ham_c_writelock;
+
+ __ham_item_init(dbc);
+
+ return (0);
+}
+
+/*
+ * __ham_c_close --
+ * Close down the cursor from a single use.
+ */
+static int
+__ham_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ HASH_CURSOR *hcp;
+ HKEYDATA *dp;
+ int doroot, gotmeta, ret, t_ret;
+ u_int32_t dirty;
+
+ COMPQUIET(rmroot, 0);
+ dirty = 0;
+ doroot = gotmeta = ret = 0;
+ hcp = (HASH_CURSOR *) dbc->internal;
+
+ /* Check for off page dups. */
+ if (dbc->internal->opd != NULL) {
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto done;
+ gotmeta = 1;
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0)
+ goto out;
+ dp = (HKEYDATA *)H_PAIRDATA(hcp->page, hcp->indx);
+ DB_ASSERT(HPAGE_PTYPE(dp) == H_OFFDUP);
+ memcpy(&root_pgno, HOFFPAGE_PGNO(dp), sizeof(db_pgno_t));
+
+ if ((ret =
+ hcp->opd->c_am_close(hcp->opd, root_pgno, &doroot)) != 0)
+ goto out;
+ if (doroot != 0) {
+ if ((ret = __ham_del_pair(dbc, 1)) != 0)
+ goto out;
+ dirty = DB_MPOOL_DIRTY;
+ }
+ }
+
+out: if (hcp->page != NULL && (t_ret =
+ memp_fput(dbc->dbp->mpf, hcp->page, dirty)) != 0 && ret == 0)
+ ret = t_ret;
+ if (gotmeta != 0 && (t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+done:
+ __ham_item_init(dbc);
+ return (ret);
+}
+
+/*
+ * __ham_c_destroy --
+ * Cleanup the access method private part of a cursor.
+ */
+static int
+__ham_c_destroy(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if (hcp->split_buf != NULL)
+ __os_free(hcp->split_buf, dbc->dbp->pgsize);
+ __os_free(hcp, sizeof(HASH_CURSOR));
+
+ return (0);
+}
+
+/*
+ * __ham_c_count --
+ * Return a count of on-page duplicates.
+ *
+ * PUBLIC: int __ham_c_count __P((DBC *, db_recno_t *));
+ */
+int
+__ham_c_count(dbc, recnop)
+ DBC *dbc;
+ db_recno_t *recnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_indx_t len;
+ db_recno_t recno;
+ int ret, t_ret;
+ u_int8_t *p, *pend;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *) dbc->internal;
+
+ recno = 0;
+
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0)
+ return (ret);
+
+ switch (HPAGE_PTYPE(H_PAIRDATA(hcp->page, hcp->indx))) {
+ case H_KEYDATA:
+ case H_OFFPAGE:
+ recno = 1;
+ break;
+ case H_DUPLICATE:
+ p = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ pend = p +
+ LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+ for (; p < pend; recno++) {
+			/* p may not be aligned, so copy rather than dereference it */
+ memcpy(&len, p, sizeof(db_indx_t));
+ p += 2 * sizeof(db_indx_t) + len;
+ }
+
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv, "__ham_c_count",
+ HPAGE_PTYPE(H_PAIRDATA(hcp->page, hcp->indx)));
+ goto err;
+ }
+
+ *recnop = recno;
+
+err: if ((t_ret = memp_fput(dbc->dbp->mpf, hcp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ hcp->page = NULL;
+ return (ret);
+}
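The H_DUPLICATE walk in __ham_c_count relies on the on-page duplicate layout: each element is stored as a 2-byte length, the data bytes, and the length repeated, so the set can be scanned in either direction. A standalone version of the forward scan, assuming db_indx_t is a 16-bit unsigned integer and with count_onpage_dups() as an illustrative name, looks roughly like this:

	#include <stdint.h>
	#include <string.h>

	typedef uint16_t db_indx_t;	/* assumed width for this sketch */

	/* Count the elements in an on-page duplicate set of total length tlen. */
	static unsigned long
	count_onpage_dups(const uint8_t *p, size_t tlen)
	{
		const uint8_t *pend;
		db_indx_t len;
		unsigned long recno;

		for (recno = 0, pend = p + tlen; p < pend; ++recno) {
			/* p may not be aligned, so copy rather than dereference. */
			memcpy(&len, p, sizeof(db_indx_t));
			p += 2 * sizeof(db_indx_t) + len;	/* len, data, len */
		}
		return (recno);
	}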
+
+static int
+__ham_c_del(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBT repldbt;
+ HASH_CURSOR *hcp;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED))
+ return (DB_NOTFOUND);
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_WRITE)) != 0)
+ goto out;
+
+ /* Off-page duplicates. */
+ if (HPAGE_TYPE(hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP)
+ goto out;
+
+ if (F_ISSET(hcp, H_ISDUP)) { /* On-page duplicate. */
+ if (hcp->dup_off == 0 &&
+ DUP_SIZE(hcp->dup_len) == LEN_HDATA(hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx))
+ ret = __ham_del_pair(dbc, 1);
+ else {
+ repldbt.flags = 0;
+ F_SET(&repldbt, DB_DBT_PARTIAL);
+ repldbt.doff = hcp->dup_off;
+ repldbt.dlen = DUP_SIZE(hcp->dup_len);
+ repldbt.size = 0;
+ repldbt.data = HKEYDATA_DATA(H_PAIRDATA(hcp->page,
+ hcp->indx));
+ ret = __ham_replpair(dbc, &repldbt, 0);
+ hcp->dup_tlen -= DUP_SIZE(hcp->dup_len);
+ F_SET(hcp, H_DELETED);
+ ret = __ham_c_update(dbc, DUP_SIZE(hcp->dup_len), 0, 1);
+ }
+
+ } else /* Not a duplicate */
+ ret = __ham_del_pair(dbc, 1);
+
+out: if (ret == 0 && hcp->page != NULL &&
+ (t_ret = memp_fput(dbp->mpf, hcp->page, DB_MPOOL_DIRTY)) != 0)
+ ret = t_ret;
+ hcp->page = NULL;
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __ham_c_dup --
+ * Duplicate a hash cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __ham_c_dup __P((DBC *, DBC *));
+ */
+int
+__ham_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ HASH_CURSOR *orig, *new;
+
+ orig = (HASH_CURSOR *)orig_dbc->internal;
+ new = (HASH_CURSOR *)new_dbc->internal;
+
+ new->bucket = orig->bucket;
+ new->lbucket = orig->lbucket;
+ new->dup_off = orig->dup_off;
+ new->dup_len = orig->dup_len;
+ new->dup_tlen = orig->dup_tlen;
+
+ if (F_ISSET(orig, H_DELETED))
+ F_SET(new, H_DELETED);
+ if (F_ISSET(orig, H_ISDUP))
+ F_SET(new, H_ISDUP);
+
+ /*
+ * If the old cursor held a lock and we're not in transactions, get one
+ * for the new one. The reason that we don't need a new lock if we're
+ * in a transaction is because we already hold a lock and will continue
+	 * to do so until commit, so there is no point in reacquiring it. We
+ * don't know if the old lock was a read or write lock, but it doesn't
+ * matter. We'll get a read lock. We know that this locker already
+ * holds a lock of the correct type, so if we need a write lock and
+ * request it, we know that we'll get it.
+ */
+ if (orig->lock.off == LOCK_INVALID || orig_dbc->txn != NULL)
+ return (0);
+
+ return (__ham_lock_bucket(new_dbc, DB_LOCK_READ));
+}
+
+static int
+__ham_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key;
+ DBT *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_lockmode_t lock_type;
+ int get_key, ret, t_ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+
+ /* Clear OR'd in additional bits so we can check for flag equality. */
+ if (F_ISSET(dbc, DBC_RMW))
+ lock_type = DB_LOCK_WRITE;
+ else
+ lock_type = DB_LOCK_READ;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+ hcp->seek_size = 0;
+
+ ret = 0;
+ get_key = 1;
+ switch (flags) {
+ case DB_PREV_NODUP:
+ F_SET(hcp, H_NEXT_NODUP);
+ /* FALLTHROUGH */
+ case DB_PREV:
+ if (IS_INITIALIZED(dbc)) {
+ ret = __ham_item_prev(dbc, lock_type, pgnop);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ ret = __ham_item_last(dbc, lock_type, pgnop);
+ break;
+ case DB_NEXT_NODUP:
+ F_SET(hcp, H_NEXT_NODUP);
+ /* FALLTHROUGH */
+ case DB_NEXT:
+ if (IS_INITIALIZED(dbc)) {
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ ret = __ham_item_first(dbc, lock_type, pgnop);
+ break;
+ case DB_NEXT_DUP:
+ /* cgetchk has already determined that the cursor is set. */
+ F_SET(hcp, H_DUPONLY);
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ break;
+ case DB_SET:
+ case DB_SET_RANGE:
+ case DB_GET_BOTH:
+ ret = __ham_lookup(dbc, key, 0, lock_type, pgnop);
+ get_key = 0;
+ break;
+ case DB_GET_BOTHC:
+ F_SET(hcp, H_DUPONLY);
+
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ get_key = 0;
+ break;
+ case DB_CURRENT:
+ /* cgetchk has already determined that the cursor is set. */
+ if (F_ISSET(hcp, H_DELETED)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ ret = __ham_item(dbc, lock_type, pgnop);
+ break;
+ }
+
+ /*
+ * Must always enter this loop to do error handling and
+ * check for big key/data pair.
+ */
+ for (;;) {
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto err;
+ else if (F_ISSET(hcp, H_OK)) {
+ if (*pgnop == PGNO_INVALID)
+ ret = __ham_dup_return (dbc, data, flags);
+ break;
+ } else if (!F_ISSET(hcp, H_NOMORE)) {
+ __db_err(dbp->dbenv,
+ "H_NOMORE returned to __ham_c_get");
+ ret = EINVAL;
+ break;
+ }
+
+ /*
+ * Ran out of entries in a bucket; change buckets.
+ */
+ switch (flags) {
+ case DB_LAST:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ ret = memp_fput(dbp->mpf, hcp->page, 0);
+ hcp->page = NULL;
+ if (hcp->bucket == 0) {
+ ret = DB_NOTFOUND;
+ hcp->pgno = PGNO_INVALID;
+ goto err;
+ }
+ F_CLR(hcp, H_ISDUP);
+ hcp->bucket--;
+ hcp->indx = NDX_INVALID;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (ret == 0)
+ ret = __ham_item_prev(dbc,
+ lock_type, pgnop);
+ break;
+ case DB_FIRST:
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ ret = memp_fput(dbp->mpf, hcp->page, 0);
+ hcp->page = NULL;
+ hcp->indx = NDX_INVALID;
+ hcp->bucket++;
+ F_CLR(hcp, H_ISDUP);
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (hcp->bucket > hcp->hdr->max_bucket) {
+ ret = DB_NOTFOUND;
+ hcp->pgno = PGNO_INVALID;
+ goto err;
+ }
+ if (ret == 0)
+ ret = __ham_item_next(dbc,
+ lock_type, pgnop);
+ break;
+ case DB_GET_BOTH:
+ case DB_GET_BOTHC:
+ case DB_NEXT_DUP:
+ case DB_SET:
+ case DB_SET_RANGE:
+ /* Key not found. */
+ ret = DB_NOTFOUND;
+ goto err;
+ case DB_CURRENT:
+ /*
+ * This should only happen if you are doing
+ * deletes and reading with concurrent threads
+ * and not doing proper locking. We return
+ * the same error code as we would if the
+ * cursor were deleted.
+ */
+ ret = DB_KEYEMPTY;
+ goto err;
+ default:
+ DB_ASSERT(0);
+ }
+ }
+
+ if (get_key == 0)
+ F_SET(key, DB_DBT_ISSET);
+
+err: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ F_CLR(hcp, H_DUPONLY);
+ F_CLR(hcp, H_NEXT_NODUP);
+
+ return (ret);
+}
+
+static int
+__ham_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key;
+ DBT *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DBT tmp_val, *myval;
+ HASH_CURSOR *hcp;
+ u_int32_t nbytes;
+ int ret, t_ret;
+
+ /*
+	 * The compiler doesn't realize that we only use myval when ret is
+	 * equal to 0, and that if ret is equal to 0 we must have set
+	 * myval. So, we initialize it here to shut the compiler up.
+ */
+ COMPQUIET(myval, NULL);
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED) &&
+ flags != DB_KEYFIRST && flags != DB_KEYLAST)
+ return (DB_NOTFOUND);
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err1;
+
+ switch (flags) {
+ case DB_KEYLAST:
+ case DB_KEYFIRST:
+ case DB_NODUPDATA:
+ nbytes = (ISBIG(hcp, key->size) ? HOFFPAGE_PSIZE :
+ HKEYDATA_PSIZE(key->size)) +
+ (ISBIG(hcp, data->size) ? HOFFPAGE_PSIZE :
+ HKEYDATA_PSIZE(data->size));
+ if ((ret = __ham_lookup(dbc,
+ key, nbytes, DB_LOCK_WRITE, pgnop)) == DB_NOTFOUND) {
+ ret = 0;
+ if (hcp->seek_found_page != PGNO_INVALID &&
+ hcp->seek_found_page != hcp->pgno) {
+ if ((ret = memp_fput(dbp->mpf, hcp->page, 0))
+ != 0)
+ goto err2;
+ hcp->page = NULL;
+ hcp->pgno = hcp->seek_found_page;
+ hcp->indx = NDX_INVALID;
+ }
+
+ if (F_ISSET(data, DB_DBT_PARTIAL) && data->doff != 0) {
+ /*
+ * A partial put, but the key does not exist
+ * and we are not beginning the write at 0.
+ * We must create a data item padded up to doff
+ * and then write the new bytes represented by
+ * val.
+ */
+ if ((ret = __ham_init_dbt(dbp->dbenv,
+ &tmp_val, data->size + data->doff,
+ &dbc->rdata.data, &dbc->rdata.ulen)) == 0) {
+ memset(tmp_val.data, 0, data->doff);
+ memcpy((u_int8_t *)tmp_val.data +
+ data->doff, data->data, data->size);
+ myval = &tmp_val;
+ }
+ } else
+ myval = (DBT *)data;
+
+ if (ret == 0)
+ ret = __ham_add_el(dbc, key, myval, H_KEYDATA);
+ goto done;
+ }
+ break;
+ case DB_BEFORE:
+ case DB_AFTER:
+ case DB_CURRENT:
+ ret = __ham_item(dbc, DB_LOCK_WRITE, pgnop);
+ break;
+ }
+
+ if (*pgnop == PGNO_INVALID && ret == 0) {
+ if (flags == DB_CURRENT ||
+ ((flags == DB_KEYFIRST ||
+ flags == DB_KEYLAST || flags == DB_NODUPDATA) &&
+ !(F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))))
+ ret = __ham_overwrite(dbc, data, flags);
+ else
+ ret = __ham_add_dup(dbc, data, flags, pgnop);
+ }
+
+done: if (ret == 0 && F_ISSET(hcp, H_EXPAND)) {
+ ret = __ham_expand_table(dbc);
+ F_CLR(hcp, H_EXPAND);
+ }
+
+ if (ret == 0 &&
+ (t_ret = memp_fset(dbp->mpf, hcp->page, DB_MPOOL_DIRTY)) != 0)
+ ret = t_ret;
+
+err2: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+err1: return (ret);
+}
+
+/********************************* UTILITIES ************************/
+
+/*
+ * __ham_expand_table --
+ */
+static int
+__ham_expand_table(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ PAGE *h;
+ HASH_CURSOR *hcp;
+ db_pgno_t pgno;
+ u_int32_t old_bucket, new_bucket;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+
+ /*
+ * If the split point is about to increase, make sure that we
+ * have enough extra pages. The calculation here is weird.
+ * We'd like to do this after we've upped max_bucket, but it's
+ * too late then because we've logged the meta-data split. What
+ * we'll do between then and now is increment max bucket and then
+ * see what the log of one greater than that is; here we have to
+ * look at the log of max + 2. VERY NASTY STUFF.
+ *
+ * It just got even nastier. With subdatabases, we have to request
+ * a chunk of contiguous pages, so we do that here using an
+ * undocumented feature of mpool (the MPOOL_NEW_GROUP flag) to
+ * give us a number of contiguous pages. Ouch.
+ */
+ if (hcp->hdr->max_bucket == hcp->hdr->high_mask) {
+ /*
+ * Ask mpool to give us a set of contiguous page numbers
+ * large enough to contain the next doubling.
+ *
+ * Figure out how many new pages we need. This will return
+ * us the last page. We calculate its page number, initialize
+ * the page and then write it back to reserve all the pages
+ * in between. It is possible that the allocation of new pages
+		 * has already been done, but the transaction aborted. Since
+ * we don't undo the allocation, check for a valid pgno before
+ * doing the allocation.
+ */
+ pgno = hcp->hdr->max_bucket + 1;
+ if (hcp->hdr->spares[__db_log2(pgno) + 1] == PGNO_INVALID)
+ /* Allocate a group of pages. */
+ ret = memp_fget(dbp->mpf,
+ &pgno, DB_MPOOL_NEW_GROUP, &h);
+ else {
+ /* Just read in the last page of the batch */
+ pgno = hcp->hdr->spares[__db_log2(pgno) + 1] +
+ hcp->hdr->max_bucket + 1;
+ /* Move to the last page of the group. */
+ pgno += hcp->hdr->max_bucket;
+ ret = memp_fget(dbp->mpf,
+ &pgno, DB_MPOOL_CREATE, &h);
+ }
+ if (ret != 0)
+ return (ret);
+
+ P_INIT(h, dbp->pgsize, pgno,
+ PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ pgno -= hcp->hdr->max_bucket;
+ } else {
+ pgno = BUCKET_TO_PAGE(hcp, hcp->hdr->max_bucket + 1);
+ if ((ret =
+ memp_fget(dbp->mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
+ return (ret);
+ }
+
+ /* Now we can log the meta-data split. */
+ if (DB_LOGGING(dbc)) {
+ if ((ret = __ham_metagroup_log(dbp->dbenv,
+ dbc->txn, &h->lsn, 0, dbp->log_fileid,
+ hcp->hdr->max_bucket, pgno, &hcp->hdr->dbmeta.lsn,
+ &h->lsn)) != 0) {
+ (void)memp_fput(dbp->mpf, h, DB_MPOOL_DIRTY);
+ return (ret);
+ }
+
+ hcp->hdr->dbmeta.lsn = h->lsn;
+ }
+
+ /* If we allocated some new pages, write out the last page. */
+ if ((ret = memp_fput(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ new_bucket = ++hcp->hdr->max_bucket;
+ old_bucket = (hcp->hdr->max_bucket & hcp->hdr->low_mask);
+
+ /*
+ * If we started a new doubling, fill in the spares array with
+ * the starting page number negatively offset by the bucket number.
+ */
+ if (new_bucket > hcp->hdr->high_mask) {
+ /* Starting a new doubling */
+ hcp->hdr->low_mask = hcp->hdr->high_mask;
+ hcp->hdr->high_mask = new_bucket | hcp->hdr->low_mask;
+ if (hcp->hdr->spares[__db_log2(new_bucket) + 1] == PGNO_INVALID)
+ hcp->hdr->spares[__db_log2(new_bucket) + 1] =
+ pgno - new_bucket;
+ }
+
+ /* Relocate records to the new bucket */
+ return (__ham_split_page(dbc, old_bucket, new_bucket));
+}
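The mask bookkeeping at the end of __ham_expand_table is the usual linear-hashing doubling step. Pulled out on its own (expand_masks() is an illustrative name, not a library routine):

	/* One expansion step of the linear-hashing masks, as at the end of
	 * __ham_expand_table. */
	struct lh_masks {
		unsigned int max_bucket, high_mask, low_mask;
	};

	static unsigned int			/* returns the new bucket number */
	expand_masks(struct lh_masks *m)
	{
		unsigned int new_bucket;

		new_bucket = ++m->max_bucket;
		if (new_bucket > m->high_mask) {	/* starting a new doubling */
			m->low_mask = m->high_mask;
			m->high_mask = new_bucket | m->low_mask;
		}
		return (new_bucket);
	}

Starting from max_bucket = 3, high_mask = 3, low_mask = 1, one call yields bucket 4 and masks (7, 3); the next three calls add buckets 5..7 without touching the masks, and bucket 8 then begins the following doubling.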
+
+/*
+ * PUBLIC: u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t));
+ */
+u_int32_t
+__ham_call_hash(dbc, k, len)
+ DBC *dbc;
+ u_int8_t *k;
+ int32_t len;
+{
+ DB *dbp;
+ u_int32_t n, bucket;
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hashp = dbp->h_internal;
+
+ n = (u_int32_t)(hashp->h_hash(dbp, k, len));
+
+ bucket = n & hcp->hdr->high_mask;
+ if (bucket > hcp->hdr->max_bucket)
+ bucket = bucket & hcp->hdr->low_mask;
+ return (bucket);
+}
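Bucket selection then mirrors __ham_call_hash: mask the hash with high_mask, and if that names a bucket that has not yet been created in the current doubling, fall back to low_mask. A minimal restatement (hash_to_bucket() is an illustrative name):

	/* Bucket selection for linear hashing, as in __ham_call_hash. */
	static unsigned int
	hash_to_bucket(unsigned int hash, unsigned int max_bucket,
	    unsigned int high_mask, unsigned int low_mask)
	{
		unsigned int bucket;

		bucket = hash & high_mask;
		if (bucket > max_bucket)	/* not split yet this doubling */
			bucket &= low_mask;
		return (bucket);
	}

For example, with max_bucket = 5, high_mask = 7 and low_mask = 3, a hash of 14 gives 14 & 7 = 6, which exceeds max_bucket, so the item lands in 14 & 3 = 2, the bucket whose future split will create bucket 6.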
+
+/*
+ * Check for duplicates, and call __db_ret appropriately. Release
+ * everything held by the cursor.
+ */
+static int
+__ham_dup_return (dbc, val, flags)
+ DBC *dbc;
+ DBT *val;
+ u_int32_t flags;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ PAGE *pp;
+ DBT *myval, tmp_val;
+ db_indx_t ndx;
+ db_pgno_t pgno;
+ u_int32_t off, tlen;
+ u_int8_t *hk, type;
+ int cmp, ret;
+ db_indx_t len;
+
+ /* Check for duplicate and return the first one. */
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ ndx = H_DATAINDEX(hcp->indx);
+ type = HPAGE_TYPE(hcp->page, ndx);
+ pp = hcp->page;
+ myval = val;
+
+ /*
+ * There are 4 cases:
+ * 1. We are not in duplicate, simply return; the upper layer
+ * will do the right thing.
+ * 2. We are looking at keys and stumbled onto a duplicate.
+ * 3. We are in the middle of a duplicate set. (ISDUP set)
+ * 4. We need to check for particular data match.
+ */
+
+ /* We should never get here with off-page dups. */
+ DB_ASSERT(type != H_OFFDUP);
+
+ /* Case 1 */
+ if (type != H_DUPLICATE &&
+ flags != DB_GET_BOTH && flags != DB_GET_BOTHC)
+ return (0);
+
+ /*
+ * Here we check for the case where we just stumbled onto a
+ * duplicate. In this case, we do initialization and then
+ * let the normal duplicate code handle it. (Case 2)
+ */
+ if (!F_ISSET(hcp, H_ISDUP) && type == H_DUPLICATE) {
+ F_SET(hcp, H_ISDUP);
+ hcp->dup_tlen = LEN_HDATA(hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx);
+ hk = H_PAIRDATA(hcp->page, hcp->indx);
+ if (flags == DB_LAST
+ || flags == DB_PREV || flags == DB_PREV_NODUP) {
+ hcp->dup_off = 0;
+ do {
+ memcpy(&len,
+ HKEYDATA_DATA(hk) + hcp->dup_off,
+ sizeof(db_indx_t));
+ hcp->dup_off += DUP_SIZE(len);
+ } while (hcp->dup_off < hcp->dup_tlen);
+ hcp->dup_off -= DUP_SIZE(len);
+ } else {
+ memcpy(&len,
+ HKEYDATA_DATA(hk), sizeof(db_indx_t));
+ hcp->dup_off = 0;
+ }
+ hcp->dup_len = len;
+ }
+
+ /*
+ * If we are retrieving a specific key/data pair, then we
+ * may need to adjust the cursor before returning data.
+ * Case 4
+ */
+ if (flags == DB_GET_BOTH || flags == DB_GET_BOTHC) {
+ if (F_ISSET(hcp, H_ISDUP)) {
+ /*
+ * If we're doing a join, search forward from the
+ * current position, not the beginning of the dup set.
+ */
+ if (flags == DB_GET_BOTHC)
+ F_SET(hcp, H_CONTINUE);
+
+ __ham_dsearch(dbc, val, &off, &cmp);
+
+ /*
+ * This flag is set nowhere else and is safe to
+ * clear unconditionally.
+ */
+ F_CLR(hcp, H_CONTINUE);
+ hcp->dup_off = off;
+ } else {
+ hk = H_PAIRDATA(hcp->page, hcp->indx);
+ if (((HKEYDATA *)hk)->type == H_OFFPAGE) {
+ memcpy(&tlen,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if ((ret = __db_moff(dbp, val,
+ pgno, tlen, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ } else {
+ /*
+ * We do not zero tmp_val since the comparison
+ * routines may only look at data and size.
+ */
+ tmp_val.data = HKEYDATA_DATA(hk);
+ tmp_val.size = LEN_HDATA(hcp->page,
+ dbp->pgsize, hcp->indx);
+ cmp = dbp->dup_compare == NULL ?
+ __bam_defcmp(dbp, &tmp_val, val) :
+ dbp->dup_compare(dbp, &tmp_val, val);
+ }
+ }
+
+ if (cmp != 0)
+ return (DB_NOTFOUND);
+ }
+
+ /*
+ * Now, everything is initialized, grab a duplicate if
+ * necessary.
+ */
+ if (F_ISSET(hcp, H_ISDUP)) { /* Case 3 */
+ /*
+ * Copy the DBT in case we are retrieving into user
+ * memory and we need the parameters for it. If the
+ * user requested a partial, then we need to adjust
+ * the user's parameters to get the partial of the
+ * duplicate which is itself a partial.
+ */
+ memcpy(&tmp_val, val, sizeof(*val));
+ if (F_ISSET(&tmp_val, DB_DBT_PARTIAL)) {
+ /*
+ * Take the user's length unless it would go
+ * beyond the end of the duplicate.
+ */
+ if (tmp_val.doff + hcp->dup_off > hcp->dup_len)
+ tmp_val.dlen = 0;
+ else if (tmp_val.dlen + tmp_val.doff >
+ hcp->dup_len)
+ tmp_val.dlen =
+ hcp->dup_len - tmp_val.doff;
+
+ /*
+ * Calculate the new offset.
+ */
+ tmp_val.doff += hcp->dup_off;
+ } else {
+ F_SET(&tmp_val, DB_DBT_PARTIAL);
+ tmp_val.dlen = hcp->dup_len;
+ tmp_val.doff = hcp->dup_off + sizeof(db_indx_t);
+ }
+ myval = &tmp_val;
+ }
+
+ /*
+ * Finally, if we had a duplicate, pp, ndx, and myval should be
+ * set appropriately.
+ */
+ if ((ret = __db_ret(dbp, pp, ndx, myval, &dbc->rdata.data,
+ &dbc->rdata.ulen)) != 0)
+ return (ret);
+
+ /*
+ * In case we sent a temporary off to db_ret, set the real
+ * return values.
+ */
+ val->data = myval->data;
+ val->size = myval->size;
+
+ F_SET(val, DB_DBT_ISSET);
+
+ return (0);
+}
+
+static int
+__ham_overwrite(dbc, nval, flags)
+ DBC *dbc;
+ DBT *nval;
+ u_int32_t flags;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT *myval, tmp_val, tmp_val2;
+ void *newrec;
+ u_int8_t *hk, *p;
+ u_int32_t len, nondup_size;
+ db_indx_t newsize;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if (F_ISSET(hcp, H_ISDUP)) {
+ /*
+ * This is an overwrite of a duplicate. We should never
+ * be off-page at this point.
+ */
+ DB_ASSERT(hcp->opd == NULL);
+ /* On page dups */
+ if (F_ISSET(nval, DB_DBT_PARTIAL)) {
+ /*
+ * We're going to have to get the current item, then
+ * construct the record, do any padding and do a
+ * replace.
+ */
+ memset(&tmp_val, 0, sizeof(tmp_val));
+ if ((ret =
+ __ham_dup_return (dbc, &tmp_val, DB_CURRENT)) != 0)
+ return (ret);
+
+ /* Figure out new size. */
+ nondup_size = tmp_val.size;
+ newsize = nondup_size;
+
+ /*
+ * Three cases:
+ * 1. strictly append (may need to allocate space
+ * for pad bytes; really gross).
+ * 2. overwrite some and append.
+ * 3. strictly overwrite.
+ */
+ if (nval->doff > nondup_size)
+ newsize +=
+ (nval->doff - nondup_size + nval->size);
+ else if (nval->doff + nval->dlen > nondup_size)
+ newsize += nval->size -
+ (nondup_size - nval->doff);
+ else
+ newsize += nval->size - nval->dlen;
+
+ /*
+ * Make sure that the new size doesn't put us over
+ * the onpage duplicate size in which case we need
+ * to convert to off-page duplicates.
+ */
+ if (ISBIG(hcp, hcp->dup_tlen - nondup_size + newsize)) {
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ if ((ret = __os_malloc(dbp->dbenv,
+ DUP_SIZE(newsize), NULL, &newrec)) != 0)
+ return (ret);
+ memset(&tmp_val2, 0, sizeof(tmp_val2));
+ F_SET(&tmp_val2, DB_DBT_PARTIAL);
+
+ /* Construct the record. */
+ p = newrec;
+ /* Initial size. */
+ memcpy(p, &newsize, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+
+ /* First part of original record. */
+ len = nval->doff > tmp_val.size
+ ? tmp_val.size : nval->doff;
+ memcpy(p, tmp_val.data, len);
+ p += len;
+
+ if (nval->doff > tmp_val.size) {
+ /* Padding */
+ memset(p, 0, nval->doff - tmp_val.size);
+ p += nval->doff - tmp_val.size;
+ }
+
+ /* New bytes */
+ memcpy(p, nval->data, nval->size);
+ p += nval->size;
+
+ /* End of original record (if there is any) */
+ if (nval->doff + nval->dlen < tmp_val.size) {
+ len = tmp_val.size - nval->doff - nval->dlen;
+ memcpy(p, (u_int8_t *)tmp_val.data +
+ nval->doff + nval->dlen, len);
+ p += len;
+ }
+
+ /* Final size. */
+ memcpy(p, &newsize, sizeof(db_indx_t));
+
+ /*
+ * Make sure that the caller isn't corrupting
+ * the sort order.
+ */
+ if (dbp->dup_compare != NULL) {
+ tmp_val2.data =
+ (u_int8_t *)newrec + sizeof(db_indx_t);
+ tmp_val2.size = newsize;
+ if (dbp->dup_compare(
+ dbp, &tmp_val, &tmp_val2) != 0) {
+ (void)__os_free(newrec,
+ DUP_SIZE(newsize));
+ return (__db_duperr(dbp, flags));
+ }
+ }
+
+ tmp_val2.data = newrec;
+ tmp_val2.size = DUP_SIZE(newsize);
+ tmp_val2.doff = hcp->dup_off;
+ tmp_val2.dlen = DUP_SIZE(hcp->dup_len);
+
+ ret = __ham_replpair(dbc, &tmp_val2, 0);
+ (void)__os_free(newrec, DUP_SIZE(newsize));
+
+ /* Update cursor */
+ if (ret != 0)
+ return (ret);
+
+ if (newsize > nondup_size)
+ hcp->dup_tlen += (newsize - nondup_size);
+ else
+ hcp->dup_tlen -= (nondup_size - newsize);
+ hcp->dup_len = DUP_SIZE(newsize);
+ return (0);
+ } else {
+ /* Check whether we need to convert to off page. */
+ if (ISBIG(hcp,
+ hcp->dup_tlen - hcp->dup_len + nval->size)) {
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ /* Make sure we maintain sort order. */
+ if (dbp->dup_compare != NULL) {
+ tmp_val2.data =
+ HKEYDATA_DATA(H_PAIRDATA(hcp->page,
+ hcp->indx)) + hcp->dup_off +
+ sizeof(db_indx_t);
+ tmp_val2.size = hcp->dup_len;
+ if (dbp->dup_compare(dbp, nval, &tmp_val2) != 0)
+ return (EINVAL);
+ }
+ /* Overwriting a complete duplicate. */
+ if ((ret =
+ __ham_make_dup(dbp->dbenv, nval,
+ &tmp_val, &dbc->rdata.data, &dbc->rdata.ulen)) != 0)
+ return (ret);
+ /* Now fix what we are replacing. */
+ tmp_val.doff = hcp->dup_off;
+ tmp_val.dlen = DUP_SIZE(hcp->dup_len);
+
+ /* Update cursor */
+ if (nval->size > hcp->dup_len)
+ hcp->dup_tlen += (nval->size - hcp->dup_len);
+ else
+ hcp->dup_tlen -= (hcp->dup_len - nval->size);
+ hcp->dup_len = DUP_SIZE(nval->size);
+ }
+ myval = &tmp_val;
+ } else if (!F_ISSET(nval, DB_DBT_PARTIAL)) {
+ /* Put/overwrite */
+ memcpy(&tmp_val, nval, sizeof(*nval));
+ F_SET(&tmp_val, DB_DBT_PARTIAL);
+ tmp_val.doff = 0;
+ hk = H_PAIRDATA(hcp->page, hcp->indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE)
+ memcpy(&tmp_val.dlen,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ else
+ tmp_val.dlen = LEN_HDATA(hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx);
+ myval = &tmp_val;
+ } else
+ /* Regular partial put */
+ myval = nval;
+
+ return (__ham_replpair(dbc, myval, 0));
+}
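The replacement-size computation in the partial-put branch of __ham_overwrite falls into three cases: append past the end with padding, overwrite the tail and append, or strictly overwrite. Restated with the DBT fields passed in directly (partial_put_newsize() is an illustrative name):

	/* Size of an on-page duplicate after applying a partial put, per the
	 * three cases above; doff, dlen and size have their DBT meanings. */
	static unsigned int
	partial_put_newsize(unsigned int oldsize, unsigned int doff,
	    unsigned int dlen, unsigned int size)
	{
		unsigned int newsize = oldsize;

		if (doff > oldsize)			/* 1. append past the end, pad */
			newsize += doff - oldsize + size;
		else if (doff + dlen > oldsize)		/* 2. overwrite tail, append */
			newsize += size - (oldsize - doff);
		else					/* 3. strict overwrite */
			newsize += size - dlen;
		return (newsize);
	}

For an existing 10-byte duplicate with doff = 8, dlen = 4 and size = 6, the second case applies and the new size is 10 + 6 - (10 - 8) = 14.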
+
+/*
+ * Given a key and a cursor, sets the cursor to the page/ndx on which
+ * the key resides. If the key is found, the cursor H_OK flag is set
+ * and the pagep, bndx, pgno (dpagep, dndx, dpgno) fields are set.
+ * If the key is not found, the H_OK flag is not set. If the sought
+ * field is non-0, the pagep, bndx, pgno (dpagep, dndx, dpgno) fields
+ * are set indicating where an add might take place. If it is 0,
+ * none of the cursor pointer fields are valid.
+ */
+static int
+__ham_lookup(dbc, key, sought, mode, pgnop)
+ DBC *dbc;
+ const DBT *key;
+ u_int32_t sought;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ int match, ret;
+ u_int8_t *hk, *dk;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /*
+ * Set up cursor so that we're looking for space to add an item
+ * as we cycle through the pages looking for the key.
+ */
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+ hcp->seek_size = sought;
+
+ hcp->bucket = __ham_call_hash(dbc, (u_int8_t *)key->data, key->size);
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+
+ while (1) {
+ *pgnop = PGNO_INVALID;
+ if ((ret = __ham_item_next(dbc, mode, pgnop)) != 0)
+ return (ret);
+
+ if (F_ISSET(hcp, H_NOMORE))
+ break;
+
+ hk = H_PAIRKEY(hcp->page, hcp->indx);
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFPAGE:
+ memcpy(&tlen, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ if (tlen == key->size) {
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if ((ret = __db_moff(dbp,
+ key, pgno, tlen, NULL, &match)) != 0)
+ return (ret);
+ if (match == 0)
+ goto found_key;
+ }
+ break;
+ case H_KEYDATA:
+ if (key->size ==
+ LEN_HKEY(hcp->page, dbp->pgsize, hcp->indx) &&
+ memcmp(key->data,
+ HKEYDATA_DATA(hk), key->size) == 0) {
+ /* Found the key, check for data type. */
+found_key: F_SET(hcp, H_OK);
+ dk = H_PAIRDATA(hcp->page, hcp->indx);
+ if (HPAGE_PTYPE(dk) == H_OFFDUP)
+ memcpy(pgnop, HOFFDUP_PGNO(dk),
+ sizeof(db_pgno_t));
+ return (0);
+ }
+ break;
+ case H_DUPLICATE:
+ case H_OFFDUP:
+ /*
+ * These are errors because keys are never
+ * duplicated, only data items are.
+ */
+ return (__db_pgfmt(dbp, PGNO(hcp->page)));
+ }
+ }
+
+ /*
+ * Item was not found.
+ */
+
+ if (sought != 0)
+ return (ret);
+
+ return (ret);
+}
+
+/*
+ * __ham_init_dbt --
+ * Initialize a dbt using some possibly already allocated storage
+ * for items.
+ *
+ * PUBLIC: int __ham_init_dbt __P((DB_ENV *,
+ * PUBLIC: DBT *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__ham_init_dbt(dbenv, dbt, size, bufp, sizep)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ u_int32_t size;
+ void **bufp;
+ u_int32_t *sizep;
+{
+ int ret;
+
+ memset(dbt, 0, sizeof(*dbt));
+ if (*sizep < size) {
+ if ((ret = __os_realloc(dbenv, size, NULL, bufp)) != 0) {
+ *sizep = 0;
+ return (ret);
+ }
+ *sizep = size;
+ }
+ dbt->data = *bufp;
+ dbt->size = size;
+ return (0);
+}
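__ham_init_dbt implements a grow-only scratch buffer: the caller keeps a buffer pointer and a capacity, and the buffer is reallocated only when a larger item arrives. The same pattern with the C library allocator, simplified for illustration (the failure handling differs slightly from the routine above):

	#include <stdlib.h>

	/* Ensure *bufp can hold size bytes; *sizep tracks the current capacity. */
	static int
	ensure_capacity(void **bufp, size_t *sizep, size_t size)
	{
		void *p;

		if (*sizep < size) {
			if ((p = realloc(*bufp, size)) == NULL)
				return (-1);	/* old buffer is left intact */
			*bufp = p;
			*sizep = size;
		}
		return (0);
	}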
+
+/*
+ * Adjust the cursor after an insert or delete. The cursor passed is
+ * the one that was operated upon; we just need to check any of the
+ * others.
+ *
+ * len indicates the length of the item added/deleted
+ * add indicates if the item indicated by the cursor has just been
+ * added (add == 1) or deleted (add == 0).
+ * is_dup indicates if the addition occurred within a duplicate set.
+ *
+ * PUBLIC: int __ham_c_update
+ * PUBLIC: __P((DBC *, u_int32_t, int, int));
+ */
+int
+__ham_c_update(dbc, len, add, is_dup)
+ DBC *dbc;
+ u_int32_t len;
+ int add, is_dup;
+{
+ DB *dbp, *ldbp;
+ DBC *cp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ HASH_CURSOR *hcp, *lcp;
+ int found, ret;
+ u_int32_t order;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * Adjustment will only be logged if this is a subtransaction.
+	 * Only subtransactions can abort and affect their parent
+	 * transaction's cursors.
+ */
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+
+ /*
+	 * Calculate the order of this deleted record.
+	 * This will be one greater than any cursor that is pointing
+ * at this record and already marked as deleted.
+ */
+ order = 0;
+ if (!add) {
+ order = 1;
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+ lcp = (HASH_CURSOR *)cp->internal;
+ if (F_ISSET(lcp, H_DELETED) &&
+ hcp->pgno == lcp->pgno &&
+ hcp->indx == lcp->indx &&
+ order <= lcp->order &&
+ (!is_dup || hcp->dup_off == lcp->dup_off))
+ order = lcp->order +1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ hcp->order = order;
+ }
+
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ lcp = (HASH_CURSOR *)cp->internal;
+
+ if (lcp->pgno != hcp->pgno || lcp->indx == NDX_INVALID)
+ continue;
+
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+
+ if (!is_dup) {
+ if (add) {
+ /*
+ * This routine is not called to add
+ * non-dup records which are always put
+ * at the end. It is only called from
+ * recovery in this case and the
+ * cursor will be marked deleted.
+ * We are "undeleting" so unmark all
+ * cursors with the same order.
+ */
+ if (lcp->indx == hcp->indx
+ && F_ISSET(lcp, H_DELETED)) {
+ if (lcp->order == hcp->order)
+ F_CLR(lcp, H_DELETED);
+ else if (lcp->order >
+ hcp->order) {
+
+ /*
+ * If we've moved this cursor's
+ * index, split its order
+ * number--i.e., decrement it by
+ * enough so that the lowest
+ * cursor moved has order 1.
+					 * hcp->order is the split
+ * point, so decrement by one
+ * less than that.
+ */
+ lcp->order -=
+ (hcp->order - 1);
+ lcp->indx += 2;
+ }
+ } else if (lcp->indx >= hcp->indx)
+ lcp->indx += 2;
+
+ } else {
+ if (lcp->indx > hcp->indx) {
+ lcp->indx -= 2;
+ if (lcp->indx == hcp->indx
+ && F_ISSET(lcp, H_DELETED))
+ lcp->order += order;
+ } else if (lcp->indx == hcp->indx
+ && !F_ISSET(lcp, H_DELETED)) {
+ F_SET(lcp, H_DELETED);
+ lcp->order = order;
+ }
+ }
+ } else if (lcp->indx == hcp->indx) {
+ /*
+ * Handle duplicates. This routine is
+ * only called for on page dups.
+ * Off page dups are handled by btree/rtree
+ * code.
+ */
+ if (add) {
+ lcp->dup_tlen += len;
+ if (lcp->dup_off == hcp->dup_off
+ && F_ISSET(hcp, H_DELETED)
+ && F_ISSET(lcp, H_DELETED)) {
+ /* Abort of a delete. */
+ if (lcp->order == hcp->order)
+ F_CLR(lcp, H_DELETED);
+ else if (lcp->order >
+ hcp->order) {
+ lcp->order -=
+ (hcp->order -1);
+ lcp->dup_off += len;
+ }
+ } else if (lcp->dup_off >= hcp->dup_off)
+ lcp->dup_off += len;
+ } else {
+ lcp->dup_tlen -= len;
+ if (lcp->dup_off > hcp->dup_off) {
+ lcp->dup_off -= len;
+ if (lcp->dup_off == hcp->dup_off
+ && F_ISSET(lcp, H_DELETED))
+ lcp->order += order;
+ } else if (lcp->dup_off ==
+ hcp->dup_off &&
+ !F_ISSET(lcp, H_DELETED)) {
+ F_SET(lcp, H_DELETED);
+ lcp->order = order;
+ }
+ }
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DB_LOGGING(dbc)) {
+ if ((ret = __ham_curadj_log(dbenv,
+ my_txn, &lsn, 0, dbp->log_fileid, hcp->pgno,
+ hcp->indx, len, hcp->dup_off, add, is_dup, order)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __ham_get_clist --
+ *
+ * Get a list of cursors either on a particular bucket or on a particular
+ * page and index combination. The former is so that we can update
+ * cursors on a split. The latter is so we can update cursors when we
+ * move items off page.
+ *
+ * PUBLIC: int __ham_get_clist __P((DB *,
+ * PUBLIC: db_pgno_t, u_int32_t, DBC ***));
+ */
+int
+__ham_get_clist(dbp, bucket, indx, listp)
+ DB *dbp;
+ db_pgno_t bucket;
+ u_int32_t indx;
+ DBC ***listp;
+{
+ DB *ldbp;
+ DBC *cp;
+ DB_ENV *dbenv;
+ int nalloc, nused, ret;
+
+ /*
+ * Assume that finding anything is the exception, so optimize for
+ * the case where there aren't any.
+ */
+ nalloc = nused = 0;
+ *listp = NULL;
+ dbenv = dbp->dbenv;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links))
+ if (cp->dbtype == DB_HASH &&
+ ((indx == NDX_INVALID &&
+ ((HASH_CURSOR *)(cp->internal))->bucket
+ == bucket) || (indx != NDX_INVALID &&
+ cp->internal->pgno == bucket &&
+ cp->internal->indx == indx))) {
+ if (nused >= nalloc) {
+ nalloc += 10;
+ if ((ret = __os_realloc(dbp->dbenv,
+ nalloc * sizeof(HASH_CURSOR *),
+ NULL, listp)) != 0)
+ return (ret);
+ }
+ (*listp)[nused++] = cp;
+ }
+
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (listp != NULL) {
+ if (nused >= nalloc) {
+ nalloc++;
+ if ((ret = __os_realloc(dbp->dbenv,
+ nalloc * sizeof(HASH_CURSOR *), NULL, listp)) != 0)
+ return (ret);
+ }
+ (*listp)[nused] = NULL;
+ }
+ return (0);
+}
+
+static int
+__ham_del_dups(orig_dbc, key)
+ DBC *orig_dbc;
+ DBT *key;
+{
+ DBC *dbc;
+ DBT data, lkey;
+ int ret, t_ret;
+
+ /* Allocate a cursor. */
+ if ((ret = orig_dbc->c_dup(orig_dbc, &dbc, 0)) != 0)
+ return (ret);
+
+ /*
+ * Walk a cursor through the key/data pairs, deleting as we go. Set
+ * the DB_DBT_USERMEM flag, as this might be a threaded application
+ * and the flags checking will catch us. We don't actually want the
+ * keys or data, so request a partial of length 0.
+ */
+ memset(&lkey, 0, sizeof(lkey));
+ F_SET(&lkey, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ /* Walk through the set of key/data pairs, deleting as we go. */
+ if ((ret = dbc->c_get(dbc, key, &data, DB_SET)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ goto err;
+ }
+
+ for (;;) {
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+ if ((ret = dbc->c_get(dbc, &lkey, &data, DB_NEXT_DUP)) != 0) {
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ break;
+ }
+ goto err;
+ }
+ }
+
+err: /*
+ * Discard the cursor. This will cause the underlying off-page dup
+ * tree to go away as well as the actual entry on the page.
+ */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+
+}
+
+static int
+__ham_c_writelock(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+ DB_LOCK tmp_lock;
+ int ret;
+
+ /*
+ * All we need do is acquire the lock and let the off-page
+ * dup tree do its thing.
+ */
+ if (!STD_LOCKING(dbc))
+ return (0);
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((hcp->lock.off == LOCK_INVALID || hcp->lock_mode == DB_LOCK_READ)) {
+ tmp_lock = hcp->lock;
+ if ((ret = __ham_lock_bucket(dbc, DB_LOCK_WRITE)) != 0)
+ return (ret);
+ if (tmp_lock.off != LOCK_INVALID &&
+ (ret = lock_put(dbc->dbp->dbenv, &tmp_lock)) != 0)
+ return (ret);
+ }
+ return (0);
+}
+
+/*
+ * __ham_c_chgpg --
+ *
+ * Adjust the cursors after moving an item from one page to another.
+ * If the old_index is NDX_INVALID, that means that we copied the
+ * page wholesale and we're leaving indices intact and just changing
+ * the page number.
+ *
+ * PUBLIC: int __ham_c_chgpg
+ * PUBLIC: __P((DBC *, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+ */
+int
+__ham_c_chgpg(dbc, old_pgno, old_index, new_pgno, new_index)
+ DBC *dbc;
+ db_pgno_t old_pgno, new_pgno;
+ u_int32_t old_index, new_index;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *cp;
+ HASH_CURSOR *hcp;
+ int found, ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ hcp = (HASH_CURSOR *)cp->internal;
+ if (hcp->pgno == old_pgno) {
+ if (old_index == NDX_INVALID) {
+ hcp->pgno = new_pgno;
+ } else if (hcp->indx == old_index) {
+ hcp->pgno = new_pgno;
+ hcp->indx = new_index;
+ } else
+ continue;
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DB_LOGGING(dbc)) {
+ if ((ret = __ham_chgpg_log(dbenv,
+ my_txn, &lsn, 0, dbp->log_fileid, DB_HAM_CHGPG,
+ old_pgno, new_pgno, old_index, new_index)) != 0)
+ return (ret);
+ }
+ return (0);
+}
diff --git a/bdb/hash/hash.src b/bdb/hash/hash.src
new file mode 100644
index 00000000000..e6ecd11c907
--- /dev/null
+++ b/bdb/hash/hash.src
@@ -0,0 +1,361 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: hash.src,v 10.24 2000/12/12 17:41:48 bostic Exp $
+ */
+
+/*
+ * This is the source file used to create the logging functions for the
+ * hash package. Each access method (or set of routines wishing to register
+ * record types with the transaction system) should have a file like this.
+ * Each type of log record and its parameters is defined. The basic
+ * format of a record definition is:
+ *
+ * BEGIN <RECORD_TYPE>
+ * ARG|STRING|POINTER <variable name> <variable type> <printf format>
+ * ...
+ * END
+ * ARG	the argument is a simple parameter of the type specified.
+ * DBT the argument is a DBT (db.h) containing a length and pointer.
+ * PTR the argument is a pointer to the data type specified; the entire
+ * type should be logged.
+ *
+ * There is a set of shell scripts of the form xxx.sh that generate C
+ * code and/or .h files to process these. (This is probably better done
+ * in a single PERL script, but for now, this works.)
+ *
+ * The DB recovery system requires the following three fields appear in
+ * every record, and will assign them to the per-record-type structures
+ * as well as making them the first parameters to the appropriate logging
+ * call.
+ * rectype: record-type, identifies the structure and log/read call
+ * txnid: transaction id, a DBT in this implementation
+ * prev: the last LSN for this transaction
+ */
+
+/*
+ * Use the argument of PREFIX as the prefix for all record types,
+ * routines, id numbers, etc.
+ */
+PREFIX ham
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "db_page.h"
+INCLUDE #include "db_dispatch.h"
+INCLUDE #include "db_am.h"
+INCLUDE #include "hash.h"
+INCLUDE #include "txn.h"
+INCLUDE
+
+/*
+ * HASH-insdel: used for hash to insert/delete a pair of entries onto a master
+ * page. The pair might be regular key/data pairs or they might be the
+ * structures that refer to off page items, duplicates or offpage duplicates.
+ * opcode - PUTPAIR/DELPAIR + big masks
+ * fileid - identifies the file referenced
+ * pgno - page within file
+ * ndx - index on the page of the item being added (item index)
+ * pagelsn - lsn on the page before the update
+ * key - the key being inserted
+ * data - the data being inserted
+ */
+BEGIN insdel 21
+ARG opcode u_int32_t lu
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG ndx u_int32_t lu
+POINTER pagelsn DB_LSN * lu
+DBT key DBT s
+DBT data DBT s
+END
+
+/*
+ * Used to add and remove overflow pages.
+ * prev_pgno is the previous page that is going to get modified to
+ * point to this one. If this is the first page in a chain
+ * then prev_pgno should be PGNO_INVALID.
+ * new_pgno is the page being allocated.
+ * next_pgno is the page that follows this one. On allocation,
+ * this should be PGNO_INVALID. For deletes, it may exist.
+ * pagelsn is the old lsn on the page.
+ */
+BEGIN newpage 22
+ARG opcode u_int32_t lu
+ARG fileid int32_t ld
+ARG prev_pgno db_pgno_t lu
+POINTER prevlsn DB_LSN * lu
+ARG new_pgno db_pgno_t lu
+POINTER pagelsn DB_LSN * lu
+ARG next_pgno db_pgno_t lu
+POINTER nextlsn DB_LSN * lu
+END
+
+/*
+ * DEPRECATED in 3.0.
+ * Superseded by metagroup, which allocates a group of new pages.
+ *
+ * Splitting requires two types of log messages. The first logs the
+ * meta-data of the split.
+ *
+ * For the meta-data split
+ * bucket: max_bucket in table before split
+ * ovflpoint: overflow point before split.
+ * spares: spares[ovflpoint] before split.
+ */
+DEPRECATED splitmeta 23
+ARG fileid int32_t ld
+ARG bucket u_int32_t lu
+ARG ovflpoint u_int32_t lu
+ARG spares u_int32_t lu
+POINTER metalsn DB_LSN * lu
+END
+
+/*
+ * Splitting requires two types of log messages. The second logs the
+ * data on the original page. To redo the split, we have to visit the
+ * new page (pages) and add the items back on the page if they are not
+ * yet there.
+ */
+BEGIN splitdata 24
+ARG fileid int32_t ld
+ARG opcode u_int32_t lu
+ARG pgno db_pgno_t lu
+DBT pageimage DBT s
+POINTER pagelsn DB_LSN * lu
+END
+
+/*
+ * HASH-replace: is used for hash to handle partial puts that only
+ * affect a single master page.
+ * fileid - identifies the file referenced
+ * pgno - page within file
+ * ndx - index on the page of the item being modified (item index)
+ * pagelsn - lsn on the page before the update
+ * off - offset in the old item where the new item is going.
+ * olditem - DBT that describes the part of the item being replaced.
+ * newitem - DBT of the new item.
+ * makedup - this was a replacement that made an item a duplicate.
+ */
+BEGIN replace 25
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG ndx u_int32_t lu
+POINTER pagelsn DB_LSN * lu
+ARG off int32_t ld
+DBT olditem DBT s
+DBT newitem DBT s
+ARG makedup u_int32_t lu
+END
+
+/*
+ * DEPRECATED in 3.0.
+ * Hash now uses the btree allocation and deletion page routines.
+ *
+ * HASH-newpgno: is used to record getting/deleting a new page number.
+ * This doesn't require much data modification, just modifying the
+ * meta-data.
+ * pgno is the page being allocated/freed.
+ * free_pgno is the next_pgno on the free list.
+ * old_type was the type of a page being deallocated.
+ * old_pgno was the next page number before the deallocation.
+ */
+DEPRECATED newpgno 26
+ARG opcode u_int32_t lu
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG free_pgno db_pgno_t lu
+ARG old_type u_int32_t lu
+ARG old_pgno db_pgno_t lu
+ARG new_type u_int32_t lu
+POINTER pagelsn DB_LSN * lu
+POINTER metalsn DB_LSN * lu
+END
+
+/*
+ * DEPRECATED in 3.0.
+ * Since we now pre-allocate the contiguous chunk of pages for a doubling,
+ * there is no big benefit to pre-allocating a few extra pages. It used
+ * to be that the file was only physically as large as the current bucket,
+ * so if you were on a doubling of 16K, but were only on the first bucket
+ * of that 16K, the file was much shorter than it would be at the end of
+ * the doubling, so we didn't want to force overflow pages at the end of the
+ * 16K pages. Since we now must allocate the 16K pages (because of sub
+ * databases), it's not a big deal to tack extra pages on at the end.
+ *
+ * ovfl: initialize a set of overflow pages.
+ */
+DEPRECATED ovfl 27
+ARG fileid int32_t ld
+ARG start_pgno db_pgno_t lu
+ARG npages u_int32_t lu
+ARG free_pgno db_pgno_t lu
+ARG ovflpoint u_int32_t lu
+POINTER metalsn DB_LSN * lu
+END
+
+/*
+ * Used when we empty the first page in a bucket and there are pages after
+ * it. The page after it gets copied into the bucket page (since bucket
+ * pages have to be in fixed locations).
+ * pgno: the bucket page
+ * pagelsn: the old LSN on the bucket page
+ * next_pgno: the page number of the next page
+ * nnext_pgno: page after next_pgno (may need to change its prev)
+ * nnextlsn: the LSN of nnext_pgno.
+ */
+BEGIN copypage 28
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER pagelsn DB_LSN * lu
+ARG next_pgno db_pgno_t lu
+POINTER nextlsn DB_LSN * lu
+ARG nnext_pgno db_pgno_t lu
+POINTER nnextlsn DB_LSN * lu
+DBT page DBT s
+END
+
+/*
+ * This replaces the old splitmeta operation. It behaves largely the same
+ * way, but it has enough information so that we can record a group allocation
+ * which we do now because of sub databases. The number of pages allocated is
+ * always bucket + 1.  pgno is the page number of the first newly allocated
+ * bucket.
+ * bucket: Old maximum bucket number.
+ * pgno: Page allocated to bucket + 1 (first newly allocated page)
+ * metalsn: Lsn of the meta-data page.
+ * pagelsn: Lsn of the maximum page allocated.
+ */
+BEGIN metagroup 29
+ARG fileid int32_t ld
+ARG bucket u_int32_t lu
+ARG pgno db_pgno_t lu
+POINTER metalsn DB_LSN * lu
+POINTER pagelsn DB_LSN * lu
+END
+
+/*
+ * groupalloc
+ *
+ * This is used in conjunction with MPOOL_NEW_GROUP when we are creating
+ * a new database, to make sure that we recreate or reclaim free pages
+ * when we allocate a chunk of contiguous ones.
+ *
+ * pgno: meta-data page number
+ * metalsn: meta-data lsn
+ * start_pgno: starting page number
+ * num: number of allocated pages
+ */
+DEPRECATED groupalloc1 30
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+POINTER metalsn DB_LSN * lu
+POINTER mmetalsn DB_LSN * lu
+ARG start_pgno db_pgno_t lu
+ARG num u_int32_t lu
+END
+
+DEPRECATED groupalloc2 31
+ARG fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+POINTER alloc_lsn DB_LSN * lu
+ARG start_pgno db_pgno_t lu
+ARG num u_int32_t lu
+ARG free db_pgno_t lu
+END
+
+BEGIN groupalloc 32
+ARG fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+ARG start_pgno db_pgno_t lu
+ARG num u_int32_t lu
+ARG free db_pgno_t lu
+END
+
+/*
+ * Records for backing out cursor adjustment.
+ * curadj - added or deleted a record or a dup
+ *	within a record.
+ * pgno - page that was affected.
+ * indx - index of the record affected.
+ * len - if a dup, its length.
+ * dup_off - if a dup, its offset.
+ * add - 1 if add, 0 if delete.
+ * is_dup - 1 if dup, 0 otherwise.
+ * order - order assigned to this deleted record or dup.
+ *
+ * chgpg - removed a page; the records were moved to a new page.
+ * mode - CHGPG: page was deleted or records moved to a new page.
+ *	- SPLIT: we split a bucket.
+ *	- DUP: we converted to off-page duplicates.
+ * old_pgno, new_pgno - old and new page numbers.
+ * old_indx, new_indx - old and new index numbers, NDX_INVALID if
+ *	it affects all records on the page.
+ */
+BEGIN curadj 33
+ARG fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG len u_int32_t lu
+ARG dup_off u_int32_t lu
+ARG add int ld
+ARG is_dup int ld
+ARG order u_int32_t lu
+END
+
+BEGIN chgpg 34
+ARG fileid int32_t ld
+ARG mode db_ham_mode ld
+ARG old_pgno db_pgno_t lu
+ARG new_pgno db_pgno_t lu
+ARG old_indx u_int32_t lu
+ARG new_indx u_int32_t lu
+END
+
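The descriptions above are what gen_rec.awk consumes to produce the marshalling, printing, and reading routines in hash_auto.c below. As a hedged illustration of the record format those routines agree on, the following sketch lays out the insdel record as it is written to the log; the struct is purely illustrative (gen_rec.awk emits functions, not this struct), and the variable-length key and data bytes follow their size fields rather than living inside a fixed struct.

/*
 * Illustrative layout sketch only: reconstructed from the generated
 * __ham_insdel_log()/__ham_insdel_read() pair below, not a struct that
 * gen_rec.awk actually emits.  Every record starts with a common
 * header (record type, transaction id, previous LSN of the
 * transaction), followed by the fields in declaration order; DBT
 * fields are written as a u_int32_t size followed by that many bytes,
 * and NULL LSN pointers are written as zeroed LSNs.
 */
struct __ham_insdel_layout_sketch {
	u_int32_t rectype;	/* DB_ham_insdel */
	u_int32_t txnid;	/* 0 when there is no transaction */
	DB_LSN	  prev_lsn;	/* last LSN of the transaction */
	u_int32_t opcode;
	int32_t	  fileid;
	db_pgno_t pgno;
	u_int32_t ndx;
	DB_LSN	  pagelsn;	/* zeroed when the caller passes NULL */
	u_int32_t key_size;	/* key bytes follow in the record */
	u_int32_t data_size;	/* data bytes follow in the record */
};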
diff --git a/bdb/hash/hash_auto.c b/bdb/hash/hash_auto.c
new file mode 100644
index 00000000000..b6faf4f5645
--- /dev/null
+++ b/bdb/hash/hash_auto.c
@@ -0,0 +1,2023 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "hash.h"
+#include "txn.h"
+
+int
+__ham_insdel_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, fileid, pgno, ndx, pagelsn, key,
+ data)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN * pagelsn;
+ const DBT *key;
+ const DBT *data;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_insdel;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(ndx)
+ + sizeof(*pagelsn)
+ + sizeof(u_int32_t) + (key == NULL ? 0 : key->size)
+ + sizeof(u_int32_t) + (data == NULL ? 0 : data->size);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &ndx, sizeof(ndx));
+ bp += sizeof(ndx);
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+ if (key == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &key->size, sizeof(key->size));
+ bp += sizeof(key->size);
+ memcpy(bp, key->data, key->size);
+ bp += key->size;
+ }
+ if (data == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &data->size, sizeof(data->size));
+ bp += sizeof(data->size);
+ memcpy(bp, data->data, data->size);
+ bp += data->size;
+ }
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
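As a rough usage sketch (not taken from this file), a caller in the hash access method would log the change before touching the page and then stamp the page with the returned LSN. The dbp, dbc, pagep, ndx, key, and data arguments, the PUTPAIR opcode, dbp->log_fileid, and the LSN() page macro are assumptions about the surrounding hash and page headers.

/*
 * Caller sketch only.  PUTPAIR, LSN(), dbp->log_fileid, and the
 * dbc/pagep arguments are assumptions about the surrounding hash and
 * page headers, not definitions from this file.
 */
static int
__ham_log_put_sketch(dbp, dbc, pagep, ndx, key, data)
	DB *dbp;
	DBC *dbc;
	PAGE *pagep;
	u_int32_t ndx;
	const DBT *key, *data;
{
	DB_LSN new_lsn;
	int ret;

	/* Log the insert first, then update the page in place. */
	if ((ret = __ham_insdel_log(dbp->dbenv, dbc->txn, &new_lsn, 0,
	    PUTPAIR, dbp->log_fileid, pagep->pgno, ndx,
	    &LSN(pagep), key, data)) != 0)
		return (ret);

	/* The page now carries the LSN of the record that changed it. */
	LSN(pagep) = new_lsn;
	return (0);
}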
+
+int
+__ham_insdel_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_insdel_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_insdel_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_insdel: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tndx: %lu\n", (u_long)argp->ndx);
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\tkey: ");
+ for (i = 0; i < argp->key.size; i++) {
+ ch = ((u_int8_t *)argp->key.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tdata: ");
+ for (i = 0; i < argp->data.size; i++) {
+ ch = ((u_int8_t *)argp->data.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_insdel_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_insdel_args **argpp;
+{
+ __ham_insdel_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_insdel_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->ndx, bp, sizeof(argp->ndx));
+ bp += sizeof(argp->ndx);
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ memset(&argp->key, 0, sizeof(argp->key));
+ memcpy(&argp->key.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->key.data = bp;
+ bp += argp->key.size;
+ memset(&argp->data, 0, sizeof(argp->data));
+ memcpy(&argp->data.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->data.data = bp;
+ bp += argp->data.size;
+ *argpp = argp;
+ return (0);
+}
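A minimal sketch of decoding one of these records outside recovery, assuming recp points at a DBT that holds a DB_ham_insdel record (for example, one fetched through the log interface):

/*
 * Sketch only: recp is assumed to hold a DB_ham_insdel log record.
 * The read routine unmarshals into an argument structure that the
 * caller must release with __os_free().
 */
static int
__ham_insdel_dump_sketch(dbenv, recp)
	DB_ENV *dbenv;
	DBT *recp;
{
	__ham_insdel_args *argp;
	int ret;

	if ((ret = __ham_insdel_read(dbenv, recp->data, &argp)) != 0)
		return (ret);
	printf("insdel: opcode %lu pgno %lu ndx %lu\n",
	    (u_long)argp->opcode, (u_long)argp->pgno, (u_long)argp->ndx);
	__os_free(argp, 0);
	return (0);
}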
+
+int
+__ham_newpage_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, fileid, prev_pgno, prevlsn, new_pgno, pagelsn,
+ next_pgno, nextlsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t prev_pgno;
+ DB_LSN * prevlsn;
+ db_pgno_t new_pgno;
+ DB_LSN * pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN * nextlsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_newpage;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(fileid)
+ + sizeof(prev_pgno)
+ + sizeof(*prevlsn)
+ + sizeof(new_pgno)
+ + sizeof(*pagelsn)
+ + sizeof(next_pgno)
+ + sizeof(*nextlsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &prev_pgno, sizeof(prev_pgno));
+ bp += sizeof(prev_pgno);
+ if (prevlsn != NULL)
+ memcpy(bp, prevlsn, sizeof(*prevlsn));
+ else
+ memset(bp, 0, sizeof(*prevlsn));
+ bp += sizeof(*prevlsn);
+ memcpy(bp, &new_pgno, sizeof(new_pgno));
+ bp += sizeof(new_pgno);
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+ memcpy(bp, &next_pgno, sizeof(next_pgno));
+ bp += sizeof(next_pgno);
+ if (nextlsn != NULL)
+ memcpy(bp, nextlsn, sizeof(*nextlsn));
+ else
+ memset(bp, 0, sizeof(*nextlsn));
+ bp += sizeof(*nextlsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__ham_newpage_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_newpage_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_newpage_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_newpage: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tprev_pgno: %lu\n", (u_long)argp->prev_pgno);
+ printf("\tprevlsn: [%lu][%lu]\n",
+ (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset);
+ printf("\tnew_pgno: %lu\n", (u_long)argp->new_pgno);
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno);
+ printf("\tnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_newpage_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_newpage_args **argpp;
+{
+ __ham_newpage_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_newpage_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->prev_pgno, bp, sizeof(argp->prev_pgno));
+ bp += sizeof(argp->prev_pgno);
+ memcpy(&argp->prevlsn, bp, sizeof(argp->prevlsn));
+ bp += sizeof(argp->prevlsn);
+ memcpy(&argp->new_pgno, bp, sizeof(argp->new_pgno));
+ bp += sizeof(argp->new_pgno);
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ memcpy(&argp->next_pgno, bp, sizeof(argp->next_pgno));
+ bp += sizeof(argp->next_pgno);
+ memcpy(&argp->nextlsn, bp, sizeof(argp->nextlsn));
+ bp += sizeof(argp->nextlsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_splitmeta_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_splitmeta_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_splitmeta_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_splitmeta: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tbucket: %lu\n", (u_long)argp->bucket);
+ printf("\tovflpoint: %lu\n", (u_long)argp->ovflpoint);
+ printf("\tspares: %lu\n", (u_long)argp->spares);
+ printf("\tmetalsn: [%lu][%lu]\n",
+ (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_splitmeta_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_splitmeta_args **argpp;
+{
+ __ham_splitmeta_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_splitmeta_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->bucket, bp, sizeof(argp->bucket));
+ bp += sizeof(argp->bucket);
+ memcpy(&argp->ovflpoint, bp, sizeof(argp->ovflpoint));
+ bp += sizeof(argp->ovflpoint);
+ memcpy(&argp->spares, bp, sizeof(argp->spares));
+ bp += sizeof(argp->spares);
+ memcpy(&argp->metalsn, bp, sizeof(argp->metalsn));
+ bp += sizeof(argp->metalsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_splitdata_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, opcode, pgno, pageimage, pagelsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ const DBT *pageimage;
+ DB_LSN * pagelsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_splitdata;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(opcode)
+ + sizeof(pgno)
+ + sizeof(u_int32_t) + (pageimage == NULL ? 0 : pageimage->size)
+ + sizeof(*pagelsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (pageimage == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &pageimage->size, sizeof(pageimage->size));
+ bp += sizeof(pageimage->size);
+ memcpy(bp, pageimage->data, pageimage->size);
+ bp += pageimage->size;
+ }
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__ham_splitdata_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_splitdata_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_splitdata_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_splitdata: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tpageimage: ");
+ for (i = 0; i < argp->pageimage.size; i++) {
+ ch = ((u_int8_t *)argp->pageimage.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_splitdata_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_splitdata_args **argpp;
+{
+ __ham_splitdata_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_splitdata_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memset(&argp->pageimage, 0, sizeof(argp->pageimage));
+ memcpy(&argp->pageimage.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pageimage.data = bp;
+ bp += argp->pageimage.size;
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_replace_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, ndx, pagelsn, off, olditem,
+ newitem, makedup)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN * pagelsn;
+ int32_t off;
+ const DBT *olditem;
+ const DBT *newitem;
+ u_int32_t makedup;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_replace;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(ndx)
+ + sizeof(*pagelsn)
+ + sizeof(off)
+ + sizeof(u_int32_t) + (olditem == NULL ? 0 : olditem->size)
+ + sizeof(u_int32_t) + (newitem == NULL ? 0 : newitem->size)
+ + sizeof(makedup);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &ndx, sizeof(ndx));
+ bp += sizeof(ndx);
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+ memcpy(bp, &off, sizeof(off));
+ bp += sizeof(off);
+ if (olditem == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &olditem->size, sizeof(olditem->size));
+ bp += sizeof(olditem->size);
+ memcpy(bp, olditem->data, olditem->size);
+ bp += olditem->size;
+ }
+ if (newitem == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &newitem->size, sizeof(newitem->size));
+ bp += sizeof(newitem->size);
+ memcpy(bp, newitem->data, newitem->size);
+ bp += newitem->size;
+ }
+ memcpy(bp, &makedup, sizeof(makedup));
+ bp += sizeof(makedup);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__ham_replace_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_replace_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_replace_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_replace: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tndx: %lu\n", (u_long)argp->ndx);
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\toff: %ld\n", (long)argp->off);
+ printf("\tolditem: ");
+ for (i = 0; i < argp->olditem.size; i++) {
+ ch = ((u_int8_t *)argp->olditem.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tnewitem: ");
+ for (i = 0; i < argp->newitem.size; i++) {
+ ch = ((u_int8_t *)argp->newitem.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tmakedup: %lu\n", (u_long)argp->makedup);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_replace_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_replace_args **argpp;
+{
+ __ham_replace_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_replace_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->ndx, bp, sizeof(argp->ndx));
+ bp += sizeof(argp->ndx);
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ memcpy(&argp->off, bp, sizeof(argp->off));
+ bp += sizeof(argp->off);
+ memset(&argp->olditem, 0, sizeof(argp->olditem));
+ memcpy(&argp->olditem.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->olditem.data = bp;
+ bp += argp->olditem.size;
+ memset(&argp->newitem, 0, sizeof(argp->newitem));
+ memcpy(&argp->newitem.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->newitem.data = bp;
+ bp += argp->newitem.size;
+ memcpy(&argp->makedup, bp, sizeof(argp->makedup));
+ bp += sizeof(argp->makedup);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_newpgno_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_newpgno_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_newpgno_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_newpgno: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tfree_pgno: %lu\n", (u_long)argp->free_pgno);
+ printf("\told_type: %lu\n", (u_long)argp->old_type);
+ printf("\told_pgno: %lu\n", (u_long)argp->old_pgno);
+ printf("\tnew_type: %lu\n", (u_long)argp->new_type);
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\tmetalsn: [%lu][%lu]\n",
+ (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_newpgno_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_newpgno_args **argpp;
+{
+ __ham_newpgno_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_newpgno_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->free_pgno, bp, sizeof(argp->free_pgno));
+ bp += sizeof(argp->free_pgno);
+ memcpy(&argp->old_type, bp, sizeof(argp->old_type));
+ bp += sizeof(argp->old_type);
+ memcpy(&argp->old_pgno, bp, sizeof(argp->old_pgno));
+ bp += sizeof(argp->old_pgno);
+ memcpy(&argp->new_type, bp, sizeof(argp->new_type));
+ bp += sizeof(argp->new_type);
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ memcpy(&argp->metalsn, bp, sizeof(argp->metalsn));
+ bp += sizeof(argp->metalsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_ovfl_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_ovfl_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_ovfl_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_ovfl: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tstart_pgno: %lu\n", (u_long)argp->start_pgno);
+ printf("\tnpages: %lu\n", (u_long)argp->npages);
+ printf("\tfree_pgno: %lu\n", (u_long)argp->free_pgno);
+ printf("\tovflpoint: %lu\n", (u_long)argp->ovflpoint);
+ printf("\tmetalsn: [%lu][%lu]\n",
+ (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_ovfl_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_ovfl_args **argpp;
+{
+ __ham_ovfl_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_ovfl_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->start_pgno, bp, sizeof(argp->start_pgno));
+ bp += sizeof(argp->start_pgno);
+ memcpy(&argp->npages, bp, sizeof(argp->npages));
+ bp += sizeof(argp->npages);
+ memcpy(&argp->free_pgno, bp, sizeof(argp->free_pgno));
+ bp += sizeof(argp->free_pgno);
+ memcpy(&argp->ovflpoint, bp, sizeof(argp->ovflpoint));
+ bp += sizeof(argp->ovflpoint);
+ memcpy(&argp->metalsn, bp, sizeof(argp->metalsn));
+ bp += sizeof(argp->metalsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_copypage_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, pagelsn, next_pgno, nextlsn, nnext_pgno,
+ nnextlsn, page)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN * pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN * nextlsn;
+ db_pgno_t nnext_pgno;
+ DB_LSN * nnextlsn;
+ const DBT *page;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_copypage;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(*pagelsn)
+ + sizeof(next_pgno)
+ + sizeof(*nextlsn)
+ + sizeof(nnext_pgno)
+ + sizeof(*nnextlsn)
+ + sizeof(u_int32_t) + (page == NULL ? 0 : page->size);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+ memcpy(bp, &next_pgno, sizeof(next_pgno));
+ bp += sizeof(next_pgno);
+ if (nextlsn != NULL)
+ memcpy(bp, nextlsn, sizeof(*nextlsn));
+ else
+ memset(bp, 0, sizeof(*nextlsn));
+ bp += sizeof(*nextlsn);
+ memcpy(bp, &nnext_pgno, sizeof(nnext_pgno));
+ bp += sizeof(nnext_pgno);
+ if (nnextlsn != NULL)
+ memcpy(bp, nnextlsn, sizeof(*nnextlsn));
+ else
+ memset(bp, 0, sizeof(*nnextlsn));
+ bp += sizeof(*nnextlsn);
+ if (page == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &page->size, sizeof(page->size));
+ bp += sizeof(page->size);
+ memcpy(bp, page->data, page->size);
+ bp += page->size;
+ }
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__ham_copypage_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_copypage_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_copypage_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_copypage: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno);
+ printf("\tnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset);
+ printf("\tnnext_pgno: %lu\n", (u_long)argp->nnext_pgno);
+ printf("\tnnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nnextlsn.file, (u_long)argp->nnextlsn.offset);
+ printf("\tpage: ");
+ for (i = 0; i < argp->page.size; i++) {
+ ch = ((u_int8_t *)argp->page.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_copypage_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_copypage_args **argpp;
+{
+ __ham_copypage_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_copypage_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ memcpy(&argp->next_pgno, bp, sizeof(argp->next_pgno));
+ bp += sizeof(argp->next_pgno);
+ memcpy(&argp->nextlsn, bp, sizeof(argp->nextlsn));
+ bp += sizeof(argp->nextlsn);
+ memcpy(&argp->nnext_pgno, bp, sizeof(argp->nnext_pgno));
+ bp += sizeof(argp->nnext_pgno);
+ memcpy(&argp->nnextlsn, bp, sizeof(argp->nnextlsn));
+ bp += sizeof(argp->nnextlsn);
+ memset(&argp->page, 0, sizeof(argp->page));
+ memcpy(&argp->page.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->page.data = bp;
+ bp += argp->page.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_metagroup_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, bucket, pgno, metalsn, pagelsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ u_int32_t bucket;
+ db_pgno_t pgno;
+ DB_LSN * metalsn;
+ DB_LSN * pagelsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_metagroup;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(bucket)
+ + sizeof(pgno)
+ + sizeof(*metalsn)
+ + sizeof(*pagelsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &bucket, sizeof(bucket));
+ bp += sizeof(bucket);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ if (metalsn != NULL)
+ memcpy(bp, metalsn, sizeof(*metalsn));
+ else
+ memset(bp, 0, sizeof(*metalsn));
+ bp += sizeof(*metalsn);
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__ham_metagroup_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_metagroup_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_metagroup_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_metagroup: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tbucket: %lu\n", (u_long)argp->bucket);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tmetalsn: [%lu][%lu]\n",
+ (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset);
+ printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_metagroup_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_metagroup_args **argpp;
+{
+ __ham_metagroup_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_metagroup_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->bucket, bp, sizeof(argp->bucket));
+ bp += sizeof(argp->bucket);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->metalsn, bp, sizeof(argp->metalsn));
+ bp += sizeof(argp->metalsn);
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_groupalloc1_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_groupalloc1_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_groupalloc1_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_groupalloc1: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tmetalsn: [%lu][%lu]\n",
+ (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset);
+ printf("\tmmetalsn: [%lu][%lu]\n",
+ (u_long)argp->mmetalsn.file, (u_long)argp->mmetalsn.offset);
+ printf("\tstart_pgno: %lu\n", (u_long)argp->start_pgno);
+ printf("\tnum: %lu\n", (u_long)argp->num);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_groupalloc1_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_groupalloc1_args **argpp;
+{
+ __ham_groupalloc1_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_groupalloc1_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->metalsn, bp, sizeof(argp->metalsn));
+ bp += sizeof(argp->metalsn);
+ memcpy(&argp->mmetalsn, bp, sizeof(argp->mmetalsn));
+ bp += sizeof(argp->mmetalsn);
+ memcpy(&argp->start_pgno, bp, sizeof(argp->start_pgno));
+ bp += sizeof(argp->start_pgno);
+ memcpy(&argp->num, bp, sizeof(argp->num));
+ bp += sizeof(argp->num);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_groupalloc2_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_groupalloc2_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_groupalloc2_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_groupalloc2: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ printf("\talloc_lsn: [%lu][%lu]\n",
+ (u_long)argp->alloc_lsn.file, (u_long)argp->alloc_lsn.offset);
+ printf("\tstart_pgno: %lu\n", (u_long)argp->start_pgno);
+ printf("\tnum: %lu\n", (u_long)argp->num);
+ printf("\tfree: %lu\n", (u_long)argp->free);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_groupalloc2_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_groupalloc2_args **argpp;
+{
+ __ham_groupalloc2_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_groupalloc2_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+ memcpy(&argp->alloc_lsn, bp, sizeof(argp->alloc_lsn));
+ bp += sizeof(argp->alloc_lsn);
+ memcpy(&argp->start_pgno, bp, sizeof(argp->start_pgno));
+ bp += sizeof(argp->start_pgno);
+ memcpy(&argp->num, bp, sizeof(argp->num));
+ bp += sizeof(argp->num);
+ memcpy(&argp->free, bp, sizeof(argp->free));
+ bp += sizeof(argp->free);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_groupalloc_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, meta_lsn, start_pgno, num, free)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ DB_LSN * meta_lsn;
+ db_pgno_t start_pgno;
+ u_int32_t num;
+ db_pgno_t free;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_groupalloc;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(*meta_lsn)
+ + sizeof(start_pgno)
+ + sizeof(num)
+ + sizeof(free);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (meta_lsn != NULL)
+ memcpy(bp, meta_lsn, sizeof(*meta_lsn));
+ else
+ memset(bp, 0, sizeof(*meta_lsn));
+ bp += sizeof(*meta_lsn);
+ memcpy(bp, &start_pgno, sizeof(start_pgno));
+ bp += sizeof(start_pgno);
+ memcpy(bp, &num, sizeof(num));
+ bp += sizeof(num);
+ memcpy(bp, &free, sizeof(free));
+ bp += sizeof(free);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__ham_groupalloc_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_groupalloc_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_groupalloc_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_groupalloc: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ printf("\tstart_pgno: %lu\n", (u_long)argp->start_pgno);
+ printf("\tnum: %lu\n", (u_long)argp->num);
+ printf("\tfree: %lu\n", (u_long)argp->free);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_groupalloc_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_groupalloc_args **argpp;
+{
+ __ham_groupalloc_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_groupalloc_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+ memcpy(&argp->start_pgno, bp, sizeof(argp->start_pgno));
+ bp += sizeof(argp->start_pgno);
+ memcpy(&argp->num, bp, sizeof(argp->num));
+ bp += sizeof(argp->num);
+ memcpy(&argp->free, bp, sizeof(argp->free));
+ bp += sizeof(argp->free);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_curadj_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, pgno, indx, len, dup_off, add,
+ is_dup, order)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ u_int32_t len;
+ u_int32_t dup_off;
+ int add;
+ int is_dup;
+ u_int32_t order;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_curadj;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(pgno)
+ + sizeof(indx)
+ + sizeof(len)
+ + sizeof(dup_off)
+ + sizeof(add)
+ + sizeof(is_dup)
+ + sizeof(order);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &len, sizeof(len));
+ bp += sizeof(len);
+ memcpy(bp, &dup_off, sizeof(dup_off));
+ bp += sizeof(dup_off);
+ memcpy(bp, &add, sizeof(add));
+ bp += sizeof(add);
+ memcpy(bp, &is_dup, sizeof(is_dup));
+ bp += sizeof(is_dup);
+ memcpy(bp, &order, sizeof(order));
+ bp += sizeof(order);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__ham_curadj_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_curadj_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_curadj_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_curadj: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\tlen: %lu\n", (u_long)argp->len);
+ printf("\tdup_off: %lu\n", (u_long)argp->dup_off);
+ printf("\tadd: %ld\n", (long)argp->add);
+ printf("\tis_dup: %ld\n", (long)argp->is_dup);
+ printf("\torder: %lu\n", (u_long)argp->order);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_curadj_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_curadj_args **argpp;
+{
+ __ham_curadj_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_curadj_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->len, bp, sizeof(argp->len));
+ bp += sizeof(argp->len);
+ memcpy(&argp->dup_off, bp, sizeof(argp->dup_off));
+ bp += sizeof(argp->dup_off);
+ memcpy(&argp->add, bp, sizeof(argp->add));
+ bp += sizeof(argp->add);
+ memcpy(&argp->is_dup, bp, sizeof(argp->is_dup));
+ bp += sizeof(argp->is_dup);
+ memcpy(&argp->order, bp, sizeof(argp->order));
+ bp += sizeof(argp->order);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_chgpg_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, mode, old_pgno, new_pgno, old_indx, new_indx)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_ham_mode mode;
+ db_pgno_t old_pgno;
+ db_pgno_t new_pgno;
+ u_int32_t old_indx;
+ u_int32_t new_indx;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ham_chgpg;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(mode)
+ + sizeof(old_pgno)
+ + sizeof(new_pgno)
+ + sizeof(old_indx)
+ + sizeof(new_indx);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &mode, sizeof(mode));
+ bp += sizeof(mode);
+ memcpy(bp, &old_pgno, sizeof(old_pgno));
+ bp += sizeof(old_pgno);
+ memcpy(bp, &new_pgno, sizeof(new_pgno));
+ bp += sizeof(new_pgno);
+ memcpy(bp, &old_indx, sizeof(old_indx));
+ bp += sizeof(old_indx);
+ memcpy(bp, &new_indx, sizeof(new_indx));
+ bp += sizeof(new_indx);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__ham_chgpg_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_chgpg_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_chgpg_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]ham_chgpg: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tmode: %ld\n", (long)argp->mode);
+ printf("\told_pgno: %lu\n", (u_long)argp->old_pgno);
+ printf("\tnew_pgno: %lu\n", (u_long)argp->new_pgno);
+ printf("\told_indx: %lu\n", (u_long)argp->old_indx);
+ printf("\tnew_indx: %lu\n", (u_long)argp->new_indx);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__ham_chgpg_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_chgpg_args **argpp;
+{
+ __ham_chgpg_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__ham_chgpg_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->mode, bp, sizeof(argp->mode));
+ bp += sizeof(argp->mode);
+ memcpy(&argp->old_pgno, bp, sizeof(argp->old_pgno));
+ bp += sizeof(argp->old_pgno);
+ memcpy(&argp->new_pgno, bp, sizeof(argp->new_pgno));
+ bp += sizeof(argp->new_pgno);
+ memcpy(&argp->old_indx, bp, sizeof(argp->old_indx));
+ bp += sizeof(argp->old_indx);
+ memcpy(&argp->new_indx, bp, sizeof(argp->new_indx));
+ bp += sizeof(argp->new_indx);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__ham_init_print(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_insdel_print, DB_ham_insdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_newpage_print, DB_ham_newpage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_splitmeta_print, DB_ham_splitmeta)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_splitdata_print, DB_ham_splitdata)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_replace_print, DB_ham_replace)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_newpgno_print, DB_ham_newpgno)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_ovfl_print, DB_ham_ovfl)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_copypage_print, DB_ham_copypage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_metagroup_print, DB_ham_metagroup)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_groupalloc1_print, DB_ham_groupalloc1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_groupalloc2_print, DB_ham_groupalloc2)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_groupalloc_print, DB_ham_groupalloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_curadj_print, DB_ham_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_chgpg_print, DB_ham_chgpg)) != 0)
+ return (ret);
+ return (0);
+}
+
+int
+__ham_init_recover(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_insdel_recover, DB_ham_insdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_newpage_recover, DB_ham_newpage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_ham_splitmeta)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_splitdata_recover, DB_ham_splitdata)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_replace_recover, DB_ham_replace)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_ham_newpgno)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_ham_ovfl)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_copypage_recover, DB_ham_copypage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_metagroup_recover, DB_ham_metagroup)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_ham_groupalloc1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_ham_groupalloc2)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_groupalloc_recover, DB_ham_groupalloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_curadj_recover, DB_ham_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __ham_chgpg_recover, DB_ham_chgpg)) != 0)
+ return (ret);
+ return (0);
+}
+
diff --git a/bdb/hash/hash_conv.c b/bdb/hash/hash_conv.c
new file mode 100644
index 00000000000..30d17a6164d
--- /dev/null
+++ b/bdb/hash/hash_conv.c
@@ -0,0 +1,112 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_conv.c,v 11.5 2000/03/31 00:30:32 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "hash.h"
+
+/*
+ * __ham_pgin --
+ * Convert host-specific page layout from the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __ham_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__ham_pgin(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ h = pp;
+ pginfo = (DB_PGINFO *)cookie->data;
+
+ /*
+ * The hash access method does blind reads of pages, causing them
+	 * to be created. If the type field isn't set, this is one of those
+	 * pages; initialize the rest of the page and return.
+ */
+ if (h->type != P_HASHMETA && h->pgno == PGNO_INVALID) {
+ P_INIT(pp, pginfo->db_pagesize,
+ pg, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ return (0);
+ }
+
+ if (!pginfo->needswap)
+ return (0);
+
+ return (h->type == P_HASHMETA ? __ham_mswap(pp) :
+ __db_byteswap(dbenv, pg, pp, pginfo->db_pagesize, 1));
+}
+
+/*
+ * __ham_pgout --
+ * Convert host-specific page layout to the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __ham_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__ham_pgout(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!pginfo->needswap)
+ return (0);
+
+ h = pp;
+ return (h->type == P_HASHMETA ? __ham_mswap(pp) :
+ __db_byteswap(dbenv, pg, pp, pginfo->db_pagesize, 0));
+}
+
+/*
+ * __ham_mswap --
+ * Swap the bytes on the hash metadata page.
+ *
+ * PUBLIC: int __ham_mswap __P((void *));
+ */
+int
+__ham_mswap(pg)
+ void *pg;
+{
+ u_int8_t *p;
+ int i;
+
+ __db_metaswap(pg);
+
+ p = (u_int8_t *)pg + sizeof(DBMETA);
+
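+	/* The hash-specific meta-data fields follow the generic DBMETA header. */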
+ SWAP32(p); /* max_bucket */
+ SWAP32(p); /* high_mask */
+ SWAP32(p); /* low_mask */
+ SWAP32(p); /* ffactor */
+ SWAP32(p); /* nelem */
+ SWAP32(p); /* h_charkey */
+ for (i = 0; i < NCACHED; ++i)
+ SWAP32(p); /* spares */
+ return (0);
+}
diff --git a/bdb/hash/hash_dup.c b/bdb/hash/hash_dup.c
new file mode 100644
index 00000000000..f5fbf4f472f
--- /dev/null
+++ b/bdb/hash/hash_dup.c
@@ -0,0 +1,805 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_dup.c,v 11.49 2000/12/21 21:54:35 margo Exp $";
+#endif /* not lint */
+
+/*
+ * PACKAGE: hashing
+ *
+ * DESCRIPTION:
+ * Manipulation of duplicates for the hash package.
+ *
+ * ROUTINES:
+ *
+ * External
+ * __add_dup
+ * Internal
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "hash.h"
+#include "btree.h"
+#include "txn.h"
+
+static int __ham_check_move __P((DBC *, u_int32_t));
+static int __ham_dcursor __P((DBC *, db_pgno_t, u_int32_t));
+
+/*
+ * Called from hash_access to add a duplicate key. nval is the new
+ * value that we want to add. The flags correspond to the flag values
+ * to cursor_put indicating where to add the new element.
+ * There are 4 cases.
+ * Case 1: The existing duplicate set already resides on a separate page.
+ * We return and let the common code handle this.
+ * Case 2: The element is small enough to just be added to the existing set.
+ * Case 3: The element is large enough to be a big item, so we're going to
+ * have to push the set onto a new page.
+ * Case 4: The element is large enough to push the duplicate set onto a
+ * separate page.
+ *
+ * PUBLIC: int __ham_add_dup __P((DBC *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ham_add_dup(dbc, nval, flags, pgnop)
+ DBC *dbc;
+ DBT *nval;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT pval, tmp_val;
+ u_int32_t add_bytes, new_size;
+ int cmp, ret;
+ u_int8_t *hk;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ DB_ASSERT(flags != DB_CURRENT);
+
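+	/*
+	 * Compute the on-page size of the new element: DUP_SIZE adds the
+	 * length fields that bracket each on-page duplicate element, and
+	 * a partial put may also require initial padding out to doff.
+	 */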
+ add_bytes = nval->size +
+ (F_ISSET(nval, DB_DBT_PARTIAL) ? nval->doff : 0);
+ add_bytes = DUP_SIZE(add_bytes);
+
+ if ((ret = __ham_check_move(dbc, add_bytes)) != 0)
+ return (ret);
+
+ /*
+ * Check if resulting duplicate set is going to need to go
+ * onto a separate duplicate page. If so, convert the
+ * duplicate set and add the new one. After conversion,
+ * hcp->dndx is the first free ndx or the index of the
+ * current pointer into the duplicate set.
+ */
+ hk = H_PAIRDATA(hcp->page, hcp->indx);
+ /* Add the len bytes to the current singleton. */
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE)
+ add_bytes += DUP_SIZE(0);
+ new_size =
+ LEN_HKEYDATA(hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx)) +
+ add_bytes;
+
+ /*
+ * We convert to off-page duplicates if the item is a big item,
+	 * the addition of the new item will push the set over the
+	 * big-item threshold, or if there isn't enough room on this
+	 * page to add the next item.
+ */
+ if (HPAGE_PTYPE(hk) != H_OFFDUP &&
+ (HPAGE_PTYPE(hk) == H_OFFPAGE || ISBIG(hcp, new_size) ||
+ add_bytes > P_FREESPACE(hcp->page))) {
+
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ /* There are two separate cases here: on page and off page. */
+ if (HPAGE_PTYPE(hk) != H_OFFDUP) {
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE) {
+ pval.flags = 0;
+ pval.data = HKEYDATA_DATA(hk);
+ pval.size = LEN_HDATA(hcp->page, dbp->pgsize,
+ hcp->indx);
+ if ((ret = __ham_make_dup(dbp->dbenv,
+ &pval, &tmp_val, &dbc->rdata.data,
+ &dbc->rdata.ulen)) != 0 || (ret =
+ __ham_replpair(dbc, &tmp_val, 1)) != 0)
+ return (ret);
+ hk = H_PAIRDATA(hcp->page, hcp->indx);
+ HPAGE_PTYPE(hk) = H_DUPLICATE;
+
+ /*
+ * Update the cursor position since we now are in
+ * duplicates.
+ */
+ F_SET(hcp, H_ISDUP);
+ hcp->dup_off = 0;
+ hcp->dup_len = pval.size;
+ hcp->dup_tlen = DUP_SIZE(hcp->dup_len);
+ }
+
+ /* Now make the new entry a duplicate. */
+ if ((ret = __ham_make_dup(dbp->dbenv, nval,
+ &tmp_val, &dbc->rdata.data, &dbc->rdata.ulen)) != 0)
+ return (ret);
+
+ tmp_val.dlen = 0;
+ switch (flags) { /* On page. */
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ case DB_NODUPDATA:
+ if (dbp->dup_compare != NULL) {
+ __ham_dsearch(dbc, nval, &tmp_val.doff, &cmp);
+
+ /* dup dups are not supported w/ sorted dups */
+ if (cmp == 0)
+ return (__db_duperr(dbp, flags));
+ } else {
+ hcp->dup_tlen = LEN_HDATA(hcp->page,
+ dbp->pgsize, hcp->indx);
+ hcp->dup_len = nval->size;
+ F_SET(hcp, H_ISDUP);
+ if (flags == DB_KEYFIRST)
+ hcp->dup_off = tmp_val.doff = 0;
+ else
+ hcp->dup_off =
+ tmp_val.doff = hcp->dup_tlen;
+ }
+ break;
+ case DB_BEFORE:
+ tmp_val.doff = hcp->dup_off;
+ break;
+ case DB_AFTER:
+ tmp_val.doff = hcp->dup_off + DUP_SIZE(hcp->dup_len);
+ break;
+ }
+ /* Add the duplicate. */
+ ret = __ham_replpair(dbc, &tmp_val, 0);
+ if (ret == 0)
+ ret = memp_fset(dbp->mpf, hcp->page, DB_MPOOL_DIRTY);
+
+ if (ret != 0)
+ return (ret);
+
+ /* Now, update the cursor if necessary. */
+ switch (flags) {
+ case DB_AFTER:
+ hcp->dup_off += DUP_SIZE(hcp->dup_len);
+ hcp->dup_len = nval->size;
+ hcp->dup_tlen += DUP_SIZE(nval->size);
+ break;
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ case DB_BEFORE:
+ hcp->dup_tlen += DUP_SIZE(nval->size);
+ hcp->dup_len = nval->size;
+ break;
+ }
+ ret = __ham_c_update(dbc, tmp_val.size, 1, 1);
+ return (ret);
+ }
+
+ /*
+ * If we get here, then we're on duplicate pages; set pgnop and
+ * return so the common code can handle it.
+ */
+ memcpy(pgnop,
+ HOFFDUP_PGNO(H_PAIRDATA(hcp->page, hcp->indx)), sizeof(db_pgno_t));
+
+ return (ret);
+}
+
+/*
+ * Convert an on-page set of duplicates to an offpage set of duplicates.
+ *
+ * PUBLIC: int __ham_dup_convert __P((DBC *));
+ */
+int
+__ham_dup_convert(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBC **hcs;
+ DB_LSN lsn;
+ PAGE *dp;
+ HASH_CURSOR *hcp;
+ BOVERFLOW bo;
+ DBT dbt;
+ HOFFPAGE ho;
+ db_indx_t i, len, off;
+ int c, ret, t_ret;
+ u_int8_t *p, *pend;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * Create a new page for the duplicates.
+ */
+ if ((ret = __db_new(dbc,
+ dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0)
+ return (ret);
+ P_INIT(dp, dbp->pgsize,
+ dp->pgno, PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, TYPE(dp));
+
+ /*
+ * Get the list of cursors that may need to be updated.
+ */
+ if ((ret = __ham_get_clist(dbp,
+ PGNO(hcp->page), (u_int32_t)hcp->indx, &hcs)) != 0)
+ return (ret);
+
+ /*
+ * Now put the duplicates onto the new page.
+ */
+ dbt.flags = 0;
+ switch (HPAGE_PTYPE(H_PAIRDATA(hcp->page, hcp->indx))) {
+ case H_KEYDATA:
+ /* Simple case, one key on page; move it to dup page. */
+ dbt.size = LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+ dbt.data = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ ret = __db_pitem(dbc,
+ dp, 0, BKEYDATA_SIZE(dbt.size), NULL, &dbt);
+ goto finish;
+ case H_OFFPAGE:
+ /* Simple case, one key on page; move it to dup page. */
+ memcpy(&ho,
+ P_ENTRY(hcp->page, H_DATAINDEX(hcp->indx)), HOFFPAGE_SIZE);
+ UMRW_SET(bo.unused1);
+ B_TSET(bo.type, ho.type, 0);
+ UMRW_SET(bo.unused2);
+ bo.pgno = ho.pgno;
+ bo.tlen = ho.tlen;
+ dbt.size = BOVERFLOW_SIZE;
+ dbt.data = &bo;
+
+ ret = __db_pitem(dbc, dp, 0, dbt.size, &dbt, NULL);
+
+finish: if (ret == 0) {
+ memp_fset(dbp->mpf, dp, DB_MPOOL_DIRTY);
+ /*
+ * Update any other cursors
+ */
+ if (hcs != NULL && DB_LOGGING(dbc)
+ && IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret = __ham_chgpg_log(dbp->dbenv,
+ dbc->txn, &lsn, 0, dbp->log_fileid,
+ DB_HAM_DUP, PGNO(hcp->page),
+ PGNO(dp), hcp->indx, 0)) != 0)
+ break;
+ }
+ for (c = 0; hcs != NULL && hcs[c] != NULL; c++)
+ if ((ret = __ham_dcursor(hcs[c],
+ PGNO(dp), 0)) != 0)
+ break;
+
+ }
+ break;
+
+ case H_DUPLICATE:
+ p = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ pend = p +
+ LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+
+ /*
+ * We need to maintain the duplicate cursor position.
+ * Keep track of where we are in the duplicate set via
+ * the offset, and when it matches the one in the cursor,
+ * set the off-page duplicate cursor index to the current
+ * index.
+ */
+ for (off = 0, i = 0; p < pend; i++) {
+ memcpy(&len, p, sizeof(db_indx_t));
+ dbt.size = len;
+ p += sizeof(db_indx_t);
+ dbt.data = p;
+ p += len + sizeof(db_indx_t);
+ if ((ret = __db_pitem(dbc, dp,
+ i, BKEYDATA_SIZE(dbt.size), NULL, &dbt)) != 0)
+ break;
+ /*
+ * Update any other cursors
+ */
+ for (c = 0; hcs != NULL && hcs[c] != NULL; c++)
+ if (((HASH_CURSOR *)(hcs[c]->internal))->dup_off
+ == off && (ret = __ham_dcursor(hcs[c],
+ PGNO(dp), i)) != 0)
+ goto out;
+ off += len + 2 * sizeof(db_indx_t);
+ }
+out: break;
+
+ default:
+ ret = __db_pgfmt(dbp, (u_long)hcp->pgno);
+ break;
+ }
+ if (ret == 0) {
+ /*
+ * Now attach this to the source page in place of
+ * the old duplicate item.
+ */
+ __ham_move_offpage(dbc, hcp->page,
+ (u_int32_t)H_DATAINDEX(hcp->indx), PGNO(dp));
+
+ ret = memp_fset(dbp->mpf, hcp->page, DB_MPOOL_DIRTY);
+ if ((t_ret = memp_fput(dbp->mpf, dp, DB_MPOOL_DIRTY)) != 0)
+ ret = t_ret;
+ hcp->dup_tlen = hcp->dup_off = hcp->dup_len = 0;
+ } else
+ (void)__db_free(dbc, dp);
+
+ if (hcs != NULL)
+ __os_free(hcs, 0);
+
+ return (ret);
+}
+
+/*
+ * __ham_make_dup
+ *
+ * Take a regular dbt and make it into a duplicate item with all the partial
+ * information set appropriately. If the incoming dbt is a partial, assume
+ * we are creating a new entry and make sure that we do any initial padding.
+ *
+ * PUBLIC: int __ham_make_dup __P((DB_ENV *,
+ * PUBLIC: const DBT *, DBT *d, void **, u_int32_t *));
+ */
+int
+__ham_make_dup(dbenv, notdup, duplicate, bufp, sizep)
+ DB_ENV *dbenv;
+ const DBT *notdup;
+ DBT *duplicate;
+ void **bufp;
+ u_int32_t *sizep;
+{
+ db_indx_t tsize, item_size;
+ int ret;
+ u_int8_t *p;
+
+ item_size = (db_indx_t)notdup->size;
+ if (F_ISSET(notdup, DB_DBT_PARTIAL))
+ item_size += notdup->doff;
+
+ tsize = DUP_SIZE(item_size);
+ if ((ret = __ham_init_dbt(dbenv, duplicate, tsize, bufp, sizep)) != 0)
+ return (ret);
+
+ duplicate->dlen = 0;
+ duplicate->flags = notdup->flags;
+ F_SET(duplicate, DB_DBT_PARTIAL);
+
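+	/*
+	 * On-page duplicate elements are laid out as
+	 *	[length][optional doff padding][data][length]
+	 * (e.g., a 3-byte element is written as len, the 3 data bytes,
+	 * len again), so the set can be walked in either direction.
+	 */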
+ p = duplicate->data;
+ memcpy(p, &item_size, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ if (F_ISSET(notdup, DB_DBT_PARTIAL)) {
+ memset(p, 0, notdup->doff);
+ p += notdup->doff;
+ }
+ memcpy(p, notdup->data, notdup->size);
+ p += notdup->size;
+ memcpy(p, &item_size, sizeof(db_indx_t));
+
+ duplicate->doff = 0;
+ duplicate->dlen = notdup->size;
+
+ return (0);
+}
+
+/*
+ * __ham_check_move --
+ *
+ * Check if we can do whatever we need to on this page. If not,
+ * then we'll have to move the current element to a new page.
+ */
+static int
+__ham_check_move(dbc, add_len)
+ DBC *dbc;
+ u_int32_t add_len;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT k, d;
+ DB_LSN new_lsn;
+ PAGE *next_pagep;
+ db_pgno_t next_pgno;
+ u_int32_t new_datalen, old_len, rectype;
+ u_int8_t *hk;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ hk = H_PAIRDATA(hcp->page, hcp->indx);
+
+ /*
+	 * If the item is already an off-page duplicate set or an off-page
+	 * item, then we know we can do whatever we need to do in place.
+ */
+ if (HPAGE_PTYPE(hk) == H_OFFDUP || HPAGE_PTYPE(hk) == H_OFFPAGE)
+ return (0);
+
+ old_len = LEN_HITEM(hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx));
+ new_datalen = old_len - HKEYDATA_SIZE(0) + add_len;
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE)
+ new_datalen += DUP_SIZE(0);
+
+ /*
+ * We need to add a new page under two conditions:
+ * 1. The addition makes the total data length cross the BIG
+ * threshold and the OFFDUP structure won't fit on this page.
+ * 2. The addition does not make the total data cross the
+ * threshold, but the new data won't fit on the page.
+ * If neither of these is true, then we can return.
+ */
+ if (ISBIG(hcp, new_datalen) && (old_len > HOFFDUP_SIZE ||
+ HOFFDUP_SIZE - old_len <= P_FREESPACE(hcp->page)))
+ return (0);
+
+ if (!ISBIG(hcp, new_datalen) && add_len <= P_FREESPACE(hcp->page))
+ return (0);
+
+ /*
+ * If we get here, then we need to move the item to a new page.
+ * Check if there are more pages in the chain. We now need to
+ * update new_datalen to include the size of both the key and
+ * the data that we need to move.
+ */
+
+ new_datalen = ISBIG(hcp, new_datalen) ?
+ HOFFDUP_SIZE : HKEYDATA_SIZE(new_datalen);
+ new_datalen += LEN_HITEM(hcp->page, dbp->pgsize, H_KEYINDEX(hcp->indx));
+
+ next_pagep = NULL;
+ for (next_pgno = NEXT_PGNO(hcp->page); next_pgno != PGNO_INVALID;
+ next_pgno = NEXT_PGNO(next_pagep)) {
+ if (next_pagep != NULL &&
+ (ret = memp_fput(dbp->mpf, next_pagep, 0)) != 0)
+ return (ret);
+
+ if ((ret = memp_fget(dbp->mpf,
+ &next_pgno, DB_MPOOL_CREATE, &next_pagep)) != 0)
+ return (ret);
+
+ if (P_FREESPACE(next_pagep) >= new_datalen)
+ break;
+ }
+
+ /* No more pages, add one. */
+ if (next_pagep == NULL && (ret = __ham_add_ovflpage(dbc,
+ hcp->page, 0, &next_pagep)) != 0)
+ return (ret);
+
+ /* Add new page at the end of the chain. */
+ if (P_FREESPACE(next_pagep) < new_datalen && (ret =
+ __ham_add_ovflpage(dbc, next_pagep, 1, &next_pagep)) != 0) {
+ (void)memp_fput(dbp->mpf, next_pagep, 0);
+ return (ret);
+ }
+
+ /* Copy the item to the new page. */
+ if (DB_LOGGING(dbc)) {
+ rectype = PUTPAIR;
+ k.flags = 0;
+ d.flags = 0;
+ if (HPAGE_PTYPE(
+ H_PAIRKEY(hcp->page, hcp->indx)) == H_OFFPAGE) {
+ rectype |= PAIR_KEYMASK;
+ k.data = H_PAIRKEY(hcp->page, hcp->indx);
+ k.size = HOFFPAGE_SIZE;
+ } else {
+ k.data =
+ HKEYDATA_DATA(H_PAIRKEY(hcp->page, hcp->indx));
+ k.size = LEN_HKEY(hcp->page, dbp->pgsize, hcp->indx);
+ }
+
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ rectype |= PAIR_DATAMASK;
+ d.data = H_PAIRDATA(hcp->page, hcp->indx);
+ d.size = HOFFPAGE_SIZE;
+ } else {
+ if (HPAGE_PTYPE(H_PAIRDATA(hcp->page, hcp->indx))
+ == H_DUPLICATE)
+ rectype |= PAIR_DUPMASK;
+ d.data =
+ HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ d.size = LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+ }
+
+ if ((ret = __ham_insdel_log(dbp->dbenv,
+ dbc->txn, &new_lsn, 0, rectype,
+ dbp->log_fileid, PGNO(next_pagep),
+ (u_int32_t)NUM_ENT(next_pagep), &LSN(next_pagep),
+ &k, &d)) != 0) {
+ (void)memp_fput(dbp->mpf, next_pagep, 0);
+ return (ret);
+ }
+
+ /* Move lsn onto page. */
+ LSN(next_pagep) = new_lsn; /* Structure assignment. */
+ }
+
+ __ham_copy_item(dbp->pgsize,
+ hcp->page, H_KEYINDEX(hcp->indx), next_pagep);
+ __ham_copy_item(dbp->pgsize,
+ hcp->page, H_DATAINDEX(hcp->indx), next_pagep);
+
+ /*
+ * We've just manually inserted a key and set of data onto
+ * next_pagep; however, it's possible that our caller will
+ * return without further modifying the new page, for instance
+ * if DB_NODUPDATA is set and our new item is a duplicate duplicate.
+ * Thus, to be on the safe side, we need to mark the page dirty
+ * here. [#2996]
+ *
+ * Note that __ham_del_pair should dirty the page we're moving
+ * the items from, so we need only dirty the new page ourselves.
+ */
+ if ((ret = memp_fset(dbp->mpf, next_pagep, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+
+ /* Update all cursors that used to point to this item. */
+ if ((ret = __ham_c_chgpg(dbc, PGNO(hcp->page), H_KEYINDEX(hcp->indx),
+ PGNO(next_pagep), NUM_ENT(next_pagep) - 2)) != 0)
+ goto out;
+
+ /* Now delete the pair from the current page. */
+ ret = __ham_del_pair(dbc, 0);
+
+ /*
+ * __ham_del_pair decremented nelem. This is incorrect; we
+ * manually copied the element elsewhere, so the total number
+ * of elements hasn't changed. Increment it again.
+ */
+ if (!STD_LOCKING(dbc))
+ hcp->hdr->nelem++;
+
+out:
+ (void)memp_fput(dbp->mpf, hcp->page, DB_MPOOL_DIRTY);
+ hcp->page = next_pagep;
+ hcp->pgno = PGNO(hcp->page);
+ hcp->indx = NUM_ENT(hcp->page) - 2;
+ F_SET(hcp, H_EXPAND);
+ F_CLR(hcp, H_DELETED);
+
+ return (ret);
+}
+
+/*
+ * __ham_move_offpage --
+ * Replace an onpage set of duplicates with the OFFDUP structure
+ * that references the duplicate page.
+ *
+ * XXX
+ * This is really just a special case of __onpage_replace; we should
+ * probably combine them.
+ *
+ * PUBLIC: void __ham_move_offpage __P((DBC *, PAGE *, u_int32_t, db_pgno_t));
+ */
+void
+__ham_move_offpage(dbc, pagep, ndx, pgno)
+ DBC *dbc;
+ PAGE *pagep;
+ u_int32_t ndx;
+ db_pgno_t pgno;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT new_dbt;
+ DBT old_dbt;
+ HOFFDUP od;
+ db_indx_t i;
+ int32_t shrink;
+ u_int8_t *src;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ od.type = H_OFFDUP;
+ UMRW_SET(od.unused[0]);
+ UMRW_SET(od.unused[1]);
+ UMRW_SET(od.unused[2]);
+ od.pgno = pgno;
+
+ if (DB_LOGGING(dbc)) {
+ new_dbt.data = &od;
+ new_dbt.size = HOFFDUP_SIZE;
+ old_dbt.data = P_ENTRY(pagep, ndx);
+ old_dbt.size = LEN_HITEM(pagep, dbp->pgsize, ndx);
+ (void)__ham_replace_log(dbp->dbenv,
+ dbc->txn, &LSN(pagep), 0, dbp->log_fileid,
+ PGNO(pagep), (u_int32_t)ndx, &LSN(pagep), -1,
+ &old_dbt, &new_dbt, 0);
+ }
+
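+	/*
+	 * shrink is the number of bytes freed by replacing the existing
+	 * item with the fixed-size HOFFDUP entry.
+	 */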
+ shrink = LEN_HITEM(pagep, dbp->pgsize, ndx) - HOFFDUP_SIZE;
+
+ if (shrink != 0) {
+ /* Copy data. */
+ src = (u_int8_t *)(pagep) + HOFFSET(pagep);
+ memmove(src + shrink, src, pagep->inp[ndx] - HOFFSET(pagep));
+ HOFFSET(pagep) += shrink;
+
+ /* Update index table. */
+ for (i = ndx; i < NUM_ENT(pagep); i++)
+ pagep->inp[i] += shrink;
+ }
+
+ /* Now copy the offdup entry onto the page. */
+ memcpy(P_ENTRY(pagep, ndx), &od, HOFFDUP_SIZE);
+}
+
+/*
+ * __ham_dsearch:
+ * Locate a particular duplicate in a duplicate set. Make sure that
+ * we exit with the cursor set appropriately.
+ *
+ * PUBLIC: void __ham_dsearch __P((DBC *, DBT *, u_int32_t *, int *));
+ */
+void
+__ham_dsearch(dbc, dbt, offp, cmpp)
+ DBC *dbc;
+ DBT *dbt;
+ u_int32_t *offp;
+ int *cmpp;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT cur;
+ db_indx_t i, len;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ u_int8_t *data;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if (dbp->dup_compare == NULL)
+ func = __bam_defcmp;
+ else
+ func = dbp->dup_compare;
+
+ i = F_ISSET(hcp, H_CONTINUE) ? hcp->dup_off: 0;
+ data = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx)) + i;
+ hcp->dup_tlen = LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+ while (i < hcp->dup_tlen) {
+ memcpy(&len, data, sizeof(db_indx_t));
+ data += sizeof(db_indx_t);
+ cur.data = data;
+ cur.size = (u_int32_t)len;
+ *cmpp = func(dbp, dbt, &cur);
+ if (*cmpp == 0 || (*cmpp < 0 && dbp->dup_compare != NULL))
+ break;
+ i += len + 2 * sizeof(db_indx_t);
+ data += len + sizeof(db_indx_t);
+ }
+ *offp = i;
+ hcp->dup_off = i;
+ hcp->dup_len = len;
+ F_SET(hcp, H_ISDUP);
+}
+
+#ifdef DEBUG
+/*
+ * __ham_cprint --
+ * Display the current cursor list.
+ *
+ * PUBLIC: int __ham_cprint __P((DB *));
+ */
+int
+__ham_cprint(dbp)
+ DB *dbp;
+{
+ HASH_CURSOR *cp;
+ DBC *dbc;
+
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&dbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (HASH_CURSOR *)dbc->internal;
+ fprintf(stderr, "%#0lx->%#0lx: page: %lu index: %lu",
+ P_TO_ULONG(dbc), P_TO_ULONG(cp), (u_long)cp->pgno,
+ (u_long)cp->indx);
+ if (F_ISSET(cp, H_DELETED))
+ fprintf(stderr, " (deleted)");
+ fprintf(stderr, "\n");
+ }
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ return (0);
+}
+#endif /* DEBUG */
+
+/*
+ * __ham_dcursor --
+ *
+ * Create an off page duplicate cursor for this cursor.
+ */
+static int
+__ham_dcursor(dbc, pgno, indx)
+ DBC *dbc;
+ db_pgno_t pgno;
+ u_int32_t indx;
+{
+ DB *dbp;
+ DBC *dbc_nopd;
+ HASH_CURSOR *hcp;
+ BTREE_CURSOR *dcp;
+ int ret;
+
+ dbp = dbc->dbp;
+
+ if ((ret = __db_c_newopd(dbc, pgno, &dbc_nopd)) != 0)
+ return (ret);
+
+ dcp = (BTREE_CURSOR *)dbc_nopd->internal;
+ dcp->pgno = pgno;
+ dcp->indx = indx;
+
+ if (dbp->dup_compare == NULL) {
+ /*
+ * Converting to off-page Recno trees is tricky. The
+ * record number for the cursor is the index + 1 (to
+ * convert to 1-based record numbers).
+ */
+ dcp->recno = indx + 1;
+ }
+
+ /*
+ * Transfer the deleted flag from the top-level cursor to the
+ * created one.
+ */
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if (F_ISSET(hcp, H_DELETED)) {
+ F_SET(dcp, C_DELETED);
+ F_CLR(hcp, H_DELETED);
+ }
+
+ /* Stack the cursors and reset the initial cursor's index. */
+ hcp->opd = dbc_nopd;
+
+ return (0);
+}
diff --git a/bdb/hash/hash_func.c b/bdb/hash/hash_func.c
new file mode 100644
index 00000000000..22b4f08ee70
--- /dev/null
+++ b/bdb/hash/hash_func.c
@@ -0,0 +1,242 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_func.c,v 11.7 2000/08/16 18:26:19 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "hash.h"
+
+/*
+ * __ham_func2 --
+ * Phong Vo's linear congruential hash.
+ *
+ * PUBLIC: u_int32_t __ham_func2 __P((DB *, const void *, u_int32_t));
+ */
+#define DCHARHASH(h, c) ((h) = 0x63c63cd9*(h) + 0x9c39c33d + (c))
+
+u_int32_t
+__ham_func2(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *e, *k;
+ u_int32_t h;
+ u_int8_t c;
+
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ k = key;
+ e = k + len;
+ for (h = 0; k != e;) {
+ c = *k++;
+ if (!c && k > e)
+ break;
+ DCHARHASH(h, c);
+ }
+ return (h);
+}
+
+/*
+ * __ham_func3 --
+ * Ozan Yigit's original sdbm hash.
+ *
+ * Ugly, but fast. Break the string up into 8 byte units. On the first time
+ * through the loop get the "leftover bytes" (strlen % 8). On every other
+ * iteration, perform 8 HASHC's so we handle all 8 bytes. Essentially, this
+ * saves us 7 cmp & branch instructions.
+ *
+ * PUBLIC: u_int32_t __ham_func3 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func3(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k;
+ u_int32_t n, loop;
+
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ if (len == 0)
+ return (0);
+
+#define HASHC n = *k++ + 65599 * n
+ n = 0;
+ k = key;
+
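+	/*
+	 * Duff's device: the switch jumps into the middle of the unrolled
+	 * loop to consume the len % 8 leftover bytes on the first pass.
+	 */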
+ loop = (len + 8 - 1) >> 3;
+ switch (len & (8 - 1)) {
+ case 0:
+ do {
+ HASHC;
+ case 7:
+ HASHC;
+ case 6:
+ HASHC;
+ case 5:
+ HASHC;
+ case 4:
+ HASHC;
+ case 3:
+ HASHC;
+ case 2:
+ HASHC;
+ case 1:
+ HASHC;
+ } while (--loop);
+ }
+ return (n);
+}
+
+/*
+ * __ham_func4 --
+ * Chris Torek's hash function. Although this function performs only
+ * slightly worse than __ham_func5 on strings, it performs horribly on
+ * numbers.
+ *
+ * PUBLIC: u_int32_t __ham_func4 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func4(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k;
+ u_int32_t h, loop;
+
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ if (len == 0)
+ return (0);
+
+#define HASH4a h = (h << 5) - h + *k++;
+#define HASH4b h = (h << 5) + h + *k++;
+#define HASH4 HASH4b
+ h = 0;
+ k = key;
+
+ loop = (len + 8 - 1) >> 3;
+ switch (len & (8 - 1)) {
+ case 0:
+ do {
+ HASH4;
+ case 7:
+ HASH4;
+ case 6:
+ HASH4;
+ case 5:
+ HASH4;
+ case 4:
+ HASH4;
+ case 3:
+ HASH4;
+ case 2:
+ HASH4;
+ case 1:
+ HASH4;
+ } while (--loop);
+ }
+ return (h);
+}
+
+/*
+ * Fowler/Noll/Vo hash
+ *
+ * The basis of the hash algorithm was taken from an idea sent by email to the
+ * IEEE Posix P1003.2 mailing list from Phong Vo (kpv@research.att.com) and
+ * Glenn Fowler (gsf@research.att.com). Landon Curt Noll (chongo@toad.com)
+ * later improved on their algorithm.
+ *
+ * The magic is in the interesting relationship between the special prime
+ * 16777619 (2^24 + 403) and 2^32 and 2^8.
+ *
+ * This hash produces the fewest collisions of any function that we've seen so
+ * far, and works well on both numbers and strings.
+ *
+ * PUBLIC: u_int32_t __ham_func5 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func5(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k, *e;
+ u_int32_t h;
+
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ k = key;
+ e = k + len;
+ for (h = 0; k < e; ++k) {
+ h *= 16777619;
+ h ^= *k;
+ }
+ return (h);
+}
+
+u_int32_t
+__ham_test(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(len, 0);
+ return ((u_int32_t)*(char *)key);
+}
diff --git a/bdb/hash/hash_meta.c b/bdb/hash/hash_meta.c
new file mode 100644
index 00000000000..d96a6db3207
--- /dev/null
+++ b/bdb/hash/hash_meta.c
@@ -0,0 +1,121 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_meta.c,v 11.10 2000/12/21 21:54:35 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "hash.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "txn.h"
+
+/*
+ * Acquire the meta-data page.
+ *
+ * PUBLIC: int __ham_get_meta __P((DBC *));
+ */
+int
+__ham_get_meta(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+ DB *dbp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+ hashp = dbp->h_internal;
+
+ if (dbp->dbenv != NULL &&
+ STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER)) {
+ dbc->lock.pgno = hashp->meta_pgno;
+ if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
+ &dbc->lock_dbt, DB_LOCK_READ, &hcp->hlock)) != 0)
+ return (ret);
+ }
+
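+	/* Get the meta-data page; if that fails, release any lock just acquired. */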
+ if ((ret = memp_fget(dbc->dbp->mpf,
+ &hashp->meta_pgno, DB_MPOOL_CREATE, &(hcp->hdr))) != 0 &&
+ hcp->hlock.off != LOCK_INVALID) {
+ (void)lock_put(dbc->dbp->dbenv, &hcp->hlock);
+ hcp->hlock.off = LOCK_INVALID;
+ }
+
+ return (ret);
+}
+
+/*
+ * Release the meta-data page.
+ *
+ * PUBLIC: int __ham_release_meta __P((DBC *));
+ */
+int
+__ham_release_meta(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (hcp->hdr)
+ (void)memp_fput(dbc->dbp->mpf, hcp->hdr,
+ F_ISSET(hcp, H_DIRTY) ? DB_MPOOL_DIRTY : 0);
+ hcp->hdr = NULL;
+ if (!F_ISSET(dbc, DBC_RECOVER) &&
+ dbc->txn == NULL && hcp->hlock.off != LOCK_INVALID)
+ (void)lock_put(dbc->dbp->dbenv, &hcp->hlock);
+ hcp->hlock.off = LOCK_INVALID;
+ F_CLR(hcp, H_DIRTY);
+
+ return (0);
+}
+
+/*
+ * Mark the meta-data page dirty.
+ *
+ * PUBLIC: int __ham_dirty_meta __P((DBC *));
+ */
+int
+__ham_dirty_meta(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_LOCK _tmp;
+ HASH *hashp;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ hashp = dbp->h_internal;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ ret = 0;
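+	/* Upgrade the read lock on the meta-data page to a write lock. */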
+ if (STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER)) {
+ dbc->lock.pgno = hashp->meta_pgno;
+ if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &_tmp)) == 0) {
+ ret = lock_put(dbp->dbenv, &hcp->hlock);
+ hcp->hlock = _tmp;
+ }
+ }
+
+ if (ret == 0)
+ F_SET(hcp, H_DIRTY);
+ return (ret);
+}
diff --git a/bdb/hash/hash_method.c b/bdb/hash/hash_method.c
new file mode 100644
index 00000000000..f8239993dc5
--- /dev/null
+++ b/bdb/hash/hash_method.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_method.c,v 11.7 2000/07/04 18:28:23 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "hash.h"
+
+static int __ham_set_h_ffactor __P((DB *, u_int32_t));
+static int __ham_set_h_hash
+ __P((DB *, u_int32_t(*)(DB *, const void *, u_int32_t)));
+static int __ham_set_h_nelem __P((DB *, u_int32_t));
+
+/*
+ * __ham_db_create --
+ * Hash specific initialization of the DB structure.
+ *
+ * PUBLIC: int __ham_db_create __P((DB *));
+ */
+int
+__ham_db_create(dbp)
+ DB *dbp;
+{
+ HASH *hashp;
+ int ret;
+
+ if ((ret = __os_malloc(dbp->dbenv,
+ sizeof(HASH), NULL, &dbp->h_internal)) != 0)
+ return (ret);
+
+ hashp = dbp->h_internal;
+
+ hashp->h_nelem = 0; /* Defaults. */
+ hashp->h_ffactor = 0;
+ hashp->h_hash = NULL;
+
+ dbp->set_h_ffactor = __ham_set_h_ffactor;
+ dbp->set_h_hash = __ham_set_h_hash;
+ dbp->set_h_nelem = __ham_set_h_nelem;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_db_close __P((DB *));
+ */
+int
+__ham_db_close(dbp)
+ DB *dbp;
+{
+ if (dbp->h_internal == NULL)
+ return (0);
+ __os_free(dbp->h_internal, sizeof(HASH));
+ dbp->h_internal = NULL;
+ return (0);
+}
+
+/*
+ * __ham_set_h_ffactor --
+ * Set the fill factor.
+ */
+static int
+__ham_set_h_ffactor(dbp, h_ffactor)
+ DB *dbp;
+ u_int32_t h_ffactor;
+{
+ HASH *hashp;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_ffactor");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_ffactor = h_ffactor;
+ return (0);
+}
+
+/*
+ * __ham_set_h_hash --
+ * Set the hash function.
+ */
+static int
+__ham_set_h_hash(dbp, func)
+ DB *dbp;
+ u_int32_t (*func) __P((DB *, const void *, u_int32_t));
+{
+ HASH *hashp;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_hash");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_hash = func;
+ return (0);
+}
+
+/*
+ * __ham_set_h_nelem --
+ * Set the table size.
+ */
+static int
+__ham_set_h_nelem(dbp, h_nelem)
+ DB *dbp;
+ u_int32_t h_nelem;
+{
+ HASH *hashp;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_nelem");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_nelem = h_nelem;
+ return (0);
+}
diff --git a/bdb/hash/hash_page.c b/bdb/hash/hash_page.c
new file mode 100644
index 00000000000..64f38853284
--- /dev/null
+++ b/bdb/hash/hash_page.c
@@ -0,0 +1,1655 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_page.c,v 11.46 2001/01/11 18:19:51 bostic Exp $";
+#endif /* not lint */
+
+/*
+ * PACKAGE: hashing
+ *
+ * DESCRIPTION:
+ * Page manipulation for hashing package.
+ *
+ * ROUTINES:
+ *
+ * External
+ * __get_page
+ * __add_ovflpage
+ * __overflow_page
+ * Internal
+ * open_temp
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "hash.h"
+#include "lock.h"
+#include "txn.h"
+
+/*
+ * PUBLIC: int __ham_item __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t next_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED)) {
+ __db_err(dbp->dbenv, "Attempt to return a deleted item");
+ return (EINVAL);
+ }
+ F_CLR(hcp, H_OK | H_NOMORE);
+
+ /* Check if we need to get a page for this cursor. */
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+recheck:
+ /* Check if we are looking for space in which to insert an item. */
+ if (hcp->seek_size && hcp->seek_found_page == PGNO_INVALID
+ && hcp->seek_size < P_FREESPACE(hcp->page))
+ hcp->seek_found_page = hcp->pgno;
+
+ /* Check for off-page duplicates. */
+ if (hcp->indx < NUM_ENT(hcp->page) &&
+ HPAGE_TYPE(hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP) {
+ memcpy(pgnop,
+ HOFFDUP_PGNO(H_PAIRDATA(hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
+ F_SET(hcp, H_OK);
+ return (0);
+ }
+
+ /* Check if we need to go on to the next page. */
+ if (F_ISSET(hcp, H_ISDUP))
+ /*
+ * ISDUP is set, and offset is at the beginning of the datum.
+ * We need to grab the length of the datum, then set the datum
+ * pointer to be the beginning of the datum.
+ */
+ memcpy(&hcp->dup_len,
+ HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx)) +
+ hcp->dup_off, sizeof(db_indx_t));
+
+ if (hcp->indx >= (db_indx_t)NUM_ENT(hcp->page)) {
+ /* Fetch next page. */
+ if (NEXT_PGNO(hcp->page) == PGNO_INVALID) {
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ }
+ next_pgno = NEXT_PGNO(hcp->page);
+ hcp->indx = 0;
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ goto recheck;
+ }
+
+ F_SET(hcp, H_OK);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_item_reset __P((DBC *));
+ */
+int
+__ham_item_reset(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+ DB *dbp;
+ int ret;
+
+ ret = 0;
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if (hcp->page != NULL)
+ ret = memp_fput(dbp->mpf, hcp->page, 0);
+
+ __ham_item_init(dbc);
+ return (ret);
+}
+
+/*
+ * PUBLIC: void __ham_item_init __P((DBC *));
+ */
+void
+__ham_item_init(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /*
+ * If this cursor still holds any locks, we must
+ * release them if we are not running with transactions.
+ */
+ if (hcp->lock.off != LOCK_INVALID && dbc->txn == NULL)
+ (void)lock_put(dbc->dbp->dbenv, &hcp->lock);
+
+ /*
+ * The following fields must *not* be initialized here
+ * because they may have meaning across inits.
+ * hlock, hdr, split_buf, stats
+ */
+ hcp->bucket = BUCKET_INVALID;
+ hcp->lbucket = BUCKET_INVALID;
+ hcp->lock.off = LOCK_INVALID;
+ hcp->lock_mode = DB_LOCK_NG;
+ hcp->dup_off = 0;
+ hcp->dup_len = 0;
+ hcp->dup_tlen = 0;
+ hcp->seek_size = 0;
+ hcp->seek_found_page = PGNO_INVALID;
+ hcp->flags = 0;
+
+ hcp->pgno = PGNO_INVALID;
+ hcp->indx = NDX_INVALID;
+ hcp->page = NULL;
+}
+
+/*
+ * Returns the last item in a bucket.
+ *
+ * PUBLIC: int __ham_item_last __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_last(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+
+ hcp->bucket = hcp->hdr->max_bucket;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ F_SET(hcp, H_OK);
+ return (__ham_item_prev(dbc, mode, pgnop));
+}
+
+/*
+ * PUBLIC: int __ham_item_first __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_first(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+ F_SET(hcp, H_OK);
+ hcp->bucket = 0;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ return (__ham_item_next(dbc, mode, pgnop));
+}
+
+/*
+ * __ham_item_prev --
+ * Returns a pointer to key/data pair on a page. In the case of
+ * bigkeys, just returns the page number and index of the bigkey
+ * pointer pair.
+ *
+ * PUBLIC: int __ham_item_prev __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_prev(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t next_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /*
+ * There are 5 cases for backing up in a hash file.
+ * Case 1: In the middle of a page, no duplicates, just dec the index.
+ * Case 2: In the middle of a duplicate set, back up one.
+ * Case 3: At the beginning of a duplicate set, get out of set and
+ * back up to next key.
+ * Case 4: At the beginning of a page; go to previous page.
+ * Case 5: At the beginning of a bucket; go to prev bucket.
+ */
+ F_CLR(hcp, H_OK | H_NOMORE | H_DELETED);
+
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+ /*
+ * First handle the duplicates. Either you'll get the key here
+ * or you'll exit the duplicate set and drop into the code below
+ * to handle backing up through keys.
+ */
+ if (!F_ISSET(hcp, H_NEXT_NODUP) && F_ISSET(hcp, H_ISDUP)) {
+ if (HPAGE_TYPE(hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP) {
+ memcpy(pgnop,
+ HOFFDUP_PGNO(H_PAIRDATA(hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
+ F_SET(hcp, H_OK);
+ return (0);
+ }
+
+ /* Duplicates are on-page. */
+ if (hcp->dup_off != 0) {
+ memcpy(&hcp->dup_len, HKEYDATA_DATA(
+ H_PAIRDATA(hcp->page, hcp->indx))
+ + hcp->dup_off - sizeof(db_indx_t),
+ sizeof(db_indx_t));
+ hcp->dup_off -=
+ DUP_SIZE(hcp->dup_len);
+ return (__ham_item(dbc, mode, pgnop));
+ }
+ }
+
+ /*
+ * If we get here, we are not in a duplicate set, and just need
+ * to back up the cursor. There are still three cases:
+ * midpage, beginning of page, beginning of bucket.
+ */
+
+ if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else
+ /*
+ * We are no longer in a dup set; flag this so the dup code
+ * will reinitialize should we stumble upon another one.
+ */
+ F_CLR(hcp, H_ISDUP);
+
+ if (hcp->indx == 0) { /* Beginning of page. */
+ hcp->pgno = PREV_PGNO(hcp->page);
+ if (hcp->pgno == PGNO_INVALID) {
+ /* Beginning of bucket. */
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ } else if ((ret =
+ __ham_next_cpage(dbc, hcp->pgno, 0)) != 0)
+ return (ret);
+ else
+ hcp->indx = NUM_ENT(hcp->page);
+ }
+
+ /*
+ * Either we've got the cursor set up to be decremented, or we
+ * have to find the end of a bucket.
+ */
+ if (hcp->indx == NDX_INVALID) {
+ DB_ASSERT(hcp->page != NULL);
+
+ hcp->indx = NUM_ENT(hcp->page);
+ for (next_pgno = NEXT_PGNO(hcp->page);
+ next_pgno != PGNO_INVALID;
+ next_pgno = NEXT_PGNO(hcp->page)) {
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ hcp->indx = NUM_ENT(hcp->page);
+ }
+
+ if (hcp->indx == 0) {
+ /* Bucket was empty. */
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ }
+ }
+
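+	/* Each key/data pair occupies two index entries; back up one pair. */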
+ hcp->indx -= 2;
+
+ return (__ham_item(dbc, mode, pgnop));
+}
+
+/*
+ * Sets the cursor to the next key/data pair on a page.
+ *
+ * PUBLIC: int __ham_item_next __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_next(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+	i = F_ISSET(hcp, H_CONTINUE) ? hcp->dup_off : 0;
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+ /*
+ * Deleted on-page duplicates are a weird case. If we delete the last
+ * one, then our cursor is at the very end of a duplicate set and
+ * we actually need to go on to the next key.
+ */
+ if (F_ISSET(hcp, H_DELETED)) {
+ if (hcp->indx != NDX_INVALID &&
+ F_ISSET(hcp, H_ISDUP) &&
+ HPAGE_TYPE(hcp->page, H_DATAINDEX(hcp->indx))
+ == H_DUPLICATE && hcp->dup_tlen == hcp->dup_off) {
+ if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else {
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ } else if (!F_ISSET(hcp, H_ISDUP) && F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else if (F_ISSET(hcp, H_ISDUP) &&
+ F_ISSET(hcp, H_NEXT_NODUP)) {
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ F_CLR(hcp, H_DELETED);
+ } else if (hcp->indx == NDX_INVALID) {
+ hcp->indx = 0;
+ F_CLR(hcp, H_ISDUP);
+ } else if (F_ISSET(hcp, H_NEXT_NODUP)) {
+ hcp->indx += 2;
+ F_CLR(hcp, H_ISDUP);
+ } else if (F_ISSET(hcp, H_ISDUP) && hcp->dup_tlen != 0) {
+ if (hcp->dup_off + DUP_SIZE(hcp->dup_len) >=
+ hcp->dup_tlen && F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ }
+ hcp->dup_off += DUP_SIZE(hcp->dup_len);
+ if (hcp->dup_off >= hcp->dup_tlen) {
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ } else if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else {
+ hcp->indx += 2;
+ F_CLR(hcp, H_ISDUP);
+ }
+
+ return (__ham_item(dbc, mode, pgnop));
+}
+
+/*
+ * PUBLIC: void __ham_putitem __P((PAGE *p, const DBT *, int));
+ *
+ * This is a little bit sleazy in that we're overloading the meaning
+ * of the H_OFFPAGE type here. When we recover deletes, we have the
+ * entire entry instead of having only the DBT, so we'll pass type
+ * H_OFFPAGE to mean, "copy the whole entry" as opposed to constructing
+ * an H_KEYDATA around it.
+ */
+void
+__ham_putitem(p, dbt, type)
+ PAGE *p;
+ const DBT *dbt;
+ int type;
+{
+ u_int16_t n, off;
+
+ n = NUM_ENT(p);
+
+ /* Put the item element on the page. */
+ if (type == H_OFFPAGE) {
+ off = HOFFSET(p) - dbt->size;
+ HOFFSET(p) = p->inp[n] = off;
+ memcpy(P_ENTRY(p, n), dbt->data, dbt->size);
+ } else {
+ off = HOFFSET(p) - HKEYDATA_SIZE(dbt->size);
+ HOFFSET(p) = p->inp[n] = off;
+ PUT_HKEYDATA(P_ENTRY(p, n), dbt->data, dbt->size, type);
+ }
+
+ /* Adjust page info. */
+ NUM_ENT(p) += 1;
+}
+
+/*
+ * PUBLIC: void __ham_reputpair
+ * PUBLIC: __P((PAGE *p, u_int32_t, u_int32_t, const DBT *, const DBT *));
+ *
+ * This is a special case to restore a key/data pair to its original
+ * location during recovery. We are guaranteed that the pair fits
+ * on the page and is not the last pair on the page (because if it's
+ * the last pair, the normal insert works).
+ */
+void
+__ham_reputpair(p, psize, ndx, key, data)
+ PAGE *p;
+ u_int32_t psize, ndx;
+ const DBT *key, *data;
+{
+ db_indx_t i, movebytes, newbytes;
+ u_int8_t *from;
+
+ /* First shuffle the existing items up on the page. */
+ movebytes =
+ (ndx == 0 ? psize : p->inp[H_DATAINDEX(ndx - 2)]) - HOFFSET(p);
+ newbytes = key->size + data->size;
+ from = (u_int8_t *)p + HOFFSET(p);
+ memmove(from - newbytes, from, movebytes);
+
+ /*
+ * Adjust the indices and move them up 2 spaces. Note that we
+ * have to check the exit condition inside the loop just in case
+ * we are dealing with index 0 (db_indx_t's are unsigned).
+ */
+ for (i = NUM_ENT(p) - 1; ; i-- ) {
+ p->inp[i + 2] = p->inp[i] - newbytes;
+ if (i == H_KEYINDEX(ndx))
+ break;
+ }
+
+ /* Put the key and data on the page. */
+ p->inp[H_KEYINDEX(ndx)] =
+ (ndx == 0 ? psize : p->inp[H_DATAINDEX(ndx - 2)]) - key->size;
+ p->inp[H_DATAINDEX(ndx)] = p->inp[H_KEYINDEX(ndx)] - data->size;
+ memcpy(P_ENTRY(p, H_KEYINDEX(ndx)), key->data, key->size);
+ memcpy(P_ENTRY(p, H_DATAINDEX(ndx)), data->data, data->size);
+
+ /* Adjust page info. */
+ HOFFSET(p) -= newbytes;
+ NUM_ENT(p) += 2;
+}
+
+/*
+ * PUBLIC: int __ham_del_pair __P((DBC *, int));
+ */
+int
+__ham_del_pair(dbc, reclaim_page)
+ DBC *dbc;
+ int reclaim_page;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT data_dbt, key_dbt;
+ DB_ENV *dbenv;
+ DB_LSN new_lsn, *n_lsn, tmp_lsn;
+ PAGE *n_pagep, *nn_pagep, *p, *p_pagep;
+ db_indx_t ndx;
+ db_pgno_t chg_pgno, pgno, tmp_pgno;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ dbenv = dbp->dbenv;
+ ndx = hcp->indx;
+
+ n_pagep = p_pagep = nn_pagep = NULL;
+
+ if (hcp->page == NULL && (ret = memp_fget(dbp->mpf,
+ &hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+ p = hcp->page;
+
+ /*
+ * We optimize for the normal case which is when neither the key nor
+ * the data are large. In this case, we write a single log record
+	 * and do the delete. If either is large, we'll call __db_doff
+ * to remove the big item and then update the page to remove the
+ * entry referring to the big item.
+ */
+ ret = 0;
+ if (HPAGE_PTYPE(H_PAIRKEY(p, ndx)) == H_OFFPAGE) {
+ memcpy(&pgno, HOFFPAGE_PGNO(P_ENTRY(p, H_KEYINDEX(ndx))),
+ sizeof(db_pgno_t));
+ ret = __db_doff(dbc, pgno);
+ }
+
+ if (ret == 0)
+ switch (HPAGE_PTYPE(H_PAIRDATA(p, ndx))) {
+ case H_OFFPAGE:
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(P_ENTRY(p, H_DATAINDEX(ndx))),
+ sizeof(db_pgno_t));
+ ret = __db_doff(dbc, pgno);
+ break;
+ case H_OFFDUP:
+ case H_DUPLICATE:
+ /*
+ * If we delete a pair that is/was a duplicate, then
+ * we had better clear the flag so that we update the
+ * cursor appropriately.
+ */
+ F_CLR(hcp, H_ISDUP);
+ break;
+ }
+
+ if (ret)
+ return (ret);
+
+ /* Now log the delete off this page. */
+ if (DB_LOGGING(dbc)) {
+ key_dbt.data = P_ENTRY(p, H_KEYINDEX(ndx));
+ key_dbt.size = LEN_HITEM(p, dbp->pgsize, H_KEYINDEX(ndx));
+ data_dbt.data = P_ENTRY(p, H_DATAINDEX(ndx));
+ data_dbt.size = LEN_HITEM(p, dbp->pgsize, H_DATAINDEX(ndx));
+
+ if ((ret = __ham_insdel_log(dbenv,
+ dbc->txn, &new_lsn, 0, DELPAIR,
+ dbp->log_fileid, PGNO(p), (u_int32_t)ndx,
+ &LSN(p), &key_dbt, &data_dbt)) != 0)
+ return (ret);
+
+ /* Move lsn onto page. */
+ LSN(p) = new_lsn;
+ }
+
+ /* Do the delete. */
+ __ham_dpair(dbp, p, ndx);
+
+ /*
+ * Mark item deleted so that we don't try to return it, and
+ * so that we update the cursor correctly on the next call
+ * to next.
+ */
+ F_SET(hcp, H_DELETED);
+ F_CLR(hcp, H_OK);
+
+ /*
+	 * Update cursors that are on the page where the delete happened.
+ */
+ if ((ret = __ham_c_update(dbc, 0, 0, 0)) != 0)
+ return (ret);
+
+ /*
+ * If we are locking, we will not maintain this, because it is
+ * a hot spot.
+ *
+ * XXX
+ * Perhaps we can retain incremental numbers and apply them later.
+ */
+ if (!STD_LOCKING(dbc))
+ --hcp->hdr->nelem;
+
+ /*
+ * If we need to reclaim the page, then check if the page is empty.
+ * There are two cases. If it's empty and it's not the first page
+	 * in the bucket (i.e., the bucket page), then we can simply remove
+	 * it. If it is the first page in the bucket's chain, then we need
+	 * to copy the second page into it and remove the second page.
+	 * If it's the only page in the bucket, we leave it alone.
+ */
+ if (!reclaim_page ||
+ NUM_ENT(p) != 0 ||
+ (PREV_PGNO(p) == PGNO_INVALID && NEXT_PGNO(p) == PGNO_INVALID))
+ return (memp_fset(dbp->mpf, p, DB_MPOOL_DIRTY));
+
+ if (PREV_PGNO(p) == PGNO_INVALID) {
+ /*
+ * First page in chain is empty and we know that there
+ * are more pages in the chain.
+ */
+ if ((ret =
+ memp_fget(dbp->mpf, &NEXT_PGNO(p), 0, &n_pagep)) != 0)
+ return (ret);
+
+ if (NEXT_PGNO(n_pagep) != PGNO_INVALID &&
+ (ret = memp_fget(dbp->mpf, &NEXT_PGNO(n_pagep), 0,
+ &nn_pagep)) != 0)
+ goto err;
+
+ if (DB_LOGGING(dbc)) {
+ key_dbt.data = n_pagep;
+ key_dbt.size = dbp->pgsize;
+ if ((ret = __ham_copypage_log(dbenv,
+ dbc->txn, &new_lsn, 0, dbp->log_fileid, PGNO(p),
+ &LSN(p), PGNO(n_pagep), &LSN(n_pagep),
+ NEXT_PGNO(n_pagep),
+ nn_pagep == NULL ? NULL : &LSN(nn_pagep),
+ &key_dbt)) != 0)
+ goto err;
+
+ /* Move lsn onto page. */
+ LSN(p) = new_lsn; /* Structure assignment. */
+ LSN(n_pagep) = new_lsn;
+ if (NEXT_PGNO(n_pagep) != PGNO_INVALID)
+ LSN(nn_pagep) = new_lsn;
+ }
+ if (nn_pagep != NULL) {
+ PREV_PGNO(nn_pagep) = PGNO(p);
+ if ((ret = memp_fput(dbp->mpf,
+ nn_pagep, DB_MPOOL_DIRTY)) != 0) {
+ nn_pagep = NULL;
+ goto err;
+ }
+ }
+
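+		/*
+		 * Copy the second page over the now-empty bucket page,
+		 * preserving the bucket page's page number and LSN.
+		 */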
+ tmp_pgno = PGNO(p);
+ tmp_lsn = LSN(p);
+ memcpy(p, n_pagep, dbp->pgsize);
+ PGNO(p) = tmp_pgno;
+ LSN(p) = tmp_lsn;
+ PREV_PGNO(p) = PGNO_INVALID;
+
+ /*
+ * Update cursors to reflect the fact that records
+ * on the second page have moved to the first page.
+ */
+ if ((ret = __ham_c_chgpg(dbc,
+ PGNO(n_pagep), NDX_INVALID, PGNO(p), NDX_INVALID)) != 0)
+ return (ret);
+
+ /*
+ * Update the cursor to reflect its new position.
+ */
+ hcp->indx = 0;
+ hcp->pgno = PGNO(p);
+ if ((ret = memp_fset(dbp->mpf, p, DB_MPOOL_DIRTY)) != 0 ||
+ (ret = __db_free(dbc, n_pagep)) != 0)
+ return (ret);
+ } else {
+ if ((ret =
+ memp_fget(dbp->mpf, &PREV_PGNO(p), 0, &p_pagep)) != 0)
+ goto err;
+
+ if (NEXT_PGNO(p) != PGNO_INVALID) {
+ if ((ret = memp_fget(dbp->mpf,
+ &NEXT_PGNO(p), 0, &n_pagep)) != 0)
+ goto err;
+ n_lsn = &LSN(n_pagep);
+ } else {
+ n_pagep = NULL;
+ n_lsn = NULL;
+ }
+
+ NEXT_PGNO(p_pagep) = NEXT_PGNO(p);
+ if (n_pagep != NULL)
+ PREV_PGNO(n_pagep) = PGNO(p_pagep);
+
+ if (DB_LOGGING(dbc)) {
+ if ((ret = __ham_newpage_log(dbenv,
+ dbc->txn, &new_lsn, 0, DELOVFL,
+ dbp->log_fileid, PREV_PGNO(p), &LSN(p_pagep),
+ PGNO(p), &LSN(p), NEXT_PGNO(p), n_lsn)) != 0)
+ goto err;
+
+ /* Move lsn onto page. */
+ LSN(p_pagep) = new_lsn; /* Structure assignment. */
+ if (n_pagep)
+ LSN(n_pagep) = new_lsn;
+ LSN(p) = new_lsn;
+ }
+ if (NEXT_PGNO(p) == PGNO_INVALID) {
+ /*
+ * There is no next page; put the cursor on the
+ * previous page as if we'd deleted the last item
+ * on that page; index greater than number of
+ * valid entries and H_DELETED set.
+ */
+ hcp->pgno = PGNO(p_pagep);
+ hcp->indx = NUM_ENT(p_pagep);
+ F_SET(hcp, H_DELETED);
+ } else {
+ hcp->pgno = NEXT_PGNO(p);
+ hcp->indx = 0;
+ }
+
+ /*
+ * Since we are about to delete the cursor page and we have
+ * just moved the cursor, we need to make sure that the
+ * old page pointer isn't left hanging around in the cursor.
+ */
+ hcp->page = NULL;
+ chg_pgno = PGNO(p);
+ ret = __db_free(dbc, p);
+ if ((t_ret = memp_fput(dbp->mpf, p_pagep, DB_MPOOL_DIRTY)) != 0
+ && ret == 0)
+ ret = t_ret;
+ if (n_pagep != NULL && (t_ret = memp_fput(dbp->mpf,
+ n_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ return (ret);
+ ret = __ham_c_chgpg(dbc,
+ chg_pgno, 0, hcp->pgno, hcp->indx);
+ }
+ return (ret);
+
+err: /* Clean up any pages. */
+ if (n_pagep != NULL)
+ (void)memp_fput(dbp->mpf, n_pagep, 0);
+ if (nn_pagep != NULL)
+ (void)memp_fput(dbp->mpf, nn_pagep, 0);
+ if (p_pagep != NULL)
+ (void)memp_fput(dbp->mpf, p_pagep, 0);
+ return (ret);
+}
+
+/*
+ * __ham_replpair --
+ * Given the key data indicated by the cursor, replace part/all of it
+ * according to the fields in the dbt.
+ *
+ * PUBLIC: int __ham_replpair __P((DBC *, DBT *, u_int32_t));
+ */
+int
+__ham_replpair(dbc, dbt, make_dup)
+ DBC *dbc;
+ DBT *dbt;
+ u_int32_t make_dup;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT old_dbt, tdata, tmp;
+ DB_LSN new_lsn;
+ int32_t change; /* XXX: Possible overflow. */
+ u_int32_t dup, len, memsize;
+ int is_big, ret, type;
+ u_int8_t *beg, *dest, *end, *hk, *src;
+ void *memp;
+
+ /*
+ * Big item replacements are handled in generic code.
+ * Items that fit on the current page fall into 4 classes.
+ * 1. On-page element, same size
+ * 2. On-page element, new is bigger (fits)
+ * 3. On-page element, new is bigger (does not fit)
+ * 4. On-page element, old is bigger
+ * Numbers 1, 2, and 4 are essentially the same (and should
+ * be the common case). We handle case 3 as a delete and
+ * add.
+ */
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+	/*
+	 * We need to compute the number of bytes that we are adding or
+	 * removing from the entry.  Normally, we can simply subtract
+	 * the number of bytes we are replacing (dbt->dlen) from the
+	 * number of bytes we are inserting (dbt->size).  However, if
+	 * we are doing a partial put off the end of a record, then this
+	 * formula doesn't work, because we are essentially adding
+	 * new bytes.  (A standalone sketch of this arithmetic follows
+	 * the function.)
+	 */
+ change = dbt->size - dbt->dlen;
+
+ hk = H_PAIRDATA(hcp->page, hcp->indx);
+ is_big = HPAGE_PTYPE(hk) == H_OFFPAGE;
+
+ if (is_big)
+ memcpy(&len, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ else
+ len = LEN_HKEYDATA(hcp->page,
+ dbp->pgsize, H_DATAINDEX(hcp->indx));
+
+ if (dbt->doff + dbt->dlen > len)
+ change += dbt->doff + dbt->dlen - len;
+
+ if (change > (int32_t)P_FREESPACE(hcp->page) || is_big) {
+ /*
+ * Case 3 -- two subcases.
+ * A. This is not really a partial operation, but an overwrite.
+ * Simple del and add works.
+ * B. This is a partial and we need to construct the data that
+ * we are really inserting (yuck).
+ * In both cases, we need to grab the key off the page (in
+ * some cases we could do this outside of this routine; for
+ * cleanliness we do it here. If you happen to be on a big
+ * key, this could be a performance hit).
+ */
+ memset(&tmp, 0, sizeof(tmp));
+ if ((ret =
+ __db_ret(dbp, hcp->page, H_KEYINDEX(hcp->indx),
+ &tmp, &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ return (ret);
+
+ /* Preserve duplicate info. */
+ dup = F_ISSET(hcp, H_ISDUP);
+ if (dbt->doff == 0 && dbt->dlen == len) {
+ ret = __ham_del_pair(dbc, 0);
+ if (ret == 0)
+ ret = __ham_add_el(dbc,
+ &tmp, dbt, dup ? H_DUPLICATE : H_KEYDATA);
+ } else { /* Case B */
+ type = HPAGE_PTYPE(hk) != H_OFFPAGE ?
+ HPAGE_PTYPE(hk) : H_KEYDATA;
+ memset(&tdata, 0, sizeof(tdata));
+ memp = NULL;
+ memsize = 0;
+ if ((ret = __db_ret(dbp, hcp->page,
+ H_DATAINDEX(hcp->indx), &tdata, &memp, &memsize))
+ != 0)
+ goto err;
+
+ /* Now we can delete the item. */
+ if ((ret = __ham_del_pair(dbc, 0)) != 0) {
+ __os_free(memp, memsize);
+ goto err;
+ }
+
+ /* Now shift old data around to make room for new. */
+ if (change > 0) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ tdata.size + change,
+ NULL, &tdata.data)) != 0)
+ return (ret);
+ memp = tdata.data;
+ memsize = tdata.size + change;
+ memset((u_int8_t *)tdata.data + tdata.size,
+ 0, change);
+ }
+ end = (u_int8_t *)tdata.data + tdata.size;
+
+ src = (u_int8_t *)tdata.data + dbt->doff + dbt->dlen;
+ if (src < end && tdata.size > dbt->doff + dbt->dlen) {
+ len = tdata.size - dbt->doff - dbt->dlen;
+ dest = src + change;
+ memmove(dest, src, len);
+ }
+ memcpy((u_int8_t *)tdata.data + dbt->doff,
+ dbt->data, dbt->size);
+ tdata.size += change;
+
+ /* Now add the pair. */
+ ret = __ham_add_el(dbc, &tmp, &tdata, type);
+ __os_free(memp, memsize);
+ }
+ F_SET(hcp, dup);
+err: return (ret);
+ }
+
+ /*
+ * Set up pointer into existing data. Do it before the log
+ * message so we can use it inside of the log setup.
+ */
+ beg = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ beg += dbt->doff;
+
+ /*
+ * If we are going to have to move bytes at all, figure out
+ * all the parameters here. Then log the call before moving
+ * anything around.
+ */
+ if (DB_LOGGING(dbc)) {
+ old_dbt.data = beg;
+ old_dbt.size = dbt->dlen;
+ if ((ret = __ham_replace_log(dbp->dbenv,
+ dbc->txn, &new_lsn, 0, dbp->log_fileid, PGNO(hcp->page),
+ (u_int32_t)H_DATAINDEX(hcp->indx), &LSN(hcp->page),
+ (u_int32_t)dbt->doff, &old_dbt, dbt, make_dup)) != 0)
+ return (ret);
+
+ LSN(hcp->page) = new_lsn; /* Structure assignment. */
+ }
+
+ __ham_onpage_replace(hcp->page, dbp->pgsize,
+ (u_int32_t)H_DATAINDEX(hcp->indx), (int32_t)dbt->doff, change, dbt);
+
+ return (0);
+}
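+
+/*
+ * Illustrative sketch, not part of the Berkeley DB sources: the partial-put
+ * arithmetic in __ham_replpair above, isolated as a hypothetical standalone
+ * helper using the integer types already used in this file.  It returns the
+ * number of bytes an item grows (> 0) or shrinks (< 0) when "size" new bytes
+ * overwrite "dlen" bytes starting at offset "doff" of an item that is
+ * currently "len" bytes long; a write that runs off the end of the item only
+ * replaces the overlapping portion.
+ */
+static int32_t
+partial_put_delta(u_int32_t len, u_int32_t doff, u_int32_t dlen, u_int32_t size)
+{
+	int32_t change;
+
+	/* Replace dlen existing bytes with size new ones. */
+	change = (int32_t)size - (int32_t)dlen;
+
+	/* The replaced region extends past the end: those bytes are new. */
+	if (doff + dlen > len)
+		change += (int32_t)(doff + dlen - len);
+	return (change);
+}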
+
+/*
+ * __ham_onpage_replace --
+ *	Replace data on a page with new data, possibly growing or shrinking
+ * what's there.  This is called on two different occasions.  On one (from
+ * replpair) we are interested in changing only the data.  On the other
+ * (from recovery) we are replacing the entire data (header and all) with a
+ * new element.  In the latter case, the off argument is negative.
+ * pagep: the page that we're changing
+ * pgsize: the page size
+ * ndx: page index of the element that is growing/shrinking.
+ * off: offset at which we are beginning the replacement.
+ * change: the number of bytes (+ or -) that the element is growing/shrinking.
+ * dbt: the new data that gets written at offset off.
+ * PUBLIC: void __ham_onpage_replace __P((PAGE *, size_t, u_int32_t, int32_t,
+ * PUBLIC: int32_t, DBT *));
+ */
+void
+__ham_onpage_replace(pagep, pgsize, ndx, off, change, dbt)
+ PAGE *pagep;
+ size_t pgsize;
+ u_int32_t ndx;
+ int32_t off;
+ int32_t change;
+ DBT *dbt;
+{
+ db_indx_t i;
+ int32_t len;
+ u_int8_t *src, *dest;
+ int zero_me;
+
+ if (change != 0) {
+ zero_me = 0;
+ src = (u_int8_t *)(pagep) + HOFFSET(pagep);
+ if (off < 0)
+ len = pagep->inp[ndx] - HOFFSET(pagep);
+ else if ((u_int32_t)off >= LEN_HKEYDATA(pagep, pgsize, ndx)) {
+ len = HKEYDATA_DATA(P_ENTRY(pagep, ndx)) +
+ LEN_HKEYDATA(pagep, pgsize, ndx) - src;
+ zero_me = 1;
+ } else
+ len = (HKEYDATA_DATA(P_ENTRY(pagep, ndx)) + off) - src;
+ dest = src - change;
+ memmove(dest, src, len);
+ if (zero_me)
+ memset(dest + len, 0, change);
+
+ /* Now update the indices. */
+ for (i = ndx; i < NUM_ENT(pagep); i++)
+ pagep->inp[i] -= change;
+ HOFFSET(pagep) -= change;
+ }
+ if (off >= 0)
+ memcpy(HKEYDATA_DATA(P_ENTRY(pagep, ndx)) + off,
+ dbt->data, dbt->size);
+ else
+ memcpy(P_ENTRY(pagep, ndx), dbt->data, dbt->size);
+}
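+
+/*
+ * Illustrative sketch (hypothetical helper, not part of this file): the core
+ * of an in-place replace is a memmove of the bytes around the changed region
+ * plus a memcpy of the new bytes.  __ham_onpage_replace above shifts toward
+ * the front of the page because hash items grow down from the end of the
+ * page; this simplified version works on a flat buffer whose tail follows
+ * the replaced region, but the bookkeeping is the same.  The caller must
+ * guarantee room for used + (newlen - oldlen) bytes.
+ */
+static size_t
+buf_replace(u_int8_t *buf, size_t used,
+    size_t off, size_t oldlen, const void *src, size_t newlen)
+{
+	/* Slide the bytes after the replaced region to their new position. */
+	memmove(buf + off + newlen, buf + off + oldlen, used - (off + oldlen));
+
+	/* Drop the replacement bytes into place. */
+	memcpy(buf + off, src, newlen);
+	return (used - oldlen + newlen);
+}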
+
+/*
+ * PUBLIC: int __ham_split_page __P((DBC *, u_int32_t, u_int32_t));
+ */
+int
+__ham_split_page(dbc, obucket, nbucket)
+ DBC *dbc;
+ u_int32_t obucket, nbucket;
+{
+ DB *dbp;
+ DBC **carray;
+ HASH_CURSOR *hcp, *cp;
+ DBT key, page_dbt;
+ DB_ENV *dbenv;
+ DB_LSN new_lsn;
+ PAGE **pp, *old_pagep, *temp_pagep, *new_pagep;
+ db_indx_t n;
+ db_pgno_t bucket_pgno, npgno, next_pgno;
+ u_int32_t big_len, len;
+ int found, i, ret, t_ret;
+ void *big_buf;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ dbenv = dbp->dbenv;
+ temp_pagep = old_pagep = new_pagep = NULL;
+
+ if ((ret = __ham_get_clist(dbp, obucket, NDX_INVALID, &carray)) != 0)
+ return (ret);
+
+ bucket_pgno = BUCKET_TO_PAGE(hcp, obucket);
+ if ((ret = memp_fget(dbp->mpf,
+ &bucket_pgno, DB_MPOOL_CREATE, &old_pagep)) != 0)
+ goto err;
+
+ /* Properly initialize the new bucket page. */
+ npgno = BUCKET_TO_PAGE(hcp, nbucket);
+ if ((ret = memp_fget(dbp->mpf,
+ &npgno, DB_MPOOL_CREATE, &new_pagep)) != 0)
+ goto err;
+ P_INIT(new_pagep,
+ dbp->pgsize, npgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+
+ temp_pagep = hcp->split_buf;
+ memcpy(temp_pagep, old_pagep, dbp->pgsize);
+
+ if (DB_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = old_pagep;
+ if ((ret = __ham_splitdata_log(dbenv,
+ dbc->txn, &new_lsn, 0, dbp->log_fileid, SPLITOLD,
+ PGNO(old_pagep), &page_dbt, &LSN(old_pagep))) != 0)
+ goto err;
+ }
+
+ P_INIT(old_pagep, dbp->pgsize, PGNO(old_pagep), PGNO_INVALID,
+ PGNO_INVALID, 0, P_HASH);
+
+ if (DB_LOGGING(dbc))
+ LSN(old_pagep) = new_lsn; /* Structure assignment. */
+
+ big_len = 0;
+ big_buf = NULL;
+ key.flags = 0;
+ while (temp_pagep != NULL) {
+ for (n = 0; n < (db_indx_t)NUM_ENT(temp_pagep); n += 2) {
+ if ((ret =
+ __db_ret(dbp, temp_pagep, H_KEYINDEX(n),
+ &key, &big_buf, &big_len)) != 0)
+ goto err;
+
+ if (__ham_call_hash(dbc, key.data, key.size)
+ == obucket)
+ pp = &old_pagep;
+ else
+ pp = &new_pagep;
+
+ /*
+ * Figure out how many bytes we need on the new
+ * page to store the key/data pair.
+ */
+
+ len = LEN_HITEM(temp_pagep, dbp->pgsize,
+ H_DATAINDEX(n)) +
+ LEN_HITEM(temp_pagep, dbp->pgsize,
+ H_KEYINDEX(n)) +
+ 2 * sizeof(db_indx_t);
+
+ if (P_FREESPACE(*pp) < len) {
+ if (DB_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = *pp;
+ if ((ret = __ham_splitdata_log(
+ dbenv, dbc->txn,
+ &new_lsn, 0, dbp->log_fileid,
+ SPLITNEW, PGNO(*pp), &page_dbt,
+ &LSN(*pp))) != 0)
+ goto err;
+ LSN(*pp) = new_lsn;
+ }
+ if ((ret =
+ __ham_add_ovflpage(dbc, *pp, 1, pp)) != 0)
+ goto err;
+ }
+
+ /* Check if we need to update a cursor. */
+ if (carray != NULL) {
+ found = 0;
+ for (i = 0; carray[i] != NULL; i++) {
+ cp =
+ (HASH_CURSOR *)carray[i]->internal;
+ if (cp->pgno == PGNO(temp_pagep)
+ && cp->indx == n) {
+ cp->pgno = PGNO(*pp);
+ cp->indx = NUM_ENT(*pp);
+ found = 1;
+ }
+ }
+ if (found && DB_LOGGING(dbc)
+ && IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret =
+ __ham_chgpg_log(dbp->dbenv,
+ dbc->txn, &new_lsn, 0,
+ dbp->log_fileid,
+ DB_HAM_SPLIT, PGNO(temp_pagep),
+ PGNO(*pp), n, NUM_ENT(*pp))) != 0)
+ goto err;
+ }
+ }
+ __ham_copy_item(dbp->pgsize,
+ temp_pagep, H_KEYINDEX(n), *pp);
+ __ham_copy_item(dbp->pgsize,
+ temp_pagep, H_DATAINDEX(n), *pp);
+ }
+ next_pgno = NEXT_PGNO(temp_pagep);
+
+ /* Clear temp_page; if it's a link overflow page, free it. */
+ if (PGNO(temp_pagep) != bucket_pgno && (ret =
+ __db_free(dbc, temp_pagep)) != 0) {
+ temp_pagep = NULL;
+ goto err;
+ }
+
+ if (next_pgno == PGNO_INVALID)
+ temp_pagep = NULL;
+ else if ((ret = memp_fget(dbp->mpf,
+ &next_pgno, DB_MPOOL_CREATE, &temp_pagep)) != 0)
+ goto err;
+
+ if (temp_pagep != NULL && DB_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = temp_pagep;
+ if ((ret = __ham_splitdata_log(dbenv,
+ dbc->txn, &new_lsn, 0, dbp->log_fileid,
+ SPLITOLD, PGNO(temp_pagep),
+ &page_dbt, &LSN(temp_pagep))) != 0)
+ goto err;
+ LSN(temp_pagep) = new_lsn;
+ }
+ }
+ if (big_buf != NULL)
+ __os_free(big_buf, big_len);
+
+ /*
+ * If the original bucket spanned multiple pages, then we've got
+ * a pointer to a page that used to be on the bucket chain. It
+ * should be deleted.
+ */
+ if (temp_pagep != NULL && PGNO(temp_pagep) != bucket_pgno &&
+ (ret = __db_free(dbc, temp_pagep)) != 0) {
+ temp_pagep = NULL;
+ goto err;
+ }
+
+ /*
+ * Write new buckets out.
+ */
+ if (DB_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = old_pagep;
+ if ((ret = __ham_splitdata_log(dbenv, dbc->txn, &new_lsn, 0,
+ dbp->log_fileid, SPLITNEW, PGNO(old_pagep), &page_dbt,
+ &LSN(old_pagep))) != 0)
+ goto err;
+ LSN(old_pagep) = new_lsn;
+
+ page_dbt.data = new_pagep;
+ if ((ret = __ham_splitdata_log(dbenv, dbc->txn, &new_lsn, 0,
+ dbp->log_fileid, SPLITNEW, PGNO(new_pagep), &page_dbt,
+ &LSN(new_pagep))) != 0)
+ goto err;
+ LSN(new_pagep) = new_lsn;
+ }
+ ret = memp_fput(dbp->mpf, old_pagep, DB_MPOOL_DIRTY);
+ if ((t_ret = memp_fput(dbp->mpf, new_pagep, DB_MPOOL_DIRTY)) != 0
+ && ret == 0)
+ ret = t_ret;
+
+ if (0) {
+err: if (old_pagep != NULL)
+ (void)memp_fput(dbp->mpf, old_pagep, DB_MPOOL_DIRTY);
+ if (new_pagep != NULL)
+ (void)memp_fput(dbp->mpf, new_pagep, DB_MPOOL_DIRTY);
+ if (temp_pagep != NULL && PGNO(temp_pagep) != bucket_pgno)
+ (void)memp_fput(dbp->mpf, temp_pagep, DB_MPOOL_DIRTY);
+ }
+ if (carray != NULL) /* We never knew its size. */
+ __os_free(carray, 0);
+ return (ret);
+}
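+
+/*
+ * Illustrative sketch (hypothetical helper): __ham_split_page above decides
+ * where each item lands by rehashing it with __ham_call_hash, which is not
+ * shown in this file.  Assuming the usual linear-hashing computation, where
+ * high_mask covers twice as many buckets as low_mask, the bucket choice
+ * looks like this: mask with the larger mask first, and fall back to the
+ * smaller mask for buckets that have not been created yet.
+ */
+static u_int32_t
+linear_hash_bucket(u_int32_t hash,
+    u_int32_t low_mask, u_int32_t high_mask, u_int32_t max_bucket)
+{
+	u_int32_t bucket;
+
+	bucket = hash & high_mask;
+	if (bucket > max_bucket)
+		bucket = bucket & low_mask;	/* Not yet split: use the parent bucket. */
+	return (bucket);
+}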
+
+/*
+ * Add the given pair to the page. The page in question may already be
+ * held (i.e. it was already gotten). If it is, then the page is passed
+ * in via the pagep parameter. On return, pagep will contain the page
+ * to which we just added something. This allows us to link overflow
+ * pages and return the new page having correctly put the last page.
+ *
+ * PUBLIC: int __ham_add_el __P((DBC *, const DBT *, const DBT *, int));
+ */
+int
+__ham_add_el(dbc, key, val, type)
+ DBC *dbc;
+ const DBT *key, *val;
+ int type;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ const DBT *pkey, *pdata;
+ DBT key_dbt, data_dbt;
+ DB_LSN new_lsn;
+ HOFFPAGE doff, koff;
+ db_pgno_t next_pgno, pgno;
+ u_int32_t data_size, key_size, pairsize, rectype;
+ int do_expand, is_keybig, is_databig, ret;
+ int key_type, data_type;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ do_expand = 0;
+
+ pgno = hcp->seek_found_page != PGNO_INVALID ? hcp->seek_found_page :
+ hcp->pgno;
+ if (hcp->page == NULL && (ret = memp_fget(dbp->mpf, &pgno,
+ DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+
+ key_size = HKEYDATA_PSIZE(key->size);
+ data_size = HKEYDATA_PSIZE(val->size);
+ is_keybig = ISBIG(hcp, key->size);
+ is_databig = ISBIG(hcp, val->size);
+ if (is_keybig)
+ key_size = HOFFPAGE_PSIZE;
+ if (is_databig)
+ data_size = HOFFPAGE_PSIZE;
+
+ pairsize = key_size + data_size;
+
+ /* Advance to first page in chain with room for item. */
+ while (H_NUMPAIRS(hcp->page) && NEXT_PGNO(hcp->page) != PGNO_INVALID) {
+ /*
+ * This may not be the end of the chain, but the pair may fit
+ * anyway. Check if it's a bigpair that fits or a regular
+ * pair that fits.
+ */
+ if (P_FREESPACE(hcp->page) >= pairsize)
+ break;
+ next_pgno = NEXT_PGNO(hcp->page);
+ if ((ret =
+ __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ }
+
+ /*
+ * Check if we need to allocate a new page.
+ */
+ if (P_FREESPACE(hcp->page) < pairsize) {
+ do_expand = 1;
+ if ((ret = __ham_add_ovflpage(dbc,
+ (PAGE *)hcp->page, 1, (PAGE **)&hcp->page)) != 0)
+ return (ret);
+ hcp->pgno = PGNO(hcp->page);
+ }
+
+ /*
+ * Update cursor.
+ */
+ hcp->indx = NUM_ENT(hcp->page);
+ F_CLR(hcp, H_DELETED);
+ if (is_keybig) {
+ koff.type = H_OFFPAGE;
+ UMRW_SET(koff.unused[0]);
+ UMRW_SET(koff.unused[1]);
+ UMRW_SET(koff.unused[2]);
+ if ((ret = __db_poff(dbc, key, &koff.pgno)) != 0)
+ return (ret);
+ koff.tlen = key->size;
+ key_dbt.data = &koff;
+ key_dbt.size = sizeof(koff);
+ pkey = &key_dbt;
+ key_type = H_OFFPAGE;
+ } else {
+ pkey = key;
+ key_type = H_KEYDATA;
+ }
+
+ if (is_databig) {
+ doff.type = H_OFFPAGE;
+ UMRW_SET(doff.unused[0]);
+ UMRW_SET(doff.unused[1]);
+ UMRW_SET(doff.unused[2]);
+ if ((ret = __db_poff(dbc, val, &doff.pgno)) != 0)
+ return (ret);
+ doff.tlen = val->size;
+ data_dbt.data = &doff;
+ data_dbt.size = sizeof(doff);
+ pdata = &data_dbt;
+ data_type = H_OFFPAGE;
+ } else {
+ pdata = val;
+ data_type = type;
+ }
+
+ if (DB_LOGGING(dbc)) {
+ rectype = PUTPAIR;
+ if (is_databig)
+ rectype |= PAIR_DATAMASK;
+ if (is_keybig)
+ rectype |= PAIR_KEYMASK;
+ if (type == H_DUPLICATE)
+ rectype |= PAIR_DUPMASK;
+
+ if ((ret = __ham_insdel_log(dbp->dbenv, dbc->txn, &new_lsn, 0,
+ rectype, dbp->log_fileid, PGNO(hcp->page),
+ (u_int32_t)NUM_ENT(hcp->page), &LSN(hcp->page), pkey,
+ pdata)) != 0)
+ return (ret);
+
+ /* Move lsn onto page. */
+ LSN(hcp->page) = new_lsn; /* Structure assignment. */
+ }
+
+ __ham_putitem(hcp->page, pkey, key_type);
+ __ham_putitem(hcp->page, pdata, data_type);
+
+ /*
+	 * For splits, we are going to update the cursor's page-number
+	 * field, so that we can easily return to the same page the
+ * next time we come in here. For other operations, this shouldn't
+ * matter, since odds are this is the last thing that happens before
+ * we return to the user program.
+ */
+ hcp->pgno = PGNO(hcp->page);
+
+ /*
+ * XXX
+ * Maybe keep incremental numbers here.
+ */
+ if (!STD_LOCKING(dbc))
+ hcp->hdr->nelem++;
+
+ if (do_expand || (hcp->hdr->ffactor != 0 &&
+ (u_int32_t)H_NUMPAIRS(hcp->page) > hcp->hdr->ffactor))
+ F_SET(hcp, H_EXPAND);
+ return (0);
+}
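+
+/*
+ * Illustrative sketch (hypothetical types, not the real page structures):
+ * the chain walk in __ham_add_el above is essentially a singly-linked search
+ * for a page with enough free space; when none is found, the caller appends
+ * an overflow page and uses that instead.
+ */
+struct ex_page {
+	struct ex_page *next;
+	size_t free_bytes;
+};
+
+static struct ex_page *
+find_page_with_room(struct ex_page *p, size_t need)
+{
+	while (p->free_bytes < need && p->next != NULL)
+		p = p->next;
+	/* NULL tells the caller to allocate a new overflow page. */
+	return (p->free_bytes >= need ? p : NULL);
+}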
+
+/*
+ * Special __putitem call used in splitting -- copies one entry to
+ * another. Works for all types of hash entries (H_OFFPAGE, H_KEYDATA,
+ * H_DUPLICATE, H_OFFDUP). Since we log splits at a high level, we
+ * do not need to do any logging here.
+ *
+ * PUBLIC: void __ham_copy_item __P((size_t, PAGE *, u_int32_t, PAGE *));
+ */
+void
+__ham_copy_item(pgsize, src_page, src_ndx, dest_page)
+ size_t pgsize;
+ PAGE *src_page;
+ u_int32_t src_ndx;
+ PAGE *dest_page;
+{
+ u_int32_t len;
+ void *src, *dest;
+
+ /*
+ * Copy the key and data entries onto this new page.
+ */
+ src = P_ENTRY(src_page, src_ndx);
+
+ /* Set up space on dest. */
+ len = LEN_HITEM(src_page, pgsize, src_ndx);
+ HOFFSET(dest_page) -= len;
+ dest_page->inp[NUM_ENT(dest_page)] = HOFFSET(dest_page);
+ dest = P_ENTRY(dest_page, NUM_ENT(dest_page));
+ NUM_ENT(dest_page)++;
+
+ memcpy(dest, src, len);
+}
+
+/*
+ * __ham_add_ovflpage --
+ *	Allocate a new overflow page, link it into the chain after pagep,
+ * and return it via pp.  Returns 0 on success and a non-zero error value
+ * on failure.
+ *
+ * PUBLIC: int __ham_add_ovflpage __P((DBC *, PAGE *, int, PAGE **));
+ */
+int
+__ham_add_ovflpage(dbc, pagep, release, pp)
+ DBC *dbc;
+ PAGE *pagep;
+ int release;
+ PAGE **pp;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DB_LSN new_lsn;
+ PAGE *new_pagep;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __db_new(dbc, P_HASH, &new_pagep)) != 0)
+ return (ret);
+
+ if (DB_LOGGING(dbc)) {
+ if ((ret = __ham_newpage_log(dbp->dbenv, dbc->txn, &new_lsn, 0,
+ PUTOVFL, dbp->log_fileid, PGNO(pagep), &LSN(pagep),
+ PGNO(new_pagep), &LSN(new_pagep), PGNO_INVALID, NULL)) != 0)
+ return (ret);
+
+ /* Move lsn onto page. */
+ LSN(pagep) = LSN(new_pagep) = new_lsn;
+ }
+ NEXT_PGNO(pagep) = PGNO(new_pagep);
+ PREV_PGNO(new_pagep) = PGNO(pagep);
+
+ if (release)
+ ret = memp_fput(dbp->mpf, pagep, DB_MPOOL_DIRTY);
+
+ *pp = new_pagep;
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_get_cpage __P((DBC *, db_lockmode_t));
+ */
+int
+__ham_get_cpage(dbc, mode)
+ DBC *dbc;
+ db_lockmode_t mode;
+{
+ DB *dbp;
+ DB_LOCK tmp_lock;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * There are four cases with respect to buckets and locks.
+ * 1. If there is no lock held, then if we are locking, we should
+ * get the lock.
+ * 2. If there is a lock held, it's for the current bucket, and it's
+ * for the right mode, we don't need to do anything.
+ * 3. If there is a lock held for the current bucket but it's not
+ * strong enough, we need to upgrade.
+ * 4. If there is a lock, but it's for a different bucket, then we need
+ * to release the existing lock and get a new lock.
+ */
+ tmp_lock.off = LOCK_INVALID;
+ if (STD_LOCKING(dbc)) {
+ if (hcp->lock.off != LOCK_INVALID &&
+ hcp->lbucket != hcp->bucket) { /* Case 4 */
+ if (dbc->txn == NULL &&
+ (ret = lock_put(dbp->dbenv, &hcp->lock)) != 0)
+ return (ret);
+ hcp->lock.off = LOCK_INVALID;
+ }
+ if ((hcp->lock.off != LOCK_INVALID &&
+ (hcp->lock_mode == DB_LOCK_READ &&
+ mode == DB_LOCK_WRITE))) {
+ /* Case 3. */
+ tmp_lock = hcp->lock;
+ hcp->lock.off = LOCK_INVALID;
+ }
+
+ /* Acquire the lock. */
+ if (hcp->lock.off == LOCK_INVALID)
+ /* Cases 1, 3, and 4. */
+ if ((ret = __ham_lock_bucket(dbc, mode)) != 0)
+ return (ret);
+
+ if (ret == 0) {
+ hcp->lock_mode = mode;
+ hcp->lbucket = hcp->bucket;
+ if (tmp_lock.off != LOCK_INVALID)
+ /* Case 3: release the original lock. */
+ ret = lock_put(dbp->dbenv, &tmp_lock);
+ } else if (tmp_lock.off != LOCK_INVALID)
+ hcp->lock = tmp_lock;
+ }
+
+ if (ret == 0 && hcp->page == NULL) {
+ if (hcp->pgno == PGNO_INVALID)
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if ((ret = memp_fget(dbp->mpf,
+ &hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
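+
+/*
+ * Illustrative sketch (hypothetical names): the four bucket/lock cases
+ * described in __ham_get_cpage above reduce to a small classification of
+ * what has to happen before the page can be fetched.
+ */
+enum ex_lock_action { EX_LOCK_GET, EX_LOCK_REUSE, EX_LOCK_UPGRADE, EX_LOCK_SWITCH };
+
+static enum ex_lock_action
+classify_bucket_lock(int have_lock, int same_bucket, int strong_enough)
+{
+	if (!have_lock)
+		return (EX_LOCK_GET);		/* Case 1: no lock held. */
+	if (!same_bucket)
+		return (EX_LOCK_SWITCH);	/* Case 4: release, then lock the new bucket. */
+	if (!strong_enough)
+		return (EX_LOCK_UPGRADE);	/* Case 3: held read, need write. */
+	return (EX_LOCK_REUSE);			/* Case 2: nothing to do. */
+}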
+
+/*
+ * __ham_next_cpage --
+ *	Get a new page at the cursor, putting the last page if necessary.
+ * The dirty argument indicates whether the page being released was
+ * modified and should be written back as dirty.
+ *
+ * PUBLIC: int __ham_next_cpage __P((DBC *, db_pgno_t, int));
+ */
+int
+__ham_next_cpage(dbc, pgno, dirty)
+ DBC *dbc;
+ db_pgno_t pgno;
+ int dirty;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ PAGE *p;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (hcp->page != NULL && (ret = memp_fput(dbp->mpf,
+ hcp->page, dirty ? DB_MPOOL_DIRTY : 0)) != 0)
+ return (ret);
+
+ if ((ret = memp_fget(dbp->mpf, &pgno, DB_MPOOL_CREATE, &p)) != 0)
+ return (ret);
+
+ hcp->page = p;
+ hcp->pgno = pgno;
+ hcp->indx = 0;
+
+ return (0);
+}
+
+/*
+ * __ham_lock_bucket --
+ * Get the lock on a particular bucket.
+ *
+ * PUBLIC: int __ham_lock_bucket __P((DBC *, db_lockmode_t));
+ */
+int
+__ham_lock_bucket(dbc, mode)
+ DBC *dbc;
+ db_lockmode_t mode;
+{
+ HASH_CURSOR *hcp;
+ u_int32_t flags;
+ int gotmeta, ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ gotmeta = hcp->hdr == NULL ? 1 : 0;
+ if (gotmeta)
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+ dbc->lock.pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (gotmeta)
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ return (ret);
+
+ flags = 0;
+ if (DB_NONBLOCK(dbc))
+ LF_SET(DB_LOCK_NOWAIT);
+
+ ret = lock_get(dbc->dbp->dbenv,
+ dbc->locker, flags, &dbc->lock_dbt, mode, &hcp->lock);
+
+ hcp->lock_mode = mode;
+ return (ret);
+}
+
+/*
+ * __ham_dpair --
+ * Delete a pair on a page, paying no attention to what the pair
+ * represents. The caller is responsible for freeing up duplicates
+ * or offpage entries that might be referenced by this pair.
+ *
+ * PUBLIC: void __ham_dpair __P((DB *, PAGE *, u_int32_t));
+ */
+void
+__ham_dpair(dbp, p, indx)
+ DB *dbp;
+ PAGE *p;
+ u_int32_t indx;
+{
+ db_indx_t delta, n;
+ u_int8_t *dest, *src;
+
+ /*
+ * Compute "delta", the amount we have to shift all of the
+ * offsets. To find the delta, we just need to calculate
+ * the size of the pair of elements we are removing.
+ */
+ delta = H_PAIRSIZE(p, dbp->pgsize, indx);
+
+ /*
+ * The hard case: we want to remove something other than
+ * the last item on the page. We need to shift data and
+ * offsets down.
+ */
+ if ((db_indx_t)indx != NUM_ENT(p) - 2) {
+ /*
+ * Move the data: src is the first occupied byte on
+ * the page. (Length is delta.)
+ */
+ src = (u_int8_t *)p + HOFFSET(p);
+
+ /*
+ * Destination is delta bytes beyond src. This might
+ * be an overlapping copy, so we have to use memmove.
+ */
+ dest = src + delta;
+ memmove(dest, src, p->inp[H_DATAINDEX(indx)] - HOFFSET(p));
+ }
+
+ /* Adjust page metadata. */
+ HOFFSET(p) = HOFFSET(p) + delta;
+ NUM_ENT(p) = NUM_ENT(p) - 2;
+
+ /* Adjust the offsets. */
+ for (n = (db_indx_t)indx; n < (db_indx_t)(NUM_ENT(p)); n++)
+ p->inp[n] = p->inp[n + 2] + delta;
+
+}
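+
+/*
+ * Illustrative sketch (hypothetical layout, not the real PAGE format): the
+ * compaction in __ham_dpair above is the classic slotted-page delete.  Items
+ * are packed at the tail of the buffer and grow toward the front, offsets[]
+ * holds one entry per item, and "hoffset" is the first used byte.  Removing
+ * the item at "ndx", whose size is "delta", slides the bytes between hoffset
+ * and the removed item toward the tail by delta and bumps the surviving
+ * offsets; the caller then decrements its entry count.  Returns the new
+ * first-used byte.
+ */
+static size_t
+slotted_remove(u_int8_t *buf, size_t *offsets, size_t nent,
+    size_t hoffset, size_t ndx, size_t delta)
+{
+	size_t i;
+
+	/* Items with a higher index live below offsets[ndx]; move them up. */
+	memmove(buf + hoffset + delta, buf + hoffset, offsets[ndx] - hoffset);
+
+	/* Those items moved by delta; shift their offsets into the gap. */
+	for (i = ndx; i + 1 < nent; i++)
+		offsets[i] = offsets[i + 1] + delta;
+
+	return (hoffset + delta);
+}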
diff --git a/bdb/hash/hash_rec.c b/bdb/hash/hash_rec.c
new file mode 100644
index 00000000000..ded58c281e9
--- /dev/null
+++ b/bdb/hash/hash_rec.c
@@ -0,0 +1,1078 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_rec.c,v 11.34 2001/01/11 18:19:52 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "hash.h"
+#include "lock.h"
+#include "log.h"
+#include "mp.h"
+
+static int __ham_alloc_pages __P((DB *, __ham_groupalloc_args *));
+
+/*
+ * __ham_insdel_recover --
+ *
+ * PUBLIC: int __ham_insdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_insdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t opcode;
+ int cmp_n, cmp_p, flags, getmeta, ret, type;
+
+ COMPQUIET(info, NULL);
+
+ getmeta = 0;
+ REC_PRINT(__ham_insdel_print);
+ REC_INTRO(__ham_insdel_read, 1);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = memp_fget(mpf, &argp->pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+ getmeta = 1;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+ /*
+ * Two possible things going on:
+ * redo a delete/undo a put: delete the item from the page.
+ * redo a put/undo a delete: add the item to the page.
+ * If we are undoing a delete, then the information logged is the
+ * entire entry off the page, not just the data of a dbt. In
+ * this case, we want to copy it back onto the page verbatim.
+ * We do this by calling __putitem with the type H_OFFPAGE instead
+ * of H_KEYDATA.
+ */
+ opcode = OPCODE_OF(argp->opcode);
+
+ flags = 0;
+ if ((opcode == DELPAIR && cmp_n == 0 && DB_UNDO(op)) ||
+ (opcode == PUTPAIR && cmp_p == 0 && DB_REDO(op))) {
+ /*
+ * Need to redo a PUT or undo a delete. If we are undoing a
+ * delete, we've got to restore the item back to its original
+ * position. That's a royal pain in the butt (because we do
+ * not store item lengths on the page), but there's no choice.
+ */
+ if (opcode != DELPAIR ||
+ argp->ndx == (u_int32_t)NUM_ENT(pagep)) {
+ __ham_putitem(pagep, &argp->key,
+ DB_UNDO(op) || PAIR_ISKEYBIG(argp->opcode) ?
+ H_OFFPAGE : H_KEYDATA);
+
+ if (PAIR_ISDATADUP(argp->opcode))
+ type = H_DUPLICATE;
+ else if (DB_UNDO(op) || PAIR_ISDATABIG(argp->opcode))
+ type = H_OFFPAGE;
+ else
+ type = H_KEYDATA;
+ __ham_putitem(pagep, &argp->data, type);
+ } else
+ (void)__ham_reputpair(pagep, file_dbp->pgsize,
+ argp->ndx, &argp->key, &argp->data);
+
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+
+ } else if ((opcode == DELPAIR && cmp_p == 0 && DB_REDO(op))
+ || (opcode == PUTPAIR && cmp_n == 0 && DB_UNDO(op))) {
+ /* Need to undo a put or redo a delete. */
+ __ham_dpair(file_dbp, pagep, argp->ndx);
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ goto out;
+
+ /* Return the previous LSN. */
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (getmeta)
+ (void)__ham_release_meta(dbc);
+ REC_CLOSE;
+}
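+
+/*
+ * Illustrative sketch (hypothetical helper): the LSN test used by this and
+ * the other recovery functions in this file.  A change is re-applied only if
+ * the page still carries the LSN the log record saw before the update
+ * (cmp_p == 0 under redo), and rolled back only if the page carries exactly
+ * the LSN this record wrote (cmp_n == 0 under undo); anything else means the
+ * page is already in the desired state.
+ */
+enum ex_rec_action { EX_REC_APPLY, EX_REC_REVERT, EX_REC_SKIP };
+
+static enum ex_rec_action
+classify_recovery(int is_redo, int page_at_before_lsn, int page_at_record_lsn)
+{
+	if (is_redo && page_at_before_lsn)
+		return (EX_REC_APPLY);		/* Page missed the update: redo it. */
+	if (!is_redo && page_at_record_lsn)
+		return (EX_REC_REVERT);		/* Page has the update: undo it. */
+	return (EX_REC_SKIP);			/* LSNs say there is nothing to do. */
+}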
+
+/*
+ * __ham_newpage_recover --
+ * This log message is used when we add/remove overflow pages. This
+ * message takes care of the pointer chains, not the data on the pages.
+ *
+ * PUBLIC: int __ham_newpage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_newpage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, flags, getmeta, ret;
+
+ COMPQUIET(info, NULL);
+
+ getmeta = 0;
+ REC_PRINT(__ham_newpage_print);
+ REC_INTRO(__ham_newpage_read, 1);
+
+ if ((ret = memp_fget(mpf, &argp->new_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto ppage;
+ } else if ((ret = memp_fget(mpf, &argp->new_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+ getmeta = 1;
+
+ /*
+ * There are potentially three pages we need to check: the one
+ * that we created/deleted, the one before it and the one after
+ * it.
+ */
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ flags = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ P_INIT(pagep, file_dbp->pgsize, argp->new_pgno,
+ argp->prev_pgno, argp->next_pgno, 0, P_HASH);
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /*
+ * Redo a delete or undo a create new page. All we
+ * really need to do is change the LSN.
+ */
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+
+ if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ goto out;
+
+ /* Now do the prev page. */
+ppage: if (argp->prev_pgno != PGNO_INVALID) {
+ if ((ret = memp_fget(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ ret = 0;
+ goto npage;
+ } else if ((ret =
+ memp_fget(mpf, &argp->prev_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+ flags = 0;
+
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ pagep->next_pgno = argp->new_pgno;
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /* Redo a delete or undo a create new page. */
+ pagep->next_pgno = argp->next_pgno;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn;
+
+ if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ goto out;
+ }
+
+ /* Now time to do the next page */
+npage: if (argp->next_pgno != PGNO_INVALID) {
+ if ((ret = memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret =
+ memp_fget(mpf, &argp->next_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ flags = 0;
+
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ pagep->prev_pgno = argp->new_pgno;
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /* Redo a delete or undo a create new page. */
+ pagep->prev_pgno = argp->prev_pgno;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn;
+
+ if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ goto out;
+ }
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (getmeta)
+ (void)__ham_release_meta(dbc);
+ REC_CLOSE;
+}
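+
+/*
+ * Illustrative sketch (hypothetical types, not Berkeley DB structures): the
+ * "three pages" fixed up by __ham_newpage_recover above are just a
+ * doubly-linked list splice expressed with page numbers.  Linking a new
+ * overflow page n between p and nx, or unlinking it again, touches at most
+ * those three pages; invalid_pgno stands in for the no-page sentinel.
+ */
+struct ex_chain_page {
+	u_int32_t pgno, prev_pgno, next_pgno;
+};
+
+static void
+chain_link(struct ex_chain_page *p, struct ex_chain_page *n,
+    struct ex_chain_page *nx, u_int32_t invalid_pgno)
+{
+	n->prev_pgno = p->pgno;
+	n->next_pgno = nx != NULL ? nx->pgno : invalid_pgno;
+	p->next_pgno = n->pgno;
+	if (nx != NULL)
+		nx->prev_pgno = n->pgno;
+}
+
+static void
+chain_unlink(struct ex_chain_page *p, struct ex_chain_page *n,
+    struct ex_chain_page *nx)
+{
+	p->next_pgno = n->next_pgno;
+	if (nx != NULL)
+		nx->prev_pgno = n->prev_pgno;
+}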
+
+/*
+ * __ham_replace_recover --
+ * This log message refers to partial puts that are local to a single
+ * page. You can think of them as special cases of the more general
+ * insdel log message.
+ *
+ * PUBLIC: int __ham_replace_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_replace_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_replace_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ DBT dbt;
+ PAGE *pagep;
+ int32_t grow;
+ int cmp_n, cmp_p, flags, getmeta, ret;
+ u_int8_t *hk;
+
+ COMPQUIET(info, NULL);
+
+ getmeta = 0;
+ REC_PRINT(__ham_replace_print);
+ REC_INTRO(__ham_replace_read, 1);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = memp_fget(mpf, &argp->pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+ getmeta = 1;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ memset(&dbt, 0, sizeof(dbt));
+ flags = 0;
+ grow = 1;
+
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Reapply the change as specified. */
+ dbt.data = argp->newitem.data;
+ dbt.size = argp->newitem.size;
+ grow = argp->newitem.size - argp->olditem.size;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Undo the already applied change. */
+ dbt.data = argp->olditem.data;
+ dbt.size = argp->olditem.size;
+ grow = argp->olditem.size - argp->newitem.size;
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags) {
+ __ham_onpage_replace(pagep,
+ file_dbp->pgsize, argp->ndx, argp->off, grow, &dbt);
+ if (argp->makedup) {
+ hk = P_ENTRY(pagep, argp->ndx);
+ if (DB_REDO(op))
+ HPAGE_PTYPE(hk) = H_DUPLICATE;
+ else
+ HPAGE_PTYPE(hk) = H_KEYDATA;
+ }
+ }
+
+ if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (getmeta)
+ (void)__ham_release_meta(dbc);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_splitdata_recover --
+ *
+ * PUBLIC: int __ham_splitdata_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_splitdata_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, flags, getmeta, ret;
+
+ COMPQUIET(info, NULL);
+
+ getmeta = 0;
+ REC_PRINT(__ham_splitdata_print);
+ REC_INTRO(__ham_splitdata_read, 1);
+
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = memp_fget(mpf, &argp->pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+ getmeta = 1;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ /*
+ * There are two types of log messages here, one for the old page
+ * and one for the new pages created. The original image in the
+ * SPLITOLD record is used for undo. The image in the SPLITNEW
+ * is used for redo. We should never have a case where there is
+ * a redo operation and the SPLITOLD record is on disk, but not
+ * the SPLITNEW record. Therefore, we only have work to do when
+	 * redoing NEW messages and undoing OLD messages, but we have to update
+ * LSNs in both cases.
+ */
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ if (argp->opcode == SPLITNEW)
+ /* Need to redo the split described. */
+ memcpy(pagep, argp->pageimage.data,
+ argp->pageimage.size);
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ if (argp->opcode == SPLITOLD) {
+ /* Put back the old image. */
+ memcpy(pagep, argp->pageimage.data,
+ argp->pageimage.size);
+ } else
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno,
+ PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (getmeta)
+ (void)__ham_release_meta(dbc);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_copypage_recover --
+ * Recovery function for copypage.
+ *
+ * PUBLIC: int __ham_copypage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_copypage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, flags, getmeta, ret;
+
+ COMPQUIET(info, NULL);
+
+ getmeta = 0;
+ REC_PRINT(__ham_copypage_print);
+ REC_INTRO(__ham_copypage_read, 1);
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+ getmeta = 1;
+ flags = 0;
+
+ /* This is the bucket page. */
+ if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto donext;
+ } else if ((ret = memp_fget(mpf, &argp->pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ memcpy(pagep, argp->page.data, argp->page.size);
+ PGNO(pagep) = argp->pgno;
+ PREV_PGNO(pagep) = PGNO_INVALID;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno, PGNO_INVALID,
+ argp->next_pgno, 0, P_HASH);
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = memp_fput(mpf, pagep, flags)) != 0)
+ goto out;
+
+donext: /* Now fix up the "next" page. */
+ if ((ret = memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto do_nn;
+ } else if ((ret = memp_fget(mpf, &argp->next_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /* For REDO just update the LSN. For UNDO copy page back. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->page.data, argp->page.size);
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = memp_fput(mpf, pagep, flags)) != 0)
+ goto out;
+
+ /* Now fix up the next's next page. */
+do_nn: if (argp->nnext_pgno == PGNO_INVALID)
+ goto done;
+
+ if ((ret = memp_fget(mpf, &argp->nnext_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = memp_fget(mpf, &argp->nnext_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nnextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nnextlsn);
+
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ PREV_PGNO(pagep) = argp->pgno;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ PREV_PGNO(pagep) = argp->next_pgno;
+ LSN(pagep) = argp->nnextlsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = memp_fput(mpf, pagep, flags)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (getmeta)
+ (void)__ham_release_meta(dbc);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_metagroup_recover --
+ * Recovery function for metagroup.
+ *
+ * PUBLIC: int __ham_metagroup_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_metagroup_args *argp;
+ HASH_CURSOR *hcp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t last_pgno;
+ int cmp_n, cmp_p, flags, groupgrow, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__ham_metagroup_print);
+ REC_INTRO(__ham_metagroup_read, 1);
+
+ /*
+	 * This logs the virtual create of pages pgno to pgno + bucket.
+ * Since the mpool page-allocation is not really able to be
+ * transaction protected, we can never undo it. Even in an abort,
+ * we have to allocate these pages to the hash table.
+ * The log record contains:
+ * bucket: new bucket being allocated.
+ * pgno: page number of the new bucket.
+	 * If bucket + 1 is a power of 2, then we allocated a whole batch
+	 * of pages; if it's not, then we simply allocated one new page.
+ */
+ groupgrow =
+ (u_int32_t)(1 << __db_log2(argp->bucket + 1)) == argp->bucket + 1;
+
+ last_pgno = argp->pgno;
+ if (groupgrow)
+ /* Read the last page. */
+ last_pgno += argp->bucket;
+
+ if ((ret = memp_fget(mpf, &last_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ flags = 0;
+ if ((cmp_p == 0 && DB_REDO(op)) || (cmp_n == 0 && DB_UNDO(op))) {
+ /*
+ * We need to make sure that we redo the allocation of the
+ * pages.
+ */
+ if (DB_REDO(op))
+ pagep->lsn = *lsnp;
+ else
+ pagep->lsn = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = memp_fput(mpf, pagep, flags)) != 0)
+ goto out;
+
+ /* Now we have to update the meta-data page. */
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+ cmp_n = log_compare(lsnp, &hcp->hdr->dbmeta.lsn);
+ cmp_p = log_compare(&hcp->hdr->dbmeta.lsn, &argp->metalsn);
+ CHECK_LSN(op, cmp_p, &hcp->hdr->dbmeta.lsn, &argp->metalsn);
+ if ((cmp_p == 0 && DB_REDO(op)) || (cmp_n == 0 && DB_UNDO(op))) {
+ if (DB_REDO(op)) {
+ /* Redo the actual updating of bucket counts. */
+ ++hcp->hdr->max_bucket;
+ if (groupgrow) {
+ hcp->hdr->low_mask = hcp->hdr->high_mask;
+ hcp->hdr->high_mask =
+ (argp->bucket + 1) | hcp->hdr->low_mask;
+ }
+ hcp->hdr->dbmeta.lsn = *lsnp;
+ } else {
+ /* Undo the actual updating of bucket counts. */
+ --hcp->hdr->max_bucket;
+ if (groupgrow) {
+ hcp->hdr->high_mask = hcp->hdr->low_mask;
+ hcp->hdr->low_mask = hcp->hdr->high_mask >> 1;
+ }
+ hcp->hdr->dbmeta.lsn = argp->metalsn;
+ }
+ if (groupgrow &&
+ hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] ==
+ PGNO_INVALID)
+ hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] =
+ argp->pgno - argp->bucket - 1;
+ F_SET(hcp, H_DIRTY);
+ }
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
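+
+/*
+ * Illustrative sketch (hypothetical helper): the "groupgrow" test above asks
+ * whether bucket + 1 is a power of two, i.e. whether adding this bucket
+ * doubled the table and therefore required allocating a whole batch of
+ * pages.  The bit trick below is an equivalent formulation of the
+ * __db_log2() comparison used in the code.
+ */
+static int
+is_group_grow(u_int32_t bucket)
+{
+	u_int32_t n;
+
+	n = bucket + 1;
+	return (n != 0 && (n & (n - 1)) == 0);	/* n is a power of two. */
+}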
+
+/*
+ * __ham_groupalloc_recover --
+ * Recover the batch creation of a set of pages for a new database.
+ *
+ * PUBLIC: int __ham_groupalloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_groupalloc_args *argp;
+ DBMETA *mmeta;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp;
+ DBC *dbc;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, flags, ret;
+
+ REC_PRINT(__ham_groupalloc_print);
+ REC_INTRO(__ham_groupalloc_read, 0);
+
+ pgno = PGNO_BASE_MD;
+ if ((ret = memp_fget(mpf, &pgno, 0, &mmeta)) != 0) {
+ if (DB_REDO(op)) {
+ /* Page should have existed. */
+ (void)__db_pgerr(file_dbp, pgno);
+ goto out;
+ } else {
+ ret = 0;
+ goto done;
+ }
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(mmeta));
+ cmp_p = log_compare(&LSN(mmeta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(mmeta), &argp->meta_lsn);
+
+ /*
+ * Basically, we used mpool to allocate a chunk of pages.
+ * We need to either add those to a free list (in the undo
+ * case) or initialize them (in the redo case).
+ *
+ * If we are redoing and this is a hash subdatabase, it's possible
+ * that the pages were never allocated, so we'd better check for
+ * that and handle it here.
+ */
+
+ flags = 0;
+ if (DB_REDO(op)) {
+ if ((ret = __ham_alloc_pages(file_dbp, argp)) != 0)
+ goto out1;
+ if (cmp_p == 0) {
+ LSN(mmeta) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ }
+ }
+
+ /*
+ * Always put the pages into the limbo list and free them later.
+ */
+ else if (DB_UNDO(op)) {
+ if ((ret = __db_add_limbo(dbenv,
+ info, argp->fileid, argp->start_pgno, argp->num)) != 0)
+ goto out;
+ if (cmp_n == 0) {
+ LSN(mmeta) = argp->meta_lsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ }
+
+out1: if ((ret = memp_fput(mpf, mmeta, flags)) != 0)
+ goto out;
+
+done: if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_alloc_pages --
+ *
+ * Called during redo of a file create. We create new pages in the file
+ * using the MPOOL_NEW_GROUP flag. We then log the meta-data page with a
+ * __crdel_metasub message. If we manage to crash without the newly written
+ * pages getting to disk (I'm not sure this can happen anywhere except our
+ * test suite?!), then we need to go through and recreate the final pages.
+ * Hash normally has holes in its files and handles them appropriately.
+ */
+static int
+__ham_alloc_pages(dbp, argp)
+ DB *dbp;
+ __ham_groupalloc_args *argp;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int ret;
+
+ mpf = dbp->mpf;
+
+ /* Read the last page of the allocation. */
+ pgno = argp->start_pgno + argp->num - 1;
+
+ /* If the page exists, and it has been initialized, then we're done. */
+ if ((ret = memp_fget(mpf, &pgno, 0, &pagep)) == 0) {
+ if ((pagep->type == P_INVALID) && IS_ZERO_LSN(pagep->lsn))
+ goto reinit_page;
+ if ((ret = memp_fput(mpf, pagep, 0)) != 0)
+ return (ret);
+ return (0);
+ }
+
+ /*
+ * Had to create the page. On some systems (read "Windows"),
+ * you can find random garbage on pages to which you haven't
+ * yet written. So, we have an os layer that will do the
+ * right thing for group allocations. We call that directly
+ * to make sure all the pages are allocated and then continue
+ * merrily on our way with normal recovery.
+ */
+ if ((ret = __os_fpinit(dbp->dbenv, &mpf->fh,
+ argp->start_pgno, argp->num, dbp->pgsize)) != 0)
+ return (ret);
+
+ if ((ret = memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
+ (void)__db_pgerr(dbp, pgno);
+ return (ret);
+ }
+
+reinit_page:
+ /* Initialize the newly allocated page. */
+ P_INIT(pagep,
+ dbp->pgsize, pgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ ZERO_LSN(pagep->lsn);
+
+ if ((ret = memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __ham_curadj_recover --
+ * Undo cursor adjustments if a subtransaction fails.
+ *
+ * PUBLIC: int __ham_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+
+int
+__ham_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_curadj_args *argp;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp;
+ DBC *dbc;
+ int ret;
+ HASH_CURSOR *hcp;
+
+	REC_PRINT(__ham_curadj_print);
+
+ ret = 0;
+ if (op != DB_TXN_ABORT)
+ goto done;
+ REC_INTRO(__ham_curadj_read, 0);
+
+ COMPQUIET(info, NULL);
+ /*
+ * Undo the adjustment by reinitializing the the cursor
+	 * Undo the adjustment by reinitializing the cursor to look like
+	 * the one that was used to do the adjustment, then invert the add
+	 * flag so that __ham_c_update reverses the original change.
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hcp->pgno = argp->pgno;
+ hcp->indx = argp->indx;
+ hcp->dup_off = argp->dup_off;
+ hcp->order = argp->order;
+ if (!argp->add)
+ F_SET(hcp, H_DELETED);
+ (void)__ham_c_update(dbc, argp->len, !argp->add, argp->is_dup);
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_chgpg_recover --
+ * Undo cursor adjustments if a subtransaction fails.
+ *
+ * PUBLIC: int __ham_chgpg_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+
+int
+__ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_chgpg_args *argp;
+ BTREE_CURSOR *opdcp;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp, *ldbp;
+ DBC *dbc;
+ int ret;
+ DBC *cp;
+ HASH_CURSOR *lcp;
+
+ REC_PRINT(__ham_chgpg_print);
+
+ ret = 0;
+ if (op != DB_TXN_ABORT)
+ goto out;
+ REC_INTRO(__ham_chgpg_read, 0);
+
+ COMPQUIET(info, NULL);
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, file_dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == file_dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, file_dbp->mutexp);
+
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ lcp = (HASH_CURSOR *)cp->internal;
+
+ switch (argp->mode) {
+ case DB_HAM_CHGPG:
+ if (lcp->pgno != argp->new_pgno)
+ break;
+
+ if (argp->old_indx == NDX_INVALID)
+ lcp->pgno = argp->old_pgno;
+ else if (lcp->indx == argp->new_indx) {
+ lcp->indx = argp->old_indx;
+ lcp->pgno = argp->old_pgno;
+ }
+ break;
+
+ case DB_HAM_SPLIT:
+ if (lcp->pgno == argp->new_pgno
+ && lcp->indx == argp->new_indx) {
+ lcp->indx = argp->old_indx;
+ lcp->pgno = argp->old_pgno;
+ }
+ break;
+
+ case DB_HAM_DUP:
+ if (lcp->opd != NULL) {
+ opdcp =
+ (BTREE_CURSOR *)lcp->opd->internal;
+ if (opdcp->pgno == argp->new_pgno &&
+ opdcp->indx == argp->new_indx) {
+ if (F_ISSET(opdcp, C_DELETED))
+ F_SET(lcp, H_DELETED);
+ if ((ret =
+ lcp->opd->c_close(
+ lcp->opd)) != 0)
+ goto out;
+ lcp->opd = NULL;
+ }
+ }
+ break;
+ }
+ }
+
+ MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+out: REC_CLOSE;
+}
diff --git a/bdb/hash/hash_reclaim.c b/bdb/hash/hash_reclaim.c
new file mode 100644
index 00000000000..8857c5406a4
--- /dev/null
+++ b/bdb/hash/hash_reclaim.c
@@ -0,0 +1,68 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_reclaim.c,v 11.4 2000/11/30 00:58:37 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "hash.h"
+#include "lock.h"
+
+/*
+ * __ham_reclaim --
+ * Reclaim the pages from a subdatabase and return them to the
+ * parent free list. For now, we link each freed page on the list
+ * separately. If people really store hash databases in subdatabases
+ * and do a lot of creates and deletes, this is going to be a problem,
+ * because hash needs chunks of contiguous storage. We may eventually
+ * need to go to a model where we maintain the free list with chunks of
+ * contiguous pages as well.
+ *
+ * PUBLIC: int __ham_reclaim __P((DB *, DB_TXN *txn));
+ */
+int
+__ham_reclaim(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ /* Open up a cursor that we'll use for traversing. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ if ((ret = __ham_traverse(dbp,
+ dbc, DB_LOCK_WRITE, __db_reclaim_callback, dbc)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ return (0);
+
+err: if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+}
diff --git a/bdb/hash/hash_stat.c b/bdb/hash/hash_stat.c
new file mode 100644
index 00000000000..ed64bbc68bd
--- /dev/null
+++ b/bdb/hash/hash_stat.c
@@ -0,0 +1,329 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_stat.c,v 11.24 2000/12/21 21:54:35 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "hash.h"
+#include "lock.h"
+
+static int __ham_stat_callback __P((DB *, PAGE *, void *, int *));
+
+/*
+ * __ham_stat --
+ * Gather/print the hash statistics
+ *
+ * PUBLIC: int __ham_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+ */
+int
+__ham_stat(dbp, spp, db_malloc, flags)
+ DB *dbp;
+ void *spp, *(*db_malloc) __P((size_t));
+ u_int32_t flags;
+{
+ DB_HASH_STAT *sp;
+ HASH_CURSOR *hcp;
+ DBC *dbc;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ sp = NULL;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_malloc(dbp->dbenv, sizeof(*sp), db_malloc, &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+ if (flags == DB_CACHED_COUNTS) {
+ sp->hash_nkeys = hcp->hdr->dbmeta.key_count;
+ sp->hash_ndata = hcp->hdr->dbmeta.record_count;
+ goto done;
+ }
+
+ /* Copy the fields that we have. */
+ sp->hash_pagesize = dbp->pgsize;
+ sp->hash_buckets = hcp->hdr->max_bucket + 1;
+ sp->hash_magic = hcp->hdr->dbmeta.magic;
+ sp->hash_version = hcp->hdr->dbmeta.version;
+ sp->hash_metaflags = hcp->hdr->dbmeta.flags;
+ sp->hash_nelem = hcp->hdr->nelem;
+ sp->hash_ffactor = hcp->hdr->ffactor;
+
+ /* Walk the free list, counting pages. */
+ for (sp->hash_free = 0, pgno = hcp->hdr->dbmeta.free;
+ pgno != PGNO_INVALID;) {
+ ++sp->hash_free;
+
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ pgno = h->next_pgno;
+ (void)memp_fput(dbp->mpf, h, 0);
+ }
+
+ /* Now traverse the rest of the table. */
+ if ((ret = __ham_traverse(dbp,
+ dbc, DB_LOCK_READ, __ham_stat_callback, sp)) != 0)
+ goto err;
+
+ if (!F_ISSET(dbp, DB_AM_RDONLY)) {
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ goto err;
+ hcp->hdr->dbmeta.key_count = sp->hash_nkeys;
+ hcp->hdr->dbmeta.record_count = sp->hash_ndata;
+ }
+
+done:
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+
+ *(DB_HASH_STAT **)spp = sp;
+ return (0);
+
+err: if (sp != NULL)
+ __os_free(sp, sizeof(*sp));
+ if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+}
+
+/*
+ * __ham_traverse
+ * Traverse an entire hash table. We use the callback so that we
+ * can use this both for stat collection and for deallocation.
+ *
+ * PUBLIC: int __ham_traverse __P((DB *, DBC *, db_lockmode_t,
+ * PUBLIC: int (*)(DB *, PAGE *, void *, int *), void *));
+ */
+int
+__ham_traverse(dbp, dbc, mode, callback, cookie)
+ DB *dbp;
+ DBC *dbc;
+ db_lockmode_t mode;
+ int (*callback) __P((DB *, PAGE *, void *, int *));
+ void *cookie;
+{
+ HASH_CURSOR *hcp;
+ HKEYDATA *hk;
+ DBC *opd;
+ db_pgno_t pgno, opgno;
+ u_int32_t bucket;
+ int did_put, i, ret, t_ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ opd = NULL;
+ ret = 0;
+
+ /*
+ * In a perfect world, we could simply read each page in the file
+ * and look at its page type to tally the information necessary.
+ * Unfortunately, the bucket locking that hash tables do to make
+	 * locking easy makes this a pain in the butt.  We have to traverse
+ * duplicate, overflow and big pages from the bucket so that we
+ * don't access anything that isn't properly locked.
+ */
+ for (bucket = 0; bucket <= hcp->hdr->max_bucket; bucket++) {
+ hcp->bucket = bucket;
+ hcp->pgno = pgno = BUCKET_TO_PAGE(hcp, bucket);
+ for (ret = __ham_get_cpage(dbc, mode); ret == 0;
+ ret = __ham_next_cpage(dbc, pgno, 0)) {
+ pgno = NEXT_PGNO(hcp->page);
+
+ /*
+ * Go through each item on the page checking for
+ * duplicates (in which case we have to count the
+ * duplicate pages) or big key/data items (in which
+ * case we have to count those pages).
+ */
+ for (i = 0; i < NUM_ENT(hcp->page); i++) {
+ hk = (HKEYDATA *)P_ENTRY(hcp->page, i);
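+				/*
+				 * H_OFFDUP entries point at off-page
+				 * duplicate trees and H_OFFPAGE entries at
+				 * overflow (big item) chains; H_KEYDATA
+				 * items live entirely on this page and need
+				 * no extra traversal.
+				 */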
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFDUP:
+ memcpy(&opgno, HOFFDUP_PGNO(hk),
+ sizeof(db_pgno_t));
+ if ((ret = __db_c_newopd(dbc,
+ opgno, &opd)) != 0)
+ return (ret);
+ if ((ret = __bam_traverse(opd,
+ DB_LOCK_READ, opgno,
+					    callback, cookie))
+ != 0)
+ goto err;
+ if ((ret = opd->c_close(opd)) != 0)
+ return (ret);
+ opd = NULL;
+ break;
+ case H_OFFPAGE:
+ /*
+ * We are about to get a big page
+ * which will use the same spot that
+ * the current page uses, so we need
+ * to restore the current page before
+ * looking at it again.
+ */
+ memcpy(&opgno, HOFFPAGE_PGNO(hk),
+ sizeof(db_pgno_t));
+ if ((ret = __db_traverse_big(dbp,
+ opgno, callback, cookie)) != 0)
+ goto err;
+ break;
+ case H_KEYDATA:
+ break;
+ }
+ }
+
+ /* Call the callback on main pages. */
+ if ((ret = callback(dbp,
+ hcp->page, cookie, &did_put)) != 0)
+ goto err;
+
+ if (did_put)
+ hcp->page = NULL;
+ if (pgno == PGNO_INVALID)
+ break;
+ }
+ if (ret != 0)
+ goto err;
+
+ if (STD_LOCKING(dbc))
+ (void)lock_put(dbp->dbenv, &hcp->lock);
+
+ if (hcp->page != NULL) {
+ if ((ret = memp_fput(dbc->dbp->mpf, hcp->page, 0)) != 0)
+ return (ret);
+ hcp->page = NULL;
+ }
+
+ }
+err: if (opd != NULL &&
+ (t_ret = opd->c_close(opd)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+static int
+__ham_stat_callback(dbp, pagep, cookie, putp)
+ DB *dbp;
+ PAGE *pagep;
+ void *cookie;
+ int *putp;
+{
+ DB_HASH_STAT *sp;
+ DB_BTREE_STAT bstat;
+ db_indx_t indx, len, off, tlen, top;
+ u_int8_t *hk;
+
+ *putp = 0;
+ sp = cookie;
+
+ switch (pagep->type) {
+ case P_INVALID:
+ /*
+ * Hash pages may be wholly zeroed; this is not a bug.
+ * Obviously such pages have no data, so we can just proceed.
+ */
+ break;
+ case P_HASH:
+ /*
+ * We count the buckets and the overflow pages
+ * separately and tally their bytes separately
+ * as well. We need to figure out if this page
+ * is a bucket.
+ */
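+		/*
+		 * The first page of a bucket has no previous page; any
+		 * page with a valid prev_pgno is an overflow page in the
+		 * bucket's chain.
+		 */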
+ if (PREV_PGNO(pagep) == PGNO_INVALID)
+ sp->hash_bfree += P_FREESPACE(pagep);
+ else {
+ sp->hash_overflows++;
+ sp->hash_ovfl_free += P_FREESPACE(pagep);
+ }
+ top = NUM_ENT(pagep);
+ /* Correct for on-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ switch (*H_PAIRDATA(pagep, indx)) {
+ case H_OFFDUP:
+ case H_OFFPAGE:
+ break;
+ case H_KEYDATA:
+ sp->hash_ndata++;
+ break;
+ case H_DUPLICATE:
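+			/*
+			 * On-page duplicate sets store each element as
+			 * [len][data][len], so stepping by the data length
+			 * plus two db_indx_t lengths visits every element;
+			 * we count one data item per element.
+			 */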
+ tlen = LEN_HDATA(pagep, 0, indx);
+ hk = H_PAIRDATA(pagep, indx);
+ for (off = 0; off < tlen;
+ off += len + 2 * sizeof (db_indx_t)) {
+ sp->hash_ndata++;
+ memcpy(&len,
+ HKEYDATA_DATA(hk)
+ + off, sizeof(db_indx_t));
+ }
+ }
+ }
+ sp->hash_nkeys += H_NUMPAIRS(pagep);
+ break;
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_LDUP:
+		/*
+		 * These are all btree pages; fill in a btree stat cookie,
+		 * call the btree stat callback, and then fold the results
+		 * into our own stat structure.
+		 */
+ memset(&bstat, 0, sizeof(bstat));
+ bstat.bt_dup_pgfree = 0;
+ bstat.bt_int_pgfree = 0;
+ bstat.bt_leaf_pgfree = 0;
+ bstat.bt_ndata = 0;
+ __bam_stat_callback(dbp, pagep, &bstat, putp);
+ sp->hash_dup++;
+ sp->hash_dup_free += bstat.bt_leaf_pgfree +
+ bstat.bt_dup_pgfree + bstat.bt_int_pgfree;
+ sp->hash_ndata += bstat.bt_ndata;
+ break;
+ case P_OVERFLOW:
+ sp->hash_bigpages++;
+ sp->hash_big_bfree += P_OVFLSPACE(dbp->pgsize, pagep);
+ break;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__ham_stat_callback", pagep->type));
+ }
+
+ return (0);
+}
diff --git a/bdb/hash/hash_upgrade.c b/bdb/hash/hash_upgrade.c
new file mode 100644
index 00000000000..c34381276b4
--- /dev/null
+++ b/bdb/hash/hash_upgrade.c
@@ -0,0 +1,271 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_upgrade.c,v 11.25 2000/12/14 19:18:32 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "hash.h"
+#include "db_upgrade.h"
+
+/*
+ * __ham_30_hashmeta --
+ * Upgrade the database from version 4/5 to version 6.
+ *
+ * PUBLIC: int __ham_30_hashmeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__ham_30_hashmeta(dbp, real_name, obuf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *obuf;
+{
+ DB_ENV *dbenv;
+ HASHHDR *oldmeta;
+ HMETA30 newmeta;
+ u_int32_t *o_spares, *n_spares;
+ u_int32_t fillf, maxb, nelem;
+ int i, max_entry, ret;
+
+ dbenv = dbp->dbenv;
+ memset(&newmeta, 0, sizeof(newmeta));
+
+ oldmeta = (HASHHDR *)obuf;
+
+ /*
+	 * The first 32 bytes are largely unchanged: the version is bumped,
+	 * the old ovfl_point field is gone and the page type is now stored.
+ */
+
+ newmeta.dbmeta.lsn = oldmeta->lsn;
+ newmeta.dbmeta.pgno = oldmeta->pgno;
+ newmeta.dbmeta.magic = oldmeta->magic;
+ newmeta.dbmeta.version = 6;
+ newmeta.dbmeta.pagesize = oldmeta->pagesize;
+ newmeta.dbmeta.type = P_HASHMETA;
+
+ /* Move flags */
+ newmeta.dbmeta.flags = oldmeta->flags;
+
+ /* Copy the free list, which has changed its name but works the same. */
+ newmeta.dbmeta.free = oldmeta->last_freed;
+
+	/* Copy: max_bucket, high_mask, low_mask, ffactor, nelem, h_charkey */
+ newmeta.max_bucket = oldmeta->max_bucket;
+ newmeta.high_mask = oldmeta->high_mask;
+ newmeta.low_mask = oldmeta->low_mask;
+ newmeta.ffactor = oldmeta->ffactor;
+ newmeta.nelem = oldmeta->nelem;
+ newmeta.h_charkey = oldmeta->h_charkey;
+
+ /*
+ * There was a bug in 2.X versions where the nelem could go negative.
+ * In general, this is considered "bad." If it does go negative
+ * (that is, very large and positive), we'll die trying to dump and
+ * load this database. So, let's see if we can fix it here.
+ */
+ nelem = newmeta.nelem;
+ fillf = newmeta.ffactor;
+ maxb = newmeta.max_bucket;
+
+ if ((fillf != 0 && fillf * maxb < 2 * nelem) ||
+ (fillf == 0 && nelem > 0x8000000))
+ newmeta.nelem = 0;
+
+ /*
+ * We now have to convert the spares array. The old spares array
+ * contained the total number of extra pages allocated prior to
+ * the bucket that begins the next doubling. The new spares array
+ * contains the page number of the first bucket in the next doubling
+ * MINUS the bucket number of that bucket.
+ */
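+	/*
+	 * Illustrative example: page 0 is the meta page, so bucket 0 lives
+	 * on page 1 and n_spares[0] is 1; each later entry is that same
+	 * base offset plus the extra pages the old array recorded for the
+	 * previous doubling, hence "1 + o_spares[i - 1]" below.
+	 */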
+ o_spares = oldmeta->spares;
+ n_spares = newmeta.spares;
+ max_entry = __db_log2(maxb + 1); /* highest spares entry in use */
+ n_spares[0] = 1;
+ for (i = 1; i < NCACHED && i <= max_entry; i++)
+ n_spares[i] = 1 + o_spares[i - 1];
+
+ /* Replace the unique ID. */
+ if ((ret = __os_fileid(dbenv, real_name, 1, newmeta.dbmeta.uid)) != 0)
+ return (ret);
+
+ /* Overwrite the original. */
+ memcpy(oldmeta, &newmeta, sizeof(newmeta));
+
+ return (0);
+}
+
+/*
+ * __ham_30_sizefix --
+ * Make sure that all hash pages belonging to the current
+ * hash doubling are within the bounds of the file.
+ *
+ * PUBLIC: int __ham_30_sizefix __P((DB *, DB_FH *, char *, u_int8_t *));
+ */
+int
+__ham_30_sizefix(dbp, fhp, realname, metabuf)
+ DB *dbp;
+ DB_FH *fhp;
+ char *realname;
+ u_int8_t *metabuf;
+{
+ u_int8_t buf[DB_MAX_PGSIZE];
+ DB_ENV *dbenv;
+ HMETA30 *meta;
+ db_pgno_t last_actual, last_desired;
+ int ret;
+ size_t nw;
+ u_int32_t pagesize;
+
+ dbenv = dbp->dbenv;
+ memset(buf, 0, DB_MAX_PGSIZE);
+
+ meta = (HMETA30 *)metabuf;
+ pagesize = meta->dbmeta.pagesize;
+
+ /*
+ * Get the last page number. To do this, we'll need dbp->pgsize
+ * to be set right, so slam it into place.
+ */
+ dbp->pgsize = pagesize;
+ if ((ret = __db_lastpgno(dbp, realname, fhp, &last_actual)) != 0)
+ return (ret);
+
+ /*
+ * The last bucket in the doubling is equal to high_mask; calculate
+ * the page number that implies.
+ */
+ last_desired = BS_TO_PAGE(meta->high_mask, meta->spares);
+
+ /*
+ * If last_desired > last_actual, we need to grow the file. Write
+ * a zeroed page where last_desired would go.
+ */
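+	/*
+	 * Writing a single zeroed page at last_desired is enough: the
+	 * seek/write pair extends the file through that page, and on
+	 * typical filesystems the intervening pages read back as zeroes
+	 * until they are actually used.
+	 */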
+ if (last_desired > last_actual) {
+ if ((ret = __os_seek(dbenv,
+ fhp, pagesize, last_desired, 0, 0, DB_OS_SEEK_SET)) != 0)
+ return (ret);
+ if ((ret = __os_write(dbenv, fhp, buf, pagesize, &nw)) != 0)
+ return (ret);
+ if (nw != pagesize) {
+ __db_err(dbenv, "Short write during upgrade");
+ return (EIO);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * __ham_31_hashmeta --
+ * Upgrade the database from version 6 to version 7.
+ *
+ * PUBLIC: int __ham_31_hashmeta
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__ham_31_hashmeta(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ HMETA31 *newmeta;
+ HMETA30 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+ COMPQUIET(fhp, NULL);
+
+ newmeta = (HMETA31 *)h;
+ oldmeta = (HMETA30 *)h;
+
+ /*
+ * Copy the fields down the page.
+ * The fields may overlap so start at the bottom and use memmove().
+ */
+ memmove(newmeta->spares, oldmeta->spares, sizeof(oldmeta->spares));
+ newmeta->h_charkey = oldmeta->h_charkey;
+ newmeta->nelem = oldmeta->nelem;
+ newmeta->ffactor = oldmeta->ffactor;
+ newmeta->low_mask = oldmeta->low_mask;
+ newmeta->high_mask = oldmeta->high_mask;
+ newmeta->max_bucket = oldmeta->max_bucket;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 7;
+
+ /* Upgrade the flags. */
+ if (LF_ISSET(DB_DUPSORT))
+ F_SET(&newmeta->dbmeta, DB_HASH_DUPSORT);
+
+ *dirtyp = 1;
+ return (0);
+}
+
+/*
+ * __ham_31_hash --
+ * Upgrade the database hash leaf pages.
+ *
+ * PUBLIC: int __ham_31_hash
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__ham_31_hash(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ HKEYDATA *hk;
+ db_pgno_t pgno, tpgno;
+ db_indx_t indx;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ for (indx = 0; indx < NUM_ENT(h); indx += 2) {
+ hk = (HKEYDATA *)H_PAIRDATA(h, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFDUP) {
+ memcpy(&pgno, HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
+ tpgno = pgno;
+ if ((ret = __db_31_offdup(dbp, real_name, fhp,
+ LF_ISSET(DB_DUPSORT) ? 1 : 0, &tpgno)) != 0)
+ break;
+ if (pgno != tpgno) {
+ *dirtyp = 1;
+ memcpy(HOFFDUP_PGNO(hk),
+ &tpgno, sizeof(db_pgno_t));
+ }
+ }
+ }
+
+ return (ret);
+}
diff --git a/bdb/hash/hash_verify.c b/bdb/hash/hash_verify.c
new file mode 100644
index 00000000000..31dd7cc2299
--- /dev/null
+++ b/bdb/hash/hash_verify.c
@@ -0,0 +1,1051 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: hash_verify.c,v 1.31 2000/11/30 00:58:37 ubell Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_verify.c,v 1.31 2000/11/30 00:58:37 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_verify.h"
+#include "btree.h"
+#include "hash.h"
+
+static int __ham_dups_unsorted __P((DB *, u_int8_t *, u_int32_t));
+static int __ham_vrfy_bucket __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
+ u_int32_t));
+static int __ham_vrfy_item __P((DB *,
+ VRFY_DBINFO *, db_pgno_t, PAGE *, u_int32_t, u_int32_t));
+
+/*
+ * __ham_vrfy_meta --
+ * Verify the hash-specific part of a metadata page.
+ *
+ * Note that unlike btree, we don't save things off, because we
+ * will need most everything again to verify each page and the
+ * amount of state here is significant.
+ *
+ * PUBLIC: int __ham_vrfy_meta __P((DB *, VRFY_DBINFO *, HMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__ham_vrfy_meta(dbp, vdp, m, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *m;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ HASH *hashp;
+ VRFY_PAGEINFO *pip;
+ int i, ret, t_ret, isbad;
+ u_int32_t pwr, mbucket;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ hashp = dbp->h_internal;
+
+ if (hashp != NULL && hashp->h_hash != NULL)
+ hfunc = hashp->h_hash;
+ else
+ hfunc = __ham_func5;
+
+ /*
+ * If we haven't already checked the common fields in pagezero,
+ * check them.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE) &&
+ (ret = __db_vrfy_meta(dbp, vdp, &m->dbmeta, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /* h_charkey */
+ if (!LF_ISSET(DB_NOORDERCHK))
+ if (m->h_charkey != hfunc(dbp, CHARKEY, sizeof(CHARKEY))) {
+ EPRINT((dbp->dbenv,
+"Database has different custom hash function; reverify with DB_NOORDERCHK set"
+ ));
+ /*
+ * Return immediately; this is probably a sign
+ * of user error rather than database corruption, so
+ * we want to avoid extraneous errors.
+ */
+ isbad = 1;
+ goto err;
+ }
+
+ /* max_bucket must be less than the last pgno. */
+ if (m->max_bucket > vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Impossible max_bucket %lu on meta page %lu",
+ m->max_bucket, pgno));
+ /*
+ * Most other fields depend somehow on max_bucket, so
+ * we just return--there will be lots of extraneous
+ * errors.
+ */
+ isbad = 1;
+ goto err;
+ }
+
+ /*
+ * max_bucket, high_mask and low_mask: high_mask must be one
+ * less than the next power of two above max_bucket, and
+ * low_mask must be one less than the power of two below it.
+	 */
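+	/*
+	 * For instance, with max_bucket 37 the next power of two is 64,
+	 * so high_mask should be 63 and low_mask 31.
+	 */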
+ pwr = (m->max_bucket == 0) ? 1 : 1 << __db_log2(m->max_bucket + 1);
+ if (m->high_mask != pwr - 1) {
+ EPRINT((dbp->dbenv,
+ "Incorrect high_mask %lu on page %lu, should be %lu",
+ m->high_mask, pgno, pwr - 1));
+ isbad = 1;
+ }
+ pwr >>= 1;
+ if (m->low_mask != pwr - 1) {
+ EPRINT((dbp->dbenv,
+ "Incorrect low_mask %lu on page %lu, should be %lu",
+ m->low_mask, pgno, pwr - 1));
+ isbad = 1;
+ }
+
+ /* ffactor: no check possible. */
+ pip->h_ffactor = m->ffactor;
+
+ /*
+ * nelem: just make sure it's not astronomical for now. This is the
+ * same check that hash_upgrade does, since there was a bug in 2.X
+ * which could make nelem go "negative".
+ */
+ if (m->nelem > 0x80000000) {
+ EPRINT((dbp->dbenv,
+ "Suspiciously high nelem of %lu on page %lu",
+ m->nelem, pgno));
+ isbad = 1;
+ pip->h_nelem = 0;
+ } else
+ pip->h_nelem = m->nelem;
+
+ /* flags */
+ if (F_ISSET(&m->dbmeta, DB_HASH_DUP))
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (F_ISSET(&m->dbmeta, DB_HASH_DUPSORT))
+ F_SET(pip, VRFY_HAS_DUPSORT);
+ /* XXX: Why is the DB_HASH_SUBDB flag necessary? */
+
+ /* spares array */
+	for (i = 0; i < NCACHED && m->spares[i] != 0; i++) {
+ /*
+ * We set mbucket to the maximum bucket that would use a given
+ * spares entry; we want to ensure that it's always less
+ * than last_pgno.
+ */
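+		/*
+		 * Spares entry i covers the doubling that ends at bucket
+		 * (1 << i) - 1, so that is the bucket whose page number we
+		 * bound against last_pgno.
+		 */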
+ mbucket = (1 << i) - 1;
+ if (BS_TO_PAGE(mbucket, m->spares) > vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Spares array entry %lu, page %lu is invalid",
+ i, pgno));
+ isbad = 1;
+ }
+ }
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy --
+ * Verify hash page.
+ *
+ * PUBLIC: int __ham_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ham_vrfy(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ u_int32_t ent, himark, inpend;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* Sanity check our flags and page type. */
+ if ((ret = __db_fchk(dbp->dbenv, "__ham_vrfy",
+ flags, DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)) != 0)
+ goto err;
+
+ if (TYPE(h) != P_HASH) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__ham_vrfy", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* Verify and save off fields common to all PAGEs. */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * Verify inp[]. Each offset from 0 to NUM_ENT(h) must be lower
+ * than the previous one, higher than the current end of the inp array,
+ * and lower than the page size.
+ *
+ * In any case, we return immediately if things are bad, as it would
+ * be unsafe to proceed.
+ */
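+	/*
+	 * In other words: the inp array grows up from the page header while
+	 * item data grows down from the end of the page.  himark tracks the
+	 * lowest data offset seen so far, inpend the top of the inp array,
+	 * and the two must never cross.
+	 */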
+ for (ent = 0, himark = dbp->pgsize,
+ inpend = (u_int8_t *)h->inp - (u_int8_t *)h;
+ ent < NUM_ENT(h); ent++)
+ if (h->inp[ent] >= himark) {
+ EPRINT((dbp->dbenv,
+ "Item %lu on page %lu out of order or nonsensical",
+ ent, pgno));
+ isbad = 1;
+ goto err;
+ } else if (inpend >= himark) {
+ EPRINT((dbp->dbenv,
+ "inp array collided with data on page %lu",
+ pgno));
+ isbad = 1;
+ goto err;
+
+ } else {
+ himark = h->inp[ent];
+ inpend += sizeof(db_indx_t);
+ if ((ret = __ham_vrfy_item(
+ dbp, vdp, pgno, h, ent, flags)) != 0)
+ goto err;
+ }
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy_item --
+ * Given a hash page and an offset, sanity-check the item itself,
+ * and save off any overflow items or off-page dup children as necessary.
+ */
+static int
+__ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ u_int32_t i, flags;
+{
+ HOFFPAGE hop;
+ HOFFDUP hod;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ db_indx_t offset, len, dlen, elen;
+ int ret, t_ret;
+ u_int8_t *databuf;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (HPAGE_TYPE(h, i)) {
+ case H_KEYDATA:
+ /* Nothing to do here--everything but the type field is data */
+ break;
+ case H_DUPLICATE:
+ /* Are we a datum or a key? Better be the former. */
+ if (i % 2 == 0) {
+ EPRINT((dbp->dbenv,
+ "Hash key stored as duplicate at page %lu item %lu",
+ pip->pgno, i));
+ }
+ /*
+ * Dups are encoded as a series within a single HKEYDATA,
+ * in which each dup is surrounded by a copy of its length
+ * on either side (so that the series can be walked in either
+		 * direction). We loop through this series and make sure
+ * each dup is reasonable.
+ *
+ * Note that at this point, we've verified item i-1, so
+ * it's safe to use LEN_HKEYDATA (which looks at inp[i-1]).
+ */
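+		/*
+		 * Each element therefore occupies DUP_SIZE(dlen) bytes: a
+		 * db_indx_t length, dlen bytes of data, and a trailing copy
+		 * of the same length.
+		 */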
+ len = LEN_HKEYDATA(h, dbp->pgsize, i);
+ databuf = HKEYDATA_DATA(P_ENTRY(h, i));
+ for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
+ memcpy(&dlen, databuf + offset, sizeof(db_indx_t));
+
+ /* Make sure the length is plausible. */
+ if (offset + DUP_SIZE(dlen) > len) {
+ EPRINT((dbp->dbenv,
+ "Duplicate item %lu, page %lu has bad length",
+ i, pip->pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Make sure the second copy of the length is the
+ * same as the first.
+ */
+ memcpy(&elen,
+ databuf + offset + dlen + sizeof(db_indx_t),
+ sizeof(db_indx_t));
+ if (elen != dlen) {
+ EPRINT((dbp->dbenv,
+ "Duplicate item %lu, page %lu has two different lengths",
+ i, pip->pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ }
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (!LF_ISSET(DB_NOORDERCHK) &&
+ __ham_dups_unsorted(dbp, databuf, len))
+ F_SET(pip, VRFY_DUPS_UNSORTED);
+ break;
+ case H_OFFPAGE:
+ /* Offpage item. Make sure pgno is sane, save off. */
+ memcpy(&hop, P_ENTRY(h, i), HOFFPAGE_SIZE);
+ if (!IS_VALID_PGNO(hop.pgno) || hop.pgno == pip->pgno ||
+ hop.pgno == PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Offpage item %lu, page %lu has bad page number",
+ i, pip->pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ child.pgno = hop.pgno;
+ child.type = V_OVERFLOW;
+ child.tlen = hop.tlen; /* This will get checked later. */
+ if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
+ goto err;
+ break;
+ case H_OFFDUP:
+ /* Offpage duplicate item. Same drill. */
+ memcpy(&hod, P_ENTRY(h, i), HOFFDUP_SIZE);
+ if (!IS_VALID_PGNO(hod.pgno) || hod.pgno == pip->pgno ||
+ hod.pgno == PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Offpage item %lu, page %lu has bad page number",
+ i, pip->pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ child.pgno = hod.pgno;
+ child.type = V_DUPLICATE;
+ if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
+ goto err;
+ F_SET(pip, VRFY_HAS_DUPS);
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+		    "Item %lu, page %lu has bad type", i, pip->pgno));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __ham_vrfy_structure --
+ * Verify the structure of a hash database.
+ *
+ * PUBLIC: int __ham_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ham_vrfy_structure(dbp, vdp, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ HMETA *m;
+ PAGE *h;
+ VRFY_PAGEINFO *pip;
+ int isbad, p, ret, t_ret;
+ db_pgno_t pgno;
+ u_int32_t bucket;
+
+ ret = isbad = 0;
+ h = NULL;
+ pgset = vdp->pgset;
+
+ if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, &p)) != 0)
+ return (ret);
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Hash meta page %lu referenced twice", meta_pgno));
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
+ return (ret);
+
+ /* Get the meta page; we'll need it frequently. */
+ if ((ret = memp_fget(dbp->mpf, &meta_pgno, 0, &m)) != 0)
+ return (ret);
+
+ /* Loop through bucket by bucket. */
+ for (bucket = 0; bucket <= m->max_bucket; bucket++)
+ if ((ret =
+ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * There may be unused hash pages corresponding to buckets
+ * that have been allocated but not yet used. These may be
+ * part of the current doubling above max_bucket, or they may
+ * correspond to buckets that were used in a transaction
+ * that then aborted.
+ *
+ * Loop through them, as far as the spares array defines them,
+ * and make sure they're all empty.
+ *
+ * Note that this should be safe, since we've already verified
+ * that the spares array is sane.
+ */
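+	/*
+	 * The loop below runs until it reaches a doubling whose spares
+	 * entry is zero, i.e. one for which no pages were ever allocated.
+	 */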
+ for (bucket = m->max_bucket + 1;
+ m->spares[__db_log2(bucket + 1)] != 0; bucket++) {
+ pgno = BS_TO_PAGE(bucket, m->spares);
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ goto err;
+
+ /* It's okay if these pages are totally zeroed; unmark it. */
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ if (pip->type != P_HASH) {
+ EPRINT((dbp->dbenv,
+ "Hash bucket %lu maps to non-hash page %lu",
+ bucket, pgno));
+ isbad = 1;
+ } else if (pip->entries != 0) {
+ EPRINT((dbp->dbenv,
+ "Non-empty page %lu in unused hash bucket %lu",
+ pgno, bucket));
+ isbad = 1;
+ } else {
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Hash page %lu above max_bucket referenced",
+ pgno));
+ isbad = 1;
+ } else {
+ if ((ret =
+ __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+ if ((ret =
+ __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ goto err;
+ continue;
+ }
+ }
+
+ /* If we got here, it's an error. */
+ (void)__db_vrfy_putpageinfo(vdp, pip);
+ goto err;
+ }
+
+err: if ((t_ret = memp_fput(dbp->mpf, m, 0)) != 0)
+ return (t_ret);
+ if (h != NULL && (t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ return (t_ret);
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD: ret);
+}
+
+/*
+ * __ham_vrfy_bucket --
+ * Verify a given bucket.
+ */
+static int
+__ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *m;
+ u_int32_t bucket, flags;
+{
+ HASH *hashp;
+ VRFY_CHILDINFO *child;
+ VRFY_PAGEINFO *mip, *pip;
+ int ret, t_ret, isbad, p;
+ db_pgno_t pgno, next_pgno;
+ DBC *cc;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+
+ isbad = 0;
+ pip = NULL;
+ cc = NULL;
+
+ hashp = dbp->h_internal;
+ if (hashp != NULL && hashp->h_hash != NULL)
+ hfunc = hashp->h_hash;
+ else
+ hfunc = __ham_func5;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO(m), &mip)) != 0)
+ return (ret);
+
+ /* Calculate the first pgno for this bucket. */
+ pgno = BS_TO_PAGE(bucket, m->spares);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ goto err;
+
+ /* Make sure we got a plausible page number. */
+ if (pgno > vdp->last_pgno || pip->type != P_HASH) {
+ EPRINT((dbp->dbenv, "Bucket %lu has impossible first page %lu",
+ bucket, pgno));
+ /* Unsafe to continue. */
+ isbad = 1;
+ goto err;
+ }
+
+ if (pip->prev_pgno != PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+		    "First hash page %lu in bucket %lu has a prev_pgno",
+		    pgno, bucket));
+ isbad = 1;
+ }
+
+ /*
+ * Set flags for dups and sorted dups.
+ */
+ flags |= F_ISSET(mip, VRFY_HAS_DUPS) ? ST_DUPOK : 0;
+ flags |= F_ISSET(mip, VRFY_HAS_DUPSORT) ? ST_DUPSORT : 0;
+
+ /* Loop until we find a fatal bug, or until we run out of pages. */
+ for (;;) {
+ /* Provide feedback on our progress to the application. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_pgset_get(vdp->pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Hash page %lu referenced twice", pgno));
+ isbad = 1;
+ /* Unsafe to continue. */
+ goto err;
+ } else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, pgno)) != 0)
+ goto err;
+
+ /*
+ * Hash pages that nothing has ever hashed to may never
+ * have actually come into existence, and may appear to be
+ * entirely zeroed. This is acceptable, and since there's
+ * no real way for us to know whether this has actually
+ * occurred, we clear the "wholly zeroed" flag on every
+ * hash page. A wholly zeroed page, by nature, will appear
+ * to have no flags set and zero entries, so should
+ * otherwise verify correctly.
+ */
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ /* If we have dups, our meta page had better know about it. */
+ if (F_ISSET(pip, VRFY_HAS_DUPS)
+ && !F_ISSET(mip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Duplicates present in non-duplicate database, page %lu",
+ pgno));
+ isbad = 1;
+ }
+
+ /*
+ * If the database has sorted dups, this page had better
+ * not have unsorted ones.
+ */
+ if (F_ISSET(mip, VRFY_HAS_DUPSORT) &&
+ F_ISSET(pip, VRFY_DUPS_UNSORTED)) {
+ EPRINT((dbp->dbenv,
+ "Unsorted dups in sorted-dup database, page %lu",
+ pgno));
+ isbad = 1;
+ }
+
+ /* Walk overflow chains and offpage dup trees. */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pip->pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_OVERFLOW) {
+ if ((ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ } else if (child->type == V_DUPLICATE) {
+ if ((ret = __db_vrfy_duptype(dbp,
+ vdp, child->pgno, flags)) != 0) {
+ isbad = 1;
+ continue;
+ }
+ if ((ret = __bam_vrfy_subtree(dbp, vdp,
+ child->pgno, NULL, NULL,
+ flags | ST_RECNUM | ST_DUPSET, NULL,
+ NULL, NULL)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ }
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* If it's safe to check that things hash properly, do so. */
+ if (isbad == 0 && !LF_ISSET(DB_NOORDERCHK) &&
+ (ret = __ham_vrfy_hashing(dbp, pip->entries,
+ m, bucket, pgno, flags, hfunc)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ next_pgno = pip->next_pgno;
+ ret = __db_vrfy_putpageinfo(vdp, pip);
+
+ pip = NULL;
+ if (ret != 0)
+ goto err;
+
+ if (next_pgno == PGNO_INVALID)
+ break; /* End of the bucket. */
+
+ /* We already checked this, but just in case... */
+ if (!IS_VALID_PGNO(next_pgno)) {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Hash page %lu has bad next_pgno", pgno));
+ isbad = 1;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
+ goto err;
+
+ if (pip->prev_pgno != pgno) {
+ EPRINT((dbp->dbenv, "Hash page %lu has bad prev_pgno",
+ next_pgno));
+ isbad = 1;
+ }
+ pgno = next_pgno;
+ }
+
+err: if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
+ ret = t_ret;
+ if (mip != NULL && ((t_ret = __db_vrfy_putpageinfo(vdp, mip)) != 0) &&
+ ret == 0)
+ ret = t_ret;
+ if (pip != NULL && ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0) &&
+ ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy_hashing --
+ * Verify that all items on a given hash page hash correctly.
+ *
+ * PUBLIC: int __ham_vrfy_hashing __P((DB *,
+ * PUBLIC: u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t,
+ * PUBLIC: u_int32_t (*) __P((DB *, const void *, u_int32_t))));
+ */
+int
+__ham_vrfy_hashing(dbp, nentries, m, thisbucket, pgno, flags, hfunc)
+ DB *dbp;
+ u_int32_t nentries;
+ HMETA *m;
+ u_int32_t thisbucket;
+ db_pgno_t pgno;
+ u_int32_t flags;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+{
+ DBT dbt;
+ PAGE *h;
+ db_indx_t i;
+ int ret, t_ret, isbad;
+ u_int32_t hval, bucket;
+
+ ret = isbad = 0;
+ memset(&dbt, 0, sizeof(DBT));
+ F_SET(&dbt, DB_DBT_REALLOC);
+
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ for (i = 0; i < nentries; i += 2) {
+ /*
+ * We've already verified the page integrity and that of any
+ * overflow chains linked off it; it is therefore safe to use
+ * __db_ret. It's also not all that much slower, since we have
+ * to copy every hash item to deal with alignment anyway; we
+ * can tweak this a bit if this proves to be a bottleneck,
+ * but for now, take the easy route.
+ */
+ if ((ret = __db_ret(dbp, h, i, &dbt, NULL, NULL)) != 0)
+ goto err;
+ hval = hfunc(dbp, dbt.data, dbt.size);
+
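+		/*
+		 * Standard linear-hashing bucket selection: mask with
+		 * high_mask first; if that lands beyond max_bucket the
+		 * bucket has not been split yet, so re-mask with low_mask.
+		 * E.g., with max_bucket 5, high_mask 7 and low_mask 3, a
+		 * hash ending in binary 110 maps to 6 > 5 and so re-masks
+		 * to bucket 2.
+		 */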
+ bucket = hval & m->high_mask;
+ if (bucket > m->max_bucket)
+ bucket = bucket & m->low_mask;
+
+ if (bucket != thisbucket) {
+ EPRINT((dbp->dbenv,
+ "Item %lu on page %lu hashes incorrectly",
+ i, pgno));
+ isbad = 1;
+ }
+ }
+
+err: if (dbt.data != NULL)
+ __os_free(dbt.data, 0);
+ if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ return (t_ret);
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_salvage --
+ * Safely dump out anything that looks like a key on an alleged
+ * hash page.
+ *
+ * PUBLIC: int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
+ * PUBLIC: void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DBT dbt, unkdbt;
+ db_pgno_t dpgno;
+ int ret, err_ret, t_ret;
+ u_int32_t himark, tlen;
+ u_int8_t *hk;
+ void *buf;
+ u_int32_t dlen, len, i;
+
+ memset(&dbt, 0, sizeof(DBT));
+ dbt.flags = DB_DBT_REALLOC;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = strlen("UNKNOWN") + 1;
+ unkdbt.data = "UNKNOWN";
+
+ err_ret = 0;
+
+ /*
+ * Allocate a buffer for overflow items. Start at one page;
+ * __db_safe_goff will realloc as needed.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &buf)) != 0)
+ return (ret);
+
+ himark = dbp->pgsize;
+ for (i = 0;; i++) {
+ /* If we're not aggressive, break when we hit NUM_ENT(h). */
+ if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
+ break;
+
+ /* Verify the current item. */
+ ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 0, flags, &himark, NULL);
+ /* If this returned a fatality, it's time to break. */
+ if (ret == DB_VERIFY_FATAL)
+ break;
+
+ if (ret == 0) {
+ hk = P_ENTRY(h, i);
+ len = LEN_HKEYDATA(h, dbp->pgsize, i);
+ if ((u_int32_t)(hk + len - (u_int8_t *)h) >
+ dbp->pgsize) {
+ /*
+ * Item is unsafely large; either continue
+ * or set it to the whole page, depending on
+ * aggressiveness.
+ */
+ if (!LF_ISSET(DB_AGGRESSIVE))
+ continue;
+ len = dbp->pgsize -
+ (u_int32_t)(hk - (u_int8_t *)h);
+ err_ret = DB_VERIFY_BAD;
+ }
+ switch (HPAGE_PTYPE(hk)) {
+ default:
+ if (!LF_ISSET(DB_AGGRESSIVE))
+ break;
+ err_ret = DB_VERIFY_BAD;
+ /* FALLTHROUGH */
+ case H_KEYDATA:
+keydata: memcpy(buf, HKEYDATA_DATA(hk), len);
+ dbt.size = len;
+ dbt.data = buf;
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+ break;
+ case H_OFFPAGE:
+ if (len < HOFFPAGE_SIZE) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&dpgno,
+ HOFFPAGE_PGNO(hk), sizeof(dpgno));
+ if ((ret = __db_safe_goff(dbp, vdp,
+ dpgno, &dbt, &buf, flags)) != 0) {
+ err_ret = ret;
+ (void)__db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, NULL);
+ break;
+ }
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+ break;
+ case H_OFFDUP:
+ if (len < HOFFPAGE_SIZE) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&dpgno,
+ HOFFPAGE_PGNO(hk), sizeof(dpgno));
+ /* UNKNOWN iff pgno is bad or we're a key. */
+ if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) {
+ if ((ret = __db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+ } else if ((ret = __db_salvage_duptree(dbp,
+ vdp, dpgno, &dbt, handle, callback,
+ flags | SA_SKIPFIRSTKEY)) != 0)
+ err_ret = ret;
+ break;
+ case H_DUPLICATE:
+ /*
+ * We're a key; printing dups will seriously
+ * foul the output. If we're being aggressive,
+ * pretend this is a key and let the app.
+ * programmer sort out the mess.
+ */
+ if (i % 2 == 0) {
+ err_ret = ret;
+ if (LF_ISSET(DB_AGGRESSIVE))
+ goto keydata;
+ break;
+ }
+
+ /* Too small to have any data. */
+ if (len <
+ HKEYDATA_SIZE(2 * sizeof(db_indx_t))) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+
+ /* Loop until we hit the total length. */
+ for (tlen = 0; tlen + sizeof(db_indx_t) < len;
+ tlen += dlen) {
+ tlen += sizeof(db_indx_t);
+ memcpy(&dlen, hk, sizeof(db_indx_t));
+ /*
+ * If dlen is too long, print all the
+ * rest of the dup set in a chunk.
+ */
+ if (dlen + tlen > len)
+ dlen = len - tlen;
+ memcpy(buf, hk + tlen, dlen);
+ dbt.size = dlen;
+ dbt.data = buf;
+ if ((ret = __db_prdbt(&dbt, 0, " ",
+ handle, callback, 0, NULL)) != 0)
+ err_ret = ret;
+ tlen += sizeof(db_indx_t);
+ }
+ break;
+ }
+ }
+ }
+
+ __os_free(buf, 0);
+ if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ return (t_ret);
+ return ((ret == 0 && err_ret != 0) ? err_ret : ret);
+}
+
+/*
+ * __ham_meta2pgset --
+ * Return the set of hash pages corresponding to the given
+ * known-good meta page.
+ *
+ * PUBLIC: int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
+ * PUBLIC: DB *));
+ */
+int
+__ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *hmeta;
+ u_int32_t flags;
+ DB *pgset;
+{
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t bucket, totpgs;
+ int ret, val;
+
+ /*
+ * We don't really need flags, but leave them for consistency with
+ * __bam_meta2pgset.
+ */
+ COMPQUIET(flags, 0);
+
+ DB_ASSERT(pgset != NULL);
+
+ totpgs = 0;
+
+ /*
+ * Loop through all the buckets, pushing onto pgset the corresponding
+ * page(s) for each one.
+ */
+ for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
+ pgno = BS_TO_PAGE(bucket, hmeta->spares);
+
+ /*
+ * We know the initial pgno is safe because the spares array has
+ * been verified.
+ *
+ * Safely walk the list of pages in this bucket.
+ */
+ for (;;) {
+ if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+ if (TYPE(h) == P_HASH) {
+
+ /*
+ * Make sure we don't go past the end of
+ * pgset.
+ */
+ if (++totpgs > vdp->last_pgno) {
+ (void)memp_fput(dbp->mpf, h, 0);
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret =
+ __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ return (ret);
+
+ pgno = NEXT_PGNO(h);
+ } else
+ pgno = PGNO_INVALID;
+
+ if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ return (ret);
+
+ /* If the new pgno is wonky, go onto the next bucket. */
+ if (!IS_VALID_PGNO(pgno) ||
+ pgno == PGNO_INVALID)
+ goto nextbucket;
+
+ /*
+ * If we've touched this page before, we have a cycle;
+ * go on to the next bucket.
+ */
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &val)) != 0)
+ return (ret);
+ if (val != 0)
+ goto nextbucket;
+ }
+nextbucket: ;
+ }
+ return (0);
+}
+
+/*
+ * __ham_dups_unsorted --
+ * Takes a known-safe hash duplicate set and its total length.
+ * Returns 1 if there are out-of-order duplicates in this set,
+ * 0 if there are not.
+ */
+static int
+__ham_dups_unsorted(dbp, buf, len)
+ DB *dbp;
+ u_int8_t *buf;
+ u_int32_t len;
+{
+ DBT a, b;
+ db_indx_t offset, dlen;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+
+ memset(&a, 0, sizeof(DBT));
+ memset(&b, 0, sizeof(DBT));
+
+ func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+
+ /*
+ * Loop through the dup set until we hit the end or we find
+ * a pair of dups that's out of order. b is always the current
+ * dup, a the one before it.
+ */
+ for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
+ memcpy(&dlen, buf + offset, sizeof(db_indx_t));
+ b.data = buf + offset + sizeof(db_indx_t);
+ b.size = dlen;
+
+ if (a.data != NULL && func(dbp, &a, &b) > 0)
+ return (1);
+
+ a.data = b.data;
+ a.size = b.size;
+ }
+
+ return (0);
+}
diff --git a/bdb/hsearch/hsearch.c b/bdb/hsearch/hsearch.c
new file mode 100644
index 00000000000..c2869c4c47c
--- /dev/null
+++ b/bdb/hsearch/hsearch.c
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hsearch.c,v 11.5 2000/11/30 00:58:37 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+#include "db_int.h"
+
+static DB *dbp;
+static ENTRY retval;
+
+int
+__db_hcreate(nel)
+ size_t nel;
+{
+ int ret;
+
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ __os_set_errno(ret);
+ return (1);
+ }
+
+ if ((ret = dbp->set_pagesize(dbp, 512)) != 0 ||
+ (ret = dbp->set_h_ffactor(dbp, 16)) != 0 ||
+ (ret = dbp->set_h_nelem(dbp, nel)) != 0 ||
+ (ret = dbp->open(dbp,
+ NULL, NULL, DB_HASH, DB_CREATE, __db_omode("rw----"))) != 0)
+ __os_set_errno(ret);
+
+ /*
+ * !!!
+ * Hsearch returns 0 on error, not 1.
+ */
+ return (ret == 0 ? 1 : 0);
+}
+
+ENTRY *
+__db_hsearch(item, action)
+ ENTRY item;
+ ACTION action;
+{
+ DBT key, val;
+ int ret;
+
+ if (dbp == NULL) {
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ key.data = item.key;
+ key.size = strlen(item.key) + 1;
+
+ switch (action) {
+ case ENTER:
+ val.data = item.data;
+ val.size = strlen(item.data) + 1;
+
+ /*
+ * Try and add the key to the database. If we fail because
+ * the key already exists, return the existing key.
+ */
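+		/*
+		 * This mirrors hsearch(3) ENTER semantics: if the key is
+		 * already present, the stored entry wins and is returned
+		 * unchanged.
+		 */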
+ if ((ret =
+ dbp->put(dbp, NULL, &key, &val, DB_NOOVERWRITE)) == 0)
+ break;
+ if (ret == DB_KEYEXIST &&
+ (ret = dbp->get(dbp, NULL, &key, &val, 0)) == 0)
+ break;
+ /*
+ * The only possible DB error is DB_NOTFOUND, and it can't
+ * happen. Check for a DB error, and lie if we find one.
+ */
+ __os_set_errno(ret > 0 ? ret : EINVAL);
+ return (NULL);
+ case FIND:
+ if ((ret = dbp->get(dbp, NULL, &key, &val, 0)) != 0) {
+ if (ret != DB_NOTFOUND)
+ __os_set_errno(ret);
+ return (NULL);
+ }
+ item.data = (char *)val.data;
+ break;
+ default:
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ retval.key = item.key;
+ retval.data = item.data;
+ return (&retval);
+}
+
+void
+__db_hdestroy()
+{
+ if (dbp != NULL) {
+ (void)dbp->close(dbp, 0);
+ dbp = NULL;
+ }
+}
diff --git a/bdb/include/btree.h b/bdb/include/btree.h
new file mode 100644
index 00000000000..395f645f03f
--- /dev/null
+++ b/bdb/include/btree.h
@@ -0,0 +1,317 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: btree.h,v 11.37 2001/01/17 17:09:52 bostic Exp $
+ */
+
+/* Forward structure declarations. */
+struct __btree; typedef struct __btree BTREE;
+struct __cursor; typedef struct __cursor BTREE_CURSOR;
+struct __epg; typedef struct __epg EPG;
+struct __recno; typedef struct __recno RECNO;
+
+#define DEFMINKEYPAGE (2)
+
+/*
+ * A recno order of 0 indicates that we don't have an order, not that we have
+ * an order less than 1.
+ */
+#define INVALID_ORDER 0
+
+#define ISINTERNAL(p) (TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO)
+#define ISLEAF(p) (TYPE(p) == P_LBTREE || \
+ TYPE(p) == P_LRECNO || TYPE(p) == P_LDUP)
+
+/* Flags for __bam_cadjust_log(). */
+#define CAD_UPDATEROOT 0x01 /* Root page count was updated. */
+
+/* Flags for __bam_split_log(). */
+#define SPL_NRECS 0x01 /* Split tree has record count. */
+
+/* Flags for __bam_iitem(). */
+#define BI_DELETED 0x01 /* Key/data pair only placeholder. */
+
+/* Flags for __bam_stkrel(). */
+#define STK_CLRDBC 0x01 /* Clear dbc->page reference. */
+#define STK_NOLOCK 0x02 /* Don't retain locks. */
+
+/* Flags for __ram_ca(). These get logged, so make the values explicit. */
+typedef enum {
+ CA_DELETE = 0, /* Delete the current record. */
+	CA_IAFTER = 1,		/* Insert after the current record. */
+	CA_IBEFORE = 2,		/* Insert before the current record. */
+ CA_ICURRENT = 3 /* Overwrite the current record. */
+} ca_recno_arg;
+
+/*
+ * Flags for __bam_search() and __bam_rsearch().
+ *
+ * Note, internal page searches must find the largest record less than key in
+ * the tree so that descents work. Leaf page searches must find the smallest
+ * record greater than key so that the returned index is the record's correct
+ * position for insertion.
+ *
+ * The flags parameter to the search routines describes three aspects of the
+ * search: the type of locking required (including if we're locking a pair of
+ * pages), the item to return in the presence of duplicates and whether or not
+ * to return deleted entries. To simplify both the mnemonic representation
+ * and the code that checks for various cases, we construct a set of bitmasks.
+ */
+#define S_READ 0x00001 /* Read locks. */
+#define S_WRITE 0x00002 /* Write locks. */
+
+#define S_APPEND 0x00040 /* Append to the tree. */
+#define S_DELNO 0x00080 /* Don't return deleted items. */
+#define S_DUPFIRST 0x00100 /* Return first duplicate. */
+#define S_DUPLAST 0x00200 /* Return last duplicate. */
+#define S_EXACT 0x00400 /* Exact items only. */
+#define S_PARENT 0x00800 /* Lock page pair. */
+#define S_STACK 0x01000 /* Need a complete stack. */
+#define S_PAST_EOF 0x02000 /* If doing insert search (or keyfirst
+ * or keylast operations), or a split
+ * on behalf of an insert, it's okay to
+ * return an entry one past end-of-page.
+ */
+#define S_STK_ONLY 0x04000 /* Just return info in the stack */
+
+#define S_DELETE (S_WRITE | S_DUPFIRST | S_DELNO | S_EXACT | S_STACK)
+#define S_FIND (S_READ | S_DUPFIRST | S_DELNO)
+#define S_FIND_WR (S_WRITE | S_DUPFIRST | S_DELNO)
+#define S_INSERT (S_WRITE | S_DUPLAST | S_PAST_EOF | S_STACK)
+#define S_KEYFIRST (S_WRITE | S_DUPFIRST | S_PAST_EOF | S_STACK)
+#define S_KEYLAST (S_WRITE | S_DUPLAST | S_PAST_EOF | S_STACK)
+#define S_WRPAIR (S_WRITE | S_DUPLAST | S_PAST_EOF | S_PARENT)
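+/*
+ * For example, S_INSERT asks for write locks, the last duplicate, permission
+ * to return a slot one past the end of the page, and a full stack for any
+ * subsequent split.
+ */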
+
+/*
+ * Various routines pass around page references. A page reference is
+ * a pointer to the page, and the indx indicates an item on the page.
+ * Each page reference may include a lock.
+ */
+struct __epg {
+ PAGE *page; /* The page. */
+ db_indx_t indx; /* The index on the page. */
+ db_indx_t entries; /* The number of entries on page */
+ DB_LOCK lock; /* The page's lock. */
+ db_lockmode_t lock_mode; /* The lock mode. */
+};
+
+/*
+ * We maintain a stack of the pages that we're locking in the tree. Grow
+ * the stack as necessary.
+ *
+ * XXX
+ * Temporary fix for #3243 -- clear the page and lock from the stack entry.
+ * The correct fix is to never release a stack that doesn't hold items.
+ */
+#define BT_STK_CLR(c) do { \
+ (c)->csp = (c)->sp; \
+ (c)->csp->page = NULL; \
+ (c)->csp->lock.off = LOCK_INVALID; \
+} while (0)
+
+#define BT_STK_ENTER(dbenv, c, pagep, page_indx, l, mode, ret) do { \
+ if ((ret = \
+ (c)->csp == (c)->esp ? __bam_stkgrow(dbenv, c) : 0) == 0) { \
+ (c)->csp->page = pagep; \
+ (c)->csp->indx = page_indx; \
+ (c)->csp->entries = NUM_ENT(pagep); \
+ (c)->csp->lock = l; \
+ (c)->csp->lock_mode = mode; \
+ } \
+} while (0)
+
+#define BT_STK_PUSH(dbenv, c, pagep, page_indx, lock, mode, ret) do { \
+ BT_STK_ENTER(dbenv, c, pagep, page_indx, lock, mode, ret); \
+ ++(c)->csp; \
+} while (0)
+
+#define BT_STK_NUM(dbenv, c, pagep, page_indx, ret) do { \
+ if ((ret = \
+ (c)->csp == (c)->esp ? __bam_stkgrow(dbenv, c) : 0) == 0) { \
+ (c)->csp->page = NULL; \
+ (c)->csp->indx = page_indx; \
+ (c)->csp->entries = NUM_ENT(pagep); \
+ (c)->csp->lock.off = LOCK_INVALID; \
+ (c)->csp->lock_mode = DB_LOCK_NG; \
+ } \
+} while (0)
+
+#define	BT_STK_NUMPUSH(dbenv, c, pagep, page_indx, ret) do {		\
+	BT_STK_NUM(dbenv, c, pagep, page_indx, ret);			\
+ ++(c)->csp; \
+} while (0)
+
+#define BT_STK_POP(c) \
+ ((c)->csp == (c)->sp ? NULL : --(c)->csp)
+
+/* Btree/Recno cursor. */
+struct __cursor {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* btree private part */
+ EPG *sp; /* Stack pointer. */
+ EPG *csp; /* Current stack entry. */
+ EPG *esp; /* End stack pointer. */
+ EPG stack[5];
+
+ db_indx_t ovflsize; /* Maximum key/data on-page size. */
+
+ db_recno_t recno; /* Current record number. */
+ u_int32_t order; /* Relative order among deleted curs. */
+
+ /*
+ * Btree:
+ * We set a flag in the cursor structure if the underlying object has
+ * been deleted. It's not strictly necessary, we could get the same
+ * information by looking at the page itself, but this method doesn't
+ * require us to retrieve the page on cursor delete.
+ *
+ * Recno:
+ * When renumbering recno databases during deletes, cursors referencing
+ * "deleted" records end up positioned between two records, and so must
+ * be specially adjusted on the next operation.
+ */
+#define C_DELETED 0x0001 /* Record was deleted. */
+ /*
+ * There are three tree types that require maintaining record numbers.
+ * Recno AM trees, Btree AM trees for which the DB_RECNUM flag was set,
+ * and Btree off-page duplicate trees.
+ */
+#define C_RECNUM 0x0002 /* Tree requires record counts. */
+ /*
+ * Recno trees have immutable record numbers by default, but optionally
+ * support mutable record numbers. Off-page duplicate Recno trees have
+ * mutable record numbers. All Btrees with record numbers (including
+ * off-page duplicate trees) are mutable by design, no flag is needed.
+ */
+#define C_RENUMBER 0x0004 /* Tree records are mutable. */
+ u_int32_t flags;
+};
+
+/*
+ * Threshold value, as a function of bt_minkey, of the number of
+ * bytes a key/data pair can use before being placed on an overflow
+ * page. Assume every item requires the maximum alignment for
+ * padding, out of sheer paranoia.
+ */
+#define B_MINKEY_TO_OVFLSIZE(minkey, pgsize) \
+ ((u_int16_t)(((pgsize) - P_OVERHEAD) / ((minkey) * P_INDX) - \
+ (BKEYDATA_PSIZE(0) + ALIGN(1, sizeof(int32_t)))))
+
+/*
+ * The maximum space that a single item can ever take up on one page.
+ * Used by __bam_split to determine whether a split is still necessary.
+ */
+#define B_MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define B_MAXSIZEONPAGE(ovflsize) \
+ (B_MAX(BOVERFLOW_PSIZE, BKEYDATA_PSIZE(ovflsize)))
+
+/*
+ * The in-memory, per-tree btree/recno data structure.
+ */
+struct __btree { /* Btree access method. */
+ /*
+ * !!!
+ * These fields are write-once (when the structure is created) and
+ * so are ignored as far as multi-threading is concerned.
+ */
+ db_pgno_t bt_meta; /* Database meta-data page. */
+ db_pgno_t bt_root; /* Database root page. */
+
+ u_int32_t bt_maxkey; /* Maximum keys per page. */
+ u_int32_t bt_minkey; /* Minimum keys per page. */
+
+ /* Btree comparison function. */
+ int (*bt_compare) __P((DB *, const DBT *, const DBT *));
+ /* Btree prefix function. */
+ size_t (*bt_prefix) __P((DB *, const DBT *, const DBT *));
+
+ /* Recno access method. */
+ int re_pad; /* Fixed-length padding byte. */
+ int re_delim; /* Variable-length delimiting byte. */
+ u_int32_t re_len; /* Length for fixed-length records. */
+ char *re_source; /* Source file name. */
+
+ /*
+ * !!!
+ * The bt_lpgno field is NOT protected by any mutex, and for this
+ * reason must be advisory only, so, while it is read/written by
+ * multiple threads, DB is completely indifferent to the quality
+ * of its information.
+ */
+ db_pgno_t bt_lpgno; /* Last insert location. */
+
+ /*
+ * !!!
+ * The re_modified field is NOT protected by any mutex, and for this
+ * reason cannot be anything more complicated than a zero/non-zero
+ * value. The actual writing of the backing source file cannot be
+ * threaded, so clearing the flag isn't a problem.
+ */
+ int re_modified; /* If the tree was modified. */
+
+ /*
+ * !!!
+ * These fields are ignored as far as multi-threading is concerned.
+ * There are no transaction semantics associated with backing files,
+ * nor is there any thread protection.
+ */
+ FILE *re_fp; /* Source file handle. */
+ int re_eof; /* Backing source file EOF reached. */
+ db_recno_t re_last; /* Last record number read. */
+};
+
+/*
+ * Modes for the __bam_curadj recovery records (btree_curadj).
+ * These appear in log records, so we wire the values and
+ * do not leave it up to the compiler.
+ */
+typedef enum {
+ DB_CA_DI = 1,
+ DB_CA_DUP = 2,
+ DB_CA_RSPLIT = 3,
+ DB_CA_SPLIT = 4
+} db_ca_mode;
+
+#include "btree_auto.h"
+#include "btree_ext.h"
+#include "db_am.h"
diff --git a/bdb/include/btree_auto.h b/bdb/include/btree_auto.h
new file mode 100644
index 00000000000..214f84332cf
--- /dev/null
+++ b/bdb/include/btree_auto.h
@@ -0,0 +1,267 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef bam_AUTO_H
+#define bam_AUTO_H
+
+#define DB_bam_pg_alloc 51
+typedef struct _bam_pg_alloc_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN meta_lsn;
+ DB_LSN page_lsn;
+ db_pgno_t pgno;
+ u_int32_t ptype;
+ db_pgno_t next;
+} __bam_pg_alloc_args;
+
+int __bam_pg_alloc_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, DB_LSN *, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t));
+int __bam_pg_alloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_pg_alloc_read __P((DB_ENV *, void *, __bam_pg_alloc_args **));
+
+#define DB_bam_pg_alloc1 60
+typedef struct _bam_pg_alloc1_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN meta_lsn;
+ DB_LSN alloc_lsn;
+ DB_LSN page_lsn;
+ db_pgno_t pgno;
+ u_int32_t ptype;
+ db_pgno_t next;
+} __bam_pg_alloc1_args;
+
+int __bam_pg_alloc1_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_pg_alloc1_read __P((DB_ENV *, void *, __bam_pg_alloc1_args **));
+
+#define DB_bam_pg_free 52
+typedef struct _bam_pg_free_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN meta_lsn;
+ DBT header;
+ db_pgno_t next;
+} __bam_pg_free_args;
+
+int __bam_pg_free_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, DB_LSN *, const DBT *, db_pgno_t));
+int __bam_pg_free_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_pg_free_read __P((DB_ENV *, void *, __bam_pg_free_args **));
+
+#define DB_bam_pg_free1 61
+typedef struct _bam_pg_free1_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN meta_lsn;
+ DB_LSN alloc_lsn;
+ DBT header;
+ db_pgno_t next;
+} __bam_pg_free1_args;
+
+int __bam_pg_free1_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_pg_free1_read __P((DB_ENV *, void *, __bam_pg_free1_args **));
+
+#define DB_bam_split1 53
+typedef struct _bam_split1_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t left;
+ DB_LSN llsn;
+ db_pgno_t right;
+ DB_LSN rlsn;
+ u_int32_t indx;
+ db_pgno_t npgno;
+ DB_LSN nlsn;
+ DBT pg;
+} __bam_split1_args;
+
+int __bam_split1_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_split1_read __P((DB_ENV *, void *, __bam_split1_args **));
+
+#define DB_bam_split 62
+typedef struct _bam_split_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t left;
+ DB_LSN llsn;
+ db_pgno_t right;
+ DB_LSN rlsn;
+ u_int32_t indx;
+ db_pgno_t npgno;
+ DB_LSN nlsn;
+ db_pgno_t root_pgno;
+ DBT pg;
+ u_int32_t opflags;
+} __bam_split_args;
+
+int __bam_split_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, u_int32_t));
+int __bam_split_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_split_read __P((DB_ENV *, void *, __bam_split_args **));
+
+#define DB_bam_rsplit1 54
+typedef struct _bam_rsplit1_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DBT pgdbt;
+ db_pgno_t nrec;
+ DBT rootent;
+ DB_LSN rootlsn;
+} __bam_rsplit1_args;
+
+int __bam_rsplit1_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit1_read __P((DB_ENV *, void *, __bam_rsplit1_args **));
+
+#define DB_bam_rsplit 63
+typedef struct _bam_rsplit_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DBT pgdbt;
+ db_pgno_t root_pgno;
+ db_pgno_t nrec;
+ DBT rootent;
+ DB_LSN rootlsn;
+} __bam_rsplit_args;
+
+int __bam_rsplit_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, const DBT *, db_pgno_t, db_pgno_t, const DBT *, DB_LSN *));
+int __bam_rsplit_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit_read __P((DB_ENV *, void *, __bam_rsplit_args **));
+
+#define DB_bam_adj 55
+typedef struct _bam_adj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ u_int32_t indx_copy;
+ u_int32_t is_insert;
+} __bam_adj_args;
+
+int __bam_adj_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t, u_int32_t));
+int __bam_adj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_adj_read __P((DB_ENV *, void *, __bam_adj_args **));
+
+#define DB_bam_cadjust 56
+typedef struct _bam_cadjust_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ int32_t adjust;
+ u_int32_t opflags;
+} __bam_cadjust_args;
+
+int __bam_cadjust_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, DB_LSN *, u_int32_t, int32_t, u_int32_t));
+int __bam_cadjust_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cadjust_read __P((DB_ENV *, void *, __bam_cadjust_args **));
+
+#define DB_bam_cdel 57
+typedef struct _bam_cdel_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+} __bam_cdel_args;
+
+int __bam_cdel_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, DB_LSN *, u_int32_t));
+int __bam_cdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cdel_read __P((DB_ENV *, void *, __bam_cdel_args **));
+
+#define DB_bam_repl 58
+typedef struct _bam_repl_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ u_int32_t isdeleted;
+ DBT orig;
+ DBT repl;
+ u_int32_t prefix;
+ u_int32_t suffix;
+} __bam_repl_args;
+
+int __bam_repl_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *, u_int32_t, u_int32_t));
+int __bam_repl_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_repl_read __P((DB_ENV *, void *, __bam_repl_args **));
+
+#define DB_bam_root 59
+typedef struct _bam_root_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t meta_pgno;
+ db_pgno_t root_pgno;
+ DB_LSN meta_lsn;
+} __bam_root_args;
+
+int __bam_root_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, db_pgno_t, DB_LSN *));
+int __bam_root_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_root_read __P((DB_ENV *, void *, __bam_root_args **));
+
+#define DB_bam_curadj 64
+typedef struct _bam_curadj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_ca_mode mode;
+ db_pgno_t from_pgno;
+ db_pgno_t to_pgno;
+ db_pgno_t left_pgno;
+ u_int32_t first_indx;
+ u_int32_t from_indx;
+ u_int32_t to_indx;
+} __bam_curadj_args;
+
+int __bam_curadj_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_ca_mode, db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t));
+int __bam_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_curadj_read __P((DB_ENV *, void *, __bam_curadj_args **));
+
+#define DB_bam_rcuradj 65
+typedef struct _bam_rcuradj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ ca_recno_arg mode;
+ db_pgno_t root;
+ db_recno_t recno;
+ u_int32_t order;
+} __bam_rcuradj_args;
+
+int __bam_rcuradj_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, ca_recno_arg, db_pgno_t, db_recno_t, u_int32_t));
+int __bam_rcuradj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rcuradj_read __P((DB_ENV *, void *, __bam_rcuradj_args **));
+int __bam_init_print __P((DB_ENV *));
+int __bam_init_recover __P((DB_ENV *));
+#endif
diff --git a/bdb/include/btree_ext.h b/bdb/include/btree_ext.h
new file mode 100644
index 00000000000..8a9866e0b5a
--- /dev/null
+++ b/bdb/include/btree_ext.h
@@ -0,0 +1,122 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _btree_ext_h_
+#define _btree_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __bam_cmp __P((DB *, const DBT *, PAGE *,
+ u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
+int __bam_defcmp __P((DB *, const DBT *, const DBT *));
+size_t __bam_defpfx __P((DB *, const DBT *, const DBT *));
+int __bam_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __bam_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __bam_mswap __P((PAGE *));
+void __bam_cprint __P((DBC *));
+int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int));
+int __ram_ca_delete __P((DB *, db_pgno_t));
+int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int));
+int __bam_ca_dup __P((DBC *,
+ u_int32_t, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+int __bam_ca_undodup __P((DB *,
+ u_int32_t, db_pgno_t, u_int32_t, u_int32_t));
+int __bam_ca_rsplit __P((DBC *, db_pgno_t, db_pgno_t));
+int __bam_ca_split __P((DBC *,
+ db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, int));
+void __bam_ca_undosplit __P((DB *,
+ db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t));
+int __bam_c_init __P((DBC *, DBTYPE));
+int __bam_c_refresh __P((DBC *));
+int __bam_c_count __P((DBC *, db_recno_t *));
+int __bam_c_dup __P((DBC *, DBC *));
+int __bam_c_rget __P((DBC *, DBT *, u_int32_t));
+int __bam_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+int __bam_ditem __P((DBC *, PAGE *, u_int32_t));
+int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int));
+int __bam_dpages __P((DBC *, EPG *));
+int __bam_db_create __P((DB *));
+int __bam_db_close __P((DB *));
+int __bam_set_flags __P((DB *, u_int32_t *flagsp));
+int __ram_set_flags __P((DB *, u_int32_t *flagsp));
+int __bam_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+int __bam_metachk __P((DB *, const char *, BTMETA *));
+int __bam_read_root __P((DB *, const char *, db_pgno_t, u_int32_t));
+int __bam_iitem __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+u_int32_t __bam_partsize __P((u_int32_t, DBT *, PAGE *, u_int32_t));
+int __bam_build __P((DBC *, u_int32_t,
+ DBT *, PAGE *, u_int32_t, u_int32_t));
+int __bam_ritem __P((DBC *, PAGE *, u_int32_t, DBT *));
+int __bam_pg_alloc_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_pg_free_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_split_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_adj_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cadjust_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cdel_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_repl_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_root_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_curadj_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rcuradj_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_reclaim __P((DB *, DB_TXN *));
+int __ram_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+int __ram_c_del __P((DBC *));
+int __ram_c_get
+ __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+int __ram_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+int __ram_ca __P((DBC *, ca_recno_arg));
+int __ram_getno __P((DBC *, const DBT *, db_recno_t *, int));
+int __ram_writeback __P((DB *));
+int __bam_rsearch __P((DBC *, db_recno_t *, u_int32_t, int, int *));
+int __bam_adjust __P((DBC *, int32_t));
+int __bam_nrecs __P((DBC *, db_recno_t *));
+db_recno_t __bam_total __P((PAGE *));
+int __bam_search __P((DBC *,
+ const DBT *, u_int32_t, int, db_recno_t *, int *));
+int __bam_stkrel __P((DBC *, u_int32_t));
+int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *));
+int __bam_split __P((DBC *, void *));
+int __bam_copy __P((DB *, PAGE *, PAGE *, u_int32_t, u_int32_t));
+int __bam_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+int __bam_traverse __P((DBC *, db_lockmode_t,
+ db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+int __bam_stat_callback __P((DB *, PAGE *, void *, int *));
+int __bam_key_range __P((DB *,
+ DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+int __bam_30_btreemeta __P((DB *, char *, u_int8_t *));
+int __bam_31_btreemeta
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __bam_31_lbtree
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __bam_vrfy_meta __P((DB *, VRFY_DBINFO *, BTMETA *,
+ db_pgno_t, u_int32_t));
+int __ram_vrfy_leaf __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ u_int32_t));
+int __bam_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ u_int32_t));
+int __bam_vrfy_itemorder __P((DB *, VRFY_DBINFO *, PAGE *,
+ db_pgno_t, u_int32_t, int, int, u_int32_t));
+int __bam_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ u_int32_t));
+int __bam_vrfy_subtree __P((DB *, VRFY_DBINFO *, db_pgno_t, void *,
+ void *, u_int32_t, u_int32_t *, u_int32_t *, u_int32_t *));
+int __bam_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t,
+ PAGE *, void *, int (*)(void *, const void *), DBT *,
+ u_int32_t));
+int __bam_salvage_walkdupint __P((DB *, VRFY_DBINFO *, PAGE *,
+ DBT *, void *, int (*)(void *, const void *), u_int32_t));
+int __bam_meta2pgset __P((DB *, VRFY_DBINFO *, BTMETA *,
+ u_int32_t, DB *));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _btree_ext_h_ */
diff --git a/bdb/include/clib_ext.h b/bdb/include/clib_ext.h
new file mode 100644
index 00000000000..efd0796afe3
--- /dev/null
+++ b/bdb/include/clib_ext.h
@@ -0,0 +1,38 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _clib_ext_h_
+#define _clib_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+#ifndef HAVE_GETCWD
+char *getcwd __P((char *, size_t));
+#endif
+#ifndef HAVE_GETOPT
+int getopt __P((int, char * const *, const char *));
+#endif
+#ifndef HAVE_MEMCMP
+int memcmp __P((const void *, const void *, size_t));
+#endif
+#ifndef HAVE_MEMCPY
+void *memcpy __P((void *, const void *, size_t));
+#endif
+#ifndef HAVE_MEMMOVE
+void *memmove __P((void *, const void *, size_t));
+#endif
+#ifndef HAVE_RAISE
+int raise __P((int));
+#endif
+#ifndef HAVE_SNPRINTF
+int snprintf __P((char *, size_t, const char *, ...));
+#endif
+int strcasecmp __P((const char *, const char *));
+#ifndef HAVE_STRERROR
+char *strerror __P((int));
+#endif
+#ifndef HAVE_VSNPRINTF
+int vsnprintf();
+#endif
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _clib_ext_h_ */
diff --git a/bdb/include/common_ext.h b/bdb/include/common_ext.h
new file mode 100644
index 00000000000..a36d62cac4a
--- /dev/null
+++ b/bdb/include/common_ext.h
@@ -0,0 +1,44 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _common_ext_h_
+#define _common_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __db_byteorder __P((DB_ENV *, int));
+int __db_fchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t));
+int __db_fcchk
+ __P((DB_ENV *, const char *, u_int32_t, u_int32_t, u_int32_t));
+int __db_ferr __P((const DB_ENV *, const char *, int));
+int __db_pgerr __P((DB *, db_pgno_t));
+int __db_pgfmt __P((DB *, db_pgno_t));
+int __db_eopnotsup __P((const DB_ENV *));
+#ifdef DIAGNOSTIC
+void __db_assert __P((const char *, const char *, int));
+#endif
+int __db_panic_msg __P((DB_ENV *));
+int __db_panic __P((DB_ENV *, int));
+void __db_err __P((const DB_ENV *, const char *, ...));
+void __db_real_err
+ __P((const DB_ENV *, int, int, int, const char *, va_list));
+void __db_logmsg __P((const DB_ENV *,
+ DB_TXN *, const char *, u_int32_t, const char *, ...));
+void __db_real_log __P((const DB_ENV *,
+ DB_TXN *, const char *, u_int32_t, const char *, va_list ap));
+int __db_unknown_flag __P((DB_ENV *, char *, u_int32_t));
+int __db_unknown_type __P((DB_ENV *, char *, u_int32_t));
+#ifdef DIAGNOSTIC
+int __db_missing_txn_err __P((DB_ENV *));
+#endif
+int __db_getlong
+ __P((DB *, const char *, char *, long, long, long *));
+int __db_getulong
+ __P((DB *, const char *, char *, u_long, u_long, u_long *));
+u_int32_t __db_log2 __P((u_int32_t));
+int __db_util_logset __P((const char *, char *));
+void __db_util_siginit __P((void));
+int __db_util_interrupted __P((void));
+void __db_util_sigresend __P((void));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _common_ext_h_ */
diff --git a/bdb/include/crdel_auto.h b/bdb/include/crdel_auto.h
new file mode 100644
index 00000000000..409c256811f
--- /dev/null
+++ b/bdb/include/crdel_auto.h
@@ -0,0 +1,88 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef crdel_AUTO_H
+#define crdel_AUTO_H
+
+#define DB_crdel_fileopen 141
+typedef struct _crdel_fileopen_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ u_int32_t mode;
+} __crdel_fileopen_args;
+
+int __crdel_fileopen_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, u_int32_t));
+int __crdel_fileopen_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_fileopen_read __P((DB_ENV *, void *, __crdel_fileopen_args **));
+
+#define DB_crdel_metasub 142
+typedef struct _crdel_metasub_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DBT page;
+ DB_LSN lsn;
+} __crdel_metasub_args;
+
+int __crdel_metasub_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, const DBT *, DB_LSN *));
+int __crdel_metasub_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_metasub_read __P((DB_ENV *, void *, __crdel_metasub_args **));
+
+#define DB_crdel_metapage 143
+typedef struct _crdel_metapage_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DBT name;
+ db_pgno_t pgno;
+ DBT page;
+} __crdel_metapage_args;
+
+int __crdel_metapage_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, const DBT *, db_pgno_t, const DBT *));
+int __crdel_metapage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_metapage_read __P((DB_ENV *, void *, __crdel_metapage_args **));
+
+#define DB_crdel_old_delete 144
+typedef struct _crdel_old_delete_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+} __crdel_old_delete_args;
+
+int __crdel_old_delete_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_old_delete_read __P((DB_ENV *, void *, __crdel_old_delete_args **));
+
+#define DB_crdel_rename 145
+typedef struct _crdel_rename_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DBT name;
+ DBT newname;
+} __crdel_rename_args;
+
+int __crdel_rename_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, const DBT *, const DBT *));
+int __crdel_rename_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_rename_read __P((DB_ENV *, void *, __crdel_rename_args **));
+
+#define DB_crdel_delete 146
+typedef struct _crdel_delete_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DBT name;
+} __crdel_delete_args;
+
+int __crdel_delete_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, const DBT *));
+int __crdel_delete_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_delete_read __P((DB_ENV *, void *, __crdel_delete_args **));
+int __crdel_init_print __P((DB_ENV *));
+int __crdel_init_recover __P((DB_ENV *));
+#endif
diff --git a/bdb/include/cxx_int.h b/bdb/include/cxx_int.h
new file mode 100644
index 00000000000..4a9a40ceba1
--- /dev/null
+++ b/bdb/include/cxx_int.h
@@ -0,0 +1,96 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: cxx_int.h,v 11.13 2000/11/21 22:56:36 dda Exp $
+ */
+
+#ifndef _CXX_INT_H_
+#define _CXX_INT_H_
+
+// private data structures known to the implementation only
+
+//
+// Using FooImp classes will allow the implementation to change in the
+// future without any modification to user code or even to header files
+// that the user includes. FooImp * is just like void * except that it
+// provides a little extra protection, since you cannot randomly assign
+// any old pointer to a FooImp* as you can with void *. Currently, a
+// pointer to such an opaque class is always just a pointer to the
+// appropriate underlying implementation struct. These are converted
+// back and forth using the various overloaded wrap()/unwrap() methods.
+// This is essentially a use of the "Bridge" Design Pattern.
+//
+// WRAPPED_CLASS implements the appropriate wrap() and unwrap() methods
+// for a wrapper class that has an underlying pointer representation.
+//
+#define WRAPPED_CLASS(_WRAPPER_CLASS, _IMP_CLASS, _WRAPPED_TYPE) \
+ \
+ class _IMP_CLASS {}; \
+ \
+ inline _WRAPPED_TYPE unwrap(_WRAPPER_CLASS *val) \
+ { \
+ if (!val) return (0); \
+ return ((_WRAPPED_TYPE)((void *)(val->imp()))); \
+ } \
+ \
+ inline const _WRAPPED_TYPE unwrapConst(const _WRAPPER_CLASS *val) \
+ { \
+ if (!val) return (0); \
+ return ((const _WRAPPED_TYPE)((void *)(val->constimp()))); \
+ } \
+ \
+ inline _IMP_CLASS *wrap(_WRAPPED_TYPE val) \
+ { \
+ return ((_IMP_CLASS*)((void *)val)); \
+ }
+
+WRAPPED_CLASS(DbMpoolFile, DbMpoolFileImp, DB_MPOOLFILE*)
+WRAPPED_CLASS(Db, DbImp, DB*)
+WRAPPED_CLASS(DbEnv, DbEnvImp, DB_ENV*)
+WRAPPED_CLASS(DbTxn, DbTxnImp, DB_TXN*)
+
+// A tristate integer value used by the DB_ERROR macro below.
+// We chose not to make this an enumerated type so it can
+// be kept private, even though methods that return the
+// tristate int can be declared in db_cxx.h.
+//
+#define ON_ERROR_THROW 1
+#define ON_ERROR_RETURN 0
+#define ON_ERROR_UNKNOWN (-1)
+
+// Macros that handle detected errors, in case we want to
+// change the default behavior. The 'policy' is one of
+// the tristate values given above. If UNKNOWN is specified,
+// the behavior is taken from the last initialized DbEnv.
+//
+#define DB_ERROR(caller, ecode, policy) \
+ DbEnv::runtime_error(caller, ecode, policy)
+
+// These defines are for tedious field set/get access methods.
+//
+
+#define DB_RO_ACCESS(_class, _type, _cxx_name, _field) \
+ \
+_type _class::get_##_cxx_name() const \
+{ \
+ return (_field); \
+}
+
+#define DB_WO_ACCESS(_class, _type, _cxx_name, _field) \
+ \
+void _class::set_##_cxx_name(_type value) \
+{ \
+ _field = value; \
+} \
+
+#define DB_RW_ACCESS(_class, _type, _cxx_name, _field) \
+ DB_RO_ACCESS(_class, _type, _cxx_name, _field) \
+ DB_WO_ACCESS(_class, _type, _cxx_name, _field)
+
+/* values for Db::flags_ */
+#define DB_CXX_PRIVATE_ENV 0x00000001
+
+#endif /* !_CXX_INT_H_ */
diff --git a/bdb/include/db.src b/bdb/include/db.src
new file mode 100644
index 00000000000..6dc0071efae
--- /dev/null
+++ b/bdb/include/db.src
@@ -0,0 +1,1375 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db.src,v 11.121 2001/01/10 15:43:08 sue Exp $
+ */
+
+#ifndef _DB_H_
+#define _DB_H_
+
+#ifndef __NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * XXX
+ * Handle function prototypes and the keyword "const". This steps on name
+ * space that DB doesn't control, but all of the other solutions are worse.
+ *
+ * XXX
+ * While Microsoft's compiler is ANSI C compliant, it doesn't have __STDC__
+ * defined by default; you have to specify a command-line flag or #pragma to
+ * turn it on.  Don't do that, however, because some of Microsoft's own header
+ * files won't compile.
+ */
+#undef __P
+#if defined(__STDC__) || defined(__cplusplus) || defined(_MSC_VER)
+#define __P(protos) protos /* ANSI C prototypes */
+#else
+#define const
+#define __P(protos) () /* K&R C preprocessor */
+#endif
+
+/*
+ * !!!
+ * DB needs basic information about specifically sized types. If they're
+ * not provided by the system, typedef them here.
+ *
+ * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__,
+ * as does BIND and Kerberos, since we don't know for sure what #include
+ * files the user is using.
+ *
+ * !!!
+ * We also provide the standard u_int, u_long etc., if they're not provided
+ * by the system.
+ */
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+@u_int8_decl@
+@int16_decl@
+@u_int16_decl@
+@int32_decl@
+@u_int32_decl@
+#endif
+
+@u_char_decl@
+@u_short_decl@
+@u_int_decl@
+@u_long_decl@
+@ssize_t_decl@
+
+#define DB_VERSION_MAJOR @DB_VERSION_MAJOR@
+#define DB_VERSION_MINOR @DB_VERSION_MINOR@
+#define DB_VERSION_PATCH @DB_VERSION_PATCH@
+#define DB_VERSION_STRING @DB_VERSION_STRING@
+
+typedef u_int32_t db_pgno_t; /* Page number type. */
+typedef u_int16_t db_indx_t; /* Page offset type. */
+#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */
+
+typedef u_int32_t db_recno_t; /* Record number type. */
+#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */
+
+/* Forward structure declarations, so applications get type checking. */
+struct __db; typedef struct __db DB;
+#ifdef DB_DBM_HSEARCH
+ typedef struct __db DBM;
+#endif
+struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT;
+struct __db_dbt; typedef struct __db_dbt DBT;
+struct __db_env; typedef struct __db_env DB_ENV;
+struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT;
+struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK;
+struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT;
+struct __db_lock_u; typedef struct __db_lock_u DB_LOCK;
+struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ;
+struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT;
+struct __db_lsn; typedef struct __db_lsn DB_LSN;
+struct __db_mpool_finfo;typedef struct __db_mpool_finfo DB_MPOOL_FINFO;
+struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
+struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT;
+struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE;
+struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT;
+struct __db_txn; typedef struct __db_txn DB_TXN;
+struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE;
+struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT;
+struct __dbc; typedef struct __dbc DBC;
+struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL;
+struct __fh_t; typedef struct __fh_t DB_FH;
+struct __key_range; typedef struct __key_range DB_KEY_RANGE;
+
+/* Key/data structure -- a Data-Base Thang. */
+struct __db_dbt {
+ /*
+ * data/size must be fields 1 and 2 for DB 1.85 compatibility.
+ */
+ void *data; /* Key/data */
+ u_int32_t size; /* key/data length */
+
+ u_int32_t ulen; /* RO: length of user buffer. */
+ u_int32_t dlen; /* RO: get/put record length. */
+ u_int32_t doff; /* RO: get/put record offset. */
+
+#define DB_DBT_ISSET 0x001 /* Lower level calls set value. */
+#define DB_DBT_MALLOC 0x002 /* Return in malloc'd memory. */
+#define DB_DBT_PARTIAL 0x004 /* Partial put/get. */
+#define DB_DBT_REALLOC 0x008 /* Return in realloc'd memory. */
+#define DB_DBT_USERMEM 0x010 /* Return in user's memory. */
+#define DB_DBT_DUPOK 0x020 /* Insert if duplicate. */
+ u_int32_t flags;
+};
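
As a point of reference for the flags above, a small sketch (not part of the patch; the helper and buffer names are invented) of a caller retrieving data into its own buffer with DB_DBT_USERMEM:

#include <string.h>
#include <db.h>

/*
 * Sketch: ask DB to copy retrieved data into a caller-owned buffer by
 * setting DB_DBT_USERMEM and ulen.  With flags left at 0, DB instead
 * hands back a pointer into memory it owns.
 */
static int
get_into_buffer(DB *dbp, const char *keystr, char *buf, size_t buflen)
{
	DBT key, data;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	key.data = (void *)keystr;
	key.size = (u_int32_t)strlen(keystr) + 1;

	data.data = buf;
	data.ulen = (u_int32_t)buflen;
	data.flags = DB_DBT_USERMEM;

	/* Returns 0, DB_NOTFOUND, or another error code. */
	return (dbp->get(dbp, NULL, &key, &data, 0));
}
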
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ */
+#define DB_CREATE 0x000001 /* Create file as necessary. */
+#define DB_CXX_NO_EXCEPTIONS 0x000002 /* C++: return error values. */
+#define DB_FORCE 0x000004 /* Force (anything). */
+#define DB_NOMMAP 0x000008 /* Don't mmap underlying file. */
+#define DB_RDONLY 0x000010 /* Read-only (O_RDONLY). */
+#define DB_RECOVER 0x000020 /* Run normal recovery. */
+#define DB_THREAD 0x000040 /* Applications are threaded. */
+#define DB_TXN_NOSYNC 0x000080 /* Do not sync log on commit. */
+#define DB_USE_ENVIRON 0x000100 /* Use the environment. */
+#define DB_USE_ENVIRON_ROOT 0x000200 /* Use the environment if root. */
+
+/*
+ * Flags private to db_env_create.
+ */
+#define DB_CLIENT 0x000400 /* Open for a client environment. */
+
+/*
+ * Flags private to db_create.
+ */
+#define DB_XA_CREATE 0x000400 /* Open in an XA environment. */
+
+/*
+ * Flags private to DBENV->open.
+ */
+#define DB_INIT_CDB 0x000400 /* Concurrent Access Methods. */
+#define DB_INIT_LOCK 0x000800 /* Initialize locking. */
+#define DB_INIT_LOG 0x001000 /* Initialize logging. */
+#define DB_INIT_MPOOL 0x002000 /* Initialize mpool. */
+#define DB_INIT_TXN 0x004000 /* Initialize transactions. */
+#define DB_JOINENV 0x008000 /* Initialize all subsystems present. */
+#define DB_LOCKDOWN 0x010000 /* Lock memory into physical core. */
+#define DB_PRIVATE 0x020000 /* DB_ENV is process local. */
+#define DB_RECOVER_FATAL 0x040000 /* Run catastrophic recovery. */
+#define DB_SYSTEM_MEM 0x080000 /* Use system-backed memory. */
+
+/*
+ * Flags private to DB->open.
+ */
+#define DB_EXCL 0x000400 /* Exclusive open (O_EXCL). */
+#define DB_FCNTL_LOCKING 0x000800 /* UNDOC: fcntl(2) locking. */
+#define DB_ODDFILESIZE 0x001000 /* UNDOC: truncate to N * pgsize. */
+#define DB_RDWRMASTER 0x002000 /* UNDOC: allow subdb master open R/W */
+#define DB_TRUNCATE 0x004000 /* Discard existing DB (O_TRUNC). */
+#define DB_EXTENT 0x008000 /* UNDOC: dealing with an extent. */
+
+/*
+ * Flags private to DBENV->txn_begin.
+ */
+#define DB_TXN_NOWAIT 0x000400 /* Do not wait for locks in this TXN. */
+#define DB_TXN_SYNC 0x000800 /* Always sync log on commit. */
+
+/*
+ * Flags private to DBENV->set_flags.
+ */
+#define DB_CDB_ALLDB 0x000400 /* In CDB, lock across environment. */
+
+/*
+ * Flags private to DB->set_feedback's callback.
+ */
+#define DB_UPGRADE 0x000400 /* Upgrading. */
+#define DB_VERIFY 0x000800 /* Verifying. */
+
+/*
+ * Flags private to DB->set_flags.
+ *
+ * DB->set_flags does not share common flags and so values start at 0x01.
+ */
+#define DB_DUP 0x0001 /* Btree, Hash: duplicate keys. */
+#define DB_DUPSORT 0x0002 /* Btree, Hash: duplicate keys. */
+#define DB_RECNUM 0x0004 /* Btree: record numbers. */
+#define DB_RENUMBER 0x0008 /* Recno: renumber on insert/delete. */
+#define DB_REVSPLITOFF 0x0010 /* Btree: turn off reverse splits. */
+#define DB_SNAPSHOT 0x0020 /* Recno: snapshot the input. */
+
+/*
+ * Flags private to DB->join.
+ *
+ * DB->join does not share common flags and so values start at 0x01.
+ */
+#define DB_JOIN_NOSORT 0x0001 /* Don't try to optimize join. */
+
+/*
+ * Flags private to DB->verify.
+ *
+ * DB->verify does not share common flags and so values start at 0x01.
+ */
+#define DB_AGGRESSIVE 0x0001 /* Salvage anything which might be data.*/
+#define DB_NOORDERCHK 0x0002 /* Skip order check; subdb w/ user func */
+#define DB_ORDERCHKONLY 0x0004 /* Only perform an order check on subdb */
+#define DB_PR_PAGE 0x0008 /* Show page contents (-da). */
+#define DB_PR_HEADERS 0x0010 /* Show only page headers (-dh). */
+#define DB_PR_RECOVERYTEST 0x0020 /* Recovery test (-dr). */
+#define DB_SALVAGE 0x0040 /* Salvage what looks like data. */
+/*
+ * !!!
+ * These must not go over 0x8000, or they will collide with the flags
+ * used by __bam_vrfy_subtree.
+ */
+#define DB_VRFY_FLAGMASK 0xffff /* For masking above flags. */
+
+/*
+ * Deadlock detector modes; used in the DBENV structure to configure the
+ * locking subsystem.
+ */
+#define DB_LOCK_NORUN 0
+#define DB_LOCK_DEFAULT 1 /* Default policy. */
+#define DB_LOCK_OLDEST 2 /* Abort oldest transaction. */
+#define DB_LOCK_RANDOM 3 /* Abort random transaction. */
+#define DB_LOCK_YOUNGEST 4 /* Abort youngest transaction. */
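
These modes are handed to the DB_ENV->set_lk_detect method declared further down; a one-line sketch of the call (illustrative only):

#include <db.h>

/*
 * Sketch: enable automatic deadlock detection with the default victim
 * policy, before the environment is opened.
 */
static int
enable_deadlock_detection(DB_ENV *dbenv)
{
	return (dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT));
}
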
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */
+
+typedef enum {
+ DB_TXN_ABORT,
+ DB_TXN_BACKWARD_ROLL,
+ DB_TXN_FORWARD_ROLL,
+ DB_TXN_OPENFILES
+} db_recops;
+
+#define DB_UNDO(op) ((op) == DB_TXN_ABORT || (op) == DB_TXN_BACKWARD_ROLL)
+#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL)
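
For orientation, a schematic of the recovery-function shape these macros are used in, matching the dispatch-table signature below; the function name is hypothetical and the branches are placeholders:

#include <db.h>

/*
 * Schematic recovery callback.  A real function would decode its log
 * record and compare page LSNs before acting; here only the direction
 * decision made with DB_REDO/DB_UNDO is shown.
 */
int
__example_recover(DB_ENV *dbenv, DBT *dbtp, DB_LSN *lsnp,
    db_recops op, void *info)
{
	(void)dbenv; (void)dbtp; (void)lsnp; (void)info;

	if (DB_REDO(op)) {
		/* Forward roll: reapply the logged change if needed. */
	} else if (DB_UNDO(op)) {
		/* Abort or backward roll: back the logged change out. */
	}
	return (0);
}
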
+
+struct __db_env {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ FILE *db_errfile; /* Error message file stream. */
+ const char *db_errpfx; /* Error message prefix. */
+ /* Callbacks. */
+ void (*db_errcall) __P((const char *, char *));
+ void (*db_feedback) __P((DB_ENV *, int, int));
+ void (*db_paniccall) __P((DB_ENV *, int));
+ int (*db_recovery_init) __P((DB_ENV *));
+
+ /*
+ * Currently, the verbose list is a bit field with room for 32
+ * entries.  There's no reason that it needs to be limited; if
+ * there are ever more than 32 entries, convert to a bit array.
+ */
+#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */
+#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */
+#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */
+#define DB_VERB_WAITSFOR 0x0008 /* Dump waits-for table. */
+ u_int32_t verbose; /* Verbose output. */
+
+ void *app_private; /* Application-private handle. */
+
+ /* Locking. */
+ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */
+ u_int32_t lk_modes; /* Number of lock modes in table. */
+ u_int32_t lk_max; /* Maximum number of locks. */
+ u_int32_t lk_max_lockers;/* Maximum number of lockers. */
+ u_int32_t lk_max_objects;/* Maximum number of locked objects. */
+ u_int32_t lk_detect; /* Deadlock detect on all conflicts. */
+
+ /* Logging. */
+ u_int32_t lg_bsize; /* Buffer size. */
+ u_int32_t lg_max; /* Maximum file size. */
+
+ /* Memory pool. */
+ u_int32_t mp_gbytes; /* Cachesize: GB. */
+ u_int32_t mp_bytes; /* Cachesize: Bytes. */
+ size_t mp_size; /* DEPRECATED: Cachesize: bytes. */
+ int mp_ncache; /* Number of cache regions. */
+ size_t mp_mmapsize; /* Maximum file size for mmap. */
+
+ /* Transactions. */
+ u_int32_t tx_max; /* Maximum number of transactions. */
+ time_t tx_timestamp; /* Recover to specific timestamp. */
+ int (*tx_recover) /* Dispatch function for recovery. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ int db_panic; /* Panic causing errno. */
+
+ /* User files, paths. */
+ char *db_home; /* Database home. */
+ char *db_log_dir; /* Database log file directory. */
+ char *db_tmp_dir; /* Database tmp file directory. */
+
+ char **db_data_dir; /* Database data file directories. */
+ int data_cnt; /* Database data file slots. */
+ int data_next; /* Next Database data file slot. */
+
+ int db_mode; /* Default open permissions. */
+
+ void *reginfo; /* REGINFO structure reference. */
+ DB_FH *lockfhp; /* fcntl(2) locking file handle. */
+ long shm_key; /* shmget(2) key. */
+
+ void *lg_handle; /* Log handle. */
+
+ void *lk_handle; /* Lock handle. */
+
+ void *mp_handle; /* Mpool handle. */
+
+ void *tx_handle; /* Txn handle. */
+
+ int (**dtab) /* Dispatch table */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtab_size; /* Slots in the dispatch table. */
+
+ void *cl_handle; /* RPC: remote client handle. */
+ long cl_id; /* RPC: Remote client env id. */
+
+ int dblocal_ref; /* DB_ENV_DBLOCAL: reference count. */
+ u_int32_t db_mutexlocks; /* db_set_mutexlocks */
+
+ /*
+ * List of open DB handles for this DB_ENV, used for cursor
+ * adjustment. Must be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the
+ * mutex may need to be stored elsewhere on architectures unable
+ * to support mutexes in heap memory, e.g. HP/UX 9.
+ */
+ void *dblist_mutexp; /* Mutex. */
+ /*
+ * !!!
+ * Explicit representation of structure in queue.h.
+ * LIST_HEAD(dblist, __db);
+ */
+ struct {
+ struct __db *lh_first;
+ } dblist;
+
+ /*
+ * XA support.
+ *
+ * !!!
+ * Explicit representations of structures in queue.h.
+ *
+ * TAILQ_ENTRY(__db_env);
+ */
+ struct {
+ struct __db_env *tqe_next;
+ struct __db_env **tqe_prev;
+ } links;
+ int xa_rmid; /* XA Resource Manager ID. */
+ DB_TXN *xa_txn; /* XA Current transaction. */
+
+ void *cj_internal; /* C++/Java private. */
+
+ /* Methods. */
+ int (*close) __P((DB_ENV *, u_int32_t));
+ void (*err) __P((const DB_ENV *, int, const char *, ...));
+ void (*errx) __P((const DB_ENV *, const char *, ...));
+ int (*open) __P((DB_ENV *, const char *, u_int32_t, int));
+ int (*remove) __P((DB_ENV *, const char *, u_int32_t));
+ int (*set_data_dir) __P((DB_ENV *, const char *));
+ void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB_ENV *, FILE *));
+ void (*set_errpfx) __P((DB_ENV *, const char *));
+ int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+ int (*set_flags) __P((DB_ENV *, u_int32_t, int));
+ int (*set_mutexlocks) __P((DB_ENV *, int));
+ int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ int (*set_recovery_init) __P((DB_ENV *, int (*)(DB_ENV *)));
+ int (*set_server) __P((DB_ENV *, char *, long, long, u_int32_t));
+ int (*set_shm_key) __P((DB_ENV *, long));
+ int (*set_tmp_dir) __P((DB_ENV *, const char *));
+ int (*set_verbose) __P((DB_ENV *, u_int32_t, int));
+
+ int (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_dir) __P((DB_ENV *, const char *));
+ int (*set_lg_max) __P((DB_ENV *, u_int32_t));
+
+ int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
+ int (*set_lk_detect) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
+
+ int (*set_mp_mmapsize) __P((DB_ENV *, size_t));
+ int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
+
+ int (*set_tx_max) __P((DB_ENV *, u_int32_t));
+ int (*set_tx_recover) __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+ int (*set_tx_timestamp) __P((DB_ENV *, time_t *));
+
+#ifdef CONFIG_TEST
+#define DB_TEST_PREOPEN 1 /* before __os_open */
+#define DB_TEST_POSTOPEN 2 /* after __os_open */
+#define DB_TEST_POSTLOGMETA 3 /* after logging meta in btree */
+#define DB_TEST_POSTLOG 4 /* after logging all pages */
+#define DB_TEST_POSTSYNC 5 /* after syncing the log */
+#define DB_TEST_PRERENAME 6 /* before __os_rename */
+#define DB_TEST_POSTRENAME 7 /* after __os_rename */
+ int test_abort; /* Abort value for testing. */
+ int test_copy; /* Copy value for testing. */
+#endif
+
+#define DB_ENV_CDB 0x00001 /* DB_INIT_CDB. */
+#define DB_ENV_CDB_ALLDB 0x00002 /* CDB environment wide locking. */
+#define DB_ENV_CREATE 0x00004 /* DB_CREATE set. */
+#define DB_ENV_DBLOCAL 0x00008 /* DB_ENV allocated for private DB. */
+#define DB_ENV_LOCKDOWN 0x00010 /* DB_LOCKDOWN set. */
+#define DB_ENV_NOMMAP 0x00020 /* DB_NOMMAP set. */
+#define DB_ENV_OPEN_CALLED 0x00040 /* DBENV->open called (paths valid). */
+#define DB_ENV_PRIVATE 0x00080 /* DB_PRIVATE set. */
+#define DB_ENV_RPCCLIENT 0x00100 /* DB_CLIENT set. */
+#define DB_ENV_STANDALONE 0x00200 /* Test: freestanding environment. */
+#define DB_ENV_SYSTEM_MEM 0x00400 /* DB_SYSTEM_MEM set. */
+#define DB_ENV_THREAD 0x00800 /* DB_THREAD set. */
+#define DB_ENV_TXN_NOSYNC 0x01000 /* DB_TXN_NOSYNC set. */
+#define DB_ENV_USER_ALLOC 0x02000 /* User allocated the structure. */
+ u_int32_t flags; /* Flags. */
+};
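
A compact sketch of how an application obtains and opens one of these handles through the method table above; the home directory, error prefix and mode are invented:

#include <db.h>

/* Sketch: create and open a transactional environment. */
static int
open_env(DB_ENV **dbenvp)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	dbenv->set_errpfx(dbenv, "example");

	/* DB 3.x signature: home directory, flags, mode. */
	if ((ret = dbenv->open(dbenv, "/tmp/dbhome",
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
	    DB_INIT_MPOOL | DB_INIT_TXN, 0644)) != 0) {
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}
	*dbenvp = dbenv;
	return (0);
}
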
+
+/*******************************************************
+ * Access methods.
+ *******************************************************/
+/*
+ * !!!
+ * Changes here must be reflected in java/src/com/sleepycat/db/Db.java.
+ */
+typedef enum {
+ DB_BTREE=1,
+ DB_HASH,
+ DB_RECNO,
+ DB_QUEUE,
+ DB_UNKNOWN /* Figure it out on open. */
+} DBTYPE;
+
+#define DB_BTREEVERSION 8 /* Current btree version. */
+#define DB_BTREEOLDVER 6 /* Oldest btree version supported. */
+#define DB_BTREEMAGIC 0x053162
+
+#define DB_HASHVERSION 7 /* Current hash version. */
+#define DB_HASHOLDVER 4 /* Oldest hash version supported. */
+#define DB_HASHMAGIC 0x061561
+
+#define DB_QAMVERSION 3 /* Current queue version. */
+#define DB_QAMOLDVER 1 /* Oldest queue version supported. */
+#define DB_QAMMAGIC 0x042253
+
+#define DB_LOGVERSION 3 /* Current log version. */
+#define DB_LOGOLDVER 3 /* Oldest log version supported. */
+#define DB_LOGMAGIC 0x040988
+
+/*
+ * DB access method and cursor operation values. Each value is an operation
+ * code to which additional bit flags are added.
+ */
+#define DB_AFTER 1 /* c_put() */
+#define DB_APPEND 2 /* put() */
+#define DB_BEFORE 3 /* c_put() */
+#define DB_CACHED_COUNTS 4 /* stat() */
+#define DB_CHECKPOINT 5 /* log_put(), log_get() */
+#define DB_CONSUME 6 /* get() */
+#define DB_CONSUME_WAIT 7 /* get() */
+#define DB_CURLSN 8 /* log_put() */
+#define DB_CURRENT 9 /* c_get(), c_put(), log_get() */
+#define DB_FIRST 10 /* c_get(), log_get() */
+#define DB_FLUSH 11 /* log_put() */
+#define DB_GET_BOTH 12 /* get(), c_get() */
+#define DB_GET_BOTHC 13 /* c_get() (internal) */
+#define DB_GET_RECNO 14 /* c_get() */
+#define DB_JOIN_ITEM 15 /* c_get(); do not do primary lookup */
+#define DB_KEYFIRST 16 /* c_put() */
+#define DB_KEYLAST 17 /* c_put() */
+#define DB_LAST 18 /* c_get(), log_get() */
+#define DB_NEXT 19 /* c_get(), log_get() */
+#define DB_NEXT_DUP 20 /* c_get() */
+#define DB_NEXT_NODUP 21 /* c_get() */
+#define DB_NODUPDATA 22 /* put(), c_put() */
+#define DB_NOOVERWRITE 23 /* put() */
+#define DB_NOSYNC 24 /* close() */
+#define DB_POSITION 25 /* c_dup() */
+#define DB_POSITIONI 26 /* c_dup() (internal) */
+#define DB_PREV 27 /* c_get(), log_get() */
+#define DB_PREV_NODUP 28 /* c_get(), log_get() */
+#define DB_RECORDCOUNT 29 /* stat() */
+#define DB_SET 30 /* c_get(), log_get() */
+#define DB_SET_RANGE 31 /* c_get() */
+#define DB_SET_RECNO 32 /* get(), c_get() */
+#define DB_WRITECURSOR 33 /* cursor() */
+#define DB_WRITELOCK 34 /* cursor() (internal) */
+
+/* This has to change when the max opcode hits 255. */
+#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */
+#define DB_RMW 0x80000000 /* Acquire write flag immediately. */
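
A small illustration (not part of the patch) of how an operation code and the DB_RMW bit share one flags word, e.g. a cursor get issued as DB_SET | DB_RMW:

#include <db.h>

/* Illustrative only: split a flags word back into opcode and RMW bit. */
static void
decompose_flags(u_int32_t flags, u_int32_t *opcodep, int *rmwp)
{
	*opcodep = flags & DB_OPFLAGS_MASK;	/* e.g. DB_SET */
	*rmwp = (flags & DB_RMW) != 0;		/* write lock requested */
}
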
+
+/*
+ * DB (user visible) error return codes.
+ *
+ * !!!
+ * Changes to any of the user visible error return codes must be reflected
+ * in java/src/com/sleepycat/db/Db.java.
+ *
+ * !!!
+ * For source compatibility with DB 2.X deadlock return (EAGAIN), use the
+ * following:
+ * #include <errno.h>
+ * #define DB_LOCK_DEADLOCK EAGAIN
+ *
+ * !!!
+ * We don't want our error returns to conflict with other packages where
+ * possible, so pick a base error value that's hopefully not common. We
+ * document that we own the error name space from -30,800 to -30,999.
+ */
+/* Public error return codes. */
+#define DB_INCOMPLETE (-30999)/* Sync didn't finish. */
+#define DB_KEYEMPTY (-30998)/* Key/data deleted or never created. */
+#define DB_KEYEXIST (-30997)/* The key/data pair already exists. */
+#define DB_LOCK_DEADLOCK (-30996)/* Deadlock. */
+#define DB_LOCK_NOTGRANTED (-30995)/* Lock unavailable. */
+#define DB_NOSERVER (-30994)/* Server panic return. */
+#define DB_NOSERVER_HOME (-30993)/* Bad home sent to server. */
+#define DB_NOSERVER_ID (-30992)/* Bad ID sent to server. */
+#define DB_NOTFOUND (-30991)/* Key/data pair not found (EOF). */
+#define DB_OLD_VERSION (-30990)/* Out-of-date version. */
+#define DB_RUNRECOVERY (-30989)/* Panic return. */
+#define DB_VERIFY_BAD (-30988)/* Verify failed; bad format. */
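
A hedged sketch of the caller-side handling these public codes imply; the helper name is invented, and the policy shown (treating an absent key as success, passing deadlocks back for retry) is only one possibility:

#include <db.h>

/* Hypothetical helper: classify a DB->get return value. */
static int
handle_get_result(DB *dbp, int ret)
{
	switch (ret) {
	case 0:
		return (0);		/* Found. */
	case DB_NOTFOUND:
		return (0);		/* Absent key is not an error here. */
	case DB_LOCK_DEADLOCK:
		return (ret);		/* Caller should abort and retry. */
	default:
		dbp->err(dbp, ret, "DB->get");
		return (ret);
	}
}
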
+
+/* DB (private) error return codes. */
+#define DB_ALREADY_ABORTED (-30899)
+#define DB_DELETED (-30898)/* Recovery file marked deleted. */
+#define DB_JAVA_CALLBACK (-30897)/* Exception during a java callback. */
+#define DB_NEEDSPLIT (-30896)/* Page needs to be split. */
+#define DB_SWAPBYTES (-30895)/* Database needs byte swapping. */
+#define DB_TXN_CKP (-30894)/* Encountered ckp record in log. */
+#define DB_VERIFY_FATAL (-30893)/* Fatal: DB->verify cannot proceed. */
+
+#define DB_FILE_ID_LEN 20 /* DB file ID length. */
+
+/* DB access method description structure. */
+struct __db {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ u_int32_t pgsize; /* Database logical page size. */
+
+ /* Callbacks. */
+ int (*db_append_recno) __P((DB *, DBT *, db_recno_t));
+ void (*db_feedback) __P((DB *, int, int));
+ void *(*db_malloc) __P((size_t));
+ void *(*db_realloc) __P((void *, size_t));
+ int (*dup_compare) __P((DB *, const DBT *, const DBT *));
+
+ void *app_private; /* Application-private handle. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ DB_ENV *dbenv; /* Backing environment. */
+
+ DBTYPE type; /* DB access method type. */
+
+ DB_MPOOLFILE *mpf; /* Backing buffer pool. */
+
+ void *mutexp; /* Synchronization for free threading */
+
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */
+
+ u_int32_t adj_fileid; /* File's unique ID for curs. adj. */
+
+#define DB_LOGFILEID_INVALID -1
+ int32_t log_fileid; /* File's unique ID for logging. */
+ db_pgno_t meta_pgno; /* Meta page number */
+ DB_TXN *open_txn; /* Transaction to protect creates. */
+
+ long cl_id; /* RPC: remote client id. */
+
+ /*
+ * !!!
+ * Some applications use DB but implement their own locking outside of
+ * DB. If they're using fcntl(2) locking on the underlying database
+ * file, and we open and close a file descriptor for that file, we will
+ * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an
+ * undocumented interface to support this usage which leaves any file
+ * descriptors we open until DB->close. This will only work with the
+ * DB->open interface and simple caches, e.g., creating a transaction
+ * thread may open/close file descriptors this flag doesn't protect.
+ * Locking with fcntl(2) on a file that you don't own is a very, very
+ * unsafe thing to do. 'Nuff said.
+ */
+ DB_FH *saved_open_fhp; /* Saved file handle. */
+
+ /*
+ * Linked list of DBP's, used in the log's dbentry table
+ * to keep track of all open db handles for a given log id.
+ * !!!
+ * Explicit representations of structures in queue.h.
+ *
+ * TAILQ_ENTRY(__db) links;
+ */
+ struct {
+ struct __db *tqe_next;
+ struct __db **tqe_prev;
+ } links;
+
+ /*
+ * Linked list of DBP's, linked from the DB_ENV, used to
+ * keep track of all open db handles for cursor adjustment.
+ *
+ * XXX
+ * Eventually, this should be merged with "links" above.
+ *
+ * !!!
+ * Explicit representations of structures in queue.h.
+ *
+ * LIST_ENTRY(__db) dblistlinks;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } dblistlinks;
+
+ /*
+ * Cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures in queue.h.
+ *
+ * TAILQ_HEAD(free_queue, __dbc);
+ * TAILQ_HEAD(active_queue, __dbc);
+ * TAILQ_HEAD(join_queue, __dbc);
+ */
+ struct {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } free_queue;
+ struct {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } active_queue;
+ struct {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } join_queue;
+
+ void *bt_internal; /* Btree/Recno access method private. */
+ void *cj_internal; /* C++/Java private. */
+ void *h_internal; /* Hash access method private. */
+ void *q_internal; /* Queue access method private. */
+ void *xa_internal; /* XA private. */
+
+ /* Methods. */
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ void (*err) __P((DB *, int, const char *, ...));
+ void (*errx) __P((DB *, const char *, ...));
+ int (*fd) __P((DB *, int *));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*get_byteswapped) __P((DB *));
+ DBTYPE
+ (*get_type) __P((DB *));
+ int (*join) __P((DB *, DBC **, DBC **, u_int32_t));
+ int (*key_range) __P((DB *,
+ DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ int (*open) __P((DB *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*remove) __P((DB *, const char *, const char *, u_int32_t));
+ int (*rename) __P((DB *,
+ const char *, const char *, const char *, u_int32_t));
+ int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+ int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
+ int (*set_dup_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ void (*set_errcall) __P((DB *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB *, FILE *));
+ void (*set_errpfx) __P((DB *, const char *));
+ int (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
+ int (*set_flags) __P((DB *, u_int32_t));
+ int (*set_lorder) __P((DB *, int));
+ int (*set_malloc) __P((DB *, void *(*)(size_t)));
+ int (*set_pagesize) __P((DB *, u_int32_t));
+ int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
+ int (*set_realloc) __P((DB *, void *(*)(void *, size_t)));
+ int (*stat) __P((DB *, void *, void *(*)(size_t), u_int32_t));
+ int (*sync) __P((DB *, u_int32_t));
+ int (*upgrade) __P((DB *, const char *, u_int32_t));
+ int (*verify) __P((DB *,
+ const char *, const char *, FILE *, u_int32_t));
+
+ int (*set_bt_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_bt_maxkey) __P((DB *, u_int32_t));
+ int (*set_bt_minkey) __P((DB *, u_int32_t));
+ int (*set_bt_prefix) __P((DB *,
+ size_t (*)(DB *, const DBT *, const DBT *)));
+
+ int (*set_h_ffactor) __P((DB *, u_int32_t));
+ int (*set_h_hash) __P((DB *,
+ u_int32_t (*)(DB *, const void *, u_int32_t)));
+ int (*set_h_nelem) __P((DB *, u_int32_t));
+
+ int (*set_re_delim) __P((DB *, int));
+ int (*set_re_len) __P((DB *, u_int32_t));
+ int (*set_re_pad) __P((DB *, int));
+ int (*set_re_source) __P((DB *, const char *));
+ int (*set_q_extentsize) __P((DB *, u_int32_t));
+
+ int (*db_am_remove) __P((DB *, const char *,
+ const char *, DB_LSN *, int (**)(DB *, void*), void **));
+ int (*db_am_rename) __P((DB *,
+ const char *, const char *, const char *));
+
+#define DB_OK_BTREE 0x01
+#define DB_OK_HASH 0x02
+#define DB_OK_QUEUE 0x04
+#define DB_OK_RECNO 0x08
+ u_int32_t am_ok; /* Legal AM choices. */
+
+#define DB_AM_DISCARD 0x00001 /* Discard any cached pages. */
+#define DB_AM_DUP 0x00002 /* DB_DUP. */
+#define DB_AM_DUPSORT 0x00004 /* DB_DUPSORT. */
+#define DB_AM_INMEM 0x00008 /* In-memory; no sync on close. */
+#define DB_AM_PGDEF 0x00010 /* Page size was defaulted. */
+#define DB_AM_RDONLY 0x00020 /* Database is readonly. */
+#define DB_AM_RECOVER 0x00040 /* DBP opened by recovery routine. */
+#define DB_AM_SUBDB 0x00080 /* Subdatabases supported. */
+#define DB_AM_SWAP 0x00100 /* Pages need to be byte-swapped. */
+#define DB_AM_TXN 0x00200 /* DBP was in a transaction. */
+#define DB_AM_VERIFYING 0x00400 /* DB handle is in the verifier. */
+#define DB_BT_RECNUM 0x00800 /* DB_RECNUM. */
+#define DB_BT_REVSPLIT 0x01000 /* DB_REVSPLITOFF. */
+#define DB_DBM_ERROR 0x02000 /* Error in DBM/NDBM database. */
+#define DB_OPEN_CALLED 0x04000 /* DB->open called. */
+#define DB_RE_DELIMITER 0x08000 /* Variable-length delimiter set. */
+#define DB_RE_FIXEDLEN 0x10000 /* Fixed-length records. */
+#define DB_RE_PAD 0x20000 /* Fixed-length record pad. */
+#define DB_RE_RENUMBER 0x40000 /* DB_RENUMBER. */
+#define DB_RE_SNAPSHOT 0x80000 /* DB_SNAPSHOT. */
+ u_int32_t flags;
+};
+
+/*
+ * DB_LOCK_ILOCK --
+ * Internal DB access method lock.
+ */
+struct __db_ilock {
+ db_pgno_t pgno; /* Page being locked. */
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */
+#define DB_RECORD_LOCK 1
+#define DB_PAGE_LOCK 2
+ u_int8_t type; /* Record or Page lock */
+};
+
+/*
+ * DB_LOCK --
+ * The structure is allocated by the caller and filled in during a
+ * lock_get request (or a lock_vec/DB_LOCK_GET).
+ */
+struct __db_lock_u {
+ size_t off; /* Offset of the lock in the region */
+ u_int32_t ndx; /* Index of the object referenced by
+ * this lock; used for locking. */
+ u_int32_t gen; /* Generation number of this lock. */
+};
+
+/* Cursor description structure. */
+struct __dbc {
+ DB *dbp; /* Related DB access method. */
+ DB_TXN *txn; /* Associated transaction. */
+
+ /*
+ * !!!
+ * Explicit representations of structures in queue.h.
+ *
+ * TAILQ_ENTRY(__dbc) links; Active/free cursor queues.
+ */
+ struct {
+ DBC *tqe_next;
+ DBC **tqe_prev;
+ } links;
+
+ DBT rkey; /* Returned key. */
+ DBT rdata; /* Returned data. */
+
+ u_int32_t lid; /* Default process' locker id. */
+ u_int32_t locker; /* Locker for this operation. */
+ DBT lock_dbt; /* DBT referencing lock. */
+ DB_LOCK_ILOCK lock; /* Object to be locked. */
+ DB_LOCK mylock; /* Lock held on this cursor. */
+
+ long cl_id; /* Remote client id. */
+
+ DBTYPE dbtype; /* Cursor type. */
+
+ DBC_INTERNAL *internal; /* Access method private. */
+
+ int (*c_close) __P((DBC *)); /* Methods: public. */
+ int (*c_count) __P((DBC *, db_recno_t *, u_int32_t));
+ int (*c_del) __P((DBC *, u_int32_t));
+ int (*c_dup) __P((DBC *, DBC **, u_int32_t));
+ int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+ int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+ /* Methods: private. */
+ int (*c_am_close) __P((DBC *, db_pgno_t, int *));
+ int (*c_am_del) __P((DBC *));
+ int (*c_am_destroy) __P((DBC *));
+ int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_writelock) __P((DBC *));
+
+#define DBC_ACTIVE 0x001 /* Cursor is being used. */
+#define DBC_OPD 0x002 /* Cursor references off-page dups. */
+#define DBC_RECOVER 0x004 /* Cursor created by recovery routine
+ * (do not log or lock).
+ */
+#define DBC_RMW 0x008 /* Acquire write flag in read op. */
+#define DBC_WRITECURSOR 0x010 /* Cursor may be used to write (CDB). */
+#define DBC_WRITEDUP 0x020 /* idup'ed DBC_WRITECURSOR (CDB). */
+#define DBC_WRITER 0x040 /* Cursor immediately writing (CDB). */
+#define DBC_TRANSIENT 0x080 /* Cursor is transient. */
+#define DBC_COMPENSATE 0x100 /* Cursor is doing compensation
+ * do not lock.
+ */
+ u_int32_t flags;
+};
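
To show the public cursor methods above in motion, a short traversal sketch (error handling abbreviated; not part of the patch):

#include <string.h>
#include <db.h>

/* Sketch: walk every key/data pair in a database with a cursor. */
static int
dump_all(DB *dbp)
{
	DBC *dbc;
	DBT key, data;
	int ret;

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
		/* key.data/key.size and data.data/data.size are valid here. */
	}
	if (ret == DB_NOTFOUND)		/* Normal end of traversal. */
		ret = 0;

	(void)dbc->c_close(dbc);
	return (ret);
}
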
+
+/* Key range statistics structure */
+struct __key_range {
+ double less;
+ double equal;
+ double greater;
+};
+
+/* Btree/Recno statistics structure. */
+struct __db_bt_stat {
+ u_int32_t bt_magic; /* Magic number. */
+ u_int32_t bt_version; /* Version number. */
+ u_int32_t bt_metaflags; /* Metadata flags. */
+ u_int32_t bt_nkeys; /* Number of unique keys. */
+ u_int32_t bt_ndata; /* Number of data items. */
+ u_int32_t bt_pagesize; /* Page size. */
+ u_int32_t bt_maxkey; /* Maxkey value. */
+ u_int32_t bt_minkey; /* Minkey value. */
+ u_int32_t bt_re_len; /* Fixed-length record length. */
+ u_int32_t bt_re_pad; /* Fixed-length record pad. */
+ u_int32_t bt_levels; /* Tree levels. */
+ u_int32_t bt_int_pg; /* Internal pages. */
+ u_int32_t bt_leaf_pg; /* Leaf pages. */
+ u_int32_t bt_dup_pg; /* Duplicate pages. */
+ u_int32_t bt_over_pg; /* Overflow pages. */
+ u_int32_t bt_free; /* Pages on the free list. */
+ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */
+ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */
+ u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */
+ u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */
+};
+
+/* Queue statistics structure. */
+struct __db_qam_stat {
+ u_int32_t qs_magic; /* Magic number. */
+ u_int32_t qs_version; /* Version number. */
+ u_int32_t qs_metaflags; /* Metadata flags. */
+ u_int32_t qs_nkeys; /* Number of unique keys. */
+ u_int32_t qs_ndata; /* Number of data items. */
+ u_int32_t qs_pagesize; /* Page size. */
+ u_int32_t qs_pages; /* Data pages. */
+ u_int32_t qs_re_len; /* Fixed-length record length. */
+ u_int32_t qs_re_pad; /* Fixed-length record pad. */
+ u_int32_t qs_pgfree; /* Bytes free in data pages. */
+ u_int32_t qs_first_recno; /* First not deleted record. */
+ u_int32_t qs_cur_recno; /* Last allocated record number. */
+};
+
+/* Hash statistics structure. */
+struct __db_h_stat {
+ u_int32_t hash_magic; /* Magic number. */
+ u_int32_t hash_version; /* Version number. */
+ u_int32_t hash_metaflags; /* Metadata flags. */
+ u_int32_t hash_nkeys; /* Number of unique keys. */
+ u_int32_t hash_ndata; /* Number of data items. */
+ u_int32_t hash_pagesize; /* Page size. */
+ u_int32_t hash_nelem; /* Original nelem specified. */
+ u_int32_t hash_ffactor; /* Fill factor specified at create. */
+ u_int32_t hash_buckets; /* Number of hash buckets. */
+ u_int32_t hash_free; /* Pages on the free list. */
+ u_int32_t hash_bfree; /* Bytes free on bucket pages. */
+ u_int32_t hash_bigpages; /* Number of big key/data pages. */
+ u_int32_t hash_big_bfree; /* Bytes free on big item pages. */
+ u_int32_t hash_overflows; /* Number of overflow pages. */
+ u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */
+ u_int32_t hash_dup; /* Number of dup pages. */
+ u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */
+};
+
+int db_create __P((DB **, DB_ENV *, u_int32_t));
+int db_env_create __P((DB_ENV **, u_int32_t));
+int db_env_set_func_close __P((int (*)(int)));
+int db_env_set_func_dirfree __P((void (*)(char **, int)));
+int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *)));
+int db_env_set_func_exists __P((int (*)(const char *, int *)));
+int db_env_set_func_free __P((void (*)(void *)));
+int db_env_set_func_fsync __P((int (*)(int)));
+int db_env_set_func_ioinfo __P((int (*)(const char *,
+ int, u_int32_t *, u_int32_t *, u_int32_t *)));
+int db_env_set_func_malloc __P((void *(*)(size_t)));
+int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **)));
+int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+int db_env_set_func_rename __P((int (*)(const char *, const char *)));
+int db_env_set_func_seek
+ __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
+int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+int db_env_set_func_unlink __P((int (*)(const char *)));
+int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t)));
+int db_env_set_func_yield __P((int (*)(void)));
+int db_env_set_pageyield __P((int));
+int db_env_set_panicstate __P((int));
+int db_env_set_region_init __P((int));
+int db_env_set_tas_spins __P((u_int32_t));
+char *db_strerror __P((int));
+char *db_version __P((int *, int *, int *));
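/*
 * An illustrative usage sketch for the handle-creation entry points above;
 * the file name, flags, and mode are assumptions, and DB->open() is shown
 * with this release's signature (no transaction argument).
 */
#include <stdio.h>
#include "db.h"

int
example_db_create(void)
{
    DB *dbp;
    int major, minor, patch, ret;

    (void)db_version(&major, &minor, &patch);

    if ((ret = db_create(&dbp, NULL, 0)) != 0)
        return (ret);

    /* DB->open: file, database, type, flags, mode. */
    if ((ret = dbp->open(dbp,
        "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
        fprintf(stderr, "access.db: %s\n", db_strerror(ret));
        (void)dbp->close(dbp, 0);
        return (ret);
    }
    return (dbp->close(dbp, 0));
}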
+
+/*******************************************************
+ * Locking
+ *******************************************************/
+#define DB_LOCKVERSION 1
+
+/* Flag values for lock_vec(), lock_get(). */
+#define DB_LOCK_NOWAIT 0x01 /* Don't wait on unavailable lock. */
+#define DB_LOCK_RECORD 0x02 /* Internal: record lock. */
+#define DB_LOCK_UPGRADE 0x04 /* Internal: upgrade existing lock. */
+#define DB_LOCK_SWITCH 0x08 /* Internal: switch existing lock. */
+
+/* Flag values for lock_detect(). */
+#define DB_LOCK_CONFLICT 0x01 /* Run on any conflict. */
+
+/*
+ * Request types.
+ *
+ * !!!
+ * Changes here must be reflected in java/src/com/sleepycat/db/Db.java.
+ */
+typedef enum {
+ DB_LOCK_DUMP=0, /* Display held locks. */
+ DB_LOCK_GET, /* Get the lock. */
+ DB_LOCK_INHERIT, /* Pass locks to parent. */
+ DB_LOCK_PUT, /* Release the lock. */
+ DB_LOCK_PUT_ALL, /* Release locker's locks. */
+ DB_LOCK_PUT_OBJ /* Release locker's locks on obj. */
+} db_lockop_t;
+
+/*
+ * Simple R/W lock modes and modes for multi-granularity intention locking.
+ *
+ * !!!
+ * These values are NOT random, as they are used as an index into the lock
+ * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 3, and DB_LOCK_IREAD
+ * must be == 4.
+ *
+ * !!!
+ * Changes here must be reflected in java/src/com/sleepycat/db/Db.java.
+ */
+typedef enum {
+ DB_LOCK_NG=0, /* Not granted. */
+ DB_LOCK_READ, /* Shared/read. */
+ DB_LOCK_WRITE, /* Exclusive/write. */
+ DB_LOCK_WAIT, /* Wait for event */
+ DB_LOCK_IWRITE, /* Intent exclusive/write. */
+ DB_LOCK_IREAD, /* Intent to share/read. */
+ DB_LOCK_IWR /* Intent to read and write. */
+} db_lockmode_t;
+
+/*
+ * Status of a lock.
+ */
+typedef enum {
+ DB_LSTAT_ABORTED, /* Lock belongs to an aborted txn. */
+ DB_LSTAT_ERR, /* Lock is bad. */
+ DB_LSTAT_FREE, /* Lock is unallocated. */
+ DB_LSTAT_HELD, /* Lock is currently held. */
+ DB_LSTAT_NOGRANT, /* Lock was not granted. */
+ DB_LSTAT_PENDING, /* Lock was waiting and has been
+ * promoted; waiting for the owner
+ * to run and upgrade it to held. */
+ DB_LSTAT_WAITING /* Lock is on the wait queue. */
+} db_status_t;
+
+/* Lock request structure. */
+struct __db_lockreq {
+ db_lockop_t op; /* Operation. */
+ db_lockmode_t mode; /* Requested mode. */
+ u_int32_t locker; /* Locker identity. */
+ DBT *obj; /* Object being locked. */
+ DB_LOCK lock; /* Lock returned. */
+};
+
+/*
+ * Commonly used conflict matrices.
+ */
+
+/* Multi-granularity locking. */
+#define DB_LOCK_RIW_N 7
+extern const u_int8_t db_riw_conflicts[];
+
+struct __db_lock_stat {
+ u_int32_t st_lastid; /* Last allocated locker ID. */
+ u_int32_t st_maxlocks; /* Maximum number of locks in table. */
+ u_int32_t st_maxlockers; /* Maximum number of lockers in table. */
+ u_int32_t st_maxobjects; /* Maximum number of objects in table. */
+ u_int32_t st_nmodes; /* Number of lock modes. */
+ u_int32_t st_nlocks; /* Current number of locks. */
+ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */
+ u_int32_t st_nlockers; /* Current number of lockers. */
+ u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */
+ u_int32_t st_nobjects; /* Current number of objects. */
+ u_int32_t st_maxnobjects; /* Maximum number of objects so far. */
+ u_int32_t st_nconflicts; /* Number of lock conflicts. */
+ u_int32_t st_nrequests; /* Number of lock gets. */
+ u_int32_t st_nreleases; /* Number of lock puts. */
+ u_int32_t st_nnowaits; /* Number of requests that would have
+ waited, but NOWAIT was set. */
+ u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+int lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+int lock_get __P((DB_ENV *,
+ u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+int lock_id __P((DB_ENV *, u_int32_t *));
+int lock_put __P((DB_ENV *, DB_LOCK *));
+int lock_stat __P((DB_ENV *, DB_LOCK_STAT **, void *(*)(size_t)));
+int lock_vec __P((DB_ENV *,
+ u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+
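/*
 * An illustrative sketch of the locking calls prototyped above: allocate a
 * locker ID, take a write lock on an application-named object, and release
 * it.  The object name and the already-opened DB_INIT_LOCK environment are
 * assumptions.
 */
#include <string.h>
#include "db.h"

int
example_lock(DB_ENV *dbenv)
{
    DBT obj;
    DB_LOCK lock;
    u_int32_t locker;
    int ret;

    if ((ret = lock_id(dbenv, &locker)) != 0)
        return (ret);

    memset(&obj, 0, sizeof(obj));
    obj.data = "my-resource";
    obj.size = sizeof("my-resource") - 1;

    /* Passing DB_LOCK_NOWAIT instead of 0 would fail rather than block. */
    if ((ret = lock_get(dbenv,
        locker, 0, &obj, DB_LOCK_WRITE, &lock)) != 0)
        return (ret);

    /* ... operate on the resource named by the lock object ... */

    return (lock_put(dbenv, &lock));
}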
+/*******************************************************
+ * Logging.
+ *******************************************************/
+/* Flag values for log_archive(). */
+#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */
+#define DB_ARCH_DATA 0x002 /* Data files. */
+#define DB_ARCH_LOG 0x004 /* Log files. */
+
+/*
+ * A DB_LSN has two parts, a fileid which identifies a specific file, and an
+ * offset within that file. The fileid is an unsigned 4-byte quantity that
+ * uniquely identifies a file within the log directory -- currently a simple
+ * counter inside the log. The offset is also an unsigned 4-byte value. The
+ * log manager guarantees the offset is never more than 4 bytes by switching
+ * to a new log file before the maximum length imposed by an unsigned 4-byte
+ * offset is reached.
+ */
+struct __db_lsn {
+ u_int32_t file; /* File ID. */
+ u_int32_t offset; /* File offset. */
+};
+
+/* Log statistics structure. */
+struct __db_log_stat {
+ u_int32_t st_magic; /* Log file magic number. */
+ u_int32_t st_version; /* Log file version number. */
+ int st_mode; /* Log file mode. */
+ u_int32_t st_lg_bsize; /* Log buffer size. */
+ u_int32_t st_lg_max; /* Maximum log file size. */
+ u_int32_t st_w_bytes; /* Bytes to log. */
+ u_int32_t st_w_mbytes; /* Megabytes to log. */
+ u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */
+ u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */
+ u_int32_t st_wcount; /* Total writes to the log. */
+ u_int32_t st_wcount_fill; /* Overflow writes to the log. */
+ u_int32_t st_scount; /* Total syncs to the log. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_cur_file; /* Current log file number. */
+ u_int32_t st_cur_offset; /* Current log file offset. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+int log_archive __P((DB_ENV *, char **[], u_int32_t, void *(*)(size_t)));
+int log_compare __P((const DB_LSN *, const DB_LSN *));
+int log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+int log_flush __P((DB_ENV *, const DB_LSN *));
+int log_get __P((DB_ENV *, DB_LSN *, DBT *, u_int32_t));
+int log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+int log_register __P((DB_ENV *, DB *, const char *));
+int log_stat __P((DB_ENV *, DB_LOG_STAT **, void *(*)(size_t)));
+int log_unregister __P((DB_ENV *, DB *));
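/*
 * An illustrative sketch of log_put()/log_flush() as declared above, writing
 * an application record to the log of an environment opened with DB_INIT_LOG.
 * The record contents are made up for the example.
 */
#include <string.h>
#include "db.h"

int
example_log(DB_ENV *dbenv)
{
    DBT rec;
    DB_LSN lsn;
    int ret;

    memset(&rec, 0, sizeof(rec));
    rec.data = "application log record";
    rec.size = sizeof("application log record");

    if ((ret = log_put(dbenv, &lsn, &rec, 0)) != 0)
        return (ret);

    /* Force this record, and everything before it, to stable storage. */
    return (log_flush(dbenv, &lsn));
}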
+
+/*******************************************************
+ * Mpool
+ *******************************************************/
+/* Flag values for memp_fget(). */
+#define DB_MPOOL_CREATE 0x001 /* Create a page. */
+#define DB_MPOOL_LAST 0x002 /* Return the last page. */
+#define DB_MPOOL_NEW 0x004 /* Create a new page. */
+#define DB_MPOOL_NEW_GROUP 0x008 /* Create a group of pages. */
+#define DB_MPOOL_EXTENT 0x010 /* Get for an extent. */
+
+/* Flag values for memp_fput(), memp_fset(). */
+#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */
+#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */
+#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */
+
+/* Mpool statistics structure. */
+struct __db_mpool_stat {
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+ u_int32_t st_ro_evict; /* Clean pages forced from the cache. */
+ u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */
+ u_int32_t st_hash_buckets; /* Number of hash buckets. */
+ u_int32_t st_hash_searches; /* Total hash chain searches. */
+ u_int32_t st_hash_longest; /* Longest hash chain searched. */
+ u_int32_t st_hash_examined; /* Total hash entries searched. */
+ u_int32_t st_page_clean; /* Clean pages. */
+ u_int32_t st_page_dirty; /* Dirty pages. */
+ u_int32_t st_page_trickle; /* Pages written by memp_trickle. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_gbytes; /* Total cache size: GB. */
+ u_int32_t st_bytes; /* Total cache size: B. */
+ u_int32_t st_ncache; /* Number of caches. */
+ u_int32_t st_regsize; /* Cache size. */
+};
+
+/* Mpool file open information structure. */
+struct __db_mpool_finfo {
+ int ftype; /* File type. */
+ DBT *pgcookie; /* Byte-string passed to pgin/pgout. */
+ u_int8_t *fileid; /* Unique file ID. */
+ int32_t lsn_offset; /* LSN offset in page. */
+ u_int32_t clear_len; /* Cleared length on created pages. */
+};
+
+/* Mpool file statistics structure. */
+struct __db_mpool_fstat {
+ char *file_name; /* File name. */
+ size_t st_pagesize; /* Page size. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+};
+
+int memp_fclose __P((DB_MPOOLFILE *));
+int memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+int memp_fopen __P((DB_ENV *, const char *,
+ u_int32_t, int, size_t, DB_MPOOL_FINFO *, DB_MPOOLFILE **));
+int memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t));
+int memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t));
+int memp_fsync __P((DB_MPOOLFILE *));
+int memp_register __P((DB_ENV *, int,
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+int memp_stat __P((DB_ENV *,
+ DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, void *(*)(size_t)));
+int memp_sync __P((DB_ENV *, DB_LSN *));
+int memp_trickle __P((DB_ENV *, int, int *));
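/*
 * An illustrative sketch of the memory pool calls above: open a file in the
 * pool, fetch (creating if necessary) page 1, mark it dirty, and close.  The
 * file name and page size are assumptions.
 */
#include "db.h"

int
example_mpool(DB_ENV *dbenv)
{
    DB_MPOOLFILE *mpf;
    db_pgno_t pgno;
    void *pagep;
    int ret, t_ret;

    if ((ret = memp_fopen(dbenv,
        "my.file", 0, 0664, 8 * 1024, NULL, &mpf)) != 0)
        return (ret);

    pgno = 1;
    if ((ret = memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) == 0) {
        /* ... modify the page through pagep ... */
        ret = memp_fput(mpf, pagep, DB_MPOOL_DIRTY);
    }

    if ((t_ret = memp_fclose(mpf)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}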
+
+/*******************************************************
+ * Transactions.
+ *******************************************************/
+#define DB_TXNVERSION 1
+
+/* Operations values to the tx_recover() function. */
+#define DB_TXN_BACKWARD_ROLL 1 /* Read the log backwards. */
+#define DB_TXN_FORWARD_ROLL 2 /* Read the log forwards. */
+#define DB_TXN_OPENFILES 3 /* Read for open files. */
+#define DB_TXN_REDO 4 /* Redo the operation. */
+#define DB_TXN_UNDO 5 /* Undo the operation. */
+
+/* Internal transaction status values. */
+
+/* Transaction statistics structure. */
+struct __db_txn_active {
+ u_int32_t txnid; /* Transaction ID */
+ u_int32_t parentid; /* Transaction ID of parent */
+ DB_LSN lsn; /* Lsn of the begin record */
+};
+
+struct __db_txn_stat {
+ DB_LSN st_last_ckp; /* lsn of the last checkpoint */
+ DB_LSN st_pending_ckp; /* last checkpoint did not finish */
+ time_t st_time_ckp; /* time of last checkpoint */
+ u_int32_t st_last_txnid; /* last transaction id given out */
+ u_int32_t st_maxtxns; /* maximum txns possible */
+ u_int32_t st_naborts; /* number of aborted transactions */
+ u_int32_t st_nbegins; /* number of begun transactions */
+ u_int32_t st_ncommits; /* number of committed transactions */
+ u_int32_t st_nactive; /* number of active transactions */
+ u_int32_t st_maxnactive; /* maximum active transactions */
+ DB_TXN_ACTIVE
+ *st_txnarray; /* array of active transactions */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+int txn_abort __P((DB_TXN *));
+int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+int txn_commit __P((DB_TXN *, u_int32_t));
+u_int32_t txn_id __P((DB_TXN *));
+int txn_prepare __P((DB_TXN *));
+int txn_stat __P((DB_ENV *, DB_TXN_STAT **, void *(*)(size_t)));
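/*
 * An illustrative sketch of the transaction calls above: begin, perform one
 * transactional put on an already-open DB handle, and commit (aborting on
 * failure).  The key and data contents are made up.
 */
#include <string.h>
#include "db.h"

int
example_txn(DB_ENV *dbenv, DB *dbp)
{
    DBT key, data;
    DB_TXN *txn;
    int ret;

    if ((ret = txn_begin(dbenv, NULL, &txn, 0)) != 0)
        return (ret);

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    key.data = "fruit";
    key.size = sizeof("fruit");
    data.data = "apple";
    data.size = sizeof("apple");

    if ((ret = dbp->put(dbp, txn, &key, &data, 0)) != 0) {
        (void)txn_abort(txn);
        return (ret);
    }
    return (txn_commit(txn, 0));
}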
+
+#ifndef DB_DBM_HSEARCH
+#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */
+#endif
+#if DB_DBM_HSEARCH != 0
+/*******************************************************
+ * Dbm/Ndbm historic interfaces.
+ *******************************************************/
+#define DBM_INSERT 0 /* Flags to dbm_store(). */
+#define DBM_REPLACE 1
+
+/*
+ * The DB support for ndbm(3) always appends this suffix to the
+ * file name to avoid overwriting the user's original database.
+ */
+#define DBM_SUFFIX ".db"
+
+#if defined(_XPG4_2)
+typedef struct {
+ char *dptr;
+ size_t dsize;
+} datum;
+#else
+typedef struct {
+ char *dptr;
+ int dsize;
+} datum;
+#endif
+
+/*
+ * Translate DBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * The global variables dbrdonly, dirf and pagf were not retained when 4BSD
+ * replaced the dbm interface with ndbm, and are not supported here.
+ */
+#define dbminit(a) __db_dbm_init(a)
+#define dbmclose __db_dbm_close
+#if !defined(__cplusplus)
+#define delete(a) __db_dbm_delete(a)
+#endif
+#define fetch(a) __db_dbm_fetch(a)
+#define firstkey __db_dbm_firstkey
+#define nextkey(a) __db_dbm_nextkey(a)
+#define store(a, b) __db_dbm_store(a, b)
+
+/* Prototype the DB calls. */
+int __db_dbm_close __P((void));
+int __db_dbm_dbrdonly __P((void));
+int __db_dbm_delete __P((datum));
+int __db_dbm_dirf __P((void));
+datum __db_dbm_fetch __P((datum));
+datum __db_dbm_firstkey __P((void));
+int __db_dbm_init __P((char *));
+datum __db_dbm_nextkey __P((datum));
+int __db_dbm_pagf __P((void));
+int __db_dbm_store __P((datum, datum));
+
+/*
+ * Translate NDBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ */
+#define dbm_clearerr(a) __db_ndbm_clearerr(a)
+#define dbm_close(a) __db_ndbm_close(a)
+#define dbm_delete(a, b) __db_ndbm_delete(a, b)
+#define dbm_dirfno(a) __db_ndbm_dirfno(a)
+#define dbm_error(a) __db_ndbm_error(a)
+#define dbm_fetch(a, b) __db_ndbm_fetch(a, b)
+#define dbm_firstkey(a) __db_ndbm_firstkey(a)
+#define dbm_nextkey(a) __db_ndbm_nextkey(a)
+#define dbm_open(a, b, c) __db_ndbm_open(a, b, c)
+#define dbm_pagfno(a) __db_ndbm_pagfno(a)
+#define dbm_rdonly(a) __db_ndbm_rdonly(a)
+#define dbm_store(a, b, c, d) __db_ndbm_store(a, b, c, d)
+
+/* Prototype the DB calls. */
+int __db_ndbm_clearerr __P((DBM *));
+void __db_ndbm_close __P((DBM *));
+int __db_ndbm_delete __P((DBM *, datum));
+int __db_ndbm_dirfno __P((DBM *));
+int __db_ndbm_error __P((DBM *));
+datum __db_ndbm_fetch __P((DBM *, datum));
+datum __db_ndbm_firstkey __P((DBM *));
+datum __db_ndbm_nextkey __P((DBM *));
+DBM *__db_ndbm_open __P((const char *, int, int));
+int __db_ndbm_pagfno __P((DBM *));
+int __db_ndbm_rdonly __P((DBM *));
+int __db_ndbm_store __P((DBM *, datum, datum, int));
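/*
 * An illustrative sketch of the ndbm-compatible calls above (the dbm_* macros
 * map onto these __db_ndbm_* routines): open a database, insert a record, and
 * fetch it back.  Assumes db.h was included with DB_DBM_HSEARCH defined to a
 * nonzero value; the file name is made up, and DB appends DBM_SUFFIX to it.
 */
#include <fcntl.h>

int
example_ndbm(void)
{
    DBM *db;
    datum key, data, found;

    if ((db = dbm_open("colors", O_CREAT | O_RDWR, 0664)) == NULL)
        return (-1);

    key.dptr = "sky";
    key.dsize = sizeof("sky") - 1;
    data.dptr = "blue";
    data.dsize = sizeof("blue") - 1;

    /* With DBM_INSERT a return of 1 means the key already existed. */
    if (dbm_store(db, key, data, DBM_INSERT) < 0) {
        dbm_close(db);
        return (-1);
    }

    found = dbm_fetch(db, key);  /* found.dptr is NULL if not present. */
    dbm_close(db);
    return (found.dptr == NULL ? -1 : 0);
}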
+
+/*******************************************************
+ * Hsearch historic interface.
+ *******************************************************/
+typedef enum {
+ FIND, ENTER
+} ACTION;
+
+typedef struct entry {
+ char *key;
+ char *data;
+} ENTRY;
+
+/*
+ * Translate HSEARCH calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ */
+#define hcreate(a) __db_hcreate(a)
+#define hdestroy __db_hdestroy
+#define hsearch(a, b) __db_hsearch(a, b)
+
+/* Prototype the DB calls. */
+int __db_hcreate __P((size_t));
+void __db_hdestroy __P((void));
+ENTRY *__db_hsearch __P((ENTRY, ACTION));
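/*
 * An illustrative sketch of the hsearch-compatible interface above: create a
 * table, ENTER one item, then FIND it again.  Assumes db.h was included with
 * DB_DBM_HSEARCH defined to a nonzero value; key and data strings are made up.
 */
int
example_hsearch(void)
{
    ENTRY item, *found;

    if (hcreate(100) == 0)          /* Zero return means failure. */
        return (-1);

    item.key = "apple";
    item.data = "fruit";
    if (hsearch(item, ENTER) == NULL) {
        hdestroy();
        return (-1);
    }

    found = hsearch(item, FIND);    /* NULL if the key is not present. */
    hdestroy();
    return (found == NULL ? -1 : 0);
}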
+#endif /* DB_DBM_HSEARCH */
+
+/*
+ * XXX
+ * MacOS: Reset Metrowerks C enum sizes.
+ */
+#ifdef __MWERKS__
+#pragma enumsalwaysint reset
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* !_DB_H_ */
diff --git a/bdb/include/db_185.h b/bdb/include/db_185.h
new file mode 100644
index 00000000000..e50ebb0adb8
--- /dev/null
+++ b/bdb/include/db_185.h
@@ -0,0 +1,175 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: db_185.h,v 11.4 2000/02/14 02:59:54 bostic Exp $
+ */
+
+#ifndef _DB_185_H_
+#define _DB_185_H_
+
+#include <sys/types.h>
+
+#include <limits.h>
+
+/*
+ * XXX
+ * Handle function prototypes and the keyword "const". This steps on name
+ * space that DB doesn't control, but all of the other solutions are worse.
+ */
+#undef __P
+#if defined(__STDC__) || defined(__cplusplus)
+#define __P(protos) protos /* ANSI C prototypes */
+#else
+#define const
+#define __P(protos) () /* K&R C preprocessor */
+#endif
+
+#define RET_ERROR -1 /* Return values. */
+#define RET_SUCCESS 0
+#define RET_SPECIAL 1
+
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+@u_int8_decl@
+@int16_decl@
+@u_int16_decl@
+@int32_decl@
+@u_int32_decl@
+#endif
+
+/*
+ * XXX
+ * SGI/IRIX already has a pgno_t.
+ */
+#ifdef sgi
+#define pgno_t db_pgno_t
+#endif
+
+#define MAX_PAGE_NUMBER 0xffffffff /* >= # of pages in a file */
+typedef u_int32_t pgno_t;
+#define MAX_PAGE_OFFSET 65535 /* >= # of bytes in a page */
+typedef u_int16_t indx_t;
+#define MAX_REC_NUMBER 0xffffffff /* >= # of records in a tree */
+typedef u_int32_t recno_t;
+
+/* Key/data structure -- a Data-Base Thang. */
+typedef struct {
+ void *data; /* data */
+ size_t size; /* data length */
+} DBT;
+
+/* Routine flags. */
+#define R_CURSOR 1 /* del, put, seq */
+#define __R_UNUSED 2 /* UNUSED */
+#define R_FIRST 3 /* seq */
+#define R_IAFTER 4 /* put (RECNO) */
+#define R_IBEFORE 5 /* put (RECNO) */
+#define R_LAST 6 /* seq (BTREE, RECNO) */
+#define R_NEXT 7 /* seq */
+#define R_NOOVERWRITE 8 /* put */
+#define R_PREV 9 /* seq (BTREE, RECNO) */
+#define R_SETCURSOR 10 /* put (RECNO) */
+#define R_RECNOSYNC 11 /* sync (RECNO) */
+
+typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;
+
+/* Access method description structure. */
+typedef struct __db {
+ DBTYPE type; /* Underlying db type. */
+ int (*close) __P((struct __db *));
+ int (*del) __P((const struct __db *, const DBT *, u_int));
+ int (*get) __P((const struct __db *, const DBT *, DBT *, u_int));
+ int (*put) __P((const struct __db *, DBT *, const DBT *, u_int));
+ int (*seq) __P((const struct __db *, DBT *, DBT *, u_int));
+ int (*sync) __P((const struct __db *, u_int));
+ void *internal; /* Access method private. */
+ int (*fd) __P((const struct __db *));
+} DB;
+
+#define BTREEMAGIC 0x053162
+#define BTREEVERSION 3
+
+/* Structure used to pass parameters to the btree routines. */
+typedef struct {
+#define R_DUP 0x01 /* duplicate keys */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t maxkeypage; /* maximum keys per page */
+ u_int32_t minkeypage; /* minimum keys per page */
+ u_int32_t psize; /* page size */
+ int (*compare) /* comparison function */
+ __P((const DBT *, const DBT *));
+ size_t (*prefix) /* prefix function */
+ __P((const DBT *, const DBT *));
+ int lorder; /* byte order */
+} BTREEINFO;
+
+#define HASHMAGIC 0x061561
+#define HASHVERSION 2
+
+/* Structure used to pass parameters to the hashing routines. */
+typedef struct {
+ u_int32_t bsize; /* bucket size */
+ u_int32_t ffactor; /* fill factor */
+ u_int32_t nelem; /* number of elements */
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t /* hash function */
+ (*hash) __P((const void *, size_t));
+ int lorder; /* byte order */
+} HASHINFO;
+
+/* Structure used to pass parameters to the record routines. */
+typedef struct {
+#define R_FIXEDLEN 0x01 /* fixed-length records */
+#define R_NOKEY 0x02 /* key not required */
+#define R_SNAPSHOT 0x04 /* snapshot the input */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t psize; /* page size */
+ int lorder; /* byte order */
+ size_t reclen; /* record length (fixed-length records) */
+ u_char bval; /* delimiting byte (variable-length records) */
+ char *bfname; /* btree file name */
+} RECNOINFO;
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+#define dbopen __db185_open
+DB *__db185_open __P((const char *, int, int, DBTYPE, const void *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_185_H_ */
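/*
 * An illustrative sketch of the DB 1.85 compatibility interface declared in
 * this header: dbopen() (mapped onto __db185_open() above) returns a handle
 * whose function pointers provide the old get/put/close calls.  The file
 * name, flags, and contents are assumptions.
 */
#include <fcntl.h>
#include "db_185.h"

int
example_db185(void)
{
    DB *db;
    DBT key, data;
    int ret;

    if ((db = dbopen("legacy.db",
        O_CREAT | O_RDWR, 0664, DB_BTREE, NULL)) == NULL)
        return (-1);

    key.data = "alpha";
    key.size = sizeof("alpha") - 1;
    data.data = "first";
    data.size = sizeof("first") - 1;

    if ((ret = db->put(db, &key, &data, 0)) == 0)
        ret = db->get(db, &key, &data, 0);  /* Overwrites data. */

    (void)db->close(db);
    return (ret);
}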
diff --git a/bdb/include/db_am.h b/bdb/include/db_am.h
new file mode 100644
index 00000000000..3a41eb3bbfd
--- /dev/null
+++ b/bdb/include/db_am.h
@@ -0,0 +1,131 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_am.h,v 11.21 2000/12/12 17:43:56 bostic Exp $
+ */
+#ifndef _DB_AM_H_
+#define _DB_AM_H_
+
+#define DB_MINPAGECACHE 10 /* Min pages access methods cache. */
+
+/* DB recovery operation codes. The low bits used to have flags or'd in. */
+#define DB_ADD_DUP 0x10
+#define DB_REM_DUP 0x20
+#define DB_ADD_BIG 0x30
+#define DB_REM_BIG 0x40
+#define DB_UNUSED_1 0x50
+#define DB_UNUSED_2 0x60
+#define DB_ADD_PAGE 0x70
+#define DB_REM_PAGE 0x80
+
+/*
+ * This is a grotesque naming hack. We have modified the btree page
+ * allocation and freeing functions to be generic and have therefore
+ * moved them into the access-method independent portion of the code.
+ * However, since we didn't want to create new log records and routines
+ * for them, we left their logging and recovery functions over in btree.
+ * To make the code look prettier, we macro them, but this is sure to
+ * confuse the heck out of everyone.
+ */
+#define __db_pg_alloc_log __bam_pg_alloc_log
+#define __db_pg_free_log __bam_pg_free_log
+
+/*
+ * Standard initialization and shutdown macros for all recovery functions.
+ *
+ * Requires the following local variables:
+ *
+ * DB *file_dbp;
+ * DB_MPOOLFILE *mpf;
+ * int ret;
+ */
+#define REC_INTRO(func, inc_count) { \
+ file_dbp = NULL; \
+ dbc = NULL; \
+ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
+ goto out; \
+ if ((ret = __db_fileid_to_db(dbenv, \
+ &file_dbp, argp->fileid, inc_count)) != 0) { \
+ if (ret == DB_DELETED) { \
+ ret = 0; \
+ goto done; \
+ } \
+ goto out; \
+ } \
+ if (file_dbp == NULL) \
+ goto out; \
+ if ((ret = file_dbp->cursor(file_dbp, NULL, &dbc, 0)) != 0) \
+ goto out; \
+ F_SET(dbc, DBC_RECOVER); \
+ mpf = file_dbp->mpf; \
+}
+
+#define REC_CLOSE { \
+ int __t_ret; \
+ if (argp != NULL) \
+ __os_free(argp, sizeof(*argp)); \
+ if (dbc != NULL && (__t_ret = dbc->c_close(dbc)) != 0 && ret == 0) \
+ return (__t_ret); \
+ return (ret); \
+}
+
+/*
+ * No-op versions of the same macros.
+ */
+#define REC_NOOP_INTRO(func) { \
+ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
+ return (ret); \
+}
+#define REC_NOOP_CLOSE \
+ if (argp != NULL) \
+ __os_free(argp, sizeof(*argp)); \
+ return (ret); \
+
+/*
+ * Standard debugging macro for all recovery functions.
+ */
+#ifdef DEBUG_RECOVER
+#define REC_PRINT(func) \
+ (void)func(dbenv, dbtp, lsnp, op, info);
+#else
+#define REC_PRINT(func)
+#endif
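/*
 * An illustrative skeleton of how a recovery function is typically assembled
 * from the REC_* macros above; "__foo" and its _args/_read/_print names are
 * hypothetical placeholders for a generated log record type, and the actual
 * redo/undo body is elided.
 */
int
__foo_recover(dbenv, dbtp, lsnp, op, info)
    DB_ENV *dbenv;
    DBT *dbtp;
    DB_LSN *lsnp;
    db_recops op;
    void *info;
{
    __foo_args *argp;
    DB *file_dbp;
    DBC *dbc;
    DB_MPOOLFILE *mpf;
    int ret;

    REC_PRINT(__foo_print);
    REC_INTRO(__foo_read, 1);

    /*
     * Fetch the affected page through mpf, then redo or undo the logged
     * change depending on the db_recops value in op.
     */

done:   *lsnp = argp->prev_lsn;
    ret = 0;

out:    REC_CLOSE;
}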
+
+/*
+ * Flags to __db_lget
+ */
+#define LCK_COUPLE 0x01 /* Lock Couple */
+#define LCK_ALWAYS 0x02 /* Lock even for off page dup cursors */
+#define LCK_ROLLBACK 0x04 /* Lock even if in rollback */
+
+/*
+ * If doing transactions we have to hold the locks associated with a data item
+ * from a page for the entire transaction. However, we don't have to hold the
+ * locks associated with walking the tree. Distinguish between the two so that
+ * we don't tie up the internal pages of the tree longer than necessary.
+ */
+#define __LPUT(dbc, lock) \
+ (lock.off != LOCK_INVALID ? \
+ lock_put((dbc)->dbp->dbenv, &(lock)) : 0)
+#define __TLPUT(dbc, lock) \
+ (lock.off != LOCK_INVALID && \
+ (dbc)->txn == NULL ? lock_put((dbc)->dbp->dbenv, &(lock)) : 0)
+
+#ifdef DIAGNOSTIC
+#define DB_CHECK_TXN(dbp, txn) \
+ if (txn != NULL) \
+ F_SET(dbp, DB_AM_TXN); \
+ else if (F_ISSET(dbp, DB_AM_TXN)) \
+ return (__db_missing_txn_err((dbp)->dbenv));
+#else
+#define DB_CHECK_TXN(dbp, txn)
+#endif
+
+#include "db_dispatch.h"
+#include "db_auto.h"
+#include "crdel_auto.h"
+#include "db_ext.h"
+#endif
diff --git a/bdb/include/db_auto.h b/bdb/include/db_auto.h
new file mode 100644
index 00000000000..88bf7419bea
--- /dev/null
+++ b/bdb/include/db_auto.h
@@ -0,0 +1,140 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef db_AUTO_H
+#define db_AUTO_H
+
+#define DB_db_addrem 41
+typedef struct _db_addrem_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ size_t nbytes;
+ DBT hdr;
+ DBT dbt;
+ DB_LSN pagelsn;
+} __db_addrem_args;
+
+int __db_addrem_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t, db_pgno_t, u_int32_t, size_t, const DBT *, const DBT *, DB_LSN *));
+int __db_addrem_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_addrem_read __P((DB_ENV *, void *, __db_addrem_args **));
+
+#define DB_db_split 42
+typedef struct _db_split_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DBT pageimage;
+ DB_LSN pagelsn;
+} __db_split_args;
+
+int __db_split_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_split_read __P((DB_ENV *, void *, __db_split_args **));
+
+#define DB_db_big 43
+typedef struct _db_big_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+ DBT dbt;
+ DB_LSN pagelsn;
+ DB_LSN prevlsn;
+ DB_LSN nextlsn;
+} __db_big_args;
+
+int __db_big_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t, db_pgno_t, db_pgno_t, db_pgno_t, const DBT *, DB_LSN *, DB_LSN *, DB_LSN *));
+int __db_big_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_big_read __P((DB_ENV *, void *, __db_big_args **));
+
+#define DB_db_ovref 44
+typedef struct _db_ovref_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ int32_t adjust;
+ DB_LSN lsn;
+} __db_ovref_args;
+
+int __db_ovref_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, int32_t, DB_LSN *));
+int __db_ovref_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_ovref_read __P((DB_ENV *, void *, __db_ovref_args **));
+
+#define DB_db_relink 45
+typedef struct _db_relink_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ db_pgno_t prev;
+ DB_LSN lsn_prev;
+ db_pgno_t next;
+ DB_LSN lsn_next;
+} __db_relink_args;
+
+int __db_relink_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *));
+int __db_relink_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_relink_read __P((DB_ENV *, void *, __db_relink_args **));
+
+#define DB_db_addpage 46
+typedef struct _db_addpage_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ db_pgno_t nextpgno;
+ DB_LSN nextlsn;
+} __db_addpage_args;
+
+int __db_addpage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_addpage_read __P((DB_ENV *, void *, __db_addpage_args **));
+
+#define DB_db_debug 47
+typedef struct _db_debug_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT op;
+ int32_t fileid;
+ DBT key;
+ DBT data;
+ u_int32_t arg_flags;
+} __db_debug_args;
+
+int __db_debug_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, int32_t, const DBT *, const DBT *, u_int32_t));
+int __db_debug_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_debug_read __P((DB_ENV *, void *, __db_debug_args **));
+
+#define DB_db_noop 48
+typedef struct _db_noop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN prevlsn;
+} __db_noop_args;
+
+int __db_noop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, DB_LSN *));
+int __db_noop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_noop_read __P((DB_ENV *, void *, __db_noop_args **));
+int __db_init_print __P((DB_ENV *));
+int __db_init_recover __P((DB_ENV *));
+#endif
diff --git a/bdb/include/db_cxx.h b/bdb/include/db_cxx.h
new file mode 100644
index 00000000000..b5599ee699c
--- /dev/null
+++ b/bdb/include/db_cxx.h
@@ -0,0 +1,652 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_cxx.h,v 11.44 2000/12/21 20:30:18 dda Exp $
+ */
+
+#ifndef _DB_CXX_H_
+#define _DB_CXX_H_
+//
+// C++ assumptions:
+//
+// To ensure portability to many platforms, both new and old, we make
+// few assumptions about the C++ compiler and library. For example,
+// we do not expect STL, templates or namespaces to be available. The
+// "newest" C++ feature used is exceptions, which are used liberally
+// to transmit error information. Even the use of exceptions can be
+// disabled at runtime; to do so, use the DB_CXX_NO_EXCEPTIONS flag
+// with the DbEnv or Db constructor.
+//
+// C++ naming conventions:
+//
+// - All top level class names start with Db.
+// - All class members start with lower case letter.
+// - All private data members are suffixed with underscore.
+// - Use underscores to divide names into multiple words.
+// - Simple data accessors are named with get_ or set_ prefix.
+// - All method names are taken from names of functions in the C
+// layer of db (usually by dropping a prefix like "db_").
+// These methods have the same argument types and order,
+// other than dropping the explicit arg that acts as "this".
+//
+// As a rule, each DbFoo object has exactly one underlying DB_FOO struct
+// (defined in db.h) associated with it. In some cases, we inherit directly
+// from the DB_FOO structure to make this relationship explicit. Often,
+// the underlying C layer allocates and deallocates these structures, so
+// there is no easy way to add any data to the DbFoo class. When you see
+// a comment about whether data is permitted to be added, this is what
+// is going on. Of course, if we need to add data to such C++ classes
+// in the future, we will arrange to have an indirect pointer to the
+// DB_FOO struct (as some of the classes already have).
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+#include <iostream.h>
+#include <stdarg.h>
+#include "db.h"
+
+class Db; // forward
+class Dbc; // forward
+class DbEnv; // forward
+class DbException; // forward
+class DbInfo; // forward
+class DbLock; // forward
+class DbLsn; // forward
+class DbMpoolFile; // forward
+class Dbt; // forward
+class DbTxn; // forward
+
+// These classes are not defined here and should be invisible
+// to the user, but some compilers require forward references.
+// There is one for each use of the DEFINE_DB_CLASS macro.
+
+class DbImp;
+class DbEnvImp;
+class DbMpoolFileImp;
+class DbTxnImp;
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Mechanisms for declaring classes
+//
+
+//
+// Every class defined in this file has an _exported next to the class name.
+// This is needed for WinTel machines so that the class methods can
+// be exported or imported in a DLL as appropriate. Users of the DLL
+// use the define DB_USE_DLL. When the DLL is built, DB_CREATE_DLL
+// must be defined.
+//
+#if defined(_MSC_VER)
+
+# if defined(DB_CREATE_DLL)
+# define _exported __declspec(dllexport) // creator of dll
+# elif defined(DB_USE_DLL)
+# define _exported __declspec(dllimport) // user of dll
+# else
+# define _exported // static lib creator or user
+# endif
+
+#else
+
+# define _exported
+
+#endif
+
+// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor.
+// The underlying type is a pointer to an opaque *Imp class, that
+// gets converted to the correct implementation class by the implementation.
+//
+// Since these defines use "private/public" labels, and leave the access
+// being "private", we always use these by convention before any data
+// members in the private section of a class. Keeping them in the
+// private section also emphasizes that they are off limits to user code.
+//
+#define DEFINE_DB_CLASS(name) \
+ public: class name##Imp* imp() { return (imp_); } \
+ public: const class name##Imp* constimp() const { return (imp_); } \
+ private: class name##Imp* imp_
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Turn off inappropriate compiler warnings
+//
+
+#ifdef _MSC_VER
+
+// These are level 4 warnings that are explicitly disabled.
+// With Visual C++, by default you do not see warnings above level 3
+// unless you use /W4. But we like to compile with the highest level
+// warnings to catch other errors.
+//
+// 4201: nameless struct/union
+// triggered by standard include file <winnt.h>
+//
+// 4514: unreferenced inline function has been removed
+// certain include files in MSVC define methods that are not called
+//
+#pragma warning(disable: 4201 4514)
+
+#endif
+
+// Some interfaces can be customized by allowing users
+// to define callback functions. For performance and
+// logistical reasons, some callbacks require you to
+// declare the functions in C, or in an extern "C" block.
+//
+extern "C" {
+ typedef void * (*db_malloc_fcn_type)
+ (size_t);
+ typedef void * (*db_realloc_fcn_type)
+ (void *, size_t);
+ typedef int (*bt_compare_fcn_type)
+ (DB *, const DBT *, const DBT *);
+ typedef size_t (*bt_prefix_fcn_type)
+ (DB *, const DBT *, const DBT *);
+ typedef int (*dup_compare_fcn_type)
+ (DB *, const DBT *, const DBT *);
+ typedef u_int32_t (*h_hash_fcn_type)
+ (DB *, const void *, u_int32_t);
+ typedef int (*pgin_fcn_type)(DB_ENV *dbenv,
+ db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+ typedef int (*pgout_fcn_type)(DB_ENV *dbenv,
+ db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Exception classes
+//
+
+// Almost any error in the DB library throws a DbException.
+// Every exception should be considered an abnormality
+// (e.g. bug, misuse of DB, file system error).
+//
+// NOTE: We would like to inherit from class exception and
+// let it handle what(), but there are
+// MSVC++ problems when <exception> is included.
+//
+class _exported DbException
+{
+public:
+ virtual ~DbException();
+ DbException(int err);
+ DbException(const char *description);
+ DbException(const char *prefix, int err);
+ DbException(const char *prefix1, const char *prefix2, int err);
+ int get_errno() const;
+ virtual const char *what() const;
+
+ DbException(const DbException &);
+ DbException &operator = (const DbException &);
+
+private:
+ char *what_;
+ int err_; // errno
+};
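//
// An illustrative sketch of the exception behavior described above: unless
// DB_CXX_NO_EXCEPTIONS is passed to the constructor, errors surface as
// DbException objects.  The environment home directory and flags are
// assumptions.
//
static void example_exceptions()
{
    DbEnv env(0);               // exceptions enabled
    try {
        env.open("/var/dbenv", DB_CREATE | DB_INIT_MPOOL, 0);
        env.close(0);
    }
    catch (DbException &e) {
        cerr << "Berkeley DB error: " << e.what()
            << " (errno " << e.get_errno() << ")" << endl;
    }
}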
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Lock classes
+//
+
+class _exported DbLock
+{
+ friend class DbEnv;
+
+public:
+ DbLock();
+
+ int put(DbEnv *env);
+
+ DbLock(const DbLock &);
+ DbLock &operator = (const DbLock &);
+
+protected:
+ // We can add data to this class if needed
+ // since its contained class is not allocated by db.
+ // (see comment at top)
+
+ DbLock(DB_LOCK);
+ DB_LOCK lock_;
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Log classes
+//
+
+class _exported DbLsn : protected DB_LSN
+{
+ friend class DbEnv; // friendship needed to cast to base class
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Memory pool classes
+//
+
+class _exported DbMpoolFile
+{
+ friend class DbEnv;
+
+public:
+ int close();
+ int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep);
+ int put(void *pgaddr, u_int32_t flags);
+ int set(void *pgaddr, u_int32_t flags);
+ int sync();
+
+ static int open(DbEnv *envp, const char *file,
+ u_int32_t flags, int mode, size_t pagesize,
+ DB_MPOOL_FINFO *finfop, DbMpoolFile **mpf);
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbMpoolFile::open()
+ // to get pointers to a DbMpoolFile,
+ // and call DbMpoolFile::close() rather than delete to release them.
+ //
+ DbMpoolFile();
+
+ // Shut g++ up.
+protected:
+ ~DbMpoolFile();
+
+private:
+ // no copying
+ DbMpoolFile(const DbMpoolFile &);
+ void operator = (const DbMpoolFile &);
+
+ DEFINE_DB_CLASS(DbMpoolFile);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Transaction classes
+//
+
+class _exported DbTxn
+{
+ friend class DbEnv;
+
+public:
+ int abort();
+ int commit(u_int32_t flags);
+ u_int32_t id();
+ int prepare();
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::txn_begin() to get pointers to a DbTxn,
+ // and call DbTxn::abort() or DbTxn::commit() rather than
+ // delete to release them.
+ //
+ DbTxn();
+ ~DbTxn();
+
+ // no copying
+ DbTxn(const DbTxn &);
+ void operator = (const DbTxn &);
+
+ DEFINE_DB_CLASS(DbTxn);
+};
+
+//
+// Berkeley DB environment class. Provides functions for opening databases.
+// Users of this library can use this class as a starting point for
+// developing a DB application: derive an application class from
+// this one and add application control logic.
+//
+// Note that if you use the default constructor, you must explicitly
+// call appinit() before any other db activity (e.g. opening files)
+//
+class _exported DbEnv
+{
+ friend class Db;
+ friend class DbLock;
+ friend class DbMpoolFile;
+
+public:
+
+ ~DbEnv();
+
+ // After using this constructor, you can set any needed
+ // parameters for the environment using the set_* methods.
+ // Then call open() to finish initializing the environment
+ // and attaching it to underlying files.
+ //
+ DbEnv(u_int32_t flags);
+
+ // These methods match those in the C interface.
+ //
+ int close(u_int32_t);
+ void err(int, const char *, ...);
+ void errx(const char *, ...);
+ int open(const char *, u_int32_t, int);
+ int remove(const char *, u_int32_t);
+ int set_cachesize(u_int32_t, u_int32_t, int);
+ int set_data_dir(const char *);
+ void set_errcall(void (*)(const char *, char *));
+ void set_errfile(FILE *);
+ void set_errpfx(const char *);
+ int set_flags(u_int32_t, int);
+ int set_feedback(void (*)(DbEnv *, int, int));
+ int set_recovery_init(int (*)(DbEnv *));
+ int set_lg_bsize(u_int32_t);
+ int set_lg_dir(const char *);
+ int set_lg_max(u_int32_t);
+ int set_lk_conflicts(u_int8_t *, int);
+ int set_lk_detect(u_int32_t);
+ int set_lk_max(u_int32_t);
+ int set_lk_max_lockers(u_int32_t);
+ int set_lk_max_locks(u_int32_t);
+ int set_lk_max_objects(u_int32_t);
+ int set_mp_mmapsize(size_t);
+ int set_mutexlocks(int);
+ static int set_pageyield(int);
+ int set_paniccall(void (*)(DbEnv *, int));
+ static int set_panicstate(int);
+ static int set_region_init(int);
+ int set_server(char *, long, long, u_int32_t);
+ int set_shm_key(long);
+ int set_tmp_dir(const char *);
+ static int set_tas_spins(u_int32_t);
+ int set_tx_max(u_int32_t);
+ int set_tx_recover(int (*)(DbEnv *, Dbt *, DbLsn *, db_recops));
+ int set_tx_timestamp(time_t *);
+ int set_verbose(u_int32_t which, int onoff);
+
+ // Version information. A static method so it can be obtained anytime.
+ //
+ static char *version(int *major, int *minor, int *patch);
+
+ // Convert DB errors to strings
+ static char *strerror(int);
+
+ // If an error is detected and the error call function
+ // or stream is set, a message is dispatched or printed.
+ // If a prefix is set, each message is prefixed.
+ //
+ // You can use set_errcall() or set_errfile() above to control
+ // error functionality. Alternatively, you can call
+ // set_error_stream() to force all errors to a C++ stream.
+ // It is unwise to mix these approaches.
+ //
+ void set_error_stream(ostream *);
+
+ // used internally
+ static void runtime_error(const char *caller, int err,
+ int error_policy);
+
+ // Lock functions
+ //
+ int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
+ int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock);
+ int lock_id(u_int32_t *idp);
+ int lock_stat(DB_LOCK_STAT **statp, db_malloc_fcn_type db_malloc_fcn);
+ int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elistp);
+
+ // Log functions
+ //
+ int log_archive(char **list[], u_int32_t flags, db_malloc_fcn_type db_malloc_fcn);
+ static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
+ int log_file(DbLsn *lsn, char *namep, size_t len);
+ int log_flush(const DbLsn *lsn);
+ int log_get(DbLsn *lsn, Dbt *data, u_int32_t flags);
+ int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+
+ int log_register(Db *dbp, const char *name);
+ int log_stat(DB_LOG_STAT **spp, db_malloc_fcn_type db_malloc_fcn);
+ int log_unregister(Db *dbp);
+
+ // Mpool functions
+ //
+ int memp_register(int ftype,
+ pgin_fcn_type pgin_fcn,
+ pgout_fcn_type pgout_fcn);
+
+ int memp_stat(DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp,
+ db_malloc_fcn_type db_malloc_fcn);
+ int memp_sync(DbLsn *lsn);
+ int memp_trickle(int pct, int *nwrotep);
+
+ // Transaction functions
+ //
+ int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags);
+ int txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags);
+ int txn_stat(DB_TXN_STAT **statp, db_malloc_fcn_type db_malloc_fcn);
+
+ // These are public only because they need to be called
+ // via C functions. They should never be called by users
+ // of this class.
+ //
+ static void _stream_error_function(const char *, char *);
+ static int _tx_recover_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn,
+ db_recops op);
+ static void _paniccall_intercept(DB_ENV *env, int errval);
+ static int _recovery_init_intercept(DB_ENV *env);
+ static void _feedback_intercept(DB_ENV *env, int opcode, int pct);
+ static void _destroy_check(const char *str, int isDbEnv);
+
+private:
+ void cleanup();
+ int initialize(DB_ENV *env);
+ int error_policy();
+
+ // Used internally
+ DbEnv(DB_ENV *, u_int32_t flags);
+
+ // no copying
+ DbEnv(const DbEnv &);
+ void operator = (const DbEnv &);
+
+ DEFINE_DB_CLASS(DbEnv);
+
+ // instance data
+ int construct_error_;
+ u_int32_t construct_flags_;
+ Db *headdb_;
+ Db *taildb_;
+ int (*tx_recover_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops);
+ int (*recovery_init_callback_)(DbEnv *);
+ void (*paniccall_callback_)(DbEnv *, int);
+ void (*feedback_callback_)(DbEnv *, int, int);
+
+ // class data
+ static ostream *error_stream_;
+};
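//
// An illustrative sketch of the intended DbEnv usage: construct, set
// parameters, then open().  DB_CXX_NO_EXCEPTIONS is used here so errors come
// back as return codes; the home directory, cache size, and flags are
// assumptions.
//
static int example_dbenv()
{
    DbEnv env(DB_CXX_NO_EXCEPTIONS);
    env.set_errpfx("example");
    env.set_error_stream(&cerr);
    env.set_cachesize(0, 1024 * 1024, 1);

    int ret = env.open("/var/dbenv", DB_CREATE | DB_INIT_MPOOL |
        DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN, 0);
    if (ret != 0)
        return (ret);

    DbTxn *txn;
    if ((ret = env.txn_begin(0, &txn, 0)) == 0)
        ret = txn->commit(0);

    env.close(0);
    return (ret);
}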
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Table access classes
+//
+
+//
+// Represents a database table = a set of keys with associated values.
+//
+class _exported Db
+{
+ friend class DbEnv;
+
+public:
+ Db(DbEnv*, u_int32_t); // create a Db object, then call open()
+ ~Db(); // does *not* call close.
+
+ // These methods exactly match those in the C interface.
+ //
+ int close(u_int32_t flags);
+ int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
+ int del(DbTxn *txnid, Dbt *key, u_int32_t flags);
+ void err(int, const char *, ...);
+ void errx(const char *, ...);
+ int fd(int *fdp);
+ int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+ int get_byteswapped() const;
+ DBTYPE get_type() const;
+ int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
+ int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t);
+ int open(const char *, const char *subname, DBTYPE, u_int32_t, int);
+ int put(DbTxn *, Dbt *, Dbt *, u_int32_t);
+ int remove(const char *, const char *, u_int32_t);
+ int rename(const char *, const char *, const char *, u_int32_t);
+ int set_bt_compare(bt_compare_fcn_type);
+ int set_bt_maxkey(u_int32_t);
+ int set_bt_minkey(u_int32_t);
+ int set_bt_prefix(bt_prefix_fcn_type);
+ int set_cachesize(u_int32_t, u_int32_t, int);
+ int set_dup_compare(dup_compare_fcn_type);
+ void set_errcall(void (*)(const char *, char *));
+ void set_errfile(FILE *);
+ void set_errpfx(const char *);
+ int set_append_recno(int (*)(Db *, Dbt *, db_recno_t));
+ int set_feedback(void (*)(Db *, int, int));
+ int set_flags(u_int32_t);
+ int set_h_ffactor(u_int32_t);
+ int set_h_hash(h_hash_fcn_type);
+ int set_h_nelem(u_int32_t);
+ int set_lorder(int);
+ int set_malloc(db_malloc_fcn_type);
+ int set_pagesize(u_int32_t);
+ int set_paniccall(void (*)(DbEnv *, int));
+ int set_realloc(db_realloc_fcn_type);
+ int set_re_delim(int);
+ int set_re_len(u_int32_t);
+ int set_re_pad(int);
+ int set_re_source(char *);
+ int set_q_extentsize(u_int32_t);
+ int stat(void *sp, db_malloc_fcn_type db_malloc_fcn, u_int32_t flags);
+ int sync(u_int32_t flags);
+ int upgrade(const char *name, u_int32_t flags);
+ int verify(const char *, const char *, ostream *, u_int32_t);
+
+ // This additional method is available for C++
+ //
+ void set_error_stream(ostream *);
+
+ // These are public only because they need to be called
+ // via C functions. They should never be called by users
+ // of this class.
+ //
+ static void _feedback_intercept(DB *db, int opcode, int pct);
+ static int _append_recno_intercept(DB *db, DBT *data, db_recno_t recno);
+private:
+
+ // no copying
+ Db(const Db &);
+ Db &operator = (const Db &);
+
+ DEFINE_DB_CLASS(Db);
+
+ void cleanup();
+ int initialize();
+ int error_policy();
+
+ // instance data
+ DbEnv *env_;
+ Db *next_;
+ Db *prev_;
+ int construct_error_;
+ u_int32_t flags_;
+ u_int32_t construct_flags_;
+ void (*feedback_callback_)(Db *, int, int);
+ int (*append_recno_callback_)(Db *, Dbt *, db_recno_t);
+};
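//
// An illustrative sketch of basic Db usage with the Dbt class defined below:
// open a btree database in an existing environment, store one key/data pair,
// and read it back.  The database name, flags, and contents are assumptions,
// and the environment is assumed to have been created with
// DB_CXX_NO_EXCEPTIONS so errors come back as return codes.
//
static int example_db(DbEnv *env)
{
    Db db(env, 0);
    int ret = db.open("access.db", 0, DB_BTREE, DB_CREATE, 0664);
    if (ret != 0)
        return (ret);

    Dbt key((void *)"fruit", sizeof("fruit"));
    Dbt data((void *)"apple", sizeof("apple"));

    if ((ret = db.put(0, &key, &data, 0)) == 0) {
        Dbt result;                 // DB fills in result's data and size
        ret = db.get(0, &key, &result, 0);
    }

    db.close(0);
    return (ret);
}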
+
+//
+// A chunk of data, maybe a key or value.
+//
+class _exported Dbt : private DBT
+{
+ friend class Dbc;
+ friend class Db;
+ friend class DbEnv;
+
+public:
+
+ // key/data
+ void *get_data() const;
+ void set_data(void *);
+
+ // key/data length
+ u_int32_t get_size() const;
+ void set_size(u_int32_t);
+
+ // RO: length of user buffer.
+ u_int32_t get_ulen() const;
+ void set_ulen(u_int32_t);
+
+ // RO: get/put record length.
+ u_int32_t get_dlen() const;
+ void set_dlen(u_int32_t);
+
+ // RO: get/put record offset.
+ u_int32_t get_doff() const;
+ void set_doff(u_int32_t);
+
+ // flags
+ u_int32_t get_flags() const;
+ void set_flags(u_int32_t);
+
+ Dbt(void *data, size_t size);
+ Dbt();
+ ~Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+
+private:
+ // We can add data to this class if needed
+ // since parent class is not allocated by db.
+ // (see comment at top)
+};
+
+class _exported Dbc : protected DBC
+{
+ friend class Db;
+
+public:
+ int close();
+ int count(db_recno_t *countp, u_int32_t flags);
+ int del(u_int32_t flags);
+ int dup(Dbc** cursorp, u_int32_t flags);
+ int get(Dbt* key, Dbt *data, u_int32_t flags);
+ int put(Dbt* key, Dbt *data, u_int32_t flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+ // Note: use Db::cursor() to get pointers to a Dbc,
+ // and call Dbc::close() rather than delete to release them.
+ //
+ Dbc();
+ ~Dbc();
+
+ // no copying
+ Dbc(const Dbc &);
+ Dbc &operator = (const Dbc &);
+};
+#endif /* !_DB_CXX_H_ */
diff --git a/bdb/include/db_dispatch.h b/bdb/include/db_dispatch.h
new file mode 100644
index 00000000000..003acee6f65
--- /dev/null
+++ b/bdb/include/db_dispatch.h
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: db_dispatch.h,v 11.17 2000/12/14 07:39:13 ubell Exp $
+ */
+
+#ifndef _DB_DISPATCH_H_
+#define _DB_DISPATCH_H_
+
+/*
+ * Declarations and typedefs for the list of transaction IDs used during
+ * recovery. This is a generic list used to pass along whatever information
+ * we need during recovery.
+ */
+struct __db_txnhead {
+ LIST_HEAD(__db_headlink, __db_txnlist) head;
+ u_int32_t maxid;
+ int32_t generation;
+};
+
+#define TXNLIST_INVALID_ID 0xffffffff
+struct __db_txnlist {
+ db_txnlist_type type;
+ LIST_ENTRY(__db_txnlist) links;
+ union {
+ struct {
+ u_int32_t txnid;
+ int32_t generation;
+ int32_t aborted;
+ } t;
+ struct {
+#define TXNLIST_FLAG_DELETED 0x1
+#define TXNLIST_FLAG_CLOSED 0x2
+ u_int32_t flags;
+ int32_t fileid;
+ u_int32_t count;
+ char *fname;
+ } d;
+ struct {
+ int32_t ntxns;
+ int32_t maxn;
+ DB_LSN *lsn_array;
+ } l;
+ struct {
+ int32_t nentries;
+ int32_t maxentry;
+ char *fname;
+ int32_t fileid;
+ db_pgno_t *pgno_array;
+ u_int8_t uid[DB_FILE_ID_LEN];
+ } p;
+ } u;
+};
+
+/*
+ * Flag value for __db_txnlist_lsnadd. Distinguish whether we are replacing
+ * an entry in the transaction list or adding a new one.
+ */
+
+#define TXNLIST_NEW 0x1
+
+#define DB_user_BEGIN 10000
+
+#endif
diff --git a/bdb/include/db_ext.h b/bdb/include/db_ext.h
new file mode 100644
index 00000000000..efe25424791
--- /dev/null
+++ b/bdb/include/db_ext.h
@@ -0,0 +1,208 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _db_ext_h_
+#define _db_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __crdel_fileopen_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_metasub_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_metapage_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_delete_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_rename_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_open __P((DB *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+int __db_dbopen __P((DB *, const char *, u_int32_t, int, db_pgno_t));
+int __db_master_open __P((DB *,
+ const char *, u_int32_t, int, DB **));
+int __db_dbenv_setup __P((DB *, const char *, u_int32_t));
+int __db_close __P((DB *, u_int32_t));
+int __db_remove __P((DB *, const char *, const char *, u_int32_t));
+int __db_rename __P((DB *,
+ const char *, const char *, const char *, u_int32_t));
+int __db_metabegin __P((DB *, DB_LOCK *));
+int __db_metaend __P((DB *,
+ DB_LOCK *, int, int (*)(DB *, void *), void *));
+int __db_log_page __P((DB *,
+ const char *, DB_LSN *, db_pgno_t, PAGE *));
+int __db_backup_name __P((DB_ENV *,
+ const char *, char **, DB_LSN *));
+DB *__dblist_get __P((DB_ENV *, u_int32_t));
+int __db_testcopy __P((DB *, const char *));
+int __db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+int __db_icursor
+ __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, DBC **));
+int __db_cprint __P((DB *));
+int __db_fd __P((DB *, int *));
+int __db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __db_sync __P((DB *, u_int32_t));
+int __db_c_close __P((DBC *));
+int __db_c_destroy __P((DBC *));
+int __db_c_count __P((DBC *, db_recno_t *, u_int32_t));
+int __db_c_del __P((DBC *, u_int32_t));
+int __db_c_dup __P((DBC *, DBC **, u_int32_t));
+int __db_c_newopd __P((DBC *, db_pgno_t, DBC **));
+int __db_c_get __P((DBC *, DBT *, DBT *, u_int32_t));
+int __db_c_put __P((DBC *, DBT *, DBT *, u_int32_t));
+int __db_duperr __P((DB *, u_int32_t));
+int __db_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __db_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+void __db_metaswap __P((PAGE *));
+int __db_byteswap __P((DB_ENV *, db_pgno_t, PAGE *, size_t, int));
+int __db_dispatch __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_add_recovery __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+int __deprecated_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_txnlist_init __P((DB_ENV *, void *));
+int __db_txnlist_add __P((DB_ENV *, void *, u_int32_t, int32_t));
+int __db_txnlist_remove __P((void *, u_int32_t));
+int __db_txnlist_close __P((void *, int32_t, u_int32_t));
+int __db_txnlist_delete __P((DB_ENV *,
+ void *, char *, u_int32_t, int));
+void __db_txnlist_end __P((DB_ENV *, void *));
+int __db_txnlist_find __P((void *, u_int32_t));
+void __db_txnlist_gen __P((void *, int));
+int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t));
+int __db_txnlist_lsnhead __P((void *, DB_LSN **));
+int __db_txnlist_lsninit __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *));
+int __db_add_limbo __P((DB_ENV *,
+ void *, int32_t, db_pgno_t, int32_t));
+int __db_do_the_limbo __P((DB_ENV *, DB_TXNHEAD *));
+int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *,
+ int32_t, u_int8_t [DB_FILE_ID_LEN], char *, db_pgno_t));
+void __db_txnlist_print __P((void *));
+int __db_ditem __P((DBC *, PAGE *, u_int32_t, u_int32_t));
+int __db_pitem
+ __P((DBC *, PAGE *, u_int32_t, u_int32_t, DBT *, DBT *));
+int __db_relink __P((DBC *, u_int32_t, PAGE *, PAGE **, int));
+int __db_cursorchk __P((const DB *, u_int32_t, int));
+int __db_ccountchk __P((const DB *, u_int32_t, int));
+int __db_cdelchk __P((const DB *, u_int32_t, int, int));
+int __db_cgetchk __P((const DB *, DBT *, DBT *, u_int32_t, int));
+int __db_cputchk __P((const DB *,
+ const DBT *, DBT *, u_int32_t, int, int));
+int __db_closechk __P((const DB *, u_int32_t));
+int __db_delchk __P((const DB *, DBT *, u_int32_t, int));
+int __db_getchk __P((const DB *, const DBT *, DBT *, u_int32_t));
+int __db_joinchk __P((const DB *, DBC * const *, u_int32_t));
+int __db_joingetchk __P((const DB *, DBT *, u_int32_t));
+int __db_putchk
+ __P((const DB *, DBT *, const DBT *, u_int32_t, int, int));
+int __db_removechk __P((const DB *, u_int32_t));
+int __db_statchk __P((const DB *, u_int32_t));
+int __db_syncchk __P((const DB *, u_int32_t));
+int __db_join __P((DB *, DBC **, DBC **, u_int32_t));
+int __db_new __P((DBC *, u_int32_t, PAGE **));
+int __db_free __P((DBC *, PAGE *));
+int __db_lprint __P((DBC *));
+int __db_lget __P((DBC *,
+ int, db_pgno_t, db_lockmode_t, int, DB_LOCK *));
+int __dbh_am_chk __P((DB *, u_int32_t));
+#ifdef HAVE_RPC
+int __dbcl_init __P((DB *, DB_ENV *, u_int32_t));
+#endif
+int __db_goff __P((DB *, DBT *,
+ u_int32_t, db_pgno_t, void **, u_int32_t *));
+int __db_poff __P((DBC *, const DBT *, db_pgno_t *));
+int __db_ovref __P((DBC *, db_pgno_t, int32_t));
+int __db_doff __P((DBC *, db_pgno_t));
+int __db_moff __P((DB *, const DBT *, db_pgno_t, u_int32_t,
+ int (*)(DB *, const DBT *, const DBT *), int *));
+int __db_vrfy_overflow __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ u_int32_t));
+int __db_vrfy_ovfl_structure
+ __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, u_int32_t));
+int __db_safe_goff __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ DBT *, void **, u_int32_t));
+void __db_loadme __P((void));
+int __db_dump __P((DB *, char *, char *));
+int __db_prnpage __P((DB *, db_pgno_t));
+int __db_prpage __P((DB *, PAGE *, u_int32_t));
+void __db_pr __P((u_int8_t *, u_int32_t));
+int __db_prdbt __P((DBT *, int, const char *, void *,
+ int (*)(void *, const void *), int, VRFY_DBINFO *));
+void __db_prflags __P((u_int32_t, const FN *, FILE *));
+const char *__db_pagetype_to_string __P((u_int32_t));
+int __db_prheader __P((DB *, char *, int, int, void *,
+ int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t));
+int __db_prfooter __P((void *, int (*)(void *, const void *)));
+int __db_addrem_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_big_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_ovref_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_relink_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_debug_recover __P((DB_ENV *,
+ DBT *, DB_LSN *, db_recops, void *));
+int __db_noop_recover __P((DB_ENV *,
+ DBT *, DB_LSN *, db_recops, void *));
+int __db_traverse_dup __P((DB *,
+ db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+int __db_traverse_big __P((DB *,
+ db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+int __db_reclaim_callback __P((DB *, PAGE *, void *, int *));
+int __db_ret __P((DB *,
+ PAGE *, u_int32_t, DBT *, void **, u_int32_t *));
+int __db_retcopy __P((DB *, DBT *,
+ void *, u_int32_t, void **, u_int32_t *));
+int __db_upgrade __P((DB *, const char *, u_int32_t));
+int __db_lastpgno __P((DB *, char *, DB_FH *, db_pgno_t *));
+int __db_31_offdup __P((DB *, char *, DB_FH *, int, db_pgno_t *));
+int __db_verify
+ __P((DB *, const char *, const char *, FILE *, u_int32_t));
+int __db_verify_callback __P((void *, const void *));
+int __db_verify_internal __P((DB *, const char *,
+ const char *, void *, int (*)(void *, const void *), u_int32_t));
+int __db_vrfy_datapage
+ __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __db_vrfy_meta
+ __P((DB *, VRFY_DBINFO *, DBMETA *, db_pgno_t, u_int32_t));
+int __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *));
+int __db_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
+ void *, int (*)(void *, const void *), u_int32_t));
+int __db_vrfy_inpitem __P((DB *, PAGE *,
+ db_pgno_t, u_int32_t, int, u_int32_t, u_int32_t *, u_int32_t *));
+int __db_vrfy_duptype
+ __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+int __db_salvage_duptree __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ DBT *, void *, int (*)(void *, const void *), u_int32_t));
+int __db_salvage_subdbpg
+ __P((DB *, VRFY_DBINFO *, PAGE *, void *,
+ int (*)(void *, const void *), u_int32_t));
+int __db_vrfy_dbinfo_create
+ __P((DB_ENV *, u_int32_t, VRFY_DBINFO **));
+int __db_vrfy_dbinfo_destroy __P((VRFY_DBINFO *));
+int __db_vrfy_getpageinfo
+ __P((VRFY_DBINFO *, db_pgno_t, VRFY_PAGEINFO **));
+int __db_vrfy_putpageinfo __P((VRFY_DBINFO *, VRFY_PAGEINFO *));
+int __db_vrfy_pgset __P((DB_ENV *, u_int32_t, DB **));
+int __db_vrfy_pgset_get __P((DB *, db_pgno_t, int *));
+int __db_vrfy_pgset_inc __P((DB *, db_pgno_t));
+int __db_vrfy_pgset_dec __P((DB *, db_pgno_t));
+int __db_vrfy_pgset_next __P((DBC *, db_pgno_t *));
+int __db_vrfy_childcursor __P((VRFY_DBINFO *, DBC **));
+int __db_vrfy_childput
+ __P((VRFY_DBINFO *, db_pgno_t, VRFY_CHILDINFO *));
+int __db_vrfy_ccset __P((DBC *, db_pgno_t, VRFY_CHILDINFO **));
+int __db_vrfy_ccnext __P((DBC *, VRFY_CHILDINFO **));
+int __db_vrfy_ccclose __P((DBC *));
+int __db_vrfy_pageinfo_create __P((VRFY_PAGEINFO **));
+int __db_salvage_init __P((VRFY_DBINFO *));
+void __db_salvage_destroy __P((VRFY_DBINFO *));
+int __db_salvage_getnext
+ __P((VRFY_DBINFO *, db_pgno_t *, u_int32_t *));
+int __db_salvage_isdone __P((VRFY_DBINFO *, db_pgno_t));
+int __db_salvage_markdone __P((VRFY_DBINFO *, db_pgno_t));
+int __db_salvage_markneeded
+ __P((VRFY_DBINFO *, db_pgno_t, u_int32_t));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _db_ext_h_ */
diff --git a/bdb/include/db_int.src b/bdb/include/db_int.src
new file mode 100644
index 00000000000..347169ab5cd
--- /dev/null
+++ b/bdb/include/db_int.src
@@ -0,0 +1,397 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_int.src,v 11.42 2001/01/11 17:49:17 krinsky Exp $
+ */
+
+#ifndef _DB_INTERNAL_H_
+#define _DB_INTERNAL_H_
+
+/*******************************************************
+ * General includes.
+ *******************************************************/
+#include "db.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#if defined(__STDC__) || defined(__cplusplus)
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <errno.h>
+#endif
+
+#include "queue.h"
+#include "shqueue.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*******************************************************
+ * General purpose constants and macros.
+ *******************************************************/
+#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */
+#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */
+
+#define MEGABYTE 1048576
+#define GIGABYTE 1073741824
+
+#define MS_PER_SEC 1000 /* Milliseconds in a second. */
+#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */
+
+#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */
+#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */
+
+#define RECNO_OOB 0 /* Illegal record number. */
+
+/*
+ * If we are unable to determine the underlying filesystem block size, use
+ * 8K on the grounds that most OS's use less than 8K for a VM page size.
+ */
+#define DB_DEF_IOSIZE (8 * 1024)
+
+/*
+ * Aligning items to particular sizes or in pages or memory.
+ *
+ * db_align_t --
+ * Largest integral type, used to align structures in memory. We don't store
+ * floating point types in structures, so integral types should be sufficient
+ * (and we don't have to worry about systems that store floats in other than
+ * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite
+ * structure assignments and ANSI C memcpy calls to be in-line instructions
+ * that happen to require alignment. Note: this alignment isn't sufficient for
+ * mutexes, which depend on things like cache line alignment. Mutex alignment
+ * is handled separately, in mutex.h.
+ *
+ * db_alignp_t --
+ * Integral type that's the same size as a pointer. There are places where
+ * DB modifies pointers by discarding the bottom bits to guarantee alignment.
+ * We can't use db_align_t, as it may be larger than the pointer, and compilers
+ * get upset about that. So far we haven't run on any machine where there
+ * isn't an integral type the same size as a pointer -- here's hoping.
+ */
+@db_align_t_decl@
+@db_alignp_t_decl@
+
+/* Align an integer to a specific boundary. */
+#undef ALIGN
+#define ALIGN(value, bound) \
+ (((value) + (bound) - 1) & ~(((u_int)bound) - 1))
+
+/* Align a pointer to a specific boundary. */
+#undef ALIGNP
+#define ALIGNP(value, bound) ALIGN((db_alignp_t)value, bound)
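
A minimal standalone sketch of the ALIGN idiom, assuming bound is a power of two; the macro and program below are illustrative copies, not part of the header:

#include <stdio.h>

/* Round value up to the next multiple of bound (bound a power of two). */
#define MY_ALIGN(value, bound) \
    (((value) + (bound) - 1) & ~(((unsigned)(bound)) - 1))

int
main()
{
    printf("%u\n", MY_ALIGN(13u, 8u));    /* prints 16 */
    printf("%u\n", MY_ALIGN(16u, 8u));    /* prints 16 */
    return (0);
}
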
+
+/*
+ * There are several on-page structures that are declared to have a number of
+ * fields followed by a variable length array of items. The structure size
+ * without including the variable length array or the address of the first of
+ * those elements can be found using SSZ.
+ *
+ * This macro can also be used to find the offset of a structure element in a
+ * structure. This is used in various places to copy structure elements from
+ * unaligned memory references, e.g., pointers into a packed page.
+ *
+ * There are two versions because compilers object if you take the address of
+ * an array.
+ */
+#undef SSZ
+#define SSZ(name, field) ((int)&(((name *)0)->field))
+
+#undef SSZA
+#define SSZA(name, field) ((int)&(((name *)0)->field[0]))
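
A quick sketch of how SSZ is meant to be used (the struct and helper are hypothetical, and the type names assume db.h is in scope): it plays the role of offsetof(), so a field can be copied out of a possibly unaligned page image instead of being dereferenced through a cast.

#include <string.h>

struct example { u_int32_t a; u_int32_t b; };    /* hypothetical on-page layout */

static u_int32_t
example_get_b(const u_int8_t *pagebuf)
{
    u_int32_t b;

    /* Copy rather than cast: pagebuf + offset may be unaligned. */
    memcpy(&b, pagebuf + SSZ(struct example, b), sizeof(b));
    return (b);
}
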
+
+/*
+ * Print an address as a u_long (a u_long is the largest type we can print
+ * portably). Most 64-bit systems have made longs 64-bits, so this should
+ * work.
+ */
+#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
+
+/* Structure used to print flag values. */
+typedef struct __fn {
+ u_int32_t mask; /* Flag value. */
+ const char *name; /* Flag name. */
+} FN;
+
+/* Set, clear and test flags. */
+#define FLD_CLR(fld, f) (fld) &= ~(f)
+#define FLD_ISSET(fld, f) ((fld) & (f))
+#define FLD_SET(fld, f) (fld) |= (f)
+#define F_CLR(p, f) (p)->flags &= ~(f)
+#define F_ISSET(p, f) ((p)->flags & (f))
+#define F_SET(p, f) (p)->flags |= (f)
+#define LF_CLR(f) (flags &= ~(f))
+#define LF_ISSET(f) (flags & (f))
+#define LF_SET(f) (flags |= (f))
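
A small sketch of the difference between the two families (the struct and flag bit are invented): the F_* macros operate on any structure with a flags field, while the LF_* macros operate on a local variable or parameter literally named flags.

struct toy { u_int32_t flags; };
#define TOY_OPEN 0x01

static int
toy_open(struct toy *p, u_int32_t flags)
{
    if (LF_ISSET(TOY_OPEN))        /* tests the "flags" argument */
        F_SET(p, TOY_OPEN);        /* sets the bit in p->flags */
    return (F_ISSET(p, TOY_OPEN) ? 0 : 1);
}
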
+
+/* Display separator string. */
+#undef DB_LINE
+#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+
+/* Unused, or not-used-yet variable. "Shut that bloody compiler up!" */
+#define COMPQUIET(n, v) (n) = (v)
+
+/*******************************************************
+ * Files.
+ *******************************************************/
+/*
+ * We use 1024 as the maximum path length. It's too hard to figure out what
+ * the real path length is, as it was traditionally stored in <sys/param.h>,
+ * and that file isn't always available.
+ */
+#undef MAXPATHLEN
+#define MAXPATHLEN 1024
+
+#define PATH_DOT "." /* Current working directory. */
+#define PATH_SEPARATOR "/" /* Path separator character. */
+
+/*
+ * Flags understood by __os_open.
+ */
+#define DB_OSO_CREATE 0x001 /* POSIX: O_CREAT */
+#define DB_OSO_EXCL 0x002 /* POSIX: O_EXCL */
+#define DB_OSO_LOG 0x004 /* Opening a log file. */
+#define DB_OSO_RDONLY 0x008 /* POSIX: O_RDONLY */
+#define DB_OSO_REGION 0x010 /* Opening a region file. */
+#define DB_OSO_SEQ 0x020 /* Expected sequential access. */
+#define DB_OSO_TEMP 0x040 /* Remove after last close. */
+#define DB_OSO_TRUNC 0x080 /* POSIX: O_TRUNC */
+
+/*
+ * Seek options understood by __os_seek.
+ */
+typedef enum {
+ DB_OS_SEEK_CUR, /* POSIX: SEEK_CUR */
+ DB_OS_SEEK_END, /* POSIX: SEEK_END */
+ DB_OS_SEEK_SET /* POSIX: SEEK_SET */
+} DB_OS_SEEK;
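
Presumably these map onto the POSIX whence values; a hedged sketch of such a mapping (this is not the library's __os_seek code, and <stdio.h> is assumed for the SEEK_* constants):

static int
example_whence(DB_OS_SEEK db_whence)
{
    switch (db_whence) {
    case DB_OS_SEEK_CUR:
        return (SEEK_CUR);
    case DB_OS_SEEK_END:
        return (SEEK_END);
    default:
        return (SEEK_SET);
    }
}
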
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+/* Type passed to __db_appname(). */
+typedef enum {
+ DB_APP_NONE=0, /* No type (region). */
+ DB_APP_DATA, /* Data file. */
+ DB_APP_LOG, /* Log file. */
+ DB_APP_TMP /* Temporary file. */
+} APPNAME;
+
+/*
+ * CDB_LOCKING CDB product locking.
+ * LOCKING_ON Locking has been configured.
+ * LOGGING_ON Logging has been configured.
+ * MPOOL_ON Memory pool has been configured.
+ * TXN_ON Transactions have been configured.
+ */
+#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB)
+#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL)
+#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL)
+#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL)
+#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL)
+
+/*
+ * STD_LOCKING Standard locking, that is, locking was configured and CDB
+ * was not. We do not do locking in off-page duplicate trees,
+ * so we check for that in the cursor first.
+ */
+#define STD_LOCKING(dbc) \
+ (!F_ISSET(dbc, DBC_OPD) && \
+ !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv))
+
+/*
+ * IS_RECOVERING The system is running recovery.
+ */
+#define IS_RECOVERING(dbenv) \
+ (LOGGING_ON(dbenv) && \
+ F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER))
+
+/* Most initialization methods cannot be called after open is called. */
+#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \
+ if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 1));
+
+/* We're not actually user hostile, honest. */
+#define ENV_REQUIRES_CONFIG(dbenv, handle, subsystem) \
+ if (handle == NULL) \
+ return (__db_env_config(dbenv, subsystem));
+
+/*******************************************************
+ * Database Access Methods.
+ *******************************************************/
+/*
+ * DB_IS_THREADED --
+ * The database handle is free-threaded (was opened with DB_THREAD).
+ */
+#define DB_IS_THREADED(dbp) \
+ ((dbp)->mutexp != NULL)
+
+/* Initialization methods are often illegal before/after open is called. */
+#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \
+ if (F_ISSET((dbp), DB_OPEN_CALLED)) \
+ return (__db_mi_open(dbp->dbenv, name, 1));
+#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \
+ if (!F_ISSET((dbp), DB_OPEN_CALLED)) \
+ return (__db_mi_open(dbp->dbenv, name, 0));
+/* Some initialization methods are illegal if environment isn't local. */
+#define DB_ILLEGAL_IN_ENV(dbp, name) \
+ if (!F_ISSET(dbp->dbenv, DB_ENV_DBLOCAL)) \
+ return (__db_mi_env(dbp->dbenv, name));
+#define DB_ILLEGAL_METHOD(dbp, flags) { \
+ int __ret; \
+ if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \
+ return (__ret); \
+}
+
+/*
+ * Common DBC->internal fields. Each access method adds additional fields
+ * to this list, but the initial fields are common.
+ */
+#define __DBC_INTERNAL \
+ DBC *opd; /* Off-page duplicate cursor. */\
+ \
+ void *page; /* Referenced page. */ \
+ db_pgno_t root; /* Tree root. */ \
+ db_pgno_t pgno; /* Referenced page number. */ \
+ db_indx_t indx; /* Referenced key item index. */\
+ \
+ DB_LOCK lock; /* Cursor lock. */ \
+ db_lockmode_t lock_mode; /* Lock mode. */
+
+struct __dbc_internal {
+ __DBC_INTERNAL
+};
+
+/*
+ * Access-method-common macro for determining whether a cursor
+ * has been initialized.
+ */
+#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID)
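
A sketch of the intended pattern, with a hypothetical structure name and field: an access method declares its private cursor structure with __DBC_INTERNAL as the leading members and appends whatever state it needs, so the common fields can always be reached through DBC->internal.

struct __example_cursor {
    __DBC_INTERNAL                  /* Common fields must come first. */
    u_int32_t ex_private_state;     /* Hypothetical access-method-specific state. */
};
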
+
+/*******************************************************
+ * Mpool.
+ *******************************************************/
+/*
+ * File types for DB access methods. Negative numbers are reserved to DB.
+ */
+#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */
+#define DB_FTYPE_NOTSET 0 /* Don't call... */
+
+/* Structure used as the DB pgin/pgout pgcookie. */
+typedef struct __dbpginfo {
+ size_t db_pagesize; /* Underlying page size. */
+ int needswap; /* If swapping required. */
+} DB_PGINFO;
+
+/*******************************************************
+ * Log.
+ *******************************************************/
+/* Initialize an LSN to 'zero'. */
+#define ZERO_LSN(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 0; \
+} while (0)
+
+/* Return 1 if LSN is a 'zero' lsn, otherwise return 0. */
+#define IS_ZERO_LSN(LSN) ((LSN).file == 0)
+
+/* Test if we need to log a change. */
+#define DB_LOGGING(dbc) \
+ (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET(dbc, DBC_RECOVER))
+
+/* Internal flag for use with internal __log_unregister. */
+#define DB_LOGONLY 0x01
+/*******************************************************
+ * Txn.
+ *******************************************************/
+#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT))
+#define IS_SUBTRANSACTION(txn) \
+ ((txn) != NULL && (txn)->parent != NULL)
+
+/*******************************************************
+ * Global variables.
+ *******************************************************/
+#ifdef HAVE_VXWORKS
+#include "semLib.h"
+#endif
+
+/*
+ * DB global variables. Done in a single structure to minimize the name-space
+ * pollution.
+ */
+typedef struct __db_globals {
+ u_int32_t db_pageyield; /* db_set_pageyield */
+ u_int32_t db_panic; /* db_set_panic */
+ u_int32_t db_region_init; /* db_set_region_init */
+ u_int32_t db_tas_spins; /* db_set_tas_spins */
+#ifdef HAVE_VXWORKS
+ u_int32_t db_global_init; /* VxWorks: inited */
+ SEM_ID db_global_lock; /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ TAILQ_HEAD(__db_envq, __db_env) db_envq;
+} DB_GLOBALS;
+
+#ifdef DB_INITIALIZE_DB_GLOBALS
+DB_GLOBALS __db_global_values = {
+ 0, /* db_set_pageyield */
+ 1, /* db_set_panic */
+ 0, /* db_set_region_init */
+ 0, /* db_set_tas_spins */
+#ifdef HAVE_VXWORKS
+ 0, /* db_global_init */
+ NULL, /* db_global_lock */
+#endif
+ /* XA environment queue */
+ {NULL, &__db_global_values.db_envq.tqh_first}
+};
+#else
+extern DB_GLOBALS __db_global_values;
+#endif
+#define DB_GLOBAL(v) __db_global_values.v
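
For instance, a process-wide tunable would be written through the accessor rather than by naming the structure directly (illustrative fragment only):

static void
example_enable_region_init(void)
{
    /* Same effect as __db_global_values.db_region_init = 1. */
    DB_GLOBAL(db_region_init) = 1;
}
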
+
+/* Forward structure declarations. */
+struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
+struct __mutex_t; typedef struct __mutex_t MUTEX;
+struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
+struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+typedef enum {
+ TXNLIST_DELETE,
+ TXNLIST_LSN,
+ TXNLIST_TXNID,
+ TXNLIST_PGNO
+} db_txnlist_type;
+
+/*
+ * Currently, region offsets are limited to 32-bits. I expect that's going
+ * to have to be fixed in the not-too-distant future, since we won't want to
+ * split 100Gb memory pools into that many different regions. It's typedef'd
+ * so it won't be too painful to upgrade.
+ */
+typedef u_int32_t roff_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*******************************************************
+ * More general includes.
+ *******************************************************/
+#include "debug.h"
+#include "mutex.h"
+#include "region.h"
+#include "mutex_ext.h"
+#include "env_ext.h"
+#include "os.h"
+#include "os_ext.h"
+#include "common_ext.h"
+
+#endif /* !_DB_INTERNAL_H_ */
diff --git a/bdb/include/db_join.h b/bdb/include/db_join.h
new file mode 100644
index 00000000000..d92887bb589
--- /dev/null
+++ b/bdb/include/db_join.h
@@ -0,0 +1,30 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * @(#)db_join.h 11.1 (Sleepycat) 7/25/99
+ */
+
+#ifndef _DB_JOIN_H_
+#define _DB_JOIN_H_
+
+/*
+ * Joins use a join cursor that is similar to a regular DB cursor except
+ * that it only supports c_get and c_close functionality. Also, it does
+ * not support the full range of flags for get.
+ */
+typedef struct __join_cursor {
+ u_int8_t *j_exhausted; /* Array of flags; is cursor i exhausted? */
+ DBC **j_curslist; /* Array of cursors in the join: constant. */
+ DBC **j_fdupcurs; /* Cursors w/ first instances of current dup. */
+ DBC **j_workcurs; /* Scratch cursor copies to muck with. */
+ DB *j_primary; /* Primary dbp. */
+ DBT j_key; /* Used to do lookups. */
+ u_int32_t j_ncurs; /* How many cursors do we have? */
+#define JOIN_RETRY 0x01 /* Error on primary get; re-return same key. */
+ u_int32_t flags;
+} JOIN_CURSOR;
+
+#endif
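
A hedged usage sketch (variable names invented, error handling trimmed, <string.h> and db.h assumed): the application opens and positions one cursor per secondary index, passes the NULL-terminated list to DB->join, and then uses only c_get and c_close on the returned join cursor, as the comment above notes.

static int
example_join(DB *primary, DBC *curs_color, DBC *curs_size)
{
    DBC *curslist[3], *join_curs;
    DBT key, data;
    int ret;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));

    curslist[0] = curs_color;    /* already positioned with DB_SET */
    curslist[1] = curs_size;
    curslist[2] = NULL;

    if ((ret = primary->join(primary, curslist, &join_curs, 0)) != 0)
        return (ret);
    while ((ret = join_curs->c_get(join_curs, &key, &data, 0)) == 0)
        ;    /* key/data now holds the next item matching every cursor. */
    (void)join_curs->c_close(join_curs);
    return (ret == DB_NOTFOUND ? 0 : ret);
}
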
diff --git a/bdb/include/db_page.h b/bdb/include/db_page.h
new file mode 100644
index 00000000000..8066424143b
--- /dev/null
+++ b/bdb/include/db_page.h
@@ -0,0 +1,576 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_page.h,v 11.28 2000/12/06 19:55:45 ubell Exp $
+ */
+
+#ifndef _DB_PAGE_H_
+#define _DB_PAGE_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * DB page formats.
+ *
+ * !!!
+ * This implementation requires that values within the following structures
+ * NOT be padded -- note, ANSI C permits random padding within structures.
+ * If your compiler pads randomly you can just forget ever making DB run on
+ * your system. In addition, no data type can require larger alignment than
+ * its own size, e.g., a 4-byte data element may not require 8-byte alignment.
+ *
+ * Note that key/data lengths are often stored in db_indx_t's -- this is
+ * not accidental, nor does it limit the key/data size. If the key/data
+ * item fits on a page, it's guaranteed to be small enough to fit into a
+ * db_indx_t, and storing it in one saves space.
+ */
+
+#define PGNO_INVALID 0 /* Invalid page number in any database. */
+#define PGNO_BASE_MD 0 /* Base database: metadata page number. */
+
+/* Page types. */
+#define P_INVALID 0 /* Invalid page type. */
+#define __P_DUPLICATE 1 /* Duplicate. DEPRECATED in 3.1 */
+#define P_HASH 2 /* Hash. */
+#define P_IBTREE 3 /* Btree internal. */
+#define P_IRECNO 4 /* Recno internal. */
+#define P_LBTREE 5 /* Btree leaf. */
+#define P_LRECNO 6 /* Recno leaf. */
+#define P_OVERFLOW 7 /* Overflow. */
+#define P_HASHMETA 8 /* Hash metadata page. */
+#define P_BTREEMETA 9 /* Btree metadata page. */
+#define P_QAMMETA 10 /* Queue metadata page. */
+#define P_QAMDATA 11 /* Queue data page. */
+#define P_LDUP 12 /* Off-page duplicate leaf. */
+#define P_PAGETYPE_MAX 13
+
+/*
+ * When we create pages in mpool, we ask mpool to clear some number of bytes
+ * in the header. This number must be at least as big as the regular page
+ * headers and cover enough of the btree and hash meta-data pages to obliterate
+ * the page type.
+ */
+#define DB_PAGE_DB_LEN 32
+#define DB_PAGE_QUEUE_LEN 0
+
+/************************************************************************
+ GENERIC METADATA PAGE HEADER
+ *
+ * !!!
+ * The magic and version numbers have to be in the same place in all versions
+ * of the metadata page as the application may not have upgraded the database.
+ ************************************************************************/
+typedef struct _dbmeta31 {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int32_t free; /* 28-31: Free list page number. */
+ DB_LSN unused3; /* 32-39: former Lsn for allocation */
+ u_int32_t key_count; /* 40-43: Cached key count. */
+ u_int32_t record_count; /* 44-47: Cached record count. */
+ u_int32_t flags; /* 48-51: Flags: unique to each AM. */
+ /* 52-71: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} DBMETA31, DBMETA;
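
A sketch of why the fixed offsets matter (hypothetical helper; byte swapping for a foreign-endian file is ignored): a tool that has read only the start of a file of unknown version can still extract the magic number and version with plain byte copies at the offsets documented above.

#include <string.h>

static void
example_read_meta_header(const u_int8_t *buf, u_int32_t *magicp, u_int32_t *versionp)
{
    memcpy(magicp, buf + 12, sizeof(u_int32_t));      /* bytes 12-15 */
    memcpy(versionp, buf + 16, sizeof(u_int32_t));    /* bytes 16-19 */
}
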
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _btmeta31 {
+#define BTM_DUP 0x001 /* Duplicates. */
+#define BTM_RECNO 0x002 /* Recno tree. */
+#define BTM_RECNUM 0x004 /* Btree: maintain record count. */
+#define BTM_FIXEDLEN 0x008 /* Recno: fixed length records. */
+#define BTM_RENUMBER 0x010 /* Recno: renumber on insert/delete. */
+#define BTM_SUBDB 0x020 /* Subdatabases. */
+#define BTM_DUPSORT 0x040 /* Duplicates are sorted. */
+#define BTM_MASK 0x07f
+ DBMETA dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 72-75: Btree: Maxkey. */
+ u_int32_t minkey; /* 76-79: Btree: Minkey. */
+ u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */
+ u_int32_t root; /* 88-91: Root page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} BTMETA31, BTMETA;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta31 {
+#define DB_HASH_DUP 0x01 /* Duplicates. */
+#define DB_HASH_SUBDB 0x02 /* Subdatabases. */
+#define DB_HASH_DUPSORT 0x04 /* Duplicates are sorted. */
+ DBMETA dbmeta; /* 00-71: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 72-75: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 76-79: Modulo mask into table */
+ u_int32_t low_mask; /* 80-83: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 84-87: Fill factor */
+ u_int32_t nelem; /* 88-91: Number of keys in hash table */
+ u_int32_t h_charkey; /* 92-95: Value of hash(CHARKEY) */
+#define NCACHED 32 /* number of spare points */
+ /* 96-223: Spare pages for overflow */
+ u_int32_t spares[NCACHED];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HMETA31, HMETA;
+
+/************************************************************************
+ QUEUE METADATA PAGE LAYOUT
+ ************************************************************************/
+/*
+ * QAM metadata page structure.
+ */
+typedef struct _qmeta32 {
+ DBMETA dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t first_recno; /* 72-75: First not deleted record. */
+ u_int32_t cur_recno; /* 76-79: Last recno allocated. */
+ u_int32_t re_len; /* 80-83: Fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Fixed-length record pad. */
+ u_int32_t rec_page; /* 88-91: Records Per Page. */
+ u_int32_t page_ext; /* 92-95: Pages per extent */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA32, QMETA;
+
+/*
+ * DBMETASIZE is a constant used by __db_file_setup and DB->verify
+ * as a buffer which is guaranteed to be larger than any possible
+ * metadata page size and smaller than any disk sector.
+ */
+#define DBMETASIZE 256
+
+/************************************************************************
+ BTREE/HASH MAIN PAGE LAYOUT
+ ************************************************************************/
+/*
+ * +-----------------------------------+
+ * | lsn | pgno | prev pgno |
+ * +-----------------------------------+
+ * | next pgno | entries | hf offset |
+ * +-----------------------------------+
+ * | level | type | index |
+ * +-----------------------------------+
+ * | index | free --> |
+ * +-----------+-----------------------+
+ * | F R E E A R E A |
+ * +-----------------------------------+
+ * | <-- free | item |
+ * +-----------------------------------+
+ * | item | item | item |
+ * +-----------------------------------+
+ *
+ * The page header is 26 bytes, and the following indices are guaranteed to be
+ * two-byte aligned.
+ *
+ * For hash and btree leaf pages, index items are paired, e.g., inp[0] is the
+ * key for inp[1]'s data. All other types of pages only contain single items.
+ */
+typedef struct _db_page {
+ DB_LSN lsn; /* 00-07: Log sequence number. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ db_pgno_t prev_pgno; /* 12-15: Previous page number. */
+ db_pgno_t next_pgno; /* 16-19: Next page number. */
+ db_indx_t entries; /* 20-21: Number of items on the page. */
+ db_indx_t hf_offset; /* 22-23: High free byte page offset. */
+
+ /*
+ * The btree levels are numbered from the leaf to the root, starting
+ * with 1, so the leaf is level 1, its parent is level 2, and so on.
+ * We maintain this level on all btree pages, but the only place that
+ * we actually need it is on the root page. It would not be difficult
+ * to hide the byte on the root page once it becomes an internal page,
+ * so we could get this byte back if we needed it for something else.
+ */
+#define LEAFLEVEL 1
+#define MAXBTREELEVEL 255
+ u_int8_t level; /* 24: Btree tree level. */
+ u_int8_t type; /* 25: Page type. */
+ db_indx_t inp[1]; /* Variable length index of items. */
+} PAGE;
+
+/* PAGE element macros. */
+#define LSN(p) (((PAGE *)p)->lsn)
+#define PGNO(p) (((PAGE *)p)->pgno)
+#define PREV_PGNO(p) (((PAGE *)p)->prev_pgno)
+#define NEXT_PGNO(p) (((PAGE *)p)->next_pgno)
+#define NUM_ENT(p) (((PAGE *)p)->entries)
+#define HOFFSET(p) (((PAGE *)p)->hf_offset)
+#define LEVEL(p) (((PAGE *)p)->level)
+#define TYPE(p) (((PAGE *)p)->type)
+
+/************************************************************************
+ QUEUE MAIN PAGE LAYOUT
+ ************************************************************************/
+typedef struct _qpage {
+ DB_LSN lsn; /* 00-07: Log sequence number. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t unused0[3]; /* 12-23: Unused. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+} QPAGE;
+
+/*
+ * !!!
+ * The next_pgno and prev_pgno fields are not maintained for btree and recno
+ * internal pages. Doing so only provides a minor performance improvement,
+ * it's hard to do when deleting internal pages, and it increases the chance
+ * of deadlock during deletes and splits because we have to re-link pages at
+ * more than the leaf level.
+ *
+ * !!!
+ * The btree/recno access method needs db_recno_t bytes of space on the root
+ * page to specify how many records are stored in the tree. (The alternative
+ * is to store the number of records in the meta-data page, which will create
+ * a second hot spot in trees being actively modified, or recalculate it from
+ * the BINTERNAL fields on each access.) Overload the PREV_PGNO field.
+ */
+#define RE_NREC(p) \
+ ((TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO) ? \
+ PREV_PGNO(p) : (TYPE(p) == P_LBTREE ? NUM_ENT(p) / 2 : NUM_ENT(p)))
+#define RE_NREC_ADJ(p, adj) \
+ PREV_PGNO(p) += adj;
+#define RE_NREC_SET(p, num) \
+ PREV_PGNO(p) = num;
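
Because of the overload, callers are expected to go through these macros rather than touching PREV_PGNO or NUM_ENT directly; a small sketch with hypothetical helpers:

static db_recno_t
example_root_nrecs(PAGE *root)
{
    return ((db_recno_t)RE_NREC(root));    /* works for any root page type */
}

static void
example_count_one_insert(PAGE *root)
{
    if (TYPE(root) == P_IBTREE || TYPE(root) == P_IRECNO)
        RE_NREC_ADJ(root, 1);              /* bump the cached subtree count */
}
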
+
+/*
+ * Initialize a page.
+ *
+ * !!!
+ * Don't modify the page's LSN, code depends on it being unchanged after a
+ * P_INIT call.
+ */
+#define P_INIT(pg, pg_size, n, pg_prev, pg_next, btl, pg_type) do { \
+ PGNO(pg) = n; \
+ PREV_PGNO(pg) = pg_prev; \
+ NEXT_PGNO(pg) = pg_next; \
+ NUM_ENT(pg) = 0; \
+ HOFFSET(pg) = pg_size; \
+ LEVEL(pg) = btl; \
+ TYPE(pg) = pg_type; \
+} while (0)
+
+/* Page header length (offset to first index). */
+#define P_OVERHEAD (SSZA(PAGE, inp))
+
+/* First free byte. */
+#define LOFFSET(pg) (P_OVERHEAD + NUM_ENT(pg) * sizeof(db_indx_t))
+
+/* Free space on a regular page. */
+#define P_FREESPACE(pg) (HOFFSET(pg) - LOFFSET(pg))
+
+/* Get a pointer to the bytes at a specific index. */
+#define P_ENTRY(pg, indx) ((u_int8_t *)pg + ((PAGE *)pg)->inp[indx])
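
A worked sketch tying these together (hypothetical function; psize of, say, 1024): right after P_INIT there are no index entries, so LOFFSET is just P_OVERHEAD (26 bytes) and P_FREESPACE is psize - 26.

static void
example_fresh_page(u_int8_t *buf, u_int32_t psize)
{
    PAGE *h;

    h = (PAGE *)buf;
    P_INIT(h, psize, 1, PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, P_LBTREE);

    /* Here: NUM_ENT(h) == 0, LOFFSET(h) == P_OVERHEAD,
     * and P_FREESPACE(h) == psize - P_OVERHEAD.
     */
}
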
+
+/************************************************************************
+ OVERFLOW PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Overflow items are referenced by HOFFPAGE and BOVERFLOW structures, which
+ * store a page number (the first page of the overflow item) and a length
+ * (the total length of the overflow item). The overflow item consists of
+ * some number of overflow pages, linked by the next_pgno field of the page.
+ * A next_pgno field of PGNO_INVALID flags the end of the overflow item.
+ *
+ * Overflow page overloads:
+ * The amount of overflow data stored on each page is stored in the
+ * hf_offset field.
+ *
+ * The implementation reference counts overflow items as it's possible
+ * for them to be promoted onto btree internal pages. The reference
+ * count is stored in the entries field.
+ */
+#define OV_LEN(p) (((PAGE *)p)->hf_offset)
+#define OV_REF(p) (((PAGE *)p)->entries)
+
+/* Maximum number of bytes that you can put on an overflow page. */
+#define P_MAXSPACE(psize) ((psize) - P_OVERHEAD)
+
+/* Free space on an overflow page. */
+#define P_OVFLSPACE(psize, pg) (P_MAXSPACE(psize) - HOFFSET(pg))
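
A back-of-the-envelope use of P_MAXSPACE (an illustration, not the library's allocation code): since each overflow page holds at most P_MAXSPACE(psize) bytes of item data, the page count for an item of tlen bytes is a ceiling division.

static u_int32_t
example_ovfl_page_estimate(u_int32_t tlen, u_int32_t psize)
{
    return ((tlen + P_MAXSPACE(psize) - 1) / P_MAXSPACE(psize));
}
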
+
+/************************************************************************
+ HASH PAGE LAYOUT
+ ************************************************************************/
+
+/* Each index references a group of bytes on the page. */
+#define H_KEYDATA 1 /* Key/data item. */
+#define H_DUPLICATE 2 /* Duplicate key/data item. */
+#define H_OFFPAGE 3 /* Overflow key/data item. */
+#define H_OFFDUP 4 /* Overflow page of duplicates. */
+
+/*
+ * !!!
+ * Items on hash pages are (potentially) unaligned, so we can never cast the
+ * (page + offset) pointer to an HKEYDATA, HOFFPAGE or HOFFDUP structure, as
+ * we do with B+tree on-page structures. Because we frequently want the type
+ * field, it requires no alignment, and it's in the same location in all three
+ * structures, there's a pair of macros.
+ */
+#define HPAGE_PTYPE(p) (*(u_int8_t *)p)
+#define HPAGE_TYPE(pg, indx) (*P_ENTRY(pg, indx))
+
+/*
+ * The first and second types are H_KEYDATA and H_DUPLICATE, represented
+ * by the HKEYDATA structure:
+ *
+ * +-----------------------------------+
+ * | type | key/data ... |
+ * +-----------------------------------+
+ *
+ * For duplicates, the data field encodes duplicate elements in the data
+ * field:
+ *
+ * +---------------------------------------------------------------+
+ * | type | len1 | element1 | len1 | len2 | element2 | len2 |
+ * +---------------------------------------------------------------+
+ *
+ * Thus, by keeping track of the offset in the element, we can do both
+ * backward and forward traversal.
+ */
+typedef struct _hkeydata {
+ u_int8_t type; /* 00: Page type. */
+ u_int8_t data[1]; /* Variable length key/data item. */
+} HKEYDATA;
+#define HKEYDATA_DATA(p) (((u_int8_t *)p) + SSZA(HKEYDATA, data))
+
+/*
+ * The length of any HKEYDATA item. Note that indx is an element index,
+ * not a PAIR index.
+ */
+#define LEN_HITEM(pg, pgsize, indx) \
+ (((indx) == 0 ? pgsize : \
+ ((PAGE *)(pg))->inp[indx - 1]) - ((PAGE *)(pg))->inp[indx])
+
+#define LEN_HKEYDATA(pg, psize, indx) \
+ (LEN_HITEM(pg, psize, indx) - HKEYDATA_SIZE(0))
+
+/*
+ * Page space required to add a new HKEYDATA item to the page, with and
+ * without the index value.
+ */
+#define HKEYDATA_SIZE(len) \
+ ((len) + SSZA(HKEYDATA, data))
+#define HKEYDATA_PSIZE(len) \
+ (HKEYDATA_SIZE(len) + sizeof(db_indx_t))
+
+/* Put a HKEYDATA item at the location referenced by a page entry. */
+#define PUT_HKEYDATA(pe, kd, len, type) { \
+ ((HKEYDATA *)pe)->type = type; \
+ memcpy((u_int8_t *)pe + sizeof(u_int8_t), kd, len); \
+}
+
+/*
+ * Macros that describe the page layout in terms of key-data pairs.
+ */
+#define H_NUMPAIRS(pg) (NUM_ENT(pg) / 2)
+#define H_KEYINDEX(indx) (indx)
+#define H_DATAINDEX(indx) ((indx) + 1)
+#define H_PAIRKEY(pg, indx) P_ENTRY(pg, H_KEYINDEX(indx))
+#define H_PAIRDATA(pg, indx) P_ENTRY(pg, H_DATAINDEX(indx))
+#define H_PAIRSIZE(pg, psize, indx) \
+ (LEN_HITEM(pg, psize, H_KEYINDEX(indx)) + \
+ LEN_HITEM(pg, psize, H_DATAINDEX(indx)))
+#define LEN_HDATA(p, psize, indx) LEN_HKEYDATA(p, psize, H_DATAINDEX(indx))
+#define LEN_HKEY(p, psize, indx) LEN_HKEYDATA(p, psize, H_KEYINDEX(indx))
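
A sketch of walking the pairs on a hash page (the callback type is invented): keys live at the even indices and their data at the following odd indices, so the scan steps two entries at a time and the LEN_* macros recover the item lengths.

static void
example_hash_pairs(PAGE *h, u_int32_t psize,
    void (*cb)(u_int8_t *, u_int32_t, u_int8_t *, u_int32_t))
{
    db_indx_t indx;

    for (indx = 0; indx < NUM_ENT(h); indx += 2)
        if (HPAGE_TYPE(h, H_KEYINDEX(indx)) == H_KEYDATA &&
            HPAGE_TYPE(h, H_DATAINDEX(indx)) == H_KEYDATA)
            cb(HKEYDATA_DATA(H_PAIRKEY(h, indx)), LEN_HKEY(h, psize, indx),
                HKEYDATA_DATA(H_PAIRDATA(h, indx)), LEN_HDATA(h, psize, indx));
}
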
+
+/*
+ * The third type is the H_OFFPAGE, represented by the HOFFPAGE structure:
+ */
+typedef struct _hoffpage {
+ u_int8_t type; /* 00: Page type and delete flag. */
+ u_int8_t unused[3]; /* 01-03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Offpage page number. */
+ u_int32_t tlen; /* 08-11: Total length of item. */
+} HOFFPAGE;
+
+#define HOFFPAGE_PGNO(p) (((u_int8_t *)p) + SSZ(HOFFPAGE, pgno))
+#define HOFFPAGE_TLEN(p) (((u_int8_t *)p) + SSZ(HOFFPAGE, tlen))
+
+/*
+ * Page space required to add a new HOFFPAGE item to the page, with and
+ * without the index value.
+ */
+#define HOFFPAGE_SIZE (sizeof(HOFFPAGE))
+#define HOFFPAGE_PSIZE (HOFFPAGE_SIZE + sizeof(db_indx_t))
+
+/*
+ * The fourth type is H_OFFDUP represented by the HOFFDUP structure:
+ */
+typedef struct _hoffdup {
+ u_int8_t type; /* 00: Page type and delete flag. */
+ u_int8_t unused[3]; /* 01-03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Offpage page number. */
+} HOFFDUP;
+#define HOFFDUP_PGNO(p) (((u_int8_t *)p) + SSZ(HOFFDUP, pgno))
+
+/*
+ * Page space required to add a new HOFFDUP item to the page, with and
+ * without the index value.
+ */
+#define HOFFDUP_SIZE (sizeof(HOFFDUP))
+
+/************************************************************************
+ BTREE PAGE LAYOUT
+ ************************************************************************/
+
+/* Each index references a group of bytes on the page. */
+#define B_KEYDATA 1 /* Key/data item. */
+#define B_DUPLICATE 2 /* Duplicate key/data item. */
+#define B_OVERFLOW 3 /* Overflow key/data item. */
+
+/*
+ * We have to store a deleted entry flag in the page. The reason is complex,
+ * but the simple version is that we can't delete on-page items referenced by
+ * a cursor -- the return order of subsequent insertions might be wrong. The
+ * delete flag is an overload of the top bit of the type byte.
+ */
+#define B_DELETE (0x80)
+#define B_DCLR(t) (t) &= ~B_DELETE
+#define B_DSET(t) (t) |= B_DELETE
+#define B_DISSET(t) ((t) & B_DELETE)
+
+#define B_TYPE(t) ((t) & ~B_DELETE)
+#define B_TSET(t, type, deleted) { \
+ (t) = (type); \
+ if (deleted) \
+ B_DSET(t); \
+}
+
+/*
+ * The first type is B_KEYDATA, represented by the BKEYDATA structure:
+ */
+typedef struct _bkeydata {
+ db_indx_t len; /* 00-01: Key/data item length. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t data[1]; /* Variable length key/data item. */
+} BKEYDATA;
+
+/* Get a BKEYDATA item for a specific index. */
+#define GET_BKEYDATA(pg, indx) \
+ ((BKEYDATA *)P_ENTRY(pg, indx))
+
+/*
+ * Page space required to add a new BKEYDATA item to the page, with and
+ * without the index value.
+ */
+#define BKEYDATA_SIZE(len) \
+ ALIGN((len) + SSZA(BKEYDATA, data), sizeof(u_int32_t))
+#define BKEYDATA_PSIZE(len) \
+ (BKEYDATA_SIZE(len) + sizeof(db_indx_t))
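
Two small consequences of the definitions above, sketched with a hypothetical helper: an item's delete bit is tested before the item is used, and a 5-byte item costs BKEYDATA_SIZE(5) == 8 bytes of item space plus a 2-byte index slot, i.e. BKEYDATA_PSIZE(5) == 10.

#include <stddef.h>    /* NULL */

static BKEYDATA *
example_live_item(PAGE *h, u_int32_t indx)
{
    BKEYDATA *bk;

    bk = GET_BKEYDATA(h, indx);
    return (B_DISSET(bk->type) ? NULL : bk);    /* skip deleted items */
}
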
+
+/*
+ * The second and third types are B_DUPLICATE and B_OVERFLOW, represented
+ * by the BOVERFLOW structure.
+ */
+typedef struct _boverflow {
+ db_indx_t unused1; /* 00-01: Padding, unused. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t unused2; /* 03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Next page number. */
+ u_int32_t tlen; /* 08-11: Total length of item. */
+} BOVERFLOW;
+
+/* Get a BOVERFLOW item for a specific index. */
+#define GET_BOVERFLOW(pg, indx) \
+ ((BOVERFLOW *)P_ENTRY(pg, indx))
+
+/*
+ * Page space required to add a new BOVERFLOW item to the page, with and
+ * without the index value.
+ */
+#define BOVERFLOW_SIZE \
+ ALIGN(sizeof(BOVERFLOW), sizeof(u_int32_t))
+#define BOVERFLOW_PSIZE \
+ (BOVERFLOW_SIZE + sizeof(db_indx_t))
+
+/*
+ * Btree leaf and hash page layouts group indices in sets of two, one for the
+ * key and one for the data. Everything else does it in sets of one to save
+ * space. Use the following macros so that it's really obvious what's going on.
+ */
+#define O_INDX 1
+#define P_INDX 2
+
+/************************************************************************
+ BTREE INTERNAL PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Btree internal entry.
+ */
+typedef struct _binternal {
+ db_indx_t len; /* 00-01: Key/data item length. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t unused; /* 03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Page number of referenced page. */
+ db_recno_t nrecs; /* 08-11: Subtree record count. */
+ u_int8_t data[1]; /* Variable length key item. */
+} BINTERNAL;
+
+/* Get a BINTERNAL item for a specific index. */
+#define GET_BINTERNAL(pg, indx) \
+ ((BINTERNAL *)P_ENTRY(pg, indx))
+
+/*
+ * Page space required to add a new BINTERNAL item to the page, with and
+ * without the index value.
+ */
+#define BINTERNAL_SIZE(len) \
+ ALIGN((len) + SSZA(BINTERNAL, data), sizeof(u_int32_t))
+#define BINTERNAL_PSIZE(len) \
+ (BINTERNAL_SIZE(len) + sizeof(db_indx_t))
+
+/************************************************************************
+ RECNO INTERNAL PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * The recno internal entry.
+ */
+typedef struct _rinternal {
+ db_pgno_t pgno; /* 00-03: Page number of referenced page. */
+ db_recno_t nrecs; /* 04-07: Subtree record count. */
+} RINTERNAL;
+
+/* Get a RINTERNAL item for a specific index. */
+#define GET_RINTERNAL(pg, indx) \
+ ((RINTERNAL *)P_ENTRY(pg, indx))
+
+/*
+ * Page space required to add a new RINTERNAL item to the page, with and
+ * without the index value.
+ */
+#define RINTERNAL_SIZE \
+ ALIGN(sizeof(RINTERNAL), sizeof(u_int32_t))
+#define RINTERNAL_PSIZE \
+ (RINTERNAL_SIZE + sizeof(db_indx_t))
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DB_PAGE_H_ */
diff --git a/bdb/include/db_server.h b/bdb/include/db_server.h
new file mode 100644
index 00000000000..e12fdb212d3
--- /dev/null
+++ b/bdb/include/db_server.h
@@ -0,0 +1,762 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef _DB_SERVER_H_RPCGEN
+#define _DB_SERVER_H_RPCGEN
+
+#include <rpc/rpc.h>
+
+struct __env_cachesize_msg {
+ u_int dbenvcl_id;
+ u_int gbytes;
+ u_int bytes;
+ u_int ncache;
+};
+typedef struct __env_cachesize_msg __env_cachesize_msg;
+
+struct __env_cachesize_reply {
+ u_int status;
+};
+typedef struct __env_cachesize_reply __env_cachesize_reply;
+
+struct __env_close_msg {
+ u_int dbenvcl_id;
+ u_int flags;
+};
+typedef struct __env_close_msg __env_close_msg;
+
+struct __env_close_reply {
+ u_int status;
+};
+typedef struct __env_close_reply __env_close_reply;
+
+struct __env_create_msg {
+ u_int timeout;
+};
+typedef struct __env_create_msg __env_create_msg;
+
+struct __env_create_reply {
+ u_int status;
+ u_int envcl_id;
+};
+typedef struct __env_create_reply __env_create_reply;
+
+struct __env_flags_msg {
+ u_int dbenvcl_id;
+ u_int flags;
+ u_int onoff;
+};
+typedef struct __env_flags_msg __env_flags_msg;
+
+struct __env_flags_reply {
+ u_int status;
+};
+typedef struct __env_flags_reply __env_flags_reply;
+
+struct __env_open_msg {
+ u_int dbenvcl_id;
+ char *home;
+ u_int flags;
+ u_int mode;
+};
+typedef struct __env_open_msg __env_open_msg;
+
+struct __env_open_reply {
+ u_int status;
+};
+typedef struct __env_open_reply __env_open_reply;
+
+struct __env_remove_msg {
+ u_int dbenvcl_id;
+ char *home;
+ u_int flags;
+};
+typedef struct __env_remove_msg __env_remove_msg;
+
+struct __env_remove_reply {
+ u_int status;
+};
+typedef struct __env_remove_reply __env_remove_reply;
+
+struct __txn_abort_msg {
+ u_int txnpcl_id;
+};
+typedef struct __txn_abort_msg __txn_abort_msg;
+
+struct __txn_abort_reply {
+ u_int status;
+};
+typedef struct __txn_abort_reply __txn_abort_reply;
+
+struct __txn_begin_msg {
+ u_int envpcl_id;
+ u_int parentcl_id;
+ u_int flags;
+};
+typedef struct __txn_begin_msg __txn_begin_msg;
+
+struct __txn_begin_reply {
+ u_int status;
+ u_int txnidcl_id;
+};
+typedef struct __txn_begin_reply __txn_begin_reply;
+
+struct __txn_commit_msg {
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __txn_commit_msg __txn_commit_msg;
+
+struct __txn_commit_reply {
+ u_int status;
+};
+typedef struct __txn_commit_reply __txn_commit_reply;
+
+struct __db_bt_maxkey_msg {
+ u_int dbpcl_id;
+ u_int maxkey;
+};
+typedef struct __db_bt_maxkey_msg __db_bt_maxkey_msg;
+
+struct __db_bt_maxkey_reply {
+ u_int status;
+};
+typedef struct __db_bt_maxkey_reply __db_bt_maxkey_reply;
+
+struct __db_bt_minkey_msg {
+ u_int dbpcl_id;
+ u_int minkey;
+};
+typedef struct __db_bt_minkey_msg __db_bt_minkey_msg;
+
+struct __db_bt_minkey_reply {
+ u_int status;
+};
+typedef struct __db_bt_minkey_reply __db_bt_minkey_reply;
+
+struct __db_close_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_close_msg __db_close_msg;
+
+struct __db_close_reply {
+ u_int status;
+};
+typedef struct __db_close_reply __db_close_reply;
+
+struct __db_create_msg {
+ u_int flags;
+ u_int envpcl_id;
+};
+typedef struct __db_create_msg __db_create_msg;
+
+struct __db_create_reply {
+ u_int status;
+ u_int dbpcl_id;
+};
+typedef struct __db_create_reply __db_create_reply;
+
+struct __db_del_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int flags;
+};
+typedef struct __db_del_msg __db_del_msg;
+
+struct __db_del_reply {
+ u_int status;
+};
+typedef struct __db_del_reply __db_del_reply;
+
+struct __db_extentsize_msg {
+ u_int dbpcl_id;
+ u_int extentsize;
+};
+typedef struct __db_extentsize_msg __db_extentsize_msg;
+
+struct __db_extentsize_reply {
+ u_int status;
+};
+typedef struct __db_extentsize_reply __db_extentsize_reply;
+
+struct __db_flags_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_flags_msg __db_flags_msg;
+
+struct __db_flags_reply {
+ u_int status;
+};
+typedef struct __db_flags_reply __db_flags_reply;
+
+struct __db_get_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __db_get_msg __db_get_msg;
+
+struct __db_get_reply {
+ u_int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __db_get_reply __db_get_reply;
+
+struct __db_h_ffactor_msg {
+ u_int dbpcl_id;
+ u_int ffactor;
+};
+typedef struct __db_h_ffactor_msg __db_h_ffactor_msg;
+
+struct __db_h_ffactor_reply {
+ u_int status;
+};
+typedef struct __db_h_ffactor_reply __db_h_ffactor_reply;
+
+struct __db_h_nelem_msg {
+ u_int dbpcl_id;
+ u_int nelem;
+};
+typedef struct __db_h_nelem_msg __db_h_nelem_msg;
+
+struct __db_h_nelem_reply {
+ u_int status;
+};
+typedef struct __db_h_nelem_reply __db_h_nelem_reply;
+
+struct __db_key_range_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int flags;
+};
+typedef struct __db_key_range_msg __db_key_range_msg;
+
+struct __db_key_range_reply {
+ u_int status;
+ double less;
+ double equal;
+ double greater;
+};
+typedef struct __db_key_range_reply __db_key_range_reply;
+
+struct __db_lorder_msg {
+ u_int dbpcl_id;
+ u_int lorder;
+};
+typedef struct __db_lorder_msg __db_lorder_msg;
+
+struct __db_lorder_reply {
+ u_int status;
+};
+typedef struct __db_lorder_reply __db_lorder_reply;
+
+struct __db_open_msg {
+ u_int dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int type;
+ u_int flags;
+ u_int mode;
+};
+typedef struct __db_open_msg __db_open_msg;
+
+struct __db_open_reply {
+ u_int status;
+ u_int type;
+ u_int dbflags;
+};
+typedef struct __db_open_reply __db_open_reply;
+
+struct __db_pagesize_msg {
+ u_int dbpcl_id;
+ u_int pagesize;
+};
+typedef struct __db_pagesize_msg __db_pagesize_msg;
+
+struct __db_pagesize_reply {
+ u_int status;
+};
+typedef struct __db_pagesize_reply __db_pagesize_reply;
+
+struct __db_put_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __db_put_msg __db_put_msg;
+
+struct __db_put_reply {
+ u_int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+};
+typedef struct __db_put_reply __db_put_reply;
+
+struct __db_re_delim_msg {
+ u_int dbpcl_id;
+ u_int delim;
+};
+typedef struct __db_re_delim_msg __db_re_delim_msg;
+
+struct __db_re_delim_reply {
+ u_int status;
+};
+typedef struct __db_re_delim_reply __db_re_delim_reply;
+
+struct __db_re_len_msg {
+ u_int dbpcl_id;
+ u_int len;
+};
+typedef struct __db_re_len_msg __db_re_len_msg;
+
+struct __db_re_len_reply {
+ u_int status;
+};
+typedef struct __db_re_len_reply __db_re_len_reply;
+
+struct __db_re_pad_msg {
+ u_int dbpcl_id;
+ u_int pad;
+};
+typedef struct __db_re_pad_msg __db_re_pad_msg;
+
+struct __db_re_pad_reply {
+ u_int status;
+};
+typedef struct __db_re_pad_reply __db_re_pad_reply;
+
+struct __db_remove_msg {
+ u_int dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int flags;
+};
+typedef struct __db_remove_msg __db_remove_msg;
+
+struct __db_remove_reply {
+ u_int status;
+};
+typedef struct __db_remove_reply __db_remove_reply;
+
+struct __db_rename_msg {
+ u_int dbpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int flags;
+};
+typedef struct __db_rename_msg __db_rename_msg;
+
+struct __db_rename_reply {
+ u_int status;
+};
+typedef struct __db_rename_reply __db_rename_reply;
+
+struct __db_stat_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_stat_msg __db_stat_msg;
+
+struct __db_stat_statsreplist {
+ struct {
+ u_int ent_len;
+ char *ent_val;
+ } ent;
+ struct __db_stat_statsreplist *next;
+};
+typedef struct __db_stat_statsreplist __db_stat_statsreplist;
+
+struct __db_stat_reply {
+ u_int status;
+ __db_stat_statsreplist *statslist;
+};
+typedef struct __db_stat_reply __db_stat_reply;
+
+struct __db_swapped_msg {
+ u_int dbpcl_id;
+};
+typedef struct __db_swapped_msg __db_swapped_msg;
+
+struct __db_swapped_reply {
+ u_int status;
+};
+typedef struct __db_swapped_reply __db_swapped_reply;
+
+struct __db_sync_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_sync_msg __db_sync_msg;
+
+struct __db_sync_reply {
+ u_int status;
+};
+typedef struct __db_sync_reply __db_sync_reply;
+
+struct __db_cursor_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __db_cursor_msg __db_cursor_msg;
+
+struct __db_cursor_reply {
+ u_int status;
+ u_int dbcidcl_id;
+};
+typedef struct __db_cursor_reply __db_cursor_reply;
+
+struct __db_join_curslist {
+ struct {
+ u_int ent_len;
+ char *ent_val;
+ } ent;
+ struct __db_join_curslist *next;
+};
+typedef struct __db_join_curslist __db_join_curslist;
+
+struct __db_join_msg {
+ u_int dbpcl_id;
+ __db_join_curslist *curslist;
+ u_int flags;
+};
+typedef struct __db_join_msg __db_join_msg;
+
+struct __db_join_reply {
+ u_int status;
+ u_int dbcidcl_id;
+};
+typedef struct __db_join_reply __db_join_reply;
+
+struct __dbc_close_msg {
+ u_int dbccl_id;
+};
+typedef struct __dbc_close_msg __dbc_close_msg;
+
+struct __dbc_close_reply {
+ u_int status;
+};
+typedef struct __dbc_close_reply __dbc_close_reply;
+
+struct __dbc_count_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_count_msg __dbc_count_msg;
+
+struct __dbc_count_reply {
+ u_int status;
+ u_int dupcount;
+};
+typedef struct __dbc_count_reply __dbc_count_reply;
+
+struct __dbc_del_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_del_msg __dbc_del_msg;
+
+struct __dbc_del_reply {
+ u_int status;
+};
+typedef struct __dbc_del_reply __dbc_del_reply;
+
+struct __dbc_dup_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_dup_msg __dbc_dup_msg;
+
+struct __dbc_dup_reply {
+ u_int status;
+ u_int dbcidcl_id;
+};
+typedef struct __dbc_dup_reply __dbc_dup_reply;
+
+struct __dbc_get_msg {
+ u_int dbccl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __dbc_get_msg __dbc_get_msg;
+
+struct __dbc_get_reply {
+ u_int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __dbc_get_reply __dbc_get_reply;
+
+struct __dbc_put_msg {
+ u_int dbccl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __dbc_put_msg __dbc_put_msg;
+
+struct __dbc_put_reply {
+ u_int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+};
+typedef struct __dbc_put_reply __dbc_put_reply;
+
+#define DB_SERVERPROG ((unsigned long)(351457))
+#define DB_SERVERVERS ((unsigned long)(1))
+#define __DB_env_cachesize ((unsigned long)(1))
+extern __env_cachesize_reply * __db_env_cachesize_1();
+#define __DB_env_close ((unsigned long)(2))
+extern __env_close_reply * __db_env_close_1();
+#define __DB_env_create ((unsigned long)(3))
+extern __env_create_reply * __db_env_create_1();
+#define __DB_env_flags ((unsigned long)(4))
+extern __env_flags_reply * __db_env_flags_1();
+#define __DB_env_open ((unsigned long)(5))
+extern __env_open_reply * __db_env_open_1();
+#define __DB_env_remove ((unsigned long)(6))
+extern __env_remove_reply * __db_env_remove_1();
+#define __DB_txn_abort ((unsigned long)(7))
+extern __txn_abort_reply * __db_txn_abort_1();
+#define __DB_txn_begin ((unsigned long)(8))
+extern __txn_begin_reply * __db_txn_begin_1();
+#define __DB_txn_commit ((unsigned long)(9))
+extern __txn_commit_reply * __db_txn_commit_1();
+#define __DB_db_bt_maxkey ((unsigned long)(10))
+extern __db_bt_maxkey_reply * __db_db_bt_maxkey_1();
+#define __DB_db_bt_minkey ((unsigned long)(11))
+extern __db_bt_minkey_reply * __db_db_bt_minkey_1();
+#define __DB_db_close ((unsigned long)(12))
+extern __db_close_reply * __db_db_close_1();
+#define __DB_db_create ((unsigned long)(13))
+extern __db_create_reply * __db_db_create_1();
+#define __DB_db_del ((unsigned long)(14))
+extern __db_del_reply * __db_db_del_1();
+#define __DB_db_extentsize ((unsigned long)(15))
+extern __db_extentsize_reply * __db_db_extentsize_1();
+#define __DB_db_flags ((unsigned long)(16))
+extern __db_flags_reply * __db_db_flags_1();
+#define __DB_db_get ((unsigned long)(17))
+extern __db_get_reply * __db_db_get_1();
+#define __DB_db_h_ffactor ((unsigned long)(18))
+extern __db_h_ffactor_reply * __db_db_h_ffactor_1();
+#define __DB_db_h_nelem ((unsigned long)(19))
+extern __db_h_nelem_reply * __db_db_h_nelem_1();
+#define __DB_db_key_range ((unsigned long)(20))
+extern __db_key_range_reply * __db_db_key_range_1();
+#define __DB_db_lorder ((unsigned long)(21))
+extern __db_lorder_reply * __db_db_lorder_1();
+#define __DB_db_open ((unsigned long)(22))
+extern __db_open_reply * __db_db_open_1();
+#define __DB_db_pagesize ((unsigned long)(23))
+extern __db_pagesize_reply * __db_db_pagesize_1();
+#define __DB_db_put ((unsigned long)(24))
+extern __db_put_reply * __db_db_put_1();
+#define __DB_db_re_delim ((unsigned long)(25))
+extern __db_re_delim_reply * __db_db_re_delim_1();
+#define __DB_db_re_len ((unsigned long)(26))
+extern __db_re_len_reply * __db_db_re_len_1();
+#define __DB_db_re_pad ((unsigned long)(27))
+extern __db_re_pad_reply * __db_db_re_pad_1();
+#define __DB_db_remove ((unsigned long)(28))
+extern __db_remove_reply * __db_db_remove_1();
+#define __DB_db_rename ((unsigned long)(29))
+extern __db_rename_reply * __db_db_rename_1();
+#define __DB_db_stat ((unsigned long)(30))
+extern __db_stat_reply * __db_db_stat_1();
+#define __DB_db_swapped ((unsigned long)(31))
+extern __db_swapped_reply * __db_db_swapped_1();
+#define __DB_db_sync ((unsigned long)(32))
+extern __db_sync_reply * __db_db_sync_1();
+#define __DB_db_cursor ((unsigned long)(33))
+extern __db_cursor_reply * __db_db_cursor_1();
+#define __DB_db_join ((unsigned long)(34))
+extern __db_join_reply * __db_db_join_1();
+#define __DB_dbc_close ((unsigned long)(35))
+extern __dbc_close_reply * __db_dbc_close_1();
+#define __DB_dbc_count ((unsigned long)(36))
+extern __dbc_count_reply * __db_dbc_count_1();
+#define __DB_dbc_del ((unsigned long)(37))
+extern __dbc_del_reply * __db_dbc_del_1();
+#define __DB_dbc_dup ((unsigned long)(38))
+extern __dbc_dup_reply * __db_dbc_dup_1();
+#define __DB_dbc_get ((unsigned long)(39))
+extern __dbc_get_reply * __db_dbc_get_1();
+#define __DB_dbc_put ((unsigned long)(40))
+extern __dbc_put_reply * __db_dbc_put_1();
+extern int db_serverprog_1_freeresult();
+
+/* the xdr functions */
+extern bool_t xdr___env_cachesize_msg();
+extern bool_t xdr___env_cachesize_reply();
+extern bool_t xdr___env_close_msg();
+extern bool_t xdr___env_close_reply();
+extern bool_t xdr___env_create_msg();
+extern bool_t xdr___env_create_reply();
+extern bool_t xdr___env_flags_msg();
+extern bool_t xdr___env_flags_reply();
+extern bool_t xdr___env_open_msg();
+extern bool_t xdr___env_open_reply();
+extern bool_t xdr___env_remove_msg();
+extern bool_t xdr___env_remove_reply();
+extern bool_t xdr___txn_abort_msg();
+extern bool_t xdr___txn_abort_reply();
+extern bool_t xdr___txn_begin_msg();
+extern bool_t xdr___txn_begin_reply();
+extern bool_t xdr___txn_commit_msg();
+extern bool_t xdr___txn_commit_reply();
+extern bool_t xdr___db_bt_maxkey_msg();
+extern bool_t xdr___db_bt_maxkey_reply();
+extern bool_t xdr___db_bt_minkey_msg();
+extern bool_t xdr___db_bt_minkey_reply();
+extern bool_t xdr___db_close_msg();
+extern bool_t xdr___db_close_reply();
+extern bool_t xdr___db_create_msg();
+extern bool_t xdr___db_create_reply();
+extern bool_t xdr___db_del_msg();
+extern bool_t xdr___db_del_reply();
+extern bool_t xdr___db_extentsize_msg();
+extern bool_t xdr___db_extentsize_reply();
+extern bool_t xdr___db_flags_msg();
+extern bool_t xdr___db_flags_reply();
+extern bool_t xdr___db_get_msg();
+extern bool_t xdr___db_get_reply();
+extern bool_t xdr___db_h_ffactor_msg();
+extern bool_t xdr___db_h_ffactor_reply();
+extern bool_t xdr___db_h_nelem_msg();
+extern bool_t xdr___db_h_nelem_reply();
+extern bool_t xdr___db_key_range_msg();
+extern bool_t xdr___db_key_range_reply();
+extern bool_t xdr___db_lorder_msg();
+extern bool_t xdr___db_lorder_reply();
+extern bool_t xdr___db_open_msg();
+extern bool_t xdr___db_open_reply();
+extern bool_t xdr___db_pagesize_msg();
+extern bool_t xdr___db_pagesize_reply();
+extern bool_t xdr___db_put_msg();
+extern bool_t xdr___db_put_reply();
+extern bool_t xdr___db_re_delim_msg();
+extern bool_t xdr___db_re_delim_reply();
+extern bool_t xdr___db_re_len_msg();
+extern bool_t xdr___db_re_len_reply();
+extern bool_t xdr___db_re_pad_msg();
+extern bool_t xdr___db_re_pad_reply();
+extern bool_t xdr___db_remove_msg();
+extern bool_t xdr___db_remove_reply();
+extern bool_t xdr___db_rename_msg();
+extern bool_t xdr___db_rename_reply();
+extern bool_t xdr___db_stat_msg();
+extern bool_t xdr___db_stat_statsreplist();
+extern bool_t xdr___db_stat_reply();
+extern bool_t xdr___db_swapped_msg();
+extern bool_t xdr___db_swapped_reply();
+extern bool_t xdr___db_sync_msg();
+extern bool_t xdr___db_sync_reply();
+extern bool_t xdr___db_cursor_msg();
+extern bool_t xdr___db_cursor_reply();
+extern bool_t xdr___db_join_curslist();
+extern bool_t xdr___db_join_msg();
+extern bool_t xdr___db_join_reply();
+extern bool_t xdr___dbc_close_msg();
+extern bool_t xdr___dbc_close_reply();
+extern bool_t xdr___dbc_count_msg();
+extern bool_t xdr___dbc_count_reply();
+extern bool_t xdr___dbc_del_msg();
+extern bool_t xdr___dbc_del_reply();
+extern bool_t xdr___dbc_dup_msg();
+extern bool_t xdr___dbc_dup_reply();
+extern bool_t xdr___dbc_get_msg();
+extern bool_t xdr___dbc_get_reply();
+extern bool_t xdr___dbc_put_msg();
+extern bool_t xdr___dbc_put_reply();
+
+#endif /* !_DB_SERVER_H_RPCGEN */
diff --git a/bdb/include/db_server_int.h b/bdb/include/db_server_int.h
new file mode 100644
index 00000000000..69e88ea5aec
--- /dev/null
+++ b/bdb/include/db_server_int.h
@@ -0,0 +1,85 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_server_int.h,v 1.13 2001/01/11 18:19:52 bostic Exp $
+ */
+
+#ifndef _DB_SERVER_INT_H_
+#define _DB_SERVER_INT_H_
+
+#define DB_SERVER_TIMEOUT 300 /* 5 minutes */
+#define DB_SERVER_MAXTIMEOUT 1200 /* 20 minutes */
+#define DB_SERVER_IDLETIMEOUT 86400 /* 1 day */
+
+#define CT_CURSOR 0x001 /* Cursor */
+#define CT_DB 0x002 /* Database */
+#define CT_ENV 0x004 /* Env */
+#define CT_TXN 0x008 /* Txn */
+
+#define CT_JOIN 0x10000000 /* Join cursor component */
+#define CT_JOINCUR 0x20000000 /* Join cursor */
+
+typedef struct home_entry home_entry;
+struct home_entry {
+ LIST_ENTRY(home_entry) entries;
+ char *home;
+ char *dir;
+ char *name;
+};
+
+/*
+ * We maintain an activity timestamp for each handle.  However, the
+ * ct_activep field may point either to the handle's own ct_active field
+ * or to the ct_active field of a parent.  For nested transactions, and
+ * for any cursors opened within a transaction, it must point to the
+ * ct_active field of the transaction's ultimate parent, no matter how
+ * deeply the transaction is nested.
+ */
+typedef struct ct_entry ct_entry;
+struct ct_entry {
+ LIST_ENTRY(ct_entry) entries; /* List of entries */
+ union {
+ DB_ENV *envp; /* H_ENV */
+ DB_TXN *txnp; /* H_TXN */
+ DB *dbp; /* H_DB */
+ DBC *dbc; /* H_CURSOR */
+ void *anyp;
+ } handle_u;
+ long ct_id; /* Client ID */
+ long *ct_activep; /* Activity timestamp pointer*/
+ long *ct_origp; /* Original timestamp pointer*/
+ long ct_active; /* Activity timestamp */
+ long ct_timeout; /* Resource timeout */
+ long ct_idle; /* Idle timeout */
+ u_int32_t ct_type; /* This entry's type */
+ struct ct_entry *ct_parent; /* Its parent */
+ struct ct_entry *ct_envparent; /* Its environment */
+};
+
+#define ct_envp handle_u.envp
+#define ct_txnp handle_u.txnp
+#define ct_dbp handle_u.dbp
+#define ct_dbc handle_u.dbc
+#define ct_anyp handle_u.anyp
+
+extern int __dbsrv_verbose;
+
+/*
+ * Get ctp and activate it.
+ * Assumes local variable 'replyp'.
+ * NOTE: May 'return' from macro.
+ */
+#define ACTIVATE_CTP(ctp, id, type) { \
+ (ctp) = get_tableent(id); \
+ if ((ctp) == NULL) { \
+ replyp->status = DB_NOSERVER_ID;\
+ return; \
+ } \
+ DB_ASSERT((ctp)->ct_type & (type)); \
+ __dbsrv_active(ctp); \
+}
+
+#endif /* _DB_SERVER_INT_H_ */
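ACTIVATE_CTP is written to be dropped into the body of an RPC server procedure that keeps its reply structure in a local named replyp. The sketch below shows the intended shape of such a procedure; the __env_close_reply type and the __env_close_1_proc signature come from the generated RPC headers elsewhere in this diff, while get_tableent() and __dbsrv_active() are server-internal helpers assumed to be in scope, and the body itself is illustrative rather than the actual server code.

/* Sketch only: how a server procedure is expected to use ACTIVATE_CTP. */
void
__env_close_1_proc(long dbenvcl_id, u_int32_t flags, __env_close_reply *replyp)
{
	ct_entry *dbenv_ctp;
	DB_ENV *dbenv;

	/*
	 * Look the client id up in the handle table, check that it really
	 * is an environment handle (CT_ENV) and bump its activity
	 * timestamp; on an unknown id the macro sets replyp->status to
	 * DB_NOSERVER_ID and returns from this function.
	 */
	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);

	dbenv = dbenv_ctp->ct_envp;		/* handle_u.envp */
	replyp->status = dbenv->close(dbenv, flags);
}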
diff --git a/bdb/include/db_shash.h b/bdb/include/db_shash.h
new file mode 100644
index 00000000000..0b9aac98f53
--- /dev/null
+++ b/bdb/include/db_shash.h
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_shash.h,v 11.7 2000/12/12 17:43:56 bostic Exp $
+ */
+
+/* Hash Headers */
+typedef SH_TAILQ_HEAD(__hash_head) DB_HASHTAB;
+
+/*
+ * HASHLOOKUP --
+ *
+ * Look up something in a shared memory hash table. The "elt" argument
+ * should be a key, and the "cmp" function must know how to compare a key
+ * to whatever structure it is that appears in the hash table.
+ *
+ * begin: address of the beginning of the hash table.
+ * ndx: index into table for this item.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * elt: the item for which we are searching in the hash table.
+ * res: the variable into which we'll store the element if we find it.
+ * cmp: called as: cmp(lookup_elt, table_elt).
+ *
+ * If the element is not in the hash table, this macro exits with res set
+ * to NULL.
+ */
+#define HASHLOOKUP(begin, ndx, type, field, elt, res, cmp) do { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ for (res = SH_TAILQ_FIRST(__bucket, type); \
+ res != NULL; res = SH_TAILQ_NEXT(res, field, type)) \
+ if (cmp(elt, res)) \
+ break; \
+} while (0)
+
+/*
+ * HASHINSERT --
+ *
+ * Insert a new entry into the hash table. This assumes that you already
+ * have the bucket locked and that lookup has failed; don't call it if you
+ * haven't already called HASHLOOKUP. If you do, you could get duplicate
+ * entries.
+ *
+ * begin: the beginning address of the hash table.
+ * ndx: the index for this element.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * elt: the item to be inserted.
+ */
+#define HASHINSERT(begin, ndx, type, field, elt) do { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ SH_TAILQ_INSERT_HEAD(__bucket, elt, field, type); \
+} while (0)
+
+/*
+ * HASHREMOVE_EL --
+ * Given the object "obj" in the table, remove it.
+ *
+ * begin: address of the beginning of the hash table.
+ * ndx: index into hash table of where this element belongs.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * obj:	the object in the table that we wish to delete.
+ */
+#define HASHREMOVE_EL(begin, ndx, type, field, obj) { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ SH_TAILQ_REMOVE(__bucket, obj, field, type); \
+}
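For orientation, here is a hedged sketch of how HASHLOOKUP and HASHINSERT are chained; the element type, its comparison macro, and the surrounding variables are hypothetical, and SH_TAILQ_ENTRY is the shared-memory queue link from shqueue.h.

/* Hypothetical element type; "links" is the field named in the macro calls. */
struct __my_elt {
	SH_TAILQ_ENTRY links;
	u_int32_t key;
};
#define	MY_CMP(lookup, elt)	((lookup)->key == (elt)->key)

	/* Fragment: table is the DB_HASHTAB array, ndx the bucket index
	 * produced by the caller's hash function. */
	DB_HASHTAB *table;
	struct __my_elt search, *found, *new_elt;
	u_int32_t ndx;

	/* Scan bucket ndx; found is NULL if no element compares equal. */
	HASHLOOKUP(table, ndx, __my_elt, links, &search, found, MY_CMP);
	if (found == NULL)
		/* Insert only after a failed lookup, per the comment above. */
		HASHINSERT(table, ndx, __my_elt, links, new_elt);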
diff --git a/bdb/include/db_swap.h b/bdb/include/db_swap.h
new file mode 100644
index 00000000000..bc96afb7a10
--- /dev/null
+++ b/bdb/include/db_swap.h
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: db_swap.h,v 11.5 2000/03/28 16:14:36 bostic Exp $
+ */
+
+#ifndef _DB_SWAP_H_
+#define _DB_SWAP_H_
+
+/*
+ * Little endian <==> big endian 32-bit swap macros.
+ * M_32_SWAP swap a memory location
+ * P_32_COPY copy potentially unaligned 4 byte quantities
+ * P_32_SWAP swap a referenced memory location
+ */
+#define M_32_SWAP(a) { \
+ u_int32_t _tmp; \
+ _tmp = a; \
+ ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[3]; \
+ ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[2]; \
+ ((u_int8_t *)&a)[2] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)&a)[3] = ((u_int8_t *)&_tmp)[0]; \
+}
+#define P_32_COPY(a, b) { \
+ ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \
+ ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \
+ ((u_int8_t *)b)[2] = ((u_int8_t *)a)[2]; \
+ ((u_int8_t *)b)[3] = ((u_int8_t *)a)[3]; \
+}
+#define P_32_SWAP(a) { \
+ u_int32_t _tmp; \
+ P_32_COPY(a, &_tmp); \
+ ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[3]; \
+ ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[2]; \
+ ((u_int8_t *)a)[2] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)a)[3] = ((u_int8_t *)&_tmp)[0]; \
+}
+
+/*
+ * Little endian <==> big endian 16-bit swap macros.
+ * M_16_SWAP swap a memory location
+ * P_16_COPY copy potentially unaligned 2 byte quantities
+ * P_16_SWAP swap a referenced memory location
+ */
+#define M_16_SWAP(a) { \
+ u_int16_t _tmp; \
+ _tmp = (u_int16_t)a; \
+ ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[0]; \
+}
+#define P_16_COPY(a, b) { \
+ ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \
+ ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \
+}
+#define P_16_SWAP(a) { \
+ u_int16_t _tmp; \
+ P_16_COPY(a, &_tmp); \
+ ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[0]; \
+}
+
+#define SWAP32(p) { \
+ P_32_SWAP(p); \
+ (p) += sizeof(u_int32_t); \
+}
+#define SWAP16(p) { \
+ P_16_SWAP(p); \
+ (p) += sizeof(u_int16_t); \
+}
+
+/*
+ * DB has local versions of htonl() and ntohl() that only operate on pointers
+ * to the right size memory locations; the portability magic for finding the
+ * real ones isn't worth the effort.
+ */
+#if defined(WORDS_BIGENDIAN)
+#define DB_HTONL(p)
+#define DB_NTOHL(p)
+#else
+#define DB_HTONL(p) P_32_SWAP(p)
+#define DB_NTOHL(p) P_32_SWAP(p)
+#endif
+
+#endif /* !_DB_SWAP_H_ */
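A small worked example of the macros above; the surrounding function is hypothetical, and u_int32_t/u_int8_t are the usual Berkeley DB typedefs.

/* Illustration only: byte-order behaviour of the swap macros. */
static void
__swap_example(void)
{
	u_int32_t v, n;
	u_int8_t buf[4];

	v = 0x01020304;
	M_32_SWAP(v);		/* Bytes reversed in place: v == 0x04030201. */
	M_32_SWAP(v);		/* Swapping twice restores v == 0x01020304. */

	/* A 4-byte big-endian quantity (the value 300) read from disk... */
	buf[0] = 0x00; buf[1] = 0x00; buf[2] = 0x01; buf[3] = 0x2c;
	P_32_COPY(buf, &n);	/* ...copied without assuming alignment... */
	DB_NTOHL(&n);		/* ...and swapped only on little-endian hosts. */
	/* n == 300 regardless of the host byte order. */
}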
diff --git a/bdb/include/db_upgrade.h b/bdb/include/db_upgrade.h
new file mode 100644
index 00000000000..d8d99645231
--- /dev/null
+++ b/bdb/include/db_upgrade.h
@@ -0,0 +1,174 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_upgrade.h,v 1.5 2000/11/16 23:40:56 ubell Exp $
+ */
+
+#ifndef _DB_UPGRADE_H_
+#define _DB_UPGRADE_H_
+
+/*
+ * This file defines the metadata pages from the previous release.
+ * These structures are only used to upgrade old versions of databases.
+ */
+
+/* Structures from the 3.1 release */
+/*
+ * QAM Meta data page structure
+ *
+ */
+typedef struct _qmeta31 {
+ DBMETA dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t start; /* 72-75: Start offset. */
+ u_int32_t first_recno; /* 76-79: First not deleted record. */
+ u_int32_t cur_recno; /* 80-83: Last recno allocated. */
+ u_int32_t re_len; /* 84-87: Fixed-length record length. */
+ u_int32_t re_pad; /* 88-91: Fixed-length record pad. */
+ u_int32_t rec_page; /* 92-95: Records Per Page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA31;
+
+/* Structures from the 3.0 release */
+
+typedef struct _dbmeta30 {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int32_t free; /* 28-31: Free list page number. */
+ u_int32_t flags; /* 32-35: Flags: unique to each AM. */
+ /* 36-55: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} DBMETA30;
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _btmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 56-59: Btree: Maxkey. */
+ u_int32_t minkey; /* 60-63: Btree: Minkey. */
+ u_int32_t re_len; /* 64-67: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 68-71: Recno: fixed-length record pad. */
+ u_int32_t root; /* 72-75: Root page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} BTMETA30;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 56-59: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 60-63: Modulo mask into table */
+ u_int32_t low_mask; /* 64-67: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 68-71: Fill factor */
+ u_int32_t nelem; /* 72-75: Number of keys in hash table */
+ u_int32_t h_charkey; /* 76-79: Value of hash(CHARKEY) */
+#define NCACHED30 32 /* number of spare points */
+ /* 80-207: Spare pages for overflow */
+ u_int32_t spares[NCACHED30];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HMETA30;
+
+/************************************************************************
+ QUEUE METADATA PAGE LAYOUT
+ ************************************************************************/
+/*
+ * QAM Meta data page structure
+ *
+ */
+typedef struct _qmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data header. */
+
+ u_int32_t start; /* 56-59: Start offset. */
+ u_int32_t first_recno; /* 60-63: First not deleted record. */
+ u_int32_t cur_recno; /* 64-67: Last recno allocated. */
+ u_int32_t re_len; /* 68-71: Fixed-length record length. */
+ u_int32_t re_pad; /* 72-75: Fixed-length record pad. */
+ u_int32_t rec_page; /* 76-79: Records Per Page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA30;
+
+/* Structures from Release 2.x */
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Btree metadata page layout:
+ */
+typedef struct _btmeta2X {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int32_t maxkey; /* 24-27: Btree: Maxkey. */
+ u_int32_t minkey; /* 28-31: Btree: Minkey. */
+ u_int32_t free; /* 32-35: Free list page number. */
+ u_int32_t flags; /* 36-39: Flags. */
+ u_int32_t re_len; /* 40-43: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 44-47: Recno: fixed-length record pad. */
+ /* 48-67: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} BTMETA2X;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Hash metadata page layout:
+ */
+/* Hash Table Information */
+typedef struct hashhdr { /* Disk resident portion */
+ DB_LSN lsn; /* 00-07: LSN of the header page */
+ db_pgno_t pgno; /* 08-11: Page number (btree compatibility). */
+ u_int32_t magic; /* 12-15: Magic NO for hash tables */
+ u_int32_t version; /* 16-19: Version ID */
+ u_int32_t pagesize; /* 20-23: Bucket/Page Size */
+ u_int32_t ovfl_point; /* 24-27: Overflow page allocation location */
+ u_int32_t last_freed; /* 28-31: Last freed overflow page pgno */
+ u_int32_t max_bucket; /* 32-35: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 36-39: Modulo mask into table */
+ u_int32_t low_mask; /* 40-43: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 44-47: Fill factor */
+ u_int32_t nelem; /* 48-51: Number of keys in hash table */
+ u_int32_t h_charkey; /* 52-55: Value of hash(CHARKEY) */
+ u_int32_t flags; /* 56-59: Allow duplicates. */
+#define NCACHED2X 32 /* number of spare points */
+ /* 60-187: Spare pages for overflow */
+ u_int32_t spares[NCACHED2X];
+ /* 188-207: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HASHHDR;
+
+#endif
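The byte ranges in the comments can be checked with simple arithmetic; for example, the HMETA30 spares array is 32 slots of 4 bytes, i.e. 128 bytes starting at offset 80 and ending at 207. A hypothetical compile-time check, assuming the usual packed layout with 4-byte u_int32_t and a DB_FILE_ID_LEN of 20, might read:

#include <stddef.h>

/* Negative array sizes make the build fail if an assumption is wrong. */
typedef char __hmeta30_spares_size_check[
    sizeof(((HMETA30 *)0)->spares) == NCACHED30 * sizeof(u_int32_t) ? 1 : -1];
typedef char __hmeta30_spares_offset_check[
    offsetof(HMETA30, spares) == 80 ? 1 : -1];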
diff --git a/bdb/include/db_verify.h b/bdb/include/db_verify.h
new file mode 100644
index 00000000000..2507f1f1082
--- /dev/null
+++ b/bdb/include/db_verify.h
@@ -0,0 +1,191 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_verify.h,v 1.18 2000/12/31 17:51:52 bostic Exp $
+ */
+
+#ifndef _DB_VERIFY_H_
+#define _DB_VERIFY_H_
+
+/*
+ * Structures and macros for the storage and retrieval of all information
+ * needed for inter-page verification of a database.
+ */
+
+/*
+ * EPRINT is the macro for error printing. Takes as an arg the arg set
+ * for DB->err.
+ */
+#define EPRINT(x) \
+ do { \
+ if (!LF_ISSET(DB_SALVAGE)) \
+ __db_err x; \
+ } while (0)
+
+/* For fatal type errors--i.e., verifier bugs. */
+#define TYPE_ERR_PRINT(dbenv, func, pgno, ptype) \
+ EPRINT(((dbenv), "%s called on nonsensical page %lu of type %lu", \
+ (func), (u_long)(pgno), (u_long)(ptype)));
+
+/* Is x a power of two? (Tests true for zero, which doesn't matter here.) */
+#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
+
+#define IS_VALID_PAGESIZE(x) \
+ (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE))
+
+/*
+ * Note that 0 is, in general, a valid pgno, despite equalling PGNO_INVALID;
+ * we have to test it separately where it's not appropriate.
+ */
+#define IS_VALID_PGNO(x) ((x) <= vdp->last_pgno)
+
+/*
+ * Flags understood by the btree structure checks (esp. __bam_vrfy_subtree).
+ * These share the same space as the global flags to __db_verify, and must not
+ * dip below 0x00010000.
+ */
+#define ST_DUPOK 0x00010000 /* Duplicates are acceptable. */
+#define ST_DUPSET 0x00020000 /* Subtree is in a duplicate tree. */
+#define ST_DUPSORT 0x00040000 /* Duplicates are sorted. */
+#define ST_IS_RECNO 0x00080000 /* Subtree is a recno. */
+#define ST_OVFL_LEAF 0x00100000 /* Overflow reffed from leaf page. */
+#define ST_RECNUM 0x00200000 /* Subtree has record numbering on. */
+#define ST_RELEN 0x00400000 /* Subtree has fixed-length records. */
+#define ST_TOPLEVEL 0x00800000 /* Subtree == entire tree */
+
+/*
+ * Flags understood by __bam_salvage and __db_salvage. These need not share
+ * the same space with the __bam_vrfy_subtree flags, but must share with
+ * __db_verify.
+ */
+#define SA_SKIPFIRSTKEY 0x00080000
+
+/*
+ * VRFY_DBINFO is the fundamental structure; it represents either the
+ * database of subdatabases or, if there are no subdatabases, the sole
+ * database itself.
+ */
+struct __vrfy_dbinfo {
+ /* Info about this database in particular. */
+ DBTYPE type;
+
+ /* List of subdatabase meta pages, if any. */
+ LIST_HEAD(__subdbs, __vrfy_childinfo) subdbs;
+
+ /* File-global info--stores VRFY_PAGEINFOs for each page. */
+ DB *pgdbp;
+
+ /* Child database--stores VRFY_CHILDINFOs of each page. */
+ DB *cdbp;
+
+ /* Page info structures currently in use. */
+ LIST_HEAD(__activepips, __vrfy_pageinfo) activepips;
+
+ /*
+ * DB we use to keep track of which pages are linked somehow
+ * during verification. 0 is the default, "unseen"; 1 is seen.
+ */
+ DB *pgset;
+
+ /*
+ * This is a database we use during salvaging to keep track of which
+ * overflow and dup pages we need to come back to at the end and print
+ * with key "UNKNOWN". Pages which print with a good key get set
+ * to SALVAGE_IGNORE; others get set, as appropriate, to SALVAGE_LDUP,
+ * SALVAGE_LRECNODUP, SALVAGE_OVERFLOW for normal db overflow pages,
+	 * and SALVAGE_LBTREE, SALVAGE_LRECNO, and SALVAGE_HASH for subdb
+ * pages.
+ */
+#define SALVAGE_INVALID 0
+#define SALVAGE_IGNORE 1
+#define SALVAGE_LDUP 2
+#define SALVAGE_LRECNODUP 3
+#define SALVAGE_OVERFLOW 4
+#define SALVAGE_LBTREE 5
+#define SALVAGE_HASH 6
+#define SALVAGE_LRECNO 7
+ DB *salvage_pages;
+
+ db_pgno_t last_pgno;
+ db_pgno_t pgs_remaining; /* For dbp->db_feedback(). */
+
+ /* Queue needs these to verify data pages in the first pass. */
+ u_int32_t re_len;
+ u_int32_t rec_page;
+
+#define SALVAGE_PRINTHEADER 0x01
+#define SALVAGE_PRINTFOOTER 0x02
+ u_int32_t flags;
+}; /* VRFY_DBINFO */
+
+/*
+ * The amount of state information we need per-page is small enough that
+ * it's not worth the trouble to define separate structures for each
+ * possible type of page, and since we're doing verification with these we
+ * have to be open to the possibility that page N will be of a completely
+ * unexpected type anyway. So we define one structure here with all the
+ * info we need for inter-page verification.
+ */
+struct __vrfy_pageinfo {
+ u_int8_t type;
+ u_int8_t bt_level;
+ u_int8_t unused1;
+ u_int8_t unused2;
+ db_pgno_t pgno;
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+
+ /* meta pages */
+ db_pgno_t root;
+ db_pgno_t free; /* Free list head. */
+
+ db_indx_t entries; /* Actual number of entries. */
+ u_int16_t unused;
+ db_recno_t rec_cnt; /* Record count. */
+ u_int32_t re_len; /* Record length. */
+ u_int32_t bt_minkey;
+ u_int32_t bt_maxkey;
+ u_int32_t h_ffactor;
+ u_int32_t h_nelem;
+
+ /* overflow pages */
+ /*
+ * Note that refcount is the refcount for an overflow page; pi_refcount
+ * is this structure's own refcount!
+ */
+ u_int32_t refcount;
+ u_int32_t olen;
+
+#define VRFY_DUPS_UNSORTED 0x0001 /* Have to flag the negative! */
+#define VRFY_HAS_DUPS 0x0002
+#define VRFY_HAS_DUPSORT 0x0004 /* Has the flag set. */
+#define VRFY_HAS_SUBDBS 0x0008
+#define VRFY_HAS_RECNUMS 0x0010
+#define VRFY_INCOMPLETE 0x0020 /* Meta or item order checks incomp. */
+#define VRFY_IS_ALLZEROES 0x0040 /* Hash page we haven't touched? */
+#define VRFY_IS_FIXEDLEN 0x0080
+#define VRFY_IS_RECNO 0x0100
+#define VRFY_IS_RRECNO 0x0200
+#define VRFY_OVFL_LEAFSEEN 0x0400
+ u_int32_t flags;
+
+ LIST_ENTRY(__vrfy_pageinfo) links;
+ u_int32_t pi_refcount;
+}; /* VRFY_PAGEINFO */
+
+struct __vrfy_childinfo {
+ db_pgno_t pgno;
+
+#define V_DUPLICATE 1 /* off-page dup metadata */
+#define V_OVERFLOW 2 /* overflow page */
+#define V_RECNO 3 /* btree internal or leaf page */
+ u_int32_t type;
+ db_recno_t nrecs; /* record count on a btree subtree */
+ u_int32_t tlen; /* ovfl. item total size */
+
+ LIST_ENTRY(__vrfy_childinfo) links;
+}; /* VRFY_CHILDINFO */
+
+#endif /* _DB_VERIFY_H_ */
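Two of the macros above reward a worked example. POWER_OF_TWO relies on x - 1 flipping the lowest set bit and everything below it, and EPRINT takes a complete DB->err argument list. The call site below is a sketch: it assumes local dbenv, pgno, pagesize and flags variables (LF_ISSET reads flags), and the usual DB_MIN_PGSIZE/DB_MAX_PGSIZE values of 512 and 65536 from db.h.

/*
 * POWER_OF_TWO:  x = 4096 = 0x1000, x - 1 = 0x0fff, x & (x - 1) == 0
 *                x = 4097 = 0x1001, x - 1 = 0x1000, x & (x - 1) != 0
 * so IS_VALID_PAGESIZE accepts 512, 1024, ..., 65536 and rejects
 * everything else, including 0 (which fails the >= DB_MIN_PGSIZE test).
 */
	if (!IS_VALID_PAGESIZE(pagesize))
		EPRINT((dbenv,
		    "Page %lu: bad page size %lu",
		    (u_long)pgno, (u_long)pagesize));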
diff --git a/bdb/include/debug.h b/bdb/include/debug.h
new file mode 100644
index 00000000000..9a3ffc1acb6
--- /dev/null
+++ b/bdb/include/debug.h
@@ -0,0 +1,104 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: debug.h,v 11.17 2000/07/07 15:50:36 bostic Exp $
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * When running with #DIAGNOSTIC defined, we smash memory and do memory
+ * guarding with a special byte value.
+ */
+#define CLEAR_BYTE 0xdb
+#define GUARD_BYTE 0xdc
+
+/*
+ * DB assertions.
+ */
+#if defined(DIAGNOSTIC) && defined(__STDC__)
+#define DB_ASSERT(e) ((e) ? (void)0 : __db_assert(#e, __FILE__, __LINE__))
+#else
+#define DB_ASSERT(e) ((void)0)
+#endif
+
+/*
+ * Purify and other run-time tools complain about uninitialized reads/writes
+ * of structure fields whose only purpose is padding, as well as when heap
+ * memory that was never initialized is written to disk.
+ */
+#ifdef UMRW
+#define UMRW_SET(v) (v) = 0
+#else
+#define UMRW_SET(v)
+#endif
+
+/*
+ * Debugging macro to log operations.
+ * If DEBUG_WOP is defined, log operations that modify the database.
+ * If DEBUG_ROP is defined, log operations that read the database.
+ *
+ * C	cursor (DBC); its dbp and dbenv are used
+ * T txn
+ * O operation (string)
+ * K key
+ * A data
+ * F flags
+ */
+#define LOG_OP(C, T, O, K, A, F) { \
+ DB_LSN __lsn; \
+ DBT __op; \
+ if (DB_LOGGING((C))) { \
+ memset(&__op, 0, sizeof(__op)); \
+ __op.data = O; \
+ __op.size = strlen(O) + 1; \
+ (void)__db_debug_log((C)->dbp->dbenv, \
+ T, &__lsn, 0, &__op, (C)->dbp->log_fileid, K, A, F);\
+ } \
+}
+#ifdef DEBUG_ROP
+#define DEBUG_LREAD(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
+#else
+#define DEBUG_LREAD(C, T, O, K, A, F)
+#endif
+#ifdef DEBUG_WOP
+#define DEBUG_LWRITE(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
+#else
+#define DEBUG_LWRITE(C, T, O, K, A, F)
+#endif
+
+/*
+ * Hook for testing recovery at various places in the create/delete paths.
+ */
+#if CONFIG_TEST
+#define DB_TEST_RECOVERY(dbp, val, ret, name) \
+do { \
+ int __ret; \
+ PANIC_CHECK((dbp)->dbenv); \
+ if ((dbp)->dbenv->test_copy == (val)) { \
+ /* COPY the FILE */ \
+ if (F_ISSET((dbp), DB_OPEN_CALLED) && (dbp)->mpf != NULL) \
+ (void)(dbp)->sync((dbp), 0); \
+ if ((__ret = __db_testcopy((dbp), (name))) != 0) \
+ (ret) = __db_panic((dbp)->dbenv, __ret); \
+ } \
+ if ((dbp)->dbenv->test_abort == (val)) { \
+ /* ABORT the TXN */ \
+ (ret) = EINVAL; \
+ goto db_tr_err; \
+ } \
+} while (0)
+#define DB_TEST_RECOVERY_LABEL db_tr_err:
+#else
+#define DB_TEST_RECOVERY(dbp, val, ret, name)
+#define DB_TEST_RECOVERY_LABEL
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
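The two recovery-test hooks are meant to be used as a pair inside a create/open routine. The skeleton below is illustrative only; DB_TEST_POSTOPEN is assumed to be one of the test hook values defined in db.h, and the function name and body are hypothetical.

/* Sketch only: pairing DB_TEST_RECOVERY with DB_TEST_RECOVERY_LABEL. */
static int
__sketch_do_open(DB *dbp, const char *name)
{
	int ret;

	ret = 0;
	/* ... perform one step of the create/open ... */

	/*
	 * If the environment's test hook matches this point, copy the file
	 * and/or force an abort; the abort path does "goto db_tr_err",
	 * which DB_TEST_RECOVERY_LABEL supplies below.  With CONFIG_TEST
	 * off, both macros compile away to nothing.
	 */
	DB_TEST_RECOVERY(dbp, DB_TEST_POSTOPEN, ret, name);

	/* ... remaining work ... */

DB_TEST_RECOVERY_LABEL
	return (ret);
}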
diff --git a/bdb/include/env_ext.h b/bdb/include/env_ext.h
new file mode 100644
index 00000000000..0e7313fde9d
--- /dev/null
+++ b/bdb/include/env_ext.h
@@ -0,0 +1,35 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _env_ext_h_
+#define _env_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+void __db_shalloc_init __P((void *, size_t));
+int __db_shalloc_size __P((size_t, size_t));
+int __db_shalloc __P((void *, size_t, size_t, void *));
+void __db_shalloc_free __P((void *, void *));
+size_t __db_shalloc_count __P((void *));
+size_t __db_shsizeof __P((void *));
+void __db_shalloc_dump __P((void *, FILE *));
+int __db_tablesize __P((u_int32_t));
+void __db_hashinit __P((void *, u_int32_t));
+int __dbenv_init __P((DB_ENV *));
+int __db_mi_env __P((DB_ENV *, const char *));
+int __db_mi_open __P((DB_ENV *, const char *, int));
+int __db_env_config __P((DB_ENV *, int));
+int __dbenv_open __P((DB_ENV *, const char *, u_int32_t, int));
+int __dbenv_remove __P((DB_ENV *, const char *, u_int32_t));
+int __dbenv_close __P((DB_ENV *, u_int32_t));
+int __db_appname __P((DB_ENV *, APPNAME,
+ const char *, const char *, u_int32_t, DB_FH *, char **));
+int __db_apprec __P((DB_ENV *, u_int32_t));
+int __db_e_attach __P((DB_ENV *, u_int32_t *));
+int __db_e_detach __P((DB_ENV *, int));
+int __db_e_remove __P((DB_ENV *, int));
+int __db_e_stat __P((DB_ENV *, REGENV *, REGION *, int *));
+int __db_r_attach __P((DB_ENV *, REGINFO *, size_t));
+int __db_r_detach __P((DB_ENV *, REGINFO *, int));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _env_ext_h_ */
diff --git a/bdb/include/gen_client_ext.h b/bdb/include/gen_client_ext.h
new file mode 100644
index 00000000000..5675b74d3ec
--- /dev/null
+++ b/bdb/include/gen_client_ext.h
@@ -0,0 +1,121 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+int __dbcl_env_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int));
+int __dbcl_env_close __P((DB_ENV *, u_int32_t));
+int __dbcl_env_close_ret __P((DB_ENV *, u_int32_t, __env_close_reply *));
+int __dbcl_rpc_illegal __P((DB_ENV *, char *));
+int __dbcl_set_data_dir __P((DB_ENV *, const char *));
+int __dbcl_env_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+int __dbcl_env_flags __P((DB_ENV *, u_int32_t, int));
+int __dbcl_set_lg_bsize __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lg_dir __P((DB_ENV *, const char *));
+int __dbcl_set_lg_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_conflict __P((DB_ENV *, u_int8_t *, int));
+int __dbcl_set_lk_detect __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+int __dbcl_set_mp_mmapsize __P((DB_ENV *, size_t));
+int __dbcl_set_mutex_locks __P((DB_ENV *, int));
+int __dbcl_env_open __P((DB_ENV *, const char *, u_int32_t, int));
+int __dbcl_env_open_ret __P((DB_ENV *, const char *, u_int32_t, int, __env_open_reply *));
+int __dbcl_env_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+int __dbcl_set_recovery_init __P((DB_ENV *, int (*)(DB_ENV *)));
+int __dbcl_env_remove __P((DB_ENV *, const char *, u_int32_t));
+int __dbcl_env_remove_ret __P((DB_ENV *, const char *, u_int32_t, __env_remove_reply *));
+int __dbcl_set_shm_key __P((DB_ENV *, long));
+int __dbcl_set_tmp_dir __P((DB_ENV *, const char *));
+int __dbcl_set_tx_recover __P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+int __dbcl_set_tx_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_tx_timestamp __P((DB_ENV *, time_t *));
+int __dbcl_set_verbose __P((DB_ENV *, u_int32_t, int));
+int __dbcl_txn_abort __P((DB_TXN *));
+int __dbcl_txn_abort_ret __P((DB_TXN *, __txn_abort_reply *));
+int __dbcl_txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int __dbcl_txn_begin_ret __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t, __txn_begin_reply *));
+int __dbcl_txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t));
+int __dbcl_txn_commit __P((DB_TXN *, u_int32_t));
+int __dbcl_txn_commit_ret __P((DB_TXN *, u_int32_t, __txn_commit_reply *));
+int __dbcl_txn_prepare __P((DB_TXN *));
+int __dbcl_txn_stat __P((DB_ENV *, DB_TXN_STAT **, void *(*)(size_t)));
+int __dbcl_db_bt_compare __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+int __dbcl_db_bt_maxkey __P((DB *, u_int32_t));
+int __dbcl_db_bt_minkey __P((DB *, u_int32_t));
+int __dbcl_db_bt_prefix __P((DB *, size_t(*)(DB *, const DBT *, const DBT *)));
+int __dbcl_db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+int __dbcl_db_cachesize __P((DB *, u_int32_t, u_int32_t, int));
+int __dbcl_db_close __P((DB *, u_int32_t));
+int __dbcl_db_close_ret __P((DB *, u_int32_t, __db_close_reply *));
+int __dbcl_db_del __P((DB *, DB_TXN *, DBT *, u_int32_t));
+int __dbcl_db_extentsize __P((DB *, u_int32_t));
+int __dbcl_db_fd __P((DB *, int *));
+int __dbcl_db_feedback __P((DB *, void (*)(DB *, int, int)));
+int __dbcl_db_flags __P((DB *, u_int32_t));
+int __dbcl_db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __dbcl_db_get_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_get_reply *));
+int __dbcl_db_h_ffactor __P((DB *, u_int32_t));
+int __dbcl_db_h_hash __P((DB *, u_int32_t(*)(DB *, const void *, u_int32_t)));
+int __dbcl_db_h_nelem __P((DB *, u_int32_t));
+int __dbcl_db_key_range __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+int __dbcl_db_key_range_ret __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t, __db_key_range_reply *));
+int __dbcl_db_lorder __P((DB *, int));
+int __dbcl_db_malloc __P((DB *, void *(*)(size_t)));
+int __dbcl_db_open __P((DB *, const char *, const char *, DBTYPE, u_int32_t, int));
+int __dbcl_db_open_ret __P((DB *, const char *, const char *, DBTYPE, u_int32_t, int, __db_open_reply *));
+int __dbcl_db_pagesize __P((DB *, u_int32_t));
+int __dbcl_db_panic __P((DB *, void (*)(DB_ENV *, int)));
+int __dbcl_db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __dbcl_db_put_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_put_reply *));
+int __dbcl_db_realloc __P((DB *, void *(*)(void *, size_t)));
+int __dbcl_db_re_delim __P((DB *, int));
+int __dbcl_db_re_len __P((DB *, u_int32_t));
+int __dbcl_db_re_pad __P((DB *, int));
+int __dbcl_db_re_source __P((DB *, const char *));
+int __dbcl_db_remove __P((DB *, const char *, const char *, u_int32_t));
+int __dbcl_db_remove_ret __P((DB *, const char *, const char *, u_int32_t, __db_remove_reply *));
+int __dbcl_db_rename __P((DB *, const char *, const char *, const char *, u_int32_t));
+int __dbcl_db_rename_ret __P((DB *, const char *, const char *, const char *, u_int32_t, __db_rename_reply *));
+int __dbcl_db_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+int __dbcl_db_stat_ret __P((DB *, void *, void *(*)(size_t), u_int32_t, __db_stat_reply *));
+int __dbcl_db_swapped __P((DB *));
+int __dbcl_db_sync __P((DB *, u_int32_t));
+int __dbcl_db_upgrade __P((DB *, const char *, u_int32_t));
+int __dbcl_db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+int __dbcl_db_cursor_ret __P((DB *, DB_TXN *, DBC **, u_int32_t, __db_cursor_reply *));
+int __dbcl_db_join __P((DB *, DBC **, DBC **, u_int32_t));
+int __dbcl_db_join_ret __P((DB *, DBC **, DBC **, u_int32_t, __db_join_reply *));
+int __dbcl_dbc_close __P((DBC *));
+int __dbcl_dbc_close_ret __P((DBC *, __dbc_close_reply *));
+int __dbcl_dbc_count __P((DBC *, db_recno_t *, u_int32_t));
+int __dbcl_dbc_count_ret __P((DBC *, db_recno_t *, u_int32_t, __dbc_count_reply *));
+int __dbcl_dbc_del __P((DBC *, u_int32_t));
+int __dbcl_dbc_dup __P((DBC *, DBC **, u_int32_t));
+int __dbcl_dbc_dup_ret __P((DBC *, DBC **, u_int32_t, __dbc_dup_reply *));
+int __dbcl_dbc_get __P((DBC *, DBT *, DBT *, u_int32_t));
+int __dbcl_dbc_get_ret __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_get_reply *));
+int __dbcl_dbc_put __P((DBC *, DBT *, DBT *, u_int32_t));
+int __dbcl_dbc_put_ret __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_put_reply *));
+int __dbcl_lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+int __dbcl_lock_get __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+int __dbcl_lock_id __P((DB_ENV *, u_int32_t *));
+int __dbcl_lock_put __P((DB_ENV *, DB_LOCK *));
+int __dbcl_lock_stat __P((DB_ENV *, DB_LOCK_STAT **, void *(*)(size_t)));
+int __dbcl_lock_vec __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+int __dbcl_log_archive __P((DB_ENV *, char ***, u_int32_t, void *(*)(size_t)));
+int __dbcl_log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+int __dbcl_log_flush __P((DB_ENV *, const DB_LSN *));
+int __dbcl_log_get __P((DB_ENV *, DB_LSN *, DBT *, u_int32_t));
+int __dbcl_log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+int __dbcl_log_register __P((DB_ENV *, DB *, const char *));
+int __dbcl_log_stat __P((DB_ENV *, DB_LOG_STAT **, void *(*)(size_t)));
+int __dbcl_log_unregister __P((DB_ENV *, DB *));
+int __dbcl_memp_fclose __P((DB_MPOOLFILE *));
+int __dbcl_memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void **));
+int __dbcl_memp_fopen __P((DB_ENV *, const char *, u_int32_t, int, size_t, DB_MPOOL_FINFO *, DB_MPOOLFILE **));
+int __dbcl_memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t));
+int __dbcl_memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t));
+int __dbcl_memp_fsync __P((DB_MPOOLFILE *));
+int __dbcl_memp_register __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+int __dbcl_memp_stat __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, void *(*)(size_t)));
+int __dbcl_memp_sync __P((DB_ENV *, DB_LSN *));
+int __dbcl_memp_trickle __P((DB_ENV *, int, int *));
diff --git a/bdb/include/gen_server_ext.h b/bdb/include/gen_server_ext.h
new file mode 100644
index 00000000000..9037d908f17
--- /dev/null
+++ b/bdb/include/gen_server_ext.h
@@ -0,0 +1,106 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+__env_cachesize_reply * __db_env_cachesize_1 __P((__env_cachesize_msg *));
+void __env_cachesize_1_proc __P((long, u_int32_t, u_int32_t,
+ u_int32_t, __env_cachesize_reply *));
+__env_close_reply * __db_env_close_1 __P((__env_close_msg *));
+void __env_close_1_proc __P((long, u_int32_t, __env_close_reply *));
+__env_create_reply * __db_env_create_1 __P((__env_create_msg *));
+void __env_create_1_proc __P((u_int32_t, __env_create_reply *));
+__env_flags_reply * __db_env_flags_1 __P((__env_flags_msg *));
+void __env_flags_1_proc __P((long, u_int32_t, u_int32_t, __env_flags_reply *));
+__env_open_reply * __db_env_open_1 __P((__env_open_msg *));
+void __env_open_1_proc __P((long, char *, u_int32_t,
+ u_int32_t, __env_open_reply *));
+__env_remove_reply * __db_env_remove_1 __P((__env_remove_msg *));
+void __env_remove_1_proc __P((long, char *, u_int32_t, __env_remove_reply *));
+__txn_abort_reply * __db_txn_abort_1 __P((__txn_abort_msg *));
+void __txn_abort_1_proc __P((long, __txn_abort_reply *));
+__txn_begin_reply * __db_txn_begin_1 __P((__txn_begin_msg *));
+void __txn_begin_1_proc __P((long, long,
+ u_int32_t, __txn_begin_reply *));
+__txn_commit_reply * __db_txn_commit_1 __P((__txn_commit_msg *));
+void __txn_commit_1_proc __P((long, u_int32_t, __txn_commit_reply *));
+__db_bt_maxkey_reply * __db_db_bt_maxkey_1 __P((__db_bt_maxkey_msg *));
+void __db_bt_maxkey_1_proc __P((long, u_int32_t, __db_bt_maxkey_reply *));
+__db_bt_minkey_reply * __db_db_bt_minkey_1 __P((__db_bt_minkey_msg *));
+void __db_bt_minkey_1_proc __P((long, u_int32_t, __db_bt_minkey_reply *));
+__db_close_reply * __db_db_close_1 __P((__db_close_msg *));
+void __db_close_1_proc __P((long, u_int32_t, __db_close_reply *));
+__db_create_reply * __db_db_create_1 __P((__db_create_msg *));
+void __db_create_1_proc __P((u_int32_t, long, __db_create_reply *));
+__db_del_reply * __db_db_del_1 __P((__db_del_msg *));
+void __db_del_1_proc __P((long, long, u_int32_t,
+ u_int32_t, u_int32_t, void *, u_int32_t,
+ u_int32_t, __db_del_reply *));
+__db_extentsize_reply * __db_db_extentsize_1 __P((__db_extentsize_msg *));
+void __db_extentsize_1_proc __P((long, u_int32_t, __db_extentsize_reply *));
+__db_flags_reply * __db_db_flags_1 __P((__db_flags_msg *));
+void __db_flags_1_proc __P((long, u_int32_t, __db_flags_reply *));
+__db_get_reply * __db_db_get_1 __P((__db_get_msg *));
+void __db_get_1_proc __P((long, long, u_int32_t,
+ u_int32_t, u_int32_t, void *, u_int32_t,
+ u_int32_t, u_int32_t, u_int32_t, void *,
+ u_int32_t, u_int32_t, __db_get_reply *, int *));
+__db_h_ffactor_reply * __db_db_h_ffactor_1 __P((__db_h_ffactor_msg *));
+void __db_h_ffactor_1_proc __P((long, u_int32_t, __db_h_ffactor_reply *));
+__db_h_nelem_reply * __db_db_h_nelem_1 __P((__db_h_nelem_msg *));
+void __db_h_nelem_1_proc __P((long, u_int32_t, __db_h_nelem_reply *));
+__db_key_range_reply * __db_db_key_range_1 __P((__db_key_range_msg *));
+void __db_key_range_1_proc __P((long, long, u_int32_t,
+ u_int32_t, u_int32_t, void *, u_int32_t,
+ u_int32_t, __db_key_range_reply *));
+__db_lorder_reply * __db_db_lorder_1 __P((__db_lorder_msg *));
+void __db_lorder_1_proc __P((long, u_int32_t, __db_lorder_reply *));
+__db_open_reply * __db_db_open_1 __P((__db_open_msg *));
+void __db_open_1_proc __P((long, char *, char *,
+ u_int32_t, u_int32_t, u_int32_t, __db_open_reply *));
+__db_pagesize_reply * __db_db_pagesize_1 __P((__db_pagesize_msg *));
+void __db_pagesize_1_proc __P((long, u_int32_t, __db_pagesize_reply *));
+__db_put_reply * __db_db_put_1 __P((__db_put_msg *));
+void __db_put_1_proc __P((long, long, u_int32_t,
+ u_int32_t, u_int32_t, void *, u_int32_t,
+ u_int32_t, u_int32_t, u_int32_t, void *,
+ u_int32_t, u_int32_t, __db_put_reply *, int *));
+__db_re_delim_reply * __db_db_re_delim_1 __P((__db_re_delim_msg *));
+void __db_re_delim_1_proc __P((long, u_int32_t, __db_re_delim_reply *));
+__db_re_len_reply * __db_db_re_len_1 __P((__db_re_len_msg *));
+void __db_re_len_1_proc __P((long, u_int32_t, __db_re_len_reply *));
+__db_re_pad_reply * __db_db_re_pad_1 __P((__db_re_pad_msg *));
+void __db_re_pad_1_proc __P((long, u_int32_t, __db_re_pad_reply *));
+__db_remove_reply * __db_db_remove_1 __P((__db_remove_msg *));
+void __db_remove_1_proc __P((long, char *, char *,
+ u_int32_t, __db_remove_reply *));
+__db_rename_reply * __db_db_rename_1 __P((__db_rename_msg *));
+void __db_rename_1_proc __P((long, char *, char *,
+ char *, u_int32_t, __db_rename_reply *));
+__db_stat_reply * __db_db_stat_1 __P((__db_stat_msg *));
+void __db_stat_1_proc __P((long,
+ u_int32_t, __db_stat_reply *, int *));
+__db_swapped_reply * __db_db_swapped_1 __P((__db_swapped_msg *));
+void __db_swapped_1_proc __P((long, __db_swapped_reply *));
+__db_sync_reply * __db_db_sync_1 __P((__db_sync_msg *));
+void __db_sync_1_proc __P((long, u_int32_t, __db_sync_reply *));
+__db_cursor_reply * __db_db_cursor_1 __P((__db_cursor_msg *));
+void __db_cursor_1_proc __P((long, long,
+ u_int32_t, __db_cursor_reply *));
+__db_join_reply * __db_db_join_1 __P((__db_join_msg *));
+void __db_join_1_proc __P((long, u_int32_t *,
+ u_int32_t, __db_join_reply *));
+__dbc_close_reply * __db_dbc_close_1 __P((__dbc_close_msg *));
+void __dbc_close_1_proc __P((long, __dbc_close_reply *));
+__dbc_count_reply * __db_dbc_count_1 __P((__dbc_count_msg *));
+void __dbc_count_1_proc __P((long, u_int32_t, __dbc_count_reply *));
+__dbc_del_reply * __db_dbc_del_1 __P((__dbc_del_msg *));
+void __dbc_del_1_proc __P((long, u_int32_t, __dbc_del_reply *));
+__dbc_dup_reply * __db_dbc_dup_1 __P((__dbc_dup_msg *));
+void __dbc_dup_1_proc __P((long, u_int32_t, __dbc_dup_reply *));
+__dbc_get_reply * __db_dbc_get_1 __P((__dbc_get_msg *));
+void __dbc_get_1_proc __P((long, u_int32_t, u_int32_t,
+ u_int32_t, void *, u_int32_t, u_int32_t,
+ u_int32_t, u_int32_t, void *, u_int32_t,
+ u_int32_t, __dbc_get_reply *, int *));
+__dbc_put_reply * __db_dbc_put_1 __P((__dbc_put_msg *));
+void __dbc_put_1_proc __P((long, u_int32_t, u_int32_t,
+ u_int32_t, void *, u_int32_t, u_int32_t,
+ u_int32_t, u_int32_t, void *, u_int32_t,
+ u_int32_t, __dbc_put_reply *, int *));
diff --git a/bdb/include/hash.h b/bdb/include/hash.h
new file mode 100644
index 00000000000..14a88c80b9c
--- /dev/null
+++ b/bdb/include/hash.h
@@ -0,0 +1,140 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: hash.h,v 11.19 2000/12/21 23:05:16 krinsky Exp $
+ */
+
+/* Hash internal structure. */
+typedef struct hash_t {
+ db_pgno_t meta_pgno; /* Page number of the meta data page. */
+ u_int32_t h_ffactor; /* Fill factor. */
+ u_int32_t h_nelem; /* Number of elements. */
+ /* Hash function. */
+ u_int32_t (*h_hash) __P((DB *, const void *, u_int32_t));
+} HASH;
+
+/* Cursor structure definitions. */
+typedef struct cursor_t {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* Hash private part */
+
+ /* Per-thread information */
+ DB_LOCK hlock; /* Metadata page lock. */
+ HMETA *hdr; /* Pointer to meta-data page. */
+ PAGE *split_buf; /* Temporary buffer for splits. */
+
+ /* Hash cursor information */
+ db_pgno_t bucket; /* Bucket we are traversing. */
+ db_pgno_t lbucket; /* Bucket for which we are locked. */
+ db_indx_t dup_off; /* Offset within a duplicate set. */
+ db_indx_t dup_len; /* Length of current duplicate. */
+ db_indx_t dup_tlen; /* Total length of duplicate entry. */
+ u_int32_t seek_size; /* Number of bytes we need for add. */
+ db_pgno_t seek_found_page;/* Page on which we can insert. */
+ u_int32_t order; /* Relative order among deleted curs. */
+
+#define H_CONTINUE 0x0001 /* Join--search strictly fwd for data */
+#define H_DELETED 0x0002 /* Cursor item is deleted. */
+#define H_DIRTY 0x0004 /* Meta-data page needs to be written */
+#define H_DUPONLY 0x0008 /* Dups only; do not change key. */
+#define H_EXPAND 0x0010 /* Table expanded. */
+#define H_ISDUP 0x0020 /* Cursor is within duplicate set. */
+#define H_NEXT_NODUP 0x0040 /* Get next non-dup entry. */
+#define H_NOMORE 0x0080 /* No more entries in bucket. */
+#define H_OK 0x0100 /* Request succeeded. */
+ u_int32_t flags;
+} HASH_CURSOR;
+
+/* Test string. */
+#define CHARKEY "%$sniglet^&"
+
+/* Overflow management */
+/*
+ * Each entry in the spares table is the page number at which a doubling
+ * begins, minus the number of buckets allocated before that doubling;
+ * translating a bucket number to a page number is then a simple addition.
+ */
+#define BS_TO_PAGE(bucket, spares) \
+ ((bucket) + (spares)[__db_log2((bucket) + 1)])
+#define BUCKET_TO_PAGE(I, B) (BS_TO_PAGE((B), (I)->hdr->spares))
+
+/* Constraints on how much data goes on a page. */
+
+#define MINFILL 4
+#define ISBIG(I, N) (((N) > ((I)->hdr->dbmeta.pagesize / MINFILL)) ? 1 : 0)
+
+/* Shorthands for accessing structure */
+#define NDX_INVALID 0xFFFF
+#define BUCKET_INVALID 0xFFFFFFFF
+
+/* On-page duplicates are stored as a string of size-data-size triples. */
+#define DUP_SIZE(len) ((len) + 2 * sizeof(db_indx_t))
+
+/* Log message types (these are subtypes within a record type) */
+#define PAIR_KEYMASK 0x1
+#define PAIR_DATAMASK 0x2
+#define PAIR_DUPMASK 0x4
+#define PAIR_MASK 0xf
+#define PAIR_ISKEYBIG(N) (N & PAIR_KEYMASK)
+#define PAIR_ISDATABIG(N) (N & PAIR_DATAMASK)
+#define PAIR_ISDATADUP(N) (N & PAIR_DUPMASK)
+#define OPCODE_OF(N) (N & ~PAIR_MASK)
+
+#define PUTPAIR 0x20
+#define DELPAIR 0x30
+#define PUTOVFL 0x40
+#define DELOVFL 0x50
+#define HASH_UNUSED1 0x60
+#define HASH_UNUSED2 0x70
+#define SPLITOLD 0x80
+#define SPLITNEW 0x90
+
+typedef enum {
+ DB_HAM_CHGPG = 1,
+ DB_HAM_SPLIT = 2,
+ DB_HAM_DUP = 3
+} db_ham_mode;
+
+#include "hash_auto.h"
+#include "hash_ext.h"
+#include "db_am.h"
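A worked example may help with the spares arithmetic used by BS_TO_PAGE and BUCKET_TO_PAGE; the page numbers below are illustrative and assume page 0 holds the metadata with bucket 0 allocated on page 1.

/*
 * With no overflow pages allocated, spares[i] == 1 for every doubling:
 * bucket 5 is in doubling __db_log2(5 + 1) == 3, so
 *	BS_TO_PAGE(5, spares) == 5 + spares[3] == 6,
 * which is exactly where buckets 0-5 land after the metadata page.
 *
 * If three overflow pages had been allocated before doubling 4 (buckets
 * 8-15) was created, that doubling would start at page 12 with 8 buckets
 * already in existence, so spares[4] == 12 - 8 == 4 and
 *	BS_TO_PAGE(8, spares) == 8 + spares[4] == 12.
 *
 * DUP_SIZE accounts for on-page duplicates being stored as size-data-size
 * triples: a 10-byte item consumes 10 + 2 * sizeof(db_indx_t) == 14 bytes
 * with a 2-byte db_indx_t.
 */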
diff --git a/bdb/include/hash_auto.h b/bdb/include/hash_auto.h
new file mode 100644
index 00000000000..5d816d5bbd4
--- /dev/null
+++ b/bdb/include/hash_auto.h
@@ -0,0 +1,248 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef ham_AUTO_H
+#define ham_AUTO_H
+
+#define DB_ham_insdel 21
+typedef struct _ham_insdel_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN pagelsn;
+ DBT key;
+ DBT data;
+} __ham_insdel_args;
+
+int __ham_insdel_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t, db_pgno_t, u_int32_t, DB_LSN *, const DBT *, const DBT *));
+int __ham_insdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_insdel_read __P((DB_ENV *, void *, __ham_insdel_args **));
+
+#define DB_ham_newpage 22
+typedef struct _ham_newpage_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t prev_pgno;
+ DB_LSN prevlsn;
+ db_pgno_t new_pgno;
+ DB_LSN pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN nextlsn;
+} __ham_newpage_args;
+
+int __ham_newpage_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *));
+int __ham_newpage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpage_read __P((DB_ENV *, void *, __ham_newpage_args **));
+
+#define DB_ham_splitmeta 23
+typedef struct _ham_splitmeta_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ u_int32_t bucket;
+ u_int32_t ovflpoint;
+ u_int32_t spares;
+ DB_LSN metalsn;
+} __ham_splitmeta_args;
+
+int __ham_splitmeta_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitmeta_read __P((DB_ENV *, void *, __ham_splitmeta_args **));
+
+#define DB_ham_splitdata 24
+typedef struct _ham_splitdata_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ DBT pageimage;
+ DB_LSN pagelsn;
+} __ham_splitdata_args;
+
+int __ham_splitdata_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, u_int32_t, db_pgno_t, const DBT *, DB_LSN *));
+int __ham_splitdata_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitdata_read __P((DB_ENV *, void *, __ham_splitdata_args **));
+
+#define DB_ham_replace 25
+typedef struct _ham_replace_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN pagelsn;
+ int32_t off;
+ DBT olditem;
+ DBT newitem;
+ u_int32_t makedup;
+} __ham_replace_args;
+
+int __ham_replace_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, u_int32_t, DB_LSN *, int32_t, const DBT *, const DBT *, u_int32_t));
+int __ham_replace_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_replace_read __P((DB_ENV *, void *, __ham_replace_args **));
+
+#define DB_ham_newpgno 26
+typedef struct _ham_newpgno_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ db_pgno_t free_pgno;
+ u_int32_t old_type;
+ db_pgno_t old_pgno;
+ u_int32_t new_type;
+ DB_LSN pagelsn;
+ DB_LSN metalsn;
+} __ham_newpgno_args;
+
+int __ham_newpgno_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpgno_read __P((DB_ENV *, void *, __ham_newpgno_args **));
+
+#define DB_ham_ovfl 27
+typedef struct _ham_ovfl_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t start_pgno;
+ u_int32_t npages;
+ db_pgno_t free_pgno;
+ u_int32_t ovflpoint;
+ DB_LSN metalsn;
+} __ham_ovfl_args;
+
+int __ham_ovfl_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_ovfl_read __P((DB_ENV *, void *, __ham_ovfl_args **));
+
+#define DB_ham_copypage 28
+typedef struct _ham_copypage_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN nextlsn;
+ db_pgno_t nnext_pgno;
+ DB_LSN nnextlsn;
+ DBT page;
+} __ham_copypage_args;
+
+int __ham_copypage_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, const DBT *));
+int __ham_copypage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_copypage_read __P((DB_ENV *, void *, __ham_copypage_args **));
+
+#define DB_ham_metagroup 29
+typedef struct _ham_metagroup_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ u_int32_t bucket;
+ db_pgno_t pgno;
+ DB_LSN metalsn;
+ DB_LSN pagelsn;
+} __ham_metagroup_args;
+
+int __ham_metagroup_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, u_int32_t, db_pgno_t, DB_LSN *, DB_LSN *));
+int __ham_metagroup_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_metagroup_read __P((DB_ENV *, void *, __ham_metagroup_args **));
+
+#define DB_ham_groupalloc1 30
+typedef struct _ham_groupalloc1_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN metalsn;
+ DB_LSN mmetalsn;
+ db_pgno_t start_pgno;
+ u_int32_t num;
+} __ham_groupalloc1_args;
+
+int __ham_groupalloc1_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc1_read __P((DB_ENV *, void *, __ham_groupalloc1_args **));
+
+#define DB_ham_groupalloc2 31
+typedef struct _ham_groupalloc2_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN meta_lsn;
+ DB_LSN alloc_lsn;
+ db_pgno_t start_pgno;
+ u_int32_t num;
+ db_pgno_t free;
+} __ham_groupalloc2_args;
+
+int __ham_groupalloc2_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc2_read __P((DB_ENV *, void *, __ham_groupalloc2_args **));
+
+#define DB_ham_groupalloc 32
+typedef struct _ham_groupalloc_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN meta_lsn;
+ db_pgno_t start_pgno;
+ u_int32_t num;
+ db_pgno_t free;
+} __ham_groupalloc_args;
+
+int __ham_groupalloc_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t));
+int __ham_groupalloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc_read __P((DB_ENV *, void *, __ham_groupalloc_args **));
+
+#define DB_ham_curadj 33
+typedef struct _ham_curadj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ u_int32_t len;
+ u_int32_t dup_off;
+ int add;
+ int is_dup;
+ u_int32_t order;
+} __ham_curadj_args;
+
+int __ham_curadj_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t, int, int, u_int32_t));
+int __ham_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_curadj_read __P((DB_ENV *, void *, __ham_curadj_args **));
+
+#define DB_ham_chgpg 34
+typedef struct _ham_chgpg_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_ham_mode mode;
+ db_pgno_t old_pgno;
+ db_pgno_t new_pgno;
+ u_int32_t old_indx;
+ u_int32_t new_indx;
+} __ham_chgpg_args;
+
+int __ham_chgpg_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_ham_mode, db_pgno_t, db_pgno_t, u_int32_t, u_int32_t));
+int __ham_chgpg_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_chgpg_read __P((DB_ENV *, void *, __ham_chgpg_args **));
+int __ham_init_print __P((DB_ENV *));
+int __ham_init_recover __P((DB_ENV *));
+#endif
diff --git a/bdb/include/hash_ext.h b/bdb/include/hash_ext.h
new file mode 100644
index 00000000000..babb77a7902
--- /dev/null
+++ b/bdb/include/hash_ext.h
@@ -0,0 +1,106 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _hash_ext_h_
+#define _hash_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __ham_metachk __P((DB *, const char *, HMETA *));
+int __ham_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+int __ham_c_init __P((DBC *));
+int __ham_c_count __P((DBC *, db_recno_t *));
+int __ham_c_dup __P((DBC *, DBC *));
+u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t));
+int __ham_init_dbt __P((DB_ENV *,
+ DBT *, u_int32_t, void **, u_int32_t *));
+int __ham_c_update
+ __P((DBC *, u_int32_t, int, int));
+int __ham_get_clist __P((DB *,
+ db_pgno_t, u_int32_t, DBC ***));
+int __ham_c_chgpg
+ __P((DBC *, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+int __ham_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __ham_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __ham_mswap __P((void *));
+int __ham_add_dup __P((DBC *, DBT *, u_int32_t, db_pgno_t *));
+int __ham_dup_convert __P((DBC *));
+int __ham_make_dup __P((DB_ENV *,
+ const DBT *, DBT *d, void **, u_int32_t *));
+void __ham_move_offpage __P((DBC *, PAGE *, u_int32_t, db_pgno_t));
+void __ham_dsearch __P((DBC *, DBT *, u_int32_t *, int *));
+int __ham_cprint __P((DB *));
+u_int32_t __ham_func2 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func3 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func4 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func5 __P((DB *, const void *, u_int32_t));
+int __ham_get_meta __P((DBC *));
+int __ham_release_meta __P((DBC *));
+int __ham_dirty_meta __P((DBC *));
+int __ham_db_create __P((DB *));
+int __ham_db_close __P((DB *));
+int __ham_item __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_reset __P((DBC *));
+void __ham_item_init __P((DBC *));
+int __ham_item_last __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_first __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_prev __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_next __P((DBC *, db_lockmode_t, db_pgno_t *));
+void __ham_putitem __P((PAGE *p, const DBT *, int));
+void __ham_reputpair
+ __P((PAGE *p, u_int32_t, u_int32_t, const DBT *, const DBT *));
+int __ham_del_pair __P((DBC *, int));
+int __ham_replpair __P((DBC *, DBT *, u_int32_t));
+void __ham_onpage_replace __P((PAGE *, size_t, u_int32_t, int32_t,
+ int32_t, DBT *));
+int __ham_split_page __P((DBC *, u_int32_t, u_int32_t));
+int __ham_add_el __P((DBC *, const DBT *, const DBT *, int));
+void __ham_copy_item __P((size_t, PAGE *, u_int32_t, PAGE *));
+int __ham_add_ovflpage __P((DBC *, PAGE *, int, PAGE **));
+int __ham_get_cpage __P((DBC *, db_lockmode_t));
+int __ham_next_cpage __P((DBC *, db_pgno_t, int));
+int __ham_lock_bucket __P((DBC *, db_lockmode_t));
+void __ham_dpair __P((DB *, PAGE *, u_int32_t));
+int __ham_insdel_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpage_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_replace_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitdata_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_copypage_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_metagroup_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_curadj_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_chgpg_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_reclaim __P((DB *, DB_TXN *txn));
+int __ham_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+int __ham_traverse __P((DB *, DBC *, db_lockmode_t,
+ int (*)(DB *, PAGE *, void *, int *), void *));
+int __ham_30_hashmeta __P((DB *, char *, u_int8_t *));
+int __ham_30_sizefix __P((DB *, DB_FH *, char *, u_int8_t *));
+int __ham_31_hashmeta
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __ham_31_hash
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __ham_vrfy_meta __P((DB *, VRFY_DBINFO *, HMETA *,
+ db_pgno_t, u_int32_t));
+int __ham_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ u_int32_t));
+int __ham_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ u_int32_t));
+int __ham_vrfy_hashing __P((DB *,
+ u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t,
+ u_int32_t (*) __P((DB *, const void *, u_int32_t))));
+int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
+ void *, int (*)(void *, const void *), u_int32_t));
+int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
+ DB *));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _hash_ext_h_ */
diff --git a/bdb/include/lock.h b/bdb/include/lock.h
new file mode 100644
index 00000000000..e4a01ddf9c7
--- /dev/null
+++ b/bdb/include/lock.h
@@ -0,0 +1,190 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: lock.h,v 11.20 2000/12/12 17:43:56 bostic Exp $
+ */
+
+#define DB_LOCK_DEFAULT_N 1000 /* Default # of locks in region. */
+
+/*
+ * Out of band value for a lock. Locks contain an offset into a lock region,
+ * so we use an invalid region offset to indicate an invalid or unset lock.
+ */
+#define LOCK_INVALID INVALID_ROFF
+
+/*
+ * The locker id space is divided between the transaction manager and the lock
+ * manager. Lock IDs start at 0 and go to DB_LOCK_MAXID. Txn IDs start at
+ * DB_LOCK_MAXID + 1 and go up to TXN_INVALID.
+ */
+#define DB_LOCK_MAXID 0x7fffffff
+
+/*
+ * DB_LOCKREGION --
+ * The lock shared region.
+ */
+typedef struct __db_lockregion {
+ u_int32_t id; /* unique id generator */
+ u_int32_t need_dd; /* flag for deadlock detector */
+ u_int32_t detect; /* run dd on every conflict */
+ /* free lock header */
+ SH_TAILQ_HEAD(__flock) free_locks;
+ /* free obj header */
+ SH_TAILQ_HEAD(__fobj) free_objs;
+ /* free locker header */
+ SH_TAILQ_HEAD(__flocker) free_lockers;
+ SH_TAILQ_HEAD(__dobj) dd_objs; /* objects with waiters */
+ u_int32_t maxlocks; /* maximum number of locks in table */
+ u_int32_t maxlockers; /* maximum number of lockers in table */
+ u_int32_t maxobjects; /* maximum number of objects in table */
+ u_int32_t locker_t_size; /* size of locker hash table */
+ u_int32_t object_t_size; /* size of object hash table */
+ u_int32_t nmodes; /* number of lock modes */
+ u_int32_t nlocks; /* current number of locks */
+ u_int32_t maxnlocks; /* maximum number of locks so far*/
+ u_int32_t nlockers; /* current number of lockers */
+ u_int32_t maxnlockers; /* maximum number of lockers so far */
+ u_int32_t nobjects; /* current number of objects */
+ u_int32_t maxnobjects; /* maximum number of objects so far */
+ roff_t conf_off; /* offset of conflicts array */
+ roff_t obj_off; /* offset of object hash table */
+ roff_t osynch_off; /* offset of the object mutex table */
+ roff_t locker_off; /* offset of locker hash table */
+ roff_t lsynch_off; /* offset of the locker mutex table */
+ u_int32_t nconflicts; /* number of lock conflicts */
+ u_int32_t nrequests; /* number of lock gets */
+ u_int32_t nreleases; /* number of lock puts */
+ u_int32_t nnowaits; /* number of lock requests that would
+ have waited without nowait */
+ u_int32_t ndeadlocks; /* number of deadlocks */
+#ifdef MUTEX_SYSTEM_RESOURCES
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+} DB_LOCKREGION;
+
+/*
+ * Since we will store DBTs in shared memory, we need the equivalent of a
+ * DBT that will work in shared memory.
+ */
+typedef struct __sh_dbt {
+ u_int32_t size; /* Byte length. */
+ ssize_t off; /* Region offset. */
+} SH_DBT;
+
+#define SH_DBT_PTR(p) ((void *)(((u_int8_t *)(p)) + (p)->off))
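+
+/*
+ * Illustrative use (a sketch; sh_obj is a DB_LOCKOBJ pointer and dbt a
+ * DBT pointer, both names assumed here): compare a lock object's key
+ * bytes, stored in the region, against a caller's DBT:
+ *
+ *	memcmp(dbt->data, SH_DBT_PTR(&sh_obj->lockobj), sh_obj->lockobj.size)
+ */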
+
+/*
+ * Object structures; these live in the object hash table.
+ */
+typedef struct __db_lockobj {
+ SH_DBT lockobj; /* Identifies object locked. */
+ SH_TAILQ_ENTRY links; /* Links for free list or hash list. */
+ SH_TAILQ_ENTRY dd_links; /* Links for dd list. */
+ SH_TAILQ_HEAD(__wait) waiters; /* List of waiting locks. */
+ SH_TAILQ_HEAD(__hold) holders; /* List of held locks. */
+ /* Declare room in the object to hold
+ * typical DB lock structures so that
+ * we do not have to allocate them from
+ * shalloc at run-time. */
+ u_int8_t objdata[sizeof(struct __db_ilock)];
+} DB_LOCKOBJ;
+
+/*
+ * Locker structures; these live in the locker hash table.
+ */
+typedef struct __db_locker {
+ u_int32_t id; /* Locker id. */
+ u_int32_t dd_id; /* Deadlock detector id. */
+ size_t master_locker; /* Locker of master transaction. */
+ size_t parent_locker; /* Parent of this child. */
+ SH_LIST_HEAD(_child) child_locker; /* List of descendant txns;
+ only used in a "master"
+ txn. */
+ SH_LIST_ENTRY child_link; /* Links transactions in the family;
+ elements of the child_locker
+ list. */
+ SH_TAILQ_ENTRY links; /* Links for free list. */
+ SH_LIST_HEAD(_held) heldby; /* Locks held by this locker. */
+
+#define DB_LOCKER_DELETED 0x0001
+ u_int32_t flags;
+} DB_LOCKER;
+
+/*
+ * Lockers can be freed if they are not part of a transaction family.
+ * Members of a family either point at the master transaction or are
+ * the master transaction and have children lockers.
+ */
+#define LOCKER_FREEABLE(lp) \
+ ((lp)->master_locker == TXN_INVALID_ID && \
+ SH_LIST_FIRST(&(lp)->child_locker, __db_locker) == NULL)
+
+/*
+ * DB_LOCKTAB --
+ * The primary library lock data structure (i.e., the one referenced
+ * by the environment, as opposed to the internal one laid out in the region.)
+ */
+typedef struct __db_locktab {
+ DB_ENV *dbenv; /* Environment. */
+ REGINFO reginfo; /* Region information. */
+ u_int8_t *conflicts; /* Pointer to conflict matrix. */
+ DB_HASHTAB *obj_tab; /* Beginning of object hash table. */
+ DB_HASHTAB *locker_tab; /* Beginning of locker hash table. */
+} DB_LOCKTAB;
+
+/* Test for conflicts. */
+#define CONFLICTS(T, R, HELD, WANTED) \
+ (T)->conflicts[(HELD) * (R)->nmodes + (WANTED)]
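+
+/*
+ * Sketch of a typical conflict check, assuming a DB_LOCKTAB *lt, its
+ * DB_LOCKREGION *region, a held lock *hlp and a requested mode lock_mode
+ * are in scope (names are illustrative):
+ *
+ *	if (CONFLICTS(lt, region, hlp->mode, lock_mode))
+ *		the request must block behind the holder.
+ */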
+
+#define OBJ_LINKS_VALID(L) ((L)->links.stqe_prev != -1)
+
+struct __db_lock {
+ /*
+ * Wait on mutex to wait on lock. You reference your own mutex with
+ * ID 0 and others reference your mutex with ID 1.
+ */
+ MUTEX mutex;
+
+ u_int32_t holder; /* Who holds this lock. */
+ u_int32_t gen; /* Generation count. */
+ SH_TAILQ_ENTRY links; /* Free or holder/waiter list. */
+ SH_LIST_ENTRY locker_links; /* List of locks held by a locker. */
+ u_int32_t refcount; /* Reference count the lock. */
+ db_lockmode_t mode; /* What sort of lock. */
+ ssize_t obj; /* Relative offset of object struct. */
+ db_status_t status; /* Status of this lock. */
+};
+
+/*
+ * Flag values for __lock_put_internal:
+ * DB_LOCK_DOALL: Unlock all references in this lock (instead of only 1).
+ * DB_LOCK_FREE: Free the lock (used in checklocker).
+ * DB_LOCK_IGNOREDEL: Remove from the locker hash table even if already
+ *	deleted (used in checklocker).
+ * DB_LOCK_NOPROMOTE: Don't bother running promotion when releasing locks
+ * (used by __lock_put_internal).
+ * DB_LOCK_UNLINK: Remove from the locker links (used in checklocker).
+ */
+#define DB_LOCK_DOALL 0x001
+#define DB_LOCK_FREE 0x002
+#define DB_LOCK_IGNOREDEL 0x004
+#define DB_LOCK_NOPROMOTE 0x008
+#define DB_LOCK_UNLINK 0x010
+#define DB_LOCK_NOWAITERS 0x020
+
+/*
+ * Macros to get/release different types of mutexes.
+ */
+#define OBJECT_LOCK(lt, reg, obj, ndx) \
+ ndx = __lock_ohash(obj) % (reg)->object_t_size
+#define SHOBJECT_LOCK(lt, reg, shobj, ndx) \
+ ndx = __lock_lhash(shobj) % (reg)->object_t_size
+#define LOCKER_LOCK(lt, reg, locker, ndx) \
+ ndx = __lock_locker_hash(locker) % (reg)->locker_t_size;
+
+#define LOCKREGION(dbenv, lt) R_LOCK((dbenv), &(lt)->reginfo)
+#define UNLOCKREGION(dbenv, lt) R_UNLOCK((dbenv), &(lt)->reginfo)
+#include "lock_ext.h"
diff --git a/bdb/include/lock_ext.h b/bdb/include/lock_ext.h
new file mode 100644
index 00000000000..7ed9b1c695b
--- /dev/null
+++ b/bdb/include/lock_ext.h
@@ -0,0 +1,39 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _lock_ext_h_
+#define _lock_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __lock_downgrade __P((DB_ENV *,
+ DB_LOCK *, db_lockmode_t, u_int32_t));
+int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t));
+int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t));
+void __lock_freelocker __P((DB_LOCKTAB *,
+ DB_LOCKREGION *, DB_LOCKER *, u_int32_t));
+int __lock_getlocker __P((DB_LOCKTAB *,
+ u_int32_t, u_int32_t, int, DB_LOCKER **));
+int __lock_getobj __P((DB_LOCKTAB *,
+ const DBT *, u_int32_t, int, DB_LOCKOBJ **));
+int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, int));
+void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
+int __lock_set_lk_detect __P((DB_ENV *, u_int32_t));
+int __lock_set_lk_max __P((DB_ENV *, u_int32_t));
+int __lock_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+int __lock_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+int __lock_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+void __lock_dbenv_create __P((DB_ENV *));
+void __lock_dbenv_close __P((DB_ENV *));
+int __lock_open __P((DB_ENV *));
+int __lock_close __P((DB_ENV *));
+void __lock_region_destroy __P((DB_ENV *, REGINFO *));
+void __lock_dump_region __P((DB_ENV *, char *, FILE *));
+int __lock_cmp __P((const DBT *, DB_LOCKOBJ *));
+int __lock_locker_cmp __P((u_int32_t, DB_LOCKER *));
+u_int32_t __lock_ohash __P((const DBT *));
+u_int32_t __lock_lhash __P((DB_LOCKOBJ *));
+u_int32_t __lock_locker_hash __P((u_int32_t));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _lock_ext_h_ */
diff --git a/bdb/include/log.h b/bdb/include/log.h
new file mode 100644
index 00000000000..08c2b8076be
--- /dev/null
+++ b/bdb/include/log.h
@@ -0,0 +1,208 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: log.h,v 11.19 2001/01/11 18:19:52 bostic Exp $
+ */
+
+#ifndef _LOG_H_
+#define _LOG_H_
+
+struct __db_log; typedef struct __db_log DB_LOG;
+struct __fname; typedef struct __fname FNAME;
+struct __hdr; typedef struct __hdr HDR;
+struct __log; typedef struct __log LOG;
+struct __log_persist; typedef struct __log_persist LOGP;
+
+#define LFPREFIX "log." /* Log file name prefix. */
+#define LFNAME "log.%010d" /* Log file name template. */
+#define LFNAME_V1 "log.%05d" /* Log file name template, rev 1. */
+
+#define LG_MAX_DEFAULT (10 * MEGABYTE) /* 10 MB. */
+#define LG_BSIZE_DEFAULT (32 * 1024) /* 32 KB. */
+#define LG_BASE_REGION_SIZE (60 * 1024) /* 60 KB. */
+
+/*
+ * The per-process table that maps log file-id's to DB structures.
+ */
+typedef struct __db_entry {
+ TAILQ_HEAD(dblist, __db) dblist; /* Associated DB structures. */
+ u_int32_t refcount; /* Reference counted. */
+ u_int32_t count; /* Number of ops on a deleted db. */
+ int deleted; /* File was not found during open. */
+} DB_ENTRY;
+
+/*
+ * DB_LOG
+ * Per-process log structure.
+ */
+struct __db_log {
+/*
+ * These fields need to be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the mutex may need
+ * to be stored elsewhere on architectures unable to support mutexes in heap
+ * memory, e.g., HP/UX 9.
+ */
+ MUTEX *mutexp; /* Mutex for thread protection. */
+
+ DB_ENTRY *dbentry; /* Recovery file-id mapping. */
+#define DB_GROW_SIZE 64
+ int32_t dbentry_cnt; /* Entries. Grows by DB_GROW_SIZE. */
+
+/*
+ * These fields are always accessed while the region lock is held, so they do
+ * not have to be protected by the thread lock as well, OR, they are only used
+ * when threads are not being used, i.e. most cursor operations are disallowed
+ * on threaded logs.
+ */
+ u_int32_t lfname; /* Log file "name". */
+ DB_FH lfh; /* Log file handle. */
+
+ DB_LSN c_lsn; /* Cursor: current LSN. */
+ DBT c_dbt; /* Cursor: return DBT structure. */
+ DB_FH c_fh; /* Cursor: file handle. */
+ FILE *c_fp; /* Cursor: file pointer. */
+ u_int32_t c_off; /* Cursor: previous record offset. */
+ u_int32_t c_len; /* Cursor: current record length. */
+ u_int32_t r_file; /* Cursor: current read file */
+ u_int32_t r_off; /* Cursor: offset of read buffer. */
+ u_int32_t r_size; /* Cursor: size of data in read buf. */
+
+ u_int8_t *bufp; /* Region buffer. */
+ u_int8_t *readbufp; /* Read buffer. */
+
+/* These fields are not protected. */
+ DB_ENV *dbenv; /* Reference to error information. */
+ REGINFO reginfo; /* Region information. */
+
+/*
+ * These fields are used by XA; since XA forbids threaded execution, these
+ * do not have to be protected.
+ */
+ void *xa_info; /* Committed transaction list that
+ * has to be carried between calls
+ * to xa_recover. */
+ DB_LSN xa_lsn; /* Position of an XA recovery scan. */
+ DB_LSN xa_first; /* LSN to which we need to roll back
+ for this XA recovery scan. */
+
+#define DBLOG_RECOVER 0x01 /* We are in recovery. */
+#define DBLOG_FORCE_OPEN 0x02 /* Force the db open even
+ * if it appears to be deleted.
+ */
+ u_int32_t flags;
+};
+
+/*
+ * HDR --
+ * Log record header.
+ */
+struct __hdr {
+ u_int32_t prev; /* Previous offset. */
+ u_int32_t cksum; /* Current checksum. */
+ u_int32_t len; /* Current length. */
+};
+
+struct __log_persist {
+ u_int32_t magic; /* DB_LOGMAGIC */
+ u_int32_t version; /* DB_LOGVERSION */
+
+ u_int32_t lg_max; /* Maximum file size. */
+ int mode; /* Log file mode. */
+};
+
+/*
+ * LOG --
+ * Shared log region. One of these is allocated in shared memory,
+ * and describes the log.
+ */
+struct __log {
+ LOGP persist; /* Persistent information. */
+
+ SH_TAILQ_HEAD(__fq) fq; /* List of file names. */
+
+ /*
+ * The lsn LSN is the file offset that we're about to write and which
+ * we will return to the user.
+ */
+ DB_LSN lsn; /* LSN at current file offset. */
+
+ /*
+ * The s_lsn LSN is the last LSN that we know is on disk, not just
+ * written, but synced.
+ */
+ DB_LSN s_lsn; /* LSN of the last sync. */
+
+ u_int32_t len; /* Length of the last record. */
+
+ u_int32_t w_off; /* Current write offset in the file. */
+
+ DB_LSN chkpt_lsn; /* LSN of the last checkpoint. */
+ time_t chkpt; /* Time of the last checkpoint. */
+
+ DB_LOG_STAT stat; /* Log statistics. */
+
+ /*
+ * The f_lsn LSN is the LSN (returned to the user) that "owns" the
+ * first byte of the buffer. If the record associated with the LSN
+ * spans buffers, it may not reflect the physical file location of
+ * the first byte of the buffer.
+ */
+ DB_LSN f_lsn; /* LSN of first byte in the buffer. */
+ size_t b_off; /* Current offset in the buffer. */
+
+ roff_t buffer_off; /* Log buffer offset. */
+ u_int32_t buffer_size; /* Log buffer size. */
+};
+
+/*
+ * FNAME --
+ * File name and id.
+ */
+struct __fname {
+ SH_TAILQ_ENTRY q; /* File name queue. */
+
+ u_int16_t ref; /* Reference count. */
+ u_int16_t locked; /* Table is locked. */
+
+ int32_t id; /* Logging file id. */
+ DBTYPE s_type; /* Saved DB type. */
+
+ roff_t name_off; /* Name offset. */
+ db_pgno_t meta_pgno; /* Page number of the meta page. */
+ u_int8_t ufid[DB_FILE_ID_LEN]; /* Unique file id. */
+};
+
+/* File open/close register log record opcodes. */
+#define LOG_CHECKPOINT 1 /* Checkpoint: file name/id dump. */
+#define LOG_CLOSE 2 /* File close. */
+#define LOG_OPEN 3 /* File open. */
+
+#define CHECK_LSN(redo, cmp, lsn, prev) \
+ DB_ASSERT(!DB_REDO(redo) || (cmp) >= 0); \
+ if (DB_REDO(redo) && (cmp) < 0) { \
+ __db_err(dbenv, \
+ "Log sequence error: page LSN %lu:%lu; previous LSN %lu %lu", \
+ (u_long)(lsn)->file, (u_long)(lsn)->offset, \
+ (u_long)(prev)->file, (u_long)(prev)->offset); \
+ goto out; \
+ }
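+
+/*
+ * Sketch of how a recovery function would use CHECK_LSN (argp, pagep and
+ * the out: label are assumed to be in scope, as the macro body requires
+ * dbenv and a goto target named out):
+ *
+ *	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ *	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+ */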
+
+/*
+ * Status codes indicating the validity of a log file examined by
+ * __log_valid().
+ */
+typedef enum {
+ DB_LV_INCOMPLETE,
+ DB_LV_NORMAL,
+ DB_LV_OLD_READABLE,
+ DB_LV_OLD_UNREADABLE
+} logfile_validity;
+
+#include "log_auto.h"
+#include "log_ext.h"
+#endif /* _LOG_H_ */
diff --git a/bdb/include/log_auto.h b/bdb/include/log_auto.h
new file mode 100644
index 00000000000..ddbcbcb3ec6
--- /dev/null
+++ b/bdb/include/log_auto.h
@@ -0,0 +1,39 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef log_AUTO_H
+#define log_AUTO_H
+
+#define DB_log_register1 1
+typedef struct _log_register1_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ DBT name;
+ DBT uid;
+ int32_t fileid;
+ DBTYPE ftype;
+} __log_register1_args;
+
+int __log_register1_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __log_register1_read __P((DB_ENV *, void *, __log_register1_args **));
+
+#define DB_log_register 2
+typedef struct _log_register_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ DBT name;
+ DBT uid;
+ int32_t fileid;
+ DBTYPE ftype;
+ db_pgno_t meta_pgno;
+} __log_register_args;
+
+int __log_register_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *, int32_t, DBTYPE, db_pgno_t));
+int __log_register_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __log_register_read __P((DB_ENV *, void *, __log_register_args **));
+int __log_init_print __P((DB_ENV *));
+int __log_init_recover __P((DB_ENV *));
+#endif
diff --git a/bdb/include/log_ext.h b/bdb/include/log_ext.h
new file mode 100644
index 00000000000..985c5d7745b
--- /dev/null
+++ b/bdb/include/log_ext.h
@@ -0,0 +1,33 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _log_ext_h_
+#define _log_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __log_open __P((DB_ENV *));
+int __log_find __P((DB_LOG *, int, int *, logfile_validity *));
+int __log_valid __P((DB_LOG *, u_int32_t, int, logfile_validity *));
+int __log_close __P((DB_ENV *));
+int __log_lastckp __P((DB_ENV *, DB_LSN *));
+int __log_findckp __P((DB_ENV *, DB_LSN *));
+int __log_get __P((DB_LOG *, DB_LSN *, DBT *, u_int32_t, int));
+void __log_dbenv_create __P((DB_ENV *));
+int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+int __log_name __P((DB_LOG *,
+ u_int32_t, char **, DB_FH *, u_int32_t));
+int __log_register_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __log_reopen_file __P((DB_ENV *,
+ char *, int32_t, u_int8_t *, db_pgno_t));
+int __log_add_logid __P((DB_ENV *, DB_LOG *, DB *, int32_t));
+int __db_fileid_to_db __P((DB_ENV *, DB **, int32_t, int));
+void __log_close_files __P((DB_ENV *));
+void __log_rem_logid __P((DB_LOG *, DB *, int32_t));
+int __log_lid_to_fname __P((DB_LOG *, int32_t, FNAME **));
+int __log_filelist_update
+ __P((DB_ENV *, DB *, int32_t, const char *, int *));
+int __log_file_lock __P((DB *));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _log_ext_h_ */
diff --git a/bdb/include/mp.h b/bdb/include/mp.h
new file mode 100644
index 00000000000..233cb1c2b10
--- /dev/null
+++ b/bdb/include/mp.h
@@ -0,0 +1,244 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: mp.h,v 11.16 2001/01/10 04:50:53 ubell Exp $
+ */
+
+struct __bh; typedef struct __bh BH;
+struct __db_mpool; typedef struct __db_mpool DB_MPOOL;
+struct __db_mpreg; typedef struct __db_mpreg DB_MPREG;
+struct __mpool; typedef struct __mpool MPOOL;
+struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE;
+
+/* We require at least 40K of cache. */
+#define DB_CACHESIZE_MIN (20 * 1024)
+
+/*
+ * DB_MPOOL --
+ * Per-process memory pool structure.
+ */
+struct __db_mpool {
+ /* These fields need to be protected for multi-threaded support. */
+ MUTEX *mutexp; /* Structure thread lock. */
+
+ /* List of pgin/pgout routines. */
+ LIST_HEAD(__db_mpregh, __db_mpreg) dbregq;
+
+ /* List of DB_MPOOLFILE's. */
+ TAILQ_HEAD(__db_mpoolfileh, __db_mpoolfile) dbmfq;
+
+ /* These fields are not thread-protected. */
+ DB_ENV *dbenv; /* Reference to error information. */
+
+ u_int32_t nreg; /* N underlying cache regions. */
+ REGINFO *reginfo; /* Underlying cache regions. */
+};
+
+/*
+ * DB_MPREG --
+ * DB_MPOOL registry of pgin/pgout functions.
+ */
+struct __db_mpreg {
+ LIST_ENTRY(__db_mpreg) q; /* Linked list. */
+
+ int ftype; /* File type. */
+ /* Pgin, pgout routines. */
+ int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+};
+
+/*
+ * DB_MPOOLFILE --
+ * Per-process DB_MPOOLFILE information.
+ */
+struct __db_mpoolfile {
+ /* These fields need to be protected for multi-threaded support. */
+ MUTEX *mutexp; /* Structure thread lock. */
+
+ DB_FH fh; /* Underlying file handle. */
+
+ u_int32_t ref; /* Reference count. */
+
+ /*
+ * !!!
+ * This field is a special case -- it's protected by the region lock
+ * NOT the thread lock. The reason for this is that we always have
+ * the region lock immediately before or after we modify the field,
+ * and we don't want to use the structure lock to protect it because
+ * then I/O (which is done with the structure lock held because of
+ * the race between the seek and write of the file descriptor) will
+ * block any other put/get calls using this DB_MPOOLFILE structure.
+ */
+ u_int32_t pinref; /* Pinned block reference count. */
+
+ /*
+ * !!!
+ * This field is a special case -- it's protected by the region lock
+ * since it's manipulated only when new files are added to the list.
+ */
+ TAILQ_ENTRY(__db_mpoolfile) q; /* Linked list of DB_MPOOLFILE's. */
+
+ /* These fields are not thread-protected. */
+ DB_MPOOL *dbmp; /* Overlying DB_MPOOL. */
+ MPOOLFILE *mfp; /* Underlying MPOOLFILE. */
+
+ void *addr; /* Address of mmap'd region. */
+ size_t len; /* Length of mmap'd region. */
+
+ /* These fields need to be protected for multi-threaded support. */
+#define MP_READONLY 0x01 /* File is readonly. */
+#define MP_UPGRADE 0x02 /* File descriptor is readwrite. */
+#define MP_UPGRADE_FAIL 0x04 /* Upgrade wasn't possible. */
+ u_int32_t flags;
+};
+
+/*
+ * NCACHE --
+ * Select a cache based on the page number. This assumes accesses are
+ * uniform across pages, which is probably OK -- what we really want to
+ * avoid is anything that puts all the pages for any single file in the
+ * same cache, as we expect that file access will be bursty.
+ */
+#define NCACHE(mp, pgno) \
+ ((pgno) % ((MPOOL *)mp)->nreg)
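+
+/*
+ * For example, with three underlying cache regions, pages 0, 3, 6, ... of
+ * any file map to cache 0, pages 1, 4, 7, ... to cache 1 and pages
+ * 2, 5, 8, ... to cache 2.
+ */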
+
+/*
+ * NBUCKET --
+ * We make the assumption that early pages of the file are more likely
+ * to be retrieved than the later pages, which means the top bits will
+ * be more interesting for hashing as they're less likely to collide.
+ * That said, since 512 8K pages represents a 4MB file, only reasonably
+ * large files will have page numbers with any other than the bottom 9
+ * bits set. We XOR in the MPOOL offset of the MPOOLFILE that backs the
+ * page, since that should also be unique for the page. We don't want
+ * to do anything very fancy -- speed is more important to us than using
+ * good hashing.
+ */
+#define NBUCKET(mc, mf_offset, pgno) \
+ (((pgno) ^ ((mf_offset) << 9)) % (mc)->htab_buckets)
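+
+/*
+ * Sketch of a buffer lookup using both macros (c_mp, dbht and ndx are
+ * illustrative names, not declarations from this header):
+ *
+ *	c_mp = dbmp->reginfo[NCACHE(mp, pgno)].primary;
+ *	ndx = NBUCKET(c_mp, mf_offset, pgno);
+ *	then walk the buffer headers chained from hash bucket dbht[ndx].
+ */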
+
+/*
+ * MPOOL --
+ * Shared memory pool region.
+ */
+struct __mpool {
+ /*
+ * The memory pool can be broken up into individual pieces/files.
+ * Not what we would have liked, but on Solaris you can allocate
+ * only a little more than 2GB of memory in a contiguous chunk,
+ * and I expect to see more systems with similar issues.
+ *
+ * The first of these pieces/files describes the entire pool, all
+ * subsequent ones only describe a part of the cache.
+ *
+ * We single-thread memp_sync and memp_fsync calls.
+ *
+ * This mutex is intended *only* to single-thread access to the call;
+ * it is not used to protect the lsn and lsn_cnt fields, which are
+ * protected by the region lock.
+ */
+ MUTEX sync_mutex; /* Checkpoint lock. */
+ DB_LSN lsn; /* Maximum checkpoint LSN. */
+ u_int32_t lsn_cnt; /* Checkpoint buffers left to write. */
+
+ SH_TAILQ_HEAD(__mpfq) mpfq; /* List of MPOOLFILEs. */
+
+ u_int32_t nreg; /* Number of underlying REGIONS. */
+ roff_t regids; /* Array of underlying REGION Ids. */
+
+#define MP_LSN_RETRY 0x01 /* Retry all BH_WRITE buffers. */
+ u_int32_t flags;
+
+ /*
+ * The following structure fields only describe the cache portion of
+ * the region.
+ */
+ SH_TAILQ_HEAD(__bhq) bhq; /* LRU list of buffer headers. */
+
+ int htab_buckets; /* Number of hash table entries. */
+ roff_t htab; /* Hash table offset. */
+
+ DB_MPOOL_STAT stat; /* Per-cache mpool statistics. */
+#ifdef MUTEX_SYSTEM_RESOURCES
+ roff_t maint_off; /* Maintenance information offset */
+#endif
+};
+
+/*
+ * MPOOLFILE --
+ * Shared DB_MPOOLFILE information.
+ */
+struct __mpoolfile {
+ SH_TAILQ_ENTRY q; /* List of MPOOLFILEs */
+
+ db_pgno_t mpf_cnt; /* Ref count: DB_MPOOLFILEs. */
+ db_pgno_t block_cnt; /* Ref count: blocks in cache. */
+ db_pgno_t lsn_cnt; /* Checkpoint buffers left to write. */
+
+ int ftype; /* File type. */
+ int32_t lsn_off; /* Page's LSN offset. */
+ u_int32_t clear_len; /* Bytes to clear on page create. */
+
+ roff_t path_off; /* File name location. */
+ roff_t fileid_off; /* File identification location. */
+
+ roff_t pgcookie_len; /* Pgin/pgout cookie length. */
+ roff_t pgcookie_off; /* Pgin/pgout cookie location. */
+
+ db_pgno_t last_pgno; /* Last page in the file. */
+ db_pgno_t orig_last_pgno; /* Original last page in the file. */
+
+ DB_MPOOL_FSTAT stat; /* Per-file mpool statistics. */
+
+#define MP_CAN_MMAP 0x01 /* If the file can be mmap'd. */
+#define MP_DEADFILE 0x02 /* Dirty pages can simply be trashed. */
+#define MP_TEMP 0x04 /* Backing file is a temporary. */
+#define MP_UNLINK 0x08 /* Unlink file on last close. */
+ u_int32_t flags;
+};
+
+/*
+ * BH_TO_CACHE --
+ * Return the cache where we can find the specified buffer header.
+ */
+#define BH_TO_CACHE(dbmp, bhp) \
+ (dbmp)->reginfo[NCACHE((dbmp)->reginfo[0].primary, (bhp)->pgno)].primary
+
+/*
+ * BH --
+ * Buffer header.
+ */
+struct __bh {
+ MUTEX mutex; /* Buffer thread/process lock. */
+
+ u_int16_t ref; /* Reference count. */
+
+#define BH_CALLPGIN 0x001 /* Page needs to be reworked... */
+#define BH_DIRTY 0x002 /* Page was modified. */
+#define BH_DISCARD 0x004 /* Page is useless. */
+#define BH_LOCKED 0x008 /* Page is locked (I/O in progress). */
+#define BH_SYNC 0x010 /* memp sync: write the page */
+#define BH_SYNC_LOGFLSH 0x020 /* memp sync: also flush the log */
+#define BH_TRASH 0x040 /* Page is garbage. */
+ u_int16_t flags;
+
+ SH_TAILQ_ENTRY q; /* LRU queue. */
+ SH_TAILQ_ENTRY hq; /* MPOOL hash bucket queue. */
+
+ db_pgno_t pgno; /* Underlying MPOOLFILE page number. */
+ roff_t mf_offset; /* Associated MPOOLFILE offset. */
+
+ /*
+ * !!!
+ * This array must be at least size_t aligned -- the DB access methods
+ * put PAGE and other structures into it, and then access them directly.
+ * (We guarantee size_t alignment to applications in the documentation,
+ * too.)
+ */
+ u_int8_t buf[1]; /* Variable length data. */
+};
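+
+/*
+ * A BH is allocated with its page contiguous to the header, so the
+ * allocation size is roughly sizeof(BH) - sizeof(bhp->buf) + pagesize
+ * and the page contents are addressed as bhp->buf (a sketch of the
+ * allocation convention, not a declaration from this header).
+ */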
+
+#include "mp_ext.h"
diff --git a/bdb/include/mp_ext.h b/bdb/include/mp_ext.h
new file mode 100644
index 00000000000..9f2b8c61f45
--- /dev/null
+++ b/bdb/include/mp_ext.h
@@ -0,0 +1,33 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _mp_ext_h_
+#define _mp_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __memp_alloc __P((DB_MPOOL *,
+ REGINFO *, MPOOLFILE *, size_t, roff_t *, void *));
+int __memp_bhwrite
+ __P((DB_MPOOL *, MPOOLFILE *, BH *, int *, int *));
+int __memp_pgread __P((DB_MPOOLFILE *, BH *, int));
+int __memp_pgwrite
+ __P((DB_MPOOL *, DB_MPOOLFILE *, BH *, int *, int *));
+int __memp_pg __P((DB_MPOOLFILE *, BH *, int));
+void __memp_bhfree __P((DB_MPOOL *, BH *, int));
+void __memp_set_unlink __P((DB_MPOOLFILE *));
+void __memp_clear_unlink __P((DB_MPOOLFILE *));
+int __memp_fopen __P((DB_MPOOL *, MPOOLFILE *, const char *,
+ u_int32_t, int, size_t, int, DB_MPOOL_FINFO *, DB_MPOOLFILE **));
+void __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *));
+int __memp_fremove __P((DB_MPOOLFILE *));
+char * __memp_fn __P((DB_MPOOLFILE *));
+char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *));
+void __memp_dbenv_create __P((DB_ENV *));
+int __memp_open __P((DB_ENV *));
+int __memp_close __P((DB_ENV *));
+void __mpool_region_destroy __P((DB_ENV *, REGINFO *));
+void __memp_dump_region __P((DB_ENV *, char *, FILE *));
+int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _mp_ext_h_ */
diff --git a/bdb/include/mutex.h b/bdb/include/mutex.h
new file mode 100644
index 00000000000..a8a41451012
--- /dev/null
+++ b/bdb/include/mutex.h
@@ -0,0 +1,744 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: mutex.h,v 11.41 2000/12/22 19:28:15 bostic Exp $
+ */
+
+/*
+ * Some of the Berkeley DB ports require single-threading at various
+ * places in the code. In those cases, these #defines will be set.
+ */
+#define DB_BEGIN_SINGLE_THREAD
+#define DB_END_SINGLE_THREAD
+
+/*
+ * When the underlying system mutexes require system resources, we have
+ * to clean up after application failure. This violates the rule that
+ * we never look at a shared region after a failure, but there's no other
+ * choice. In those cases, this #define is set.
+ */
+#ifdef HAVE_QNX
+#define MUTEX_SYSTEM_RESOURCES
+#endif
+
+/*********************************************************************
+ * POSIX.1 pthreads interface.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_PTHREADS
+#include <pthread.h>
+
+#define MUTEX_FIELDS \
+ pthread_mutex_t mutex; /* Mutex. */ \
+ pthread_cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * Solaris lwp threads interface.
+ *
+ * !!!
+ * We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of
+ * which are available), for two reasons. First, the Solaris C library
+ * includes versions of both the UI and POSIX thread mutex interfaces, but
+ * they are broken in that they don't support inter-process locking, and
+ * there's no way to detect it, e.g., calls to configure the mutexes for
+ * inter-process locking succeed without error. So, we use LWP mutexes so
+ * that we don't fail in fairly undetectable ways because the application
+ * wasn't linked with the appropriate threads library. Second, there were
+ * bugs in SunOS 5.7 (Solaris 7) where if an application loaded the C library
+ * before loading the libthread/libpthread threads libraries (e.g., by using
+ * dlopen to load the DB library), the pwrite64 interface would be translated
+ * into a call to pwrite and DB would drop core.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+/*
+ * XXX
+ * Don't change <synch.h> to <sys/lwp.h> -- although lwp.h is listed in the
+ * Solaris manual page as the correct include to use, it causes the Solaris
+ * compiler on SunOS 2.6 to fail.
+ */
+#include <synch.h>
+
+#define MUTEX_FIELDS \
+ lwp_mutex_t mutex; /* Mutex. */ \
+ lwp_cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * Solaris/Unixware threads interface.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_UI_THREADS
+#include <thread.h>
+#include <synch.h>
+
+#define MUTEX_FIELDS \
+ mutex_t mutex; /* Mutex. */ \
+ cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * AIX C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_AIX_CHECK_LOCK
+#include <sys/atomic_op.h>
+typedef int tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) (!_check_lock(x, 0, 1))
+#define MUTEX_UNSET(x) _clear_lock(x, 0)
+#endif
+#endif
+
+/*********************************************************************
+ * General C library functions (msemaphore).
+ *
+ * !!!
+ * Check for HPPA as a special case, because it requires unusual alignment,
+ * and doesn't support semaphores in malloc(3) or shmget(2) memory.
+ *
+ * !!!
+ * Do not remove the MSEM_IF_NOWAIT flag. The problem is that if a single
+ * process makes two msem_lock() calls in a row, the second one returns an
+ * error. We depend on the fact that we can lock against ourselves in the
+ * locking subsystem, where we set up a mutex so that we can block ourselves.
+ * Tested on OSF1 v4.0.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+#define MUTEX_NO_MALLOC_LOCKS
+#define MUTEX_NO_SHMGET_LOCKS
+
+#define MUTEX_ALIGN 16
+#endif
+
+#if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+#include <sys/mman.h>
+typedef msemaphore tsl_t;
+
+#ifndef MUTEX_ALIGN
+#define MUTEX_ALIGN sizeof(int)
+#endif
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
+#define MUTEX_SET(x) (!msem_lock(x, MSEM_IF_NOWAIT))
+#define MUTEX_UNSET(x) msem_unlock(x, 0)
+#endif
+#endif
+
+/*********************************************************************
+ * Plan 9 library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_PLAN9
+typedef Lock tsl_t;
+
+#define MUTEX_ALIGN sizeof(int)
+
+#define MUTEX_INIT(x) (memset(x, 0, sizeof(Lock)), 0)
+#define MUTEX_SET(x) canlock(x)
+#define MUTEX_UNSET(x) unlock(x)
+#endif
+
+/*********************************************************************
+ * Reliant UNIX C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
+#include <ulocks.h>
+typedef spinlock_t tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (initspin(x, 1), 0)
+#define MUTEX_SET(x) (cspinlock(x) == 0)
+#define MUTEX_UNSET(x) spinunlock(x)
+#endif
+#endif
+
+/*********************************************************************
+ * General C library functions (POSIX 1003.1 sema_XXX).
+ *
+ * !!!
+ * Never selected by autoconfig in this release (semaphore calls are known
+ * to not work in Solaris 5.5).
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SEMA_INIT
+#include <synch.h>
+typedef sema_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_DESTROY(x) sema_destroy(x)
+#define MUTEX_INIT(x) (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
+#define MUTEX_SET(x) (sema_wait(x) == 0)
+#define MUTEX_UNSET(x) sema_post(x)
+#endif
+#endif
+
+/*********************************************************************
+ * SGI C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SGI_INIT_LOCK
+#include <abi_mutex.h>
+typedef abilock_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (init_lock(x) != 0)
+#define MUTEX_SET(x) (!acquire_lock(x))
+#define MUTEX_UNSET(x) release_lock(x)
+#endif
+#endif
+
+/*********************************************************************
+ * Solaris C library functions.
+ *
+ * !!!
+ * These are undocumented functions, but they're the only ones that work
+ * correctly as far as we know.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
+#include <sys/machlock.h>
+typedef lock_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) _lock_try(x)
+#define MUTEX_UNSET(x) _lock_clear(x)
+#endif
+#endif
+
+/*********************************************************************
+ * VMS.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_VMS
+#include <sys/mman.h>
+#include <builtins.h>
+typedef unsigned char tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#ifdef __ALPHA
+#define MUTEX_SET(tsl) (!__TESTBITSSI(tsl, 0))
+#else /* __VAX */
+#define MUTEX_SET(tsl) (!(int)_BBSSI(0, tsl))
+#endif
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * VxWorks
+ * Use basic binary semaphores in VxWorks, as we currently do not need
+ * any special features. We do need the ability to single-thread the
+ * entire system, however, because VxWorks doesn't support the open(2)
+ * flag O_EXCL, the mechanism we normally use to single thread access
+ * when we're first looking for a DB environment.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_VXWORKS
+#define MUTEX_SYSTEM_RESOURCES
+
+#include "semLib.h"
+typedef SEM_ID tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_SET(tsl) (semTake((*tsl), WAIT_FOREVER) == OK)
+#define MUTEX_UNSET(tsl) (semGive((*tsl)) == OK)
+#define MUTEX_INIT(tsl) \
+ ((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
+#define MUTEX_DESTROY(tsl) semDelete(*tsl)
+#endif
+
+/*
+ * Use the taskLock() mutex to eliminate a race where two tasks are
+ * trying to initialize the global lock at the same time.
+ */
+#undef DB_BEGIN_SINGLE_THREAD
+#define DB_BEGIN_SINGLE_THREAD \
+do { \
+ if (DB_GLOBAL(db_global_init)) \
+ (void)semTake(DB_GLOBAL(db_global_lock), WAIT_FOREVER); \
+ else { \
+ taskLock(); \
+ if (DB_GLOBAL(db_global_init)) { \
+ taskUnlock(); \
+ (void)semTake(DB_GLOBAL(db_global_lock), \
+ WAIT_FOREVER); \
+ continue; \
+ } \
+ DB_GLOBAL(db_global_lock) = \
+ semBCreate(SEM_Q_FIFO, SEM_EMPTY); \
+ if (DB_GLOBAL(db_global_lock) != NULL) \
+ DB_GLOBAL(db_global_init) = 1; \
+ taskUnlock(); \
+ } \
+} while (DB_GLOBAL(db_global_init) == 0)
+#undef DB_END_SINGLE_THREAD
+#define DB_END_SINGLE_THREAD (void)semGive(DB_GLOBAL(db_global_lock))
+#endif
+
+/*********************************************************************
+ * Win16
+ *
+ * Win16 spinlocks are simple because we cannot possibly be preempted.
+ *
+ * !!!
+ * We should simplify this by always returning a no-need-to-lock lock
+ * when we initialize the mutex.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_WIN16
+typedef unsigned int tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(tsl) (*(tsl) = 1)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#endif
+#endif
+
+/*********************************************************************
+ * Win32
+ *********************************************************************/
+#ifdef HAVE_MUTEX_WIN32
+typedef unsigned int tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1))
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#endif
+#endif
+
+/*********************************************************************
+ * 68K/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/68K, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("tas %1; \n \
+ seq %0" \
+ : "=dm" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * ALPHA/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
+typedef u_int32_t tsl_t;
+#define MUTEX_ALIGN 4
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/alpha. Should return 0 if the lock could not be acquired, 1 if
+ * the lock was acquired properly.
+ */
+#ifdef __GNUC__
+static inline int
+MUTEX_SET(tsl_t *tsl) {
+ register tsl_t *__l = tsl;
+ register tsl_t __r;
+ asm volatile(
+ "1: ldl_l %0,%2\n"
+ " blbs %0,2f\n"
+ " or $31,1,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,3f\n"
+ " mb\n"
+ " br 3f\n"
+ "2: xor %0,%0\n"
+ "3:"
+ : "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
+ return __r;
+}
+
+/*
+ * Unset mutex. Judging by the Alpha Architecture Handbook, the mb
+ * instruction might be necessary before unlocking.
+ */
+static inline int
+MUTEX_UNSET(tsl_t *tsl) {
+ asm volatile(" mb\n");
+ return *tsl = 0;
+}
+#endif
+
+#ifdef __DECC
+#include <alpha/builtins.h>
+#define MUTEX_SET(tsl) (__LOCK_LONG_RETRY((tsl), 1) != 0)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#endif
+
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * HPPA/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
+typedef u_int32_t tsl_t;
+#define MUTEX_ALIGN 16
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
+ * The 32-bit word used by that instruction must be 16-byte aligned. We could
+ * use the "aligned" attribute in GCC but that doesn't work for stack variables.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l)); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = -1)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * IA64/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/ia64, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ long __r; \
+ asm volatile("xchg1 %0=%1,%3" : "=r"(__r), "=m"(*__l) : "1"(*__l), "r"(1));\
+ __r ^ 1; \
+})
+
+/*
+ * Store through a "volatile" pointer so we get a store with "release"
+ * semantics.
+ */
+#define MUTEX_UNSET(tsl) (*(volatile unsigned char *)(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * PowerPC/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_PPC_GCC_ASSEMBLY
+typedef u_int32_t tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * The PowerPC does a sort of pseudo-atomic locking. You set up a
+ * 'reservation' on a chunk of memory containing a mutex by loading the
+ * mutex value with LWARX. If the mutex has an 'unlocked' (arbitrary)
+ * value, you then try storing into it with STWCX. If no other process or
+ * thread broke your 'reservation' by modifying the memory containing the
+ * mutex, then the STWCX succeeds; otherwise it fails and you try to get
+ * a reservation again.
+ *
+ * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
+ * entire cache line, normally 32 bytes, aligned naturally. If the mutex
+ * lives near data that gets changed a lot, there's a chance that you'll
+ * see more broken reservations than you might otherwise. The only
+ * situation in which this might be a problem is if one processor is
+ * beating on a variable in the same cache block as the mutex while another
+ * processor tries to acquire the mutex. That's bad news regardless
+ * because of the way it bashes caches, but if you can't guarantee that a
+ * mutex will reside in a relatively quiescent cache line, you might
+ * consider padding the mutex to force it to live in a cache line by
+ * itself. No, you aren't guaranteed that cache lines are 32 bytes. Some
+ * embedded processors use 16-byte cache lines, while some 64-bit
+ * processors use 128-byte cache lines. But assuming a 32-byte cache line
+ * won't get you into trouble for now.
+ *
+ * If mutex locking is a bottleneck, then you can speed it up by adding a
+ * regular LWZ load before the LWARX load, so that you can test for the
+ * common case of a locked mutex without wasting cycles making a reservation.
+ *
+ * 'set' mutexes have the value 1, like on Intel; the returned value from
+ * MUTEX_SET() is 1 if the mutex previously had its low bit set, 0 otherwise.
+ */
+#define MUTEX_SET(tsl) ({ \
+ int __one = 1; \
+ int __r; \
+ tsl_t *__l = (tsl); \
+ asm volatile (" \
+0: \
+ lwarx %0,0,%1; \
+ cmpwi %0,0; \
+ bne 1f; \
+ stwcx. %2,0,%1; \
+ bne- 0b; \
+1:" \
+ : "=&r" (__r) \
+ : "r" (__l), "r" (__one)); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * SCO/cc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * UnixWare has threads in libthread, but OpenServer doesn't (yet).
+ *
+ * For cc/x86, 0 is clear, 1 is set.
+ */
+
+#if defined(__USLC__)
+asm int
+_tsl_set(void *tsl)
+{
+%mem tsl
+ movl tsl, %ecx
+ movl $1, %eax
+ lock
+ xchgb (%ecx),%al
+ xorl $1,%eax
+}
+#endif
+
+#define MUTEX_SET(tsl) _tsl_set(tsl)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * Sparc/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ *
+ * The ldstub instruction takes the location specified by its first argument
+ * (a register containing a memory address) and loads its contents into its
+ * second argument (a register) and atomically sets the contents the location
+ * specified by its first argument to a byte of 1s. (The value in the second
+ * argument is never read, but only overwritten.)
+ *
+ * The stbar is needed for v8, and is implemented as membar #sync on v9,
+ * so is functional there as well. For v7, stbar may generate an illegal
+ * instruction and we have no way to tell what we're running on. Some
+ * operating systems notice and skip this instruction in the fault handler.
+ *
+ * For gcc/sparc, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ register tsl_t __r; \
+ __asm__ volatile \
+ ("ldstub [%1],%0; stbar" \
+ : "=r"( __r) : "r" (__l)); \
+ !__r; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * UTS/cc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
+typedef int tsl_t;
+
+#define MUTEX_ALIGN sizeof(int)
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) (!uts_lock(x, 1))
+#define MUTEX_UNSET(x) (*(x) = 0)
+#endif
+#endif
+
+/*********************************************************************
+ * x86/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/x86, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("movl $1,%%eax; lock; xchgb %1,%%al; xorl $1,%%eax"\
+ : "=&a" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*
+ * Mutex alignment defaults to one byte.
+ *
+ * !!!
+ * Various systems require different alignments for mutexes (the worst we've
+ * seen so far is 16 bytes on some HP architectures). Malloc(3) is assumed
+ * to return reasonable alignment; all other mutex users must ensure proper
+ * alignment locally.
+ */
+#ifndef MUTEX_ALIGN
+#define MUTEX_ALIGN 1
+#endif
+
+/*
+ * Mutex destruction defaults to a no-op.
+ */
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#ifndef MUTEX_DESTROY
+#define MUTEX_DESTROY(x)
+#endif
+#endif
+
+#define MUTEX_IGNORE 0x001 /* Ignore, no lock required. */
+#define MUTEX_INITED 0x002 /* Mutex is successfully initialized */
+#define MUTEX_SELF_BLOCK 0x004 /* Must block self. */
+#define MUTEX_THREAD 0x008 /* Thread-only mutex. */
+
+/* Mutex. */
+struct __mutex_t {
+#ifdef HAVE_MUTEX_THREADS
+#ifdef MUTEX_FIELDS
+ MUTEX_FIELDS
+#else
+ tsl_t tas; /* Test and set. */
+#endif
+ u_int32_t spins; /* Spins before block. */
+ u_int32_t locked; /* !0 if locked. */
+#else
+ u_int32_t off; /* Byte offset to lock. */
+ u_int32_t pid; /* Lock holder: 0 or process pid. */
+#endif
+ u_int32_t mutex_set_wait; /* Granted after wait. */
+ u_int32_t mutex_set_nowait; /* Granted without waiting. */
+#ifdef MUTEX_SYSTEM_RESOURCES
+ roff_t reg_off; /* Shared lock info offset. */
+#endif
+
+ u_int8_t flags; /* MUTEX_XXX */
+};
+
+/* Redirect calls to the correct functions. */
+#ifdef HAVE_MUTEX_THREADS
+#if defined(HAVE_MUTEX_PTHREADS) || defined(HAVE_MUTEX_SOLARIS_LWP) || defined(HAVE_MUTEX_UI_THREADS)
+#define __db_mutex_init(a, b, c, d) __db_pthread_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b, c) __db_pthread_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_pthread_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_pthread_mutex_destroy(a)
+#else
+#define __db_mutex_init(a, b, c, d) __db_tas_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b, c) __db_tas_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_tas_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_tas_mutex_destroy(a)
+#endif
+#else
+#define __db_mutex_init(a, b, c, d) __db_fcntl_mutex_init(a, b, c)
+#define __db_mutex_lock(a, b, c) __db_fcntl_mutex_lock(a, b, c)
+#define __db_mutex_unlock(a, b) __db_fcntl_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_fcntl_mutex_destroy(a)
+#endif
+
+/* Redirect system resource calls to correct functions */
+#ifdef MUTEX_SYSTEM_RESOURCES
+#define __db_maintinit(a, b, c) __db_shreg_maintinit(a, b, c)
+#define __db_shlocks_clear(a, b, c) __db_shreg_locks_clear(a, b, c)
+#define __db_shlocks_destroy(a, b) __db_shreg_locks_destroy(a, b)
+#define __db_shmutex_init(a, b, c, d, e, f) \
+ __db_shreg_mutex_init(a, b, c, d, e, f)
+#else
+#define __db_maintinit(a, b, c)
+#define __db_shlocks_clear(a, b, c)
+#define __db_shlocks_destroy(a, b)
+#define __db_shmutex_init(a, b, c, d, e, f) __db_mutex_init(a, b, c, d)
+#endif
+
+/*
+ * Lock/unlock a mutex. If the mutex was marked as uninteresting, the thread
+ * of control can proceed without it.
+ *
+ * If the lock is for threads only, it may not have been allocated, and
+ * file handles aren't necessary, as threaded applications aren't supported by
+ * fcntl(2) locking.
+ */
+#ifdef DIAGNOSTIC
+ /*
+ * XXX
+ * We want to switch threads as often as possible. Yield every time
+ * we get a mutex to ensure contention.
+ */
+#define MUTEX_LOCK(dbenv, mp, fh) \
+ if (!F_ISSET((MUTEX *)(mp), MUTEX_IGNORE)) \
+ (void)__db_mutex_lock(dbenv, mp, fh); \
+ if (DB_GLOBAL(db_pageyield)) \
+ __os_yield(NULL, 1);
+#else
+#define MUTEX_LOCK(dbenv, mp, fh) \
+ if (!F_ISSET((MUTEX *)(mp), MUTEX_IGNORE)) \
+ (void)__db_mutex_lock(dbenv, mp, fh);
+#endif
+#define MUTEX_UNLOCK(dbenv, mp) \
+ if (!F_ISSET((MUTEX *)(mp), MUTEX_IGNORE)) \
+ (void)__db_mutex_unlock(dbenv, mp);
+#define MUTEX_THREAD_LOCK(dbenv, mp) \
+ if (mp != NULL) \
+ MUTEX_LOCK(dbenv, mp, NULL)
+#define MUTEX_THREAD_UNLOCK(dbenv, mp) \
+ if (mp != NULL) \
+ MUTEX_UNLOCK(dbenv, mp)
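+
+/*
+ * Typical pattern (a sketch; the dbmfp naming is illustrative): bracket an
+ * update of a thread-shared structure field with the structure's mutex:
+ *
+ *	MUTEX_THREAD_LOCK(dbenv, dbmfp->mutexp);
+ *	++dbmfp->ref;
+ *	MUTEX_THREAD_UNLOCK(dbenv, dbmfp->mutexp);
+ */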
+
+/*
+ * We use a single file descriptor for fcntl(2) locking, and (generally) the
+ * object's offset in a shared region as the byte that we're locking. So,
+ * there's a (remote) possibility that two objects might have the same offsets
+ * such that the locks could conflict, resulting in deadlock. To avoid this
+ * possibility, we offset the region offset by a small integer value, using a
+ * different offset for each subsystem's locks. Since all region objects are
+ * suitably aligned, the offset guarantees that we don't collide with another
+ * region's objects.
+ */
+#define DB_FCNTL_OFF_GEN 0 /* Everything else. */
+#define DB_FCNTL_OFF_LOCK 1 /* Lock subsystem offset. */
+#define DB_FCNTL_OFF_MPOOL 2 /* Mpool subsystem offset. */
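To make the offset scheme concrete, a hedged sketch (illustrative names, not the library's actual code): the byte that fcntl(2) locks for an object in, say, the lock region would be its region offset plus the subsystem constant, so objects that happen to sit at the same offset in different subsystems end up locking different bytes.

/* Illustrative only: choose the fcntl(2) lock byte for a lock-region mutex. */
static u_int32_t
__hyp_fcntl_lock_byte(REGINFO *infop, MUTEX *mutexp)
{
	return (R_OFFSET(infop, mutexp) + DB_FCNTL_OFF_LOCK);	/* R_OFFSET: region.h. */
}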
diff --git a/bdb/include/mutex_ext.h b/bdb/include/mutex_ext.h
new file mode 100644
index 00000000000..040a6615eef
--- /dev/null
+++ b/bdb/include/mutex_ext.h
@@ -0,0 +1,31 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _mutex_ext_h_
+#define _mutex_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __db_fcntl_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+int __db_fcntl_mutex_lock __P((DB_ENV *, MUTEX *, DB_FH *));
+int __db_fcntl_mutex_unlock __P((DB_ENV *, MUTEX *));
+int __db_fcntl_mutex_destroy __P((MUTEX *));
+int __db_pthread_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+int __db_pthread_mutex_lock __P((DB_ENV *, MUTEX *));
+int __db_pthread_mutex_unlock __P((DB_ENV *, MUTEX *));
+int __db_pthread_mutex_destroy __P((MUTEX *));
+int __db_tas_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+int __db_tas_mutex_lock __P((DB_ENV *, MUTEX *));
+int __db_tas_mutex_unlock __P((DB_ENV *, MUTEX *));
+int __db_tas_mutex_destroy __P((MUTEX *));
+int __db_mutex_alloc __P((DB_ENV *, REGINFO *, MUTEX **));
+void __db_mutex_free __P((DB_ENV *, REGINFO *, MUTEX *));
+int __db_shreg_locks_record __P((DB_ENV *, MUTEX *, REGINFO *,
+ REGMAINT *));
+void __db_shreg_locks_clear __P((MUTEX *, REGINFO *, REGMAINT *));
+void __db_shreg_locks_destroy __P((REGINFO *, REGMAINT *));
+int __db_shreg_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t,
+ u_int32_t, REGINFO *, REGMAINT *));
+void __db_shreg_maintinit __P((REGINFO *, void *addr, size_t));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _mutex_ext_h_ */
diff --git a/bdb/include/os.h b/bdb/include/os.h
new file mode 100644
index 00000000000..b5d469e88fa
--- /dev/null
+++ b/bdb/include/os.h
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: os.h,v 11.5 2000/10/27 20:32:01 dda Exp $
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+/*
+ * Filehandle.
+ */
+struct __fh_t {
+#if defined(DB_WIN32)
+ HANDLE handle; /* Windows/32 file handle. */
+#endif
+ int fd; /* POSIX file descriptor. */
+
+ u_int32_t log_size; /* XXX: Log file size. */
+
+#define DB_FH_NOSYNC 0x01 /* Handle doesn't need to be sync'd. */
+#define DB_FH_VALID 0x02 /* Handle is valid. */
+ u_int8_t flags;
+};
+
+/*
+ * We group certain seek/write calls into a single function so that we
+ * can use pread(2)/pwrite(2) where they're available.
+ */
+#define DB_IO_READ 1
+#define DB_IO_WRITE 2
+typedef struct __io_t {
+ DB_FH *fhp; /* I/O file handle. */
+ MUTEX *mutexp; /* Mutex to lock. */
+ size_t pagesize; /* Page size. */
+ db_pgno_t pgno; /* Page number. */
+ u_int8_t *buf; /* Buffer. */
+ size_t bytes; /* Bytes read/written. */
+} DB_IO;
+
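A hedged sketch of how the bundle is filled in (the __os_io signature is taken from os_ext.h later in this change; the function name and error handling here are illustrative):

/* Illustrative only: read one page through the DB_IO bundle. */
static int
__hyp_read_page(DB_ENV *dbenv, DB_FH *fhp, size_t pagesize,
    db_pgno_t pgno, u_int8_t *buf)
{
	DB_IO db_io;
	size_t nr;
	int ret;

	db_io.fhp = fhp;
	db_io.mutexp = NULL;			/* No handle serialization needed. */
	db_io.pagesize = db_io.bytes = pagesize;
	db_io.pgno = pgno;
	db_io.buf = buf;

	if ((ret = __os_io(dbenv, &db_io, DB_IO_READ, &nr)) != 0)
		return (ret);
	return (nr == pagesize ? 0 : EIO);	/* EIO from <errno.h>: short read. */
}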
+#if defined(__cplusplus)
+}
+#endif
diff --git a/bdb/include/os_ext.h b/bdb/include/os_ext.h
new file mode 100644
index 00000000000..ae9e3d304f2
--- /dev/null
+++ b/bdb/include/os_ext.h
@@ -0,0 +1,62 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _os_ext_h_
+#define _os_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __os_abspath __P((const char *));
+int __os_strdup __P((DB_ENV *, const char *, void *));
+int __os_calloc __P((DB_ENV *, size_t, size_t, void *));
+int __os_malloc __P((DB_ENV *, size_t, void *(*)(size_t), void *));
+int __os_realloc __P((DB_ENV *,
+ size_t, void *(*)(void *, size_t), void *));
+void __os_free __P((void *, size_t));
+void __os_freestr __P((void *));
+void *__ua_memcpy __P((void *, const void *, size_t));
+int __os_dirlist __P((DB_ENV *, const char *, char ***, int *));
+void __os_dirfree __P((char **, int));
+int __os_get_errno __P((void));
+void __os_set_errno __P((int));
+int __os_fileid __P((DB_ENV *, const char *, int, u_int8_t *));
+int __os_finit __P((DB_ENV *, DB_FH *, size_t, int));
+int __os_fpinit __P((DB_ENV *, DB_FH *, db_pgno_t, int, int));
+int __os_fsync __P((DB_ENV *, DB_FH *));
+int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+int __os_closehandle __P((DB_FH *));
+int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+int __os_mapfile __P((DB_ENV *,
+ char *, DB_FH *, size_t, int, void **));
+int __os_unmapfile __P((DB_ENV *, void *, size_t));
+u_int32_t __db_oflags __P((int));
+int __db_omode __P((const char *));
+int __os_open __P((DB_ENV *, const char *, u_int32_t, int, DB_FH *));
+int __os_shmname __P((DB_ENV *, const char *, char **));
+int __os_r_attach __P((DB_ENV *, REGINFO *, REGION *));
+int __os_r_detach __P((DB_ENV *, REGINFO *, int));
+int __os_rename __P((DB_ENV *, const char *, const char *));
+int __os_isroot __P((void));
+char *__db_rpath __P((const char *));
+int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_seek __P((DB_ENV *,
+ DB_FH *, size_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK));
+int __os_sleep __P((DB_ENV *, u_long, u_long));
+int __os_spin __P((void));
+void __os_yield __P((DB_ENV*, u_long));
+int __os_exists __P((const char *, int *));
+int __os_ioinfo __P((DB_ENV *, const char *,
+ DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+int __os_tmpdir __P((DB_ENV *, u_int32_t));
+int __os_unlink __P((DB_ENV *, const char *));
+int __os_region_unlink __P((DB_ENV *, const char *));
+#if defined(DB_WIN32)
+int __os_win32_errno __P((void));
+#endif
+int __os_fpinit __P((DB_ENV *, DB_FH *, db_pgno_t, int, int));
+int __os_is_winnt __P((void));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _os_ext_h_ */
diff --git a/bdb/include/os_jump.h b/bdb/include/os_jump.h
new file mode 100644
index 00000000000..681ba82d5eb
--- /dev/null
+++ b/bdb/include/os_jump.h
@@ -0,0 +1,34 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: os_jump.h,v 11.3 2000/02/14 02:59:55 bostic Exp $
+ */
+
+/* Calls which can be replaced by the application. */
+struct __db_jumptab {
+ int (*j_close) __P((int));
+ void (*j_dirfree) __P((char **, int));
+ int (*j_dirlist) __P((const char *, char ***, int *));
+ int (*j_exists) __P((const char *, int *));
+ void (*j_free) __P((void *));
+ int (*j_fsync) __P((int));
+ int (*j_ioinfo) __P((const char *,
+ int, u_int32_t *, u_int32_t *, u_int32_t *));
+ void *(*j_malloc) __P((size_t));
+ int (*j_map) __P((char *, size_t, int, int, void **));
+ int (*j_open) __P((const char *, int, ...));
+ ssize_t (*j_read) __P((int, void *, size_t));
+ void *(*j_realloc) __P((void *, size_t));
+ int (*j_rename) __P((const char *, const char *));
+ int (*j_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
+ int (*j_sleep) __P((u_long, u_long));
+ int (*j_unlink) __P((const char *));
+ int (*j_unmap) __P((void *, size_t));
+ ssize_t (*j_write) __P((int, const void *, size_t));
+ int (*j_yield) __P((void));
+};
+
+extern struct __db_jumptab __db_jump;
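A hypothetical sketch of what the table is for (whether applications assign __db_jump directly or go through a public set-function interface is not specified by this header, and the my_* names are invented): every raw OS call is reached through one of these pointers, so replacing the pointer replaces the call everywhere.

extern void *my_malloc(size_t);		/* Application-provided (hypothetical). */
extern void *my_realloc(void *, size_t);
extern void my_free(void *);

static void
__hyp_set_allocators(void)
{
	__db_jump.j_malloc = my_malloc;
	__db_jump.j_realloc = my_realloc;
	__db_jump.j_free = my_free;
}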
diff --git a/bdb/include/qam.h b/bdb/include/qam.h
new file mode 100644
index 00000000000..88cd68776a8
--- /dev/null
+++ b/bdb/include/qam.h
@@ -0,0 +1,150 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: qam.h,v 11.26 2001/01/11 18:19:52 bostic Exp $
+ */
+
+/*
+ * QAM data elements: a status field and the data.
+ */
+typedef struct _qamdata {
+ u_int8_t flags; /* 00: delete bit. */
+#define QAM_VALID 0x01
+#define QAM_SET 0x02
+ u_int8_t data[1]; /* Record. */
+} QAMDATA;
+
+struct __queue; typedef struct __queue QUEUE;
+struct __qcursor; typedef struct __qcursor QUEUE_CURSOR;
+
+struct __qcursor {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* Queue private part */
+
+ /* Per-thread information: queue private. */
+ db_recno_t recno; /* Current record number. */
+
+ u_int32_t flags;
+};
+
+/*
+ * The in-memory, per-tree queue data structure.
+ */
+
+typedef struct __mpfarray {
+ u_int32_t n_extent; /* Number of extents in table. */
+ u_int32_t low_extent; /* First extent open. */
+ u_int32_t hi_extent; /* Last extent open. */
+ struct __qmpf {
+ int pinref;
+ DB_MPOOLFILE *mpf;
+ } *mpfarray; /* Array of open extents. */
+} MPFARRAY;
+
+struct __queue {
+ db_pgno_t q_meta; /* Database meta-data page. */
+ db_pgno_t q_root; /* Database root page. */
+
+ int re_pad; /* Fixed-length padding byte. */
+ u_int32_t re_len; /* Length for fixed-length records. */
+ u_int32_t rec_page; /* records per page */
+ u_int32_t page_ext; /* Pages per extent */
+ MPFARRAY array1, array2; /* File arrays. */
+ DB_MPOOL_FINFO finfo; /* Initialized info struct. */
+ DB_PGINFO pginfo; /* Initialized pginfo struct. */
+ DBT pgcookie; /* Initialized pgcookie. */
+ char *path; /* Space allocated to file pathname. */
+ char *name; /* The name of the file. */
+ char *dir; /* The dir of the file. */
+ int mode; /* Mode to open extents. */
+};
+
+/* Format for queue extent names. */
+#define QUEUE_EXTENT "%s/__dbq.%s.%d"
+
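For illustration (a sketch, not library code): with dir "/db/env", name "orders.db" and extent 12, the format above yields "/db/env/__dbq.orders.db.12".

/* Illustrative only: format an extent file name (snprintf from <stdio.h>/clib). */
static void
__hyp_extent_name(const QUEUE *qp, int extid, char *buf, size_t len)
{
	(void)snprintf(buf, len, QUEUE_EXTENT, qp->dir, qp->name, extid);
}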
+typedef struct __qam_filelist {
+ DB_MPOOLFILE *mpf;
+ u_int32_t id;
+} QUEUE_FILELIST;
+
+/*
+ * Calculate the page number of a recno
+ *
+ * Number of records per page =
+ * Divide the available space on the page by the record len + header.
+ *
+ * Page number for record =
+ * divide the physical record number by the records per page
+ * add the root page number
+ * For now the root page will always be 1, but we might want to change
+ * in the future (e.g. multiple fixed len queues per file).
+ *
+ * Index of record on page =
+ * physical record number, less the logical pno times records/page
+ */
+#define CALC_QAM_RECNO_PER_PAGE(dbp) \
+ (((dbp)->pgsize - sizeof(QPAGE)) / \
+ ALIGN(((QUEUE *)(dbp)->q_internal)->re_len + \
+ sizeof(QAMDATA) - SSZA(QAMDATA, data), sizeof(u_int32_t)))
+
+#define QAM_RECNO_PER_PAGE(dbp) (((QUEUE*)(dbp)->q_internal)->rec_page)
+
+#define QAM_RECNO_PAGE(dbp, recno) \
+ (((QUEUE *)(dbp)->q_internal)->q_root \
+ + (((recno) - 1) / QAM_RECNO_PER_PAGE(dbp)))
+
+#define QAM_RECNO_INDEX(dbp, pgno, recno) \
+ (((recno) - 1) - (QAM_RECNO_PER_PAGE(dbp) \
+ * (pgno - ((QUEUE *)(dbp)->q_internal)->q_root)))
+
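A worked example may help; the QPAGE header size and record length below are assumed values, chosen only for illustration.

/*
 * Assume a 4096-byte page, a 32-byte QPAGE header, re_len == 100 and
 * q_root == 1.  The per-record slot is ALIGN(100 + 1, 4) == 104 bytes
 * (the extra byte is the QAMDATA flags header), so:
 *
 *	records/page = (4096 - 32) / 104 = 39
 *	page of recno 100 = 1 + (100 - 1) / 39 = 3
 *	index on that page = (100 - 1) - 39 * (3 - 1) = 21
 */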
+#define QAM_GET_RECORD(dbp, page, index) \
+ ((QAMDATA *)((u_int8_t *)(page) + \
+ sizeof(QPAGE) + (ALIGN(sizeof(QAMDATA) - SSZA(QAMDATA, data) + \
+ ((QUEUE *)(dbp)->q_internal)->re_len, sizeof(u_int32_t)) * index)))
+
+#define QAM_AFTER_CURRENT(meta, recno) \
+ ((recno) > (meta)->cur_recno && \
+ ((meta)->first_recno <= (meta)->cur_recno || (recno) < (meta)->first_recno))
+
+#define QAM_BEFORE_FIRST(meta, recno) \
+ ((recno) < (meta)->first_recno && \
+ ((meta->first_recno <= (meta)->cur_recno || (recno) > (meta)->cur_recno)))
+
+#define QAM_NOT_VALID(meta, recno) \
+ (recno == RECNO_OOB || \
+ QAM_BEFORE_FIRST(meta, recno) || QAM_AFTER_CURRENT(meta, recno))
+
+/*
+ * Log opcodes for the mvptr routine.
+ */
+#define QAM_SETFIRST 0x01
+#define QAM_SETCUR 0x02
+
+/*
+ * Parameter to __qam_position.
+ */
+typedef enum {
+ QAM_READ,
+ QAM_WRITE,
+ QAM_CONSUME
+} qam_position_mode;
+
+typedef enum {
+ QAM_PROBE_GET,
+ QAM_PROBE_PUT,
+ QAM_PROBE_MPF
+} qam_probe_mode;
+
+#define __qam_fget(dbp, pgnoaddr, flags, addrp) \
+ __qam_fprobe(dbp, *pgnoaddr, addrp, QAM_PROBE_GET, flags)
+
+#define __qam_fput(dbp, pageno, addrp, flags) \
+ __qam_fprobe(dbp, pageno, addrp, QAM_PROBE_PUT, flags)
+
+#include "qam_auto.h"
+#include "qam_ext.h"
diff --git a/bdb/include/qam_auto.h b/bdb/include/qam_auto.h
new file mode 100644
index 00000000000..8362b2118f4
--- /dev/null
+++ b/bdb/include/qam_auto.h
@@ -0,0 +1,129 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef qam_AUTO_H
+#define qam_AUTO_H
+
+#define DB_qam_inc 76
+typedef struct _qam_inc_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+} __qam_inc_args;
+
+int __qam_inc_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, DB_LSN *));
+int __qam_inc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_inc_read __P((DB_ENV *, void *, __qam_inc_args **));
+
+#define DB_qam_incfirst 77
+typedef struct _qam_incfirst_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_recno_t recno;
+} __qam_incfirst_args;
+
+int __qam_incfirst_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, db_recno_t));
+int __qam_incfirst_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_incfirst_read __P((DB_ENV *, void *, __qam_incfirst_args **));
+
+#define DB_qam_mvptr 78
+typedef struct _qam_mvptr_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_recno_t old_first;
+ db_recno_t new_first;
+ db_recno_t old_cur;
+ db_recno_t new_cur;
+ DB_LSN metalsn;
+} __qam_mvptr_args;
+
+int __qam_mvptr_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t, db_recno_t, db_recno_t, db_recno_t, db_recno_t, DB_LSN *));
+int __qam_mvptr_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_mvptr_read __P((DB_ENV *, void *, __qam_mvptr_args **));
+
+#define DB_qam_del 79
+typedef struct _qam_del_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+} __qam_del_args;
+
+int __qam_del_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t));
+int __qam_del_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_del_read __P((DB_ENV *, void *, __qam_del_args **));
+
+#define DB_qam_add 80
+typedef struct _qam_add_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT data;
+ u_int32_t vflag;
+ DBT olddata;
+} __qam_add_args;
+
+int __qam_add_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t, const DBT *, u_int32_t, const DBT *));
+int __qam_add_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_add_read __P((DB_ENV *, void *, __qam_add_args **));
+
+#define DB_qam_delete 81
+typedef struct _qam_delete_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ DB_LSN lsn;
+} __qam_delete_args;
+
+int __qam_delete_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, DB_LSN *));
+int __qam_delete_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delete_read __P((DB_ENV *, void *, __qam_delete_args **));
+
+#define DB_qam_rename 82
+typedef struct _qam_rename_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ DBT newname;
+} __qam_rename_args;
+
+int __qam_rename_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *));
+int __qam_rename_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_rename_read __P((DB_ENV *, void *, __qam_rename_args **));
+
+#define DB_qam_delext 83
+typedef struct _qam_delext_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT data;
+} __qam_delext_args;
+
+int __qam_delext_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t, const DBT *));
+int __qam_delext_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delext_read __P((DB_ENV *, void *, __qam_delext_args **));
+int __qam_init_print __P((DB_ENV *));
+int __qam_init_recover __P((DB_ENV *));
+#endif
diff --git a/bdb/include/qam_ext.h b/bdb/include/qam_ext.h
new file mode 100644
index 00000000000..f6e95110c0e
--- /dev/null
+++ b/bdb/include/qam_ext.h
@@ -0,0 +1,56 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _qam_ext_h_
+#define _qam_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __qam_position
+ __P((DBC *, db_recno_t *, qam_position_mode, int *));
+int __qam_pitem
+ __P((DBC *, QPAGE *, u_int32_t, db_recno_t, DBT *));
+int __qam_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __qam_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+int __qam_c_dup __P((DBC *, DBC *));
+int __qam_c_init __P((DBC *));
+int __qam_mswap __P((PAGE *));
+int __qam_pgin_out __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __qam_fprobe __P((DB *, db_pgno_t, void *, qam_probe_mode, int));
+int __qam_fclose __P((DB *, db_pgno_t));
+int __qam_fremove __P((DB *, db_pgno_t));
+int __qam_sync __P((DB *, u_int32_t));
+int __qam_gen_filelist __P(( DB *, QUEUE_FILELIST **));
+int __qam_db_create __P((DB *));
+int __qam_db_close __P((DB *));
+int __db_prqueue __P((DB *, u_int32_t));
+int __qam_remove __P((DB *, const char *,
+ const char *, DB_LSN *, int (**)(DB *, void*), void **));
+int __qam_rename __P((DB *,
+ const char *, const char *, const char *));
+int __qam_open __P((DB *, const char *, db_pgno_t, int, u_int32_t));
+int __qam_metachk __P((DB *, const char *, QMETA *));
+int __qam_inc_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_incfirst_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_mvptr_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_del_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delext_recover __P((DB_ENV *,
+ DBT *, DB_LSN *, db_recops, void *));
+int __qam_add_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delete_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_rename_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+int __qam_31_qammeta __P((DB *, char *, u_int8_t *));
+int __qam_32_qammeta __P((DB *, char *, u_int8_t *));
+int __qam_vrfy_meta __P((DB *, VRFY_DBINFO *, QMETA *,
+ db_pgno_t, u_int32_t));
+int __qam_vrfy_data __P((DB *, VRFY_DBINFO *, QPAGE *,
+ db_pgno_t, u_int32_t));
+int __qam_vrfy_structure __P((DB *, VRFY_DBINFO *, u_int32_t));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _qam_ext_h_ */
diff --git a/bdb/include/queue.h b/bdb/include/queue.h
new file mode 100644
index 00000000000..8d4a771add6
--- /dev/null
+++ b/bdb/include/queue.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+/*
+ * XXX
+ * We #undef the queue macros because there are incompatible versions of this
+ * file and these macros on various systems. What makes the problem worse is
+ * they are included and/or defined by system include files which we may have
+ * already loaded into Berkeley DB before getting here. For example, FreeBSD's
+ * <rpc/rpc.h> includes its system <sys/queue.h>, and VxWorks UnixLib.h defines
+ * several of the LIST_XXX macros. Make sure we use ours.
+ */
+#undef LIST_HEAD
+#undef LIST_ENTRY
+#undef LIST_FIRST
+#undef LIST_NEXT
+#undef LIST_INIT
+#undef LIST_INSERT_AFTER
+#undef LIST_INSERT_BEFORE
+#undef LIST_INSERT_HEAD
+#undef LIST_REMOVE
+#undef TAILQ_HEAD
+#undef TAILQ_ENTRY
+#undef TAILQ_FIRST
+#undef TAILQ_NEXT
+#undef TAILQ_INIT
+#undef TAILQ_INSERT_HEAD
+#undef TAILQ_INSERT_TAIL
+#undef TAILQ_INSERT_AFTER
+#undef TAILQ_INSERT_BEFORE
+#undef TAILQ_REMOVE
+#undef CIRCLEQ_HEAD
+#undef CIRCLEQ_ENTRY
+#undef CIRCLEQ_FIRST
+#undef CIRCLEQ_LAST
+#undef CIRCLEQ_NEXT
+#undef CIRCLEQ_PREV
+#undef CIRCLEQ_INIT
+#undef CIRCLEQ_INSERT_AFTER
+#undef CIRCLEQ_INSERT_BEFORE
+#undef CIRCLEQ_INSERT_HEAD
+#undef CIRCLEQ_INSERT_TAIL
+#undef CIRCLEQ_REMOVE
+
+/*
+ * This file defines three types of data structures: lists, tail queues,
+ * and circular queues.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may only be traversed in the forward direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) { \
+ (head)->lh_first = NULL; \
+}
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (0)
+
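A minimal usage sketch (editorial, not part of the header): an element type embeds LIST_ENTRY, a head is declared with LIST_HEAD, and traversal is forward-only.

#include <stddef.h>			/* NULL */

struct node {
	int value;
	LIST_ENTRY(node) links;		/* Embedded next/prev linkage. */
};
LIST_HEAD(node_head, node);

static void
example_list(struct node *a, struct node *b)
{
	struct node_head head;
	struct node *np;

	LIST_INIT(&head);
	LIST_INSERT_HEAD(&head, a, links);
	LIST_INSERT_AFTER(a, b, links);

	for (np = LIST_FIRST(&head); np != NULL; np = LIST_NEXT(np, links))
		np->value = 0;

	LIST_REMOVE(b, links);
}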
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (0)
+
+/*
+ * This macro is used to fixup the queue after moving the head.
+ */
+#define TAILQ_REINSERT_HEAD(head, elm, field) do { \
+ DB_ASSERT((head)->tqh_first == (elm)); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
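And a similar sketch for tail queues (again editorial, with invented types): insertion at the tail plus removal at the head gives a simple FIFO.

#include <stddef.h>			/* NULL */

struct job {
	int id;
	TAILQ_ENTRY(job) q;
};
TAILQ_HEAD(job_queue, job);

static void
example_fifo(struct job_queue *qp, struct job *jp)
{
	struct job *first;

	TAILQ_INIT(qp);
	TAILQ_INSERT_TAIL(qp, jp, q);

	if ((first = TAILQ_FIRST(qp)) != NULL)
		TAILQ_REMOVE(qp, first, q);
}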
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = (void *)(head); \
+ (head)->cqh_last = (void *)(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == (void *)(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == (void *)(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = (void *)(head); \
+ if ((head)->cqh_last == (void *)(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = (void *)(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == (void *)(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == (void *)(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == (void *)(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+} while (0)
+
+#if defined(__cplusplus)
+}
+#endif
diff --git a/bdb/include/region.h b/bdb/include/region.h
new file mode 100644
index 00000000000..c5882d09aad
--- /dev/null
+++ b/bdb/include/region.h
@@ -0,0 +1,292 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: region.h,v 11.13 2000/11/15 19:25:37 sue Exp $
+ */
+
+/*
+ * The DB environment consists of some number of "regions", which are described
+ * by the following four structures:
+ *
+ * REGENV -- shared information about the environment
+ * REGENV_REF -- file describing system memory version of REGENV
+ * REGION -- shared information about a single region
+ * REGINFO -- per-process information about a REGION
+ *
+ * There are three types of memory that hold regions:
+ * per-process heap (malloc)
+ * file mapped into memory (mmap, MapViewOfFile)
+ * system memory (shmget, CreateFileMapping)
+ *
+ * If the regions are private to a process, they're in malloc. If they're
+ * public, they're in file mapped memory, or, optionally, in system memory.
+ * Regions in the filesystem are named "__db.001", "__db.002" and so on. If
+ * we're not using a private environment allocated using malloc(3), the file
+ * "__db.001" will always exist, as we use it to synchronize on the regions,
+ * whether they exist in file mapped memory or system memory.
+ *
+ * The file "__db.001" contains a REGENV structure and a linked list of some
+ * number of REGION structures. Each of the REGION structures describes and
+ * locks one of the underlying shared regions used by DB.
+ *
+ * __db.001
+ * +---------+
+ * |REGENV |
+ * +---------+ +----------+
+ * |REGION |-> | __db.002 |
+ * | | +----------+
+ * +---------+ +----------+
+ * |REGION |-> | __db.003 |
+ * | | +----------+
+ * +---------+ +----------+
+ * |REGION |-> | __db.004 |
+ * | | +----------+
+ * +---------+
+ *
+ * The only tricky part about manipulating the regions is correctly creating
+ * or joining the REGENV file, i.e., __db.001. We have to be absolutely sure
+ * that only one process creates it, and that everyone else joins it without
+ * seeing inconsistent data. Once that region is created, we can use normal
+ * shared locking procedures to do mutual exclusion for all other regions.
+ *
+ * One of the REGION structures in the main environment region describes the
+ * environment region itself.
+ *
+ * To lock a region, locate the REGION structure that describes it and acquire
+ * the region's mutex. There is one exception to this rule -- the lock for the
+ * environment region itself is in the REGENV structure, and not in the REGION
+ * that describes the environment region. That's so that we can acquire a lock
+ * without walking linked lists that could potentially change underneath us.
+ * The REGION will not be moved or removed during the life of the region, and
+ * so long-lived references to it can be held by the process.
+ *
+ * All requests to create or join a region return a REGINFO structure, which
+ * is held by the caller and used to open and subsequently close the reference
+ * to the region. The REGINFO structure contains the per-process information
+ * that we need to access the region.
+ *
+ * The one remaining complication. If the regions (including the environment
+ * region) live in system memory, and the system memory isn't "named" somehow
+ * in the filesystem name space, we need some way of finding it. Do this by
+ * writing the REGENV_REF structure into the "__db.001" file. When we find
+ * a __db.001 file that is too small to be a real, on-disk environment, we use
+ * the information it contains to redirect to the real "__db.001" file/memory.
+ * This currently only happens when the REGENV file is in shared system memory.
+ *
+ * Although DB does not currently grow regions when they run out of memory, it
+ * would be possible to do so. To grow a region, allocate a new region of the
+ * appropriate size, then copy the old region over it and insert the additional
+ * space into the already existing shalloc arena. Callers may have to fix up
+ * local references, but that should be easy to do. This failed in historic
+ * versions of DB because the region lock lived in the mapped memory, and when
+ * it was unmapped and remapped (or copied), threads could lose track of it.
+ * Once we moved that lock into a region that is never unmapped, growing should
+ * work. That all said, current versions of DB don't implement region grow
+ * because some systems don't support mutex copying, e.g., from OSF1 V4.0:
+ *
+ * The address of an msemaphore structure may be significant. If the
+ * msemaphore structure contains any value copied from an msemaphore
+ * structure at a different address, the result is undefined.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DB_REGION_FMT "__db.%03d" /* Region file name format. */
+#define DB_REGION_NAME_NUM 5 /* First digit offset in file names. */
+#define DB_REGION_NAME_LENGTH 8 /* Length of file names. */
+
+#define DB_REGION_ENV "__db.001" /* Primary environment name. */
+
+#define INVALID_REGION_ID 0 /* Out-of-band region ID. */
+#define REGION_ID_ENV 1 /* Primary environment ID. */
+
+typedef enum {
+ INVALID_REGION_TYPE=0, /* Region type. */
+ REGION_TYPE_ENV,
+ REGION_TYPE_LOCK,
+ REGION_TYPE_LOG,
+ REGION_TYPE_MPOOL,
+ REGION_TYPE_MUTEX,
+ REGION_TYPE_TXN } reg_type;
+
+#define INVALID_REGION_SEGID -1 /* Segment IDs are either shmget(2) or
+ * Win16 segment identifiers. They are
+ * both stored in a "long", and we need
+ * an out-of-band value.
+ */
+/*
+ * Nothing can live at region offset 0, because, in all cases, that's where
+ * we store *something*. Lots of code needs an out-of-band value for region
+ * offsets, so we use 0.
+ */
+#define INVALID_ROFF 0
+
+/* Reference describing system memory version of REGENV. */
+typedef struct __db_reg_env_ref {
+ roff_t size; /* Region size. */
+ long segid; /* UNIX shmget(2) ID. */
+} REGENV_REF;
+
+/* Per-environment region information. */
+typedef struct __db_reg_env {
+ /*
+ * !!!
+ * The mutex must be the first entry in the structure to guarantee
+ * correct alignment.
+ */
+ MUTEX mutex; /* Environment mutex. */
+
+ /*
+ * !!!
+ * Note, the magic and panic fields are NOT protected by any mutex,
+ * and for this reason cannot be anything more complicated than a
+ * zero/non-zero value.
+ *
+ * !!!
+ * The valid region magic number must appear at the same byte offset
+ * in both the environment and each shared region, as Windows/95 uses
+ * it to determine if the memory has been zeroed since it was last used.
+ */
+ u_int32_t magic; /* Valid region magic number. */
+
+ int panic; /* Environment is dead. */
+
+ int majver; /* Major DB version number. */
+ int minver; /* Minor DB version number. */
+ int patch; /* Patch DB version number. */
+
+ u_int32_t init_flags; /* Flags the env was initialized with.*/
+
+ /* List of regions. */
+ SH_LIST_HEAD(__db_regionh) regionq;
+
+ u_int32_t refcnt; /* References to the environment. */
+
+ size_t pad; /* Guarantee that following memory is
+ * size_t aligned. This is necessary
+ * because we're going to store the
+ * allocation region information there.
+ */
+} REGENV;
+
+/* Per-region shared region information. */
+typedef struct __db_region {
+ /*
+ * !!!
+ * The mutex must be the first entry in the structure to guarantee
+ * correct alignment.
+ */
+ MUTEX mutex; /* Region mutex. */
+
+ /*
+ * !!!
+ * The valid region magic number must appear at the same byte offset
+ * in both the environment and each shared region, as Windows/95 uses
+ * it to determine if the memory has been zeroed since it was last used.
+ */
+ u_int32_t magic;
+
+ SH_LIST_ENTRY q; /* Linked list of REGIONs. */
+
+ reg_type type; /* Region type. */
+ u_int32_t id; /* Region id. */
+
+ roff_t size; /* Region size in bytes. */
+
+ roff_t primary; /* Primary data structure offset. */
+
+ long segid; /* UNIX shmget(2), Win16 segment ID. */
+} REGION;
+
+/*
+ * Per-process/per-attachment information about a single region.
+ */
+struct __db_reginfo_t { /* __db_r_attach IN parameters. */
+ reg_type type; /* Region type. */
+ u_int32_t id; /* Region id. */
+ int mode; /* File creation mode. */
+
+ /* __db_r_attach OUT parameters. */
+ REGION *rp; /* Shared region. */
+
+ char *name; /* Region file name. */
+
+ void *addr; /* Region allocation address. */
+ void *primary; /* Primary data structure address. */
+
+ void *wnt_handle; /* Win/NT HANDLE. */
+
+#define REGION_CREATE 0x01 /* Caller created region. */
+#define REGION_CREATE_OK 0x02 /* Caller willing to create region. */
+#define REGION_JOIN_OK 0x04 /* Caller is looking for a match. */
+ u_int32_t flags;
+};
+
+/*
+ * Mutex maintenance information each subsystem region must keep track
+ * of to manage resources adequately.
+ */
+typedef struct __db_regmaint_stat_t {
+ u_int32_t st_hint_hit;
+ u_int32_t st_hint_miss;
+ u_int32_t st_records;
+ u_int32_t st_clears;
+ u_int32_t st_destroys;
+ u_int32_t st_max_locks;
+} REGMAINT_STAT;
+
+typedef struct __db_regmaint_t {
+ u_int32_t reglocks; /* Maximum # of mutexes we track. */
+ u_int32_t regmutex_hint; /* Hint for next slot */
+ REGMAINT_STAT stat; /* Stats */
+ roff_t regmutexes[1]; /* Region mutexes in use. */
+} REGMAINT;
+
+/*
+ * R_ADDR Return a per-process address for a shared region offset.
+ * R_OFFSET Return a shared region offset for a per-process address.
+ *
+ * !!!
+ * R_OFFSET should really be returning a ptrdiff_t, but that's not yet
+ * portable. We use u_int32_t, which restricts regions to 4Gb in size.
+ */
+#define R_ADDR(base, offset) \
+ ((void *)((u_int8_t *)((base)->addr) + offset))
+#define R_OFFSET(base, p) \
+ ((u_int32_t)((u_int8_t *)(p) - (u_int8_t *)(base)->addr))
+
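A small hedged example of the round trip (the function name is illustrative): only the offset is ever stored in shared memory, and each process converts it back using its own mapping address.

/* Illustrative only: offsets are shared, addresses are per-process. */
static MUTEX *
__hyp_roundtrip(REGINFO *infop, MUTEX *mutexp)
{
	u_int32_t off;

	off = R_OFFSET(infop, mutexp);		/* Safe to store in the region. */
	return ((MUTEX *)R_ADDR(infop, off));	/* == mutexp in this process. */
}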
+/*
+ * R_LOCK Lock/unlock a region.
+ * R_UNLOCK
+ */
+#define R_LOCK(dbenv, reginfo) \
+ MUTEX_LOCK(dbenv, &(reginfo)->rp->mutex, (dbenv)->lockfhp)
+#define R_UNLOCK(dbenv, reginfo) \
+ MUTEX_UNLOCK(dbenv, &(reginfo)->rp->mutex)
+
+/* PANIC_CHECK: Check to see if the DB environment is dead. */
+#define PANIC_CHECK(dbenv) \
+ if (DB_GLOBAL(db_panic) && \
+ (dbenv)->reginfo != NULL && ((REGENV *) \
+ ((REGINFO *)(dbenv)->reginfo)->primary)->panic != 0) \
+ return (DB_RUNRECOVERY);
+
+/*
+ * All regions are created on 8K boundaries out of sheer paranoia, so that
+ * we don't make some underlying VM unhappy.
+ */
+#define OS_ROUNDOFF(i, s) { \
+ (i) += (s) - 1; \
+ (i) -= (i) % (s); \
+}
+#define OS_VMPAGESIZE (8 * 1024)
+#define OS_VMROUNDOFF(i) OS_ROUNDOFF(i, OS_VMPAGESIZE)
+
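A quick worked example of the rounding (numbers only, for illustration):

/*
 * OS_VMROUNDOFF(i) with i == 10000:
 *	i += 8191  ->  18191
 *	i -= 18191 % 8192 (== 1807)  ->  16384
 * i.e. the request is rounded up to two 8KB VM pages.
 */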
+#if defined(__cplusplus)
+}
+#endif
diff --git a/bdb/include/rpc_client_ext.h b/bdb/include/rpc_client_ext.h
new file mode 100644
index 00000000000..a5c4689cd27
--- /dev/null
+++ b/bdb/include/rpc_client_ext.h
@@ -0,0 +1,19 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rpc_client_ext_h_
+#define _rpc_client_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __dbcl_envserver __P((DB_ENV *, char *, long, long, u_int32_t));
+int __dbcl_refresh __P((DB_ENV *));
+int __dbcl_txn_close __P((DB_ENV *));
+void __dbcl_txn_end __P((DB_TXN *));
+int __dbcl_c_destroy __P((DBC *));
+void __dbcl_c_refresh __P((DBC *));
+int __dbcl_c_setup __P((long, DB *, DBC **));
+int __dbcl_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t));
+int __dbcl_dbclose_common __P((DB *));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _rpc_client_ext_h_ */
diff --git a/bdb/include/rpc_server_ext.h b/bdb/include/rpc_server_ext.h
new file mode 100644
index 00000000000..4abb0768134
--- /dev/null
+++ b/bdb/include/rpc_server_ext.h
@@ -0,0 +1,21 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rpc_server_ext_h_
+#define _rpc_server_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+void __db_stats_freelist __P((__db_stat_statsreplist **));
+void __dbsrv_settimeout __P((ct_entry *, u_int32_t));
+void __dbsrv_timeout __P((int));
+void __dbclear_ctp __P((ct_entry *));
+void __dbdel_ctp __P((ct_entry *));
+ct_entry *new_ct_ent __P((u_int32_t *));
+ct_entry *get_tableent __P((long));
+void __dbsrv_active __P((ct_entry *));
+int __dbc_close_int __P((ct_entry *));
+int __dbenv_close_int __P((long, int));
+char *get_home __P((char *));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _rpc_server_ext_h_ */
diff --git a/bdb/include/shqueue.h b/bdb/include/shqueue.h
new file mode 100644
index 00000000000..115c5d39e88
--- /dev/null
+++ b/bdb/include/shqueue.h
@@ -0,0 +1,337 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: shqueue.h,v 11.6 2000/11/14 20:20:28 bostic Exp $
+ */
+#ifndef _SYS_SHQUEUE_H_
+#define _SYS_SHQUEUE_H_
+
+/*
+ * This file defines three types of data structures: lists, tail queues, and
+ * circular queues, similarly to the include file <sys/queue.h>.
+ *
+ * The difference is that this set of macros can be used for structures that
+ * reside in shared memory that may be mapped at different addresses in each
+ * process. In most cases, the macros for shared structures exactly mirror
+ * the normal macros, although the macro calls require an additional type
+ * parameter, only used by the HEAD and ENTRY macros of the standard macros.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Shared list definitions.
+ */
+#define SH_LIST_HEAD(name) \
+struct name { \
+ ssize_t slh_first; /* first element */ \
+}
+
+#define SH_LIST_ENTRY \
+struct { \
+ ssize_t sle_next; /* relative offset next element */ \
+ ssize_t sle_prev; /* relative offset of prev element */ \
+}
+
+/*
+ * Shared list functions. Since we use relative offsets for pointers,
+ * 0 is a valid offset. Therefore, we use -1 to indicate end of list.
+ * The macros ending in "P" return pointers without checking for end
+ * of list, the others check for end of list and evaluate to either a
+ * pointer or NULL.
+ */
+
+#define SH_LIST_FIRSTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->slh_first))
+
+#define SH_LIST_FIRST(head, type) \
+ ((head)->slh_first == -1 ? NULL : \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->slh_first)))
+
+#define SH_LIST_NEXTP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.sle_next))
+
+#define SH_LIST_NEXT(elm, field, type) \
+ ((elm)->field.sle_next == -1 ? NULL : \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.sle_next)))
+
+#define SH_LIST_PREV(elm, field) \
+ ((ssize_t *)(((u_int8_t *)(elm)) + (elm)->field.sle_prev))
+
+#define SH_PTR_TO_OFF(src, dest) \
+ ((ssize_t)(((u_int8_t *)(dest)) - ((u_int8_t *)(src))))
+
+/*
+ * Take the element's next pointer and calculate what the corresponding
+ * Prev pointer should be -- basically it is the negation plus the offset
+ * of the next field in the structure.
+ */
+#define SH_LIST_NEXT_TO_PREV(elm, field) \
+ (-(elm)->field.sle_next + SH_PTR_TO_OFF(elm, &(elm)->field.sle_next))
+
+#define SH_LIST_INIT(head) (head)->slh_first = -1
+
+#define SH_LIST_INSERT_AFTER(listelm, elm, field, type) do { \
+ if ((listelm)->field.sle_next != -1) { \
+ (elm)->field.sle_next = SH_PTR_TO_OFF(elm, \
+ SH_LIST_NEXTP(listelm, field, type)); \
+ SH_LIST_NEXTP(listelm, field, type)->field.sle_prev = \
+ SH_LIST_NEXT_TO_PREV(elm, field); \
+ } else \
+ (elm)->field.sle_next = -1; \
+ (listelm)->field.sle_next = SH_PTR_TO_OFF(listelm, elm); \
+ (elm)->field.sle_prev = SH_LIST_NEXT_TO_PREV(listelm, field); \
+} while (0)
+
+#define SH_LIST_INSERT_HEAD(head, elm, field, type) do { \
+ if ((head)->slh_first != -1) { \
+ (elm)->field.sle_next = \
+ (head)->slh_first - SH_PTR_TO_OFF(head, elm); \
+ SH_LIST_FIRSTP(head, type)->field.sle_prev = \
+ SH_LIST_NEXT_TO_PREV(elm, field); \
+ } else \
+ (elm)->field.sle_next = -1; \
+ (head)->slh_first = SH_PTR_TO_OFF(head, elm); \
+ (elm)->field.sle_prev = SH_PTR_TO_OFF(elm, &(head)->slh_first); \
+} while (0)
+
+#define SH_LIST_REMOVE(elm, field, type) do { \
+ if ((elm)->field.sle_next != -1) { \
+ SH_LIST_NEXTP(elm, field, type)->field.sle_prev = \
+ (elm)->field.sle_prev - (elm)->field.sle_next; \
+ *SH_LIST_PREV(elm, field) += (elm)->field.sle_next; \
+ } else \
+ *SH_LIST_PREV(elm, field) = -1; \
+} while (0)
+
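A usage sketch (editorial; the element and head types are invented): everything, head included, must live inside the same shared region so that the self-relative offsets stay valid at any mapping address.

#include <stddef.h>			/* NULL */

struct shared_obj {
	u_int32_t id;
	SH_LIST_ENTRY links;		/* Offset-based linkage. */
};
SH_LIST_HEAD(shared_head);

static void
example_shlist(struct shared_head *hp, struct shared_obj *op)
{
	struct shared_obj *p;

	SH_LIST_INIT(hp);
	SH_LIST_INSERT_HEAD(hp, op, links, shared_obj);

	for (p = SH_LIST_FIRST(hp, shared_obj);
	    p != NULL; p = SH_LIST_NEXT(p, links, shared_obj))
		p->id = 0;

	SH_LIST_REMOVE(op, links, shared_obj);
}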
+/*
+ * Shared tail queue definitions.
+ */
+#define SH_TAILQ_HEAD(name) \
+struct name { \
+ ssize_t stqh_first; /* relative offset of first element */ \
+ ssize_t stqh_last; /* relative offset of last's next */ \
+}
+
+#define SH_TAILQ_ENTRY \
+struct { \
+ ssize_t stqe_next; /* relative offset of next element */ \
+ ssize_t stqe_prev; /* relative offset of prev's next */ \
+}
+
+/*
+ * Shared tail queue functions.
+ */
+#define SH_TAILQ_FIRSTP(head, type) \
+ ((struct type *)((u_int8_t *)(head) + (head)->stqh_first))
+
+#define SH_TAILQ_FIRST(head, type) \
+ ((head)->stqh_first == -1 ? NULL : SH_TAILQ_FIRSTP(head, type))
+
+#define SH_TAILQ_NEXTP(elm, field, type) \
+ ((struct type *)((u_int8_t *)(elm) + (elm)->field.stqe_next))
+
+#define SH_TAILQ_NEXT(elm, field, type) \
+ ((elm)->field.stqe_next == -1 ? NULL : SH_TAILQ_NEXTP(elm, field, type))
+
+#define SH_TAILQ_PREVP(elm, field) \
+ ((ssize_t *)((u_int8_t *)(elm) + (elm)->field.stqe_prev))
+
+#define SH_TAILQ_LAST(head) \
+ ((ssize_t *)(((u_int8_t *)(head)) + (head)->stqh_last))
+
+#define SH_TAILQ_NEXT_TO_PREV(elm, field) \
+ (-(elm)->field.stqe_next + SH_PTR_TO_OFF(elm, &(elm)->field.stqe_next))
+
+#define SH_TAILQ_INIT(head) { \
+ (head)->stqh_first = -1; \
+ (head)->stqh_last = SH_PTR_TO_OFF(head, &(head)->stqh_first); \
+}
+
+#define SH_TAILQ_INSERT_HEAD(head, elm, field, type) do { \
+ if ((head)->stqh_first != -1) { \
+ (elm)->field.stqe_next = \
+ (head)->stqh_first - SH_PTR_TO_OFF(head, elm); \
+ SH_TAILQ_FIRSTP(head, type)->field.stqe_prev = \
+ SH_TAILQ_NEXT_TO_PREV(elm, field); \
+ } else { \
+ (elm)->field.stqe_next = -1; \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &(elm)->field.stqe_next); \
+ } \
+ (head)->stqh_first = SH_PTR_TO_OFF(head, elm); \
+ (elm)->field.stqe_prev = \
+ SH_PTR_TO_OFF(elm, &(head)->stqh_first); \
+} while (0)
+
+#define SH_TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.stqe_next = -1; \
+ (elm)->field.stqe_prev = \
+ -SH_PTR_TO_OFF(head, elm) + (head)->stqh_last; \
+ if ((head)->stqh_last == \
+ SH_PTR_TO_OFF((head), &(head)->stqh_first)) \
+ (head)->stqh_first = SH_PTR_TO_OFF(head, elm); \
+ else \
+ *SH_TAILQ_LAST(head) = -(head)->stqh_last + \
+ SH_PTR_TO_OFF((elm), &(elm)->field.stqe_next) + \
+ SH_PTR_TO_OFF(head, elm); \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &((elm)->field.stqe_next)); \
+} while (0)
+
+#define SH_TAILQ_INSERT_AFTER(head, listelm, elm, field, type) do { \
+ if ((listelm)->field.stqe_next != -1) { \
+ (elm)->field.stqe_next = (listelm)->field.stqe_next - \
+ SH_PTR_TO_OFF(listelm, elm); \
+ SH_TAILQ_NEXTP(listelm, field, type)->field.stqe_prev = \
+ SH_TAILQ_NEXT_TO_PREV(elm, field); \
+ } else { \
+ (elm)->field.stqe_next = -1; \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &elm->field.stqe_next); \
+ } \
+ (listelm)->field.stqe_next = SH_PTR_TO_OFF(listelm, elm); \
+ (elm)->field.stqe_prev = SH_TAILQ_NEXT_TO_PREV(listelm, field); \
+} while (0)
+
+#define SH_TAILQ_REMOVE(head, elm, field, type) do { \
+ if ((elm)->field.stqe_next != -1) { \
+ SH_TAILQ_NEXTP(elm, field, type)->field.stqe_prev = \
+ (elm)->field.stqe_prev + \
+ SH_PTR_TO_OFF(SH_TAILQ_NEXTP(elm, \
+ field, type), elm); \
+ *SH_TAILQ_PREVP(elm, field) += elm->field.stqe_next; \
+ } else { \
+ (head)->stqh_last = (elm)->field.stqe_prev + \
+ SH_PTR_TO_OFF(head, elm); \
+ *SH_TAILQ_PREVP(elm, field) = -1; \
+ } \
+} while (0)
+
+/*
+ * Shared circular queue definitions.
+ */
+#define SH_CIRCLEQ_HEAD(name) \
+struct name { \
+ ssize_t scqh_first; /* first element */ \
+ ssize_t scqh_last; /* last element */ \
+}
+
+#define SH_CIRCLEQ_ENTRY \
+struct { \
+ ssize_t scqe_next; /* next element */ \
+ ssize_t scqe_prev; /* previous element */ \
+}
+
+/*
+ * Shared circular queue functions.
+ */
+#define SH_CIRCLEQ_FIRSTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->scqh_first))
+
+#define SH_CIRCLEQ_FIRST(head, type) \
+ ((head)->scqh_first == -1 ? \
+ (void *)head : SH_CIRCLEQ_FIRSTP(head, type))
+
+#define SH_CIRCLEQ_LASTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->scqh_last))
+
+#define SH_CIRCLEQ_LAST(head, type) \
+ ((head)->scqh_last == -1 ? (void *)head : SH_CIRCLEQ_LASTP(head, type))
+
+#define SH_CIRCLEQ_NEXTP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.scqe_next))
+
+#define SH_CIRCLEQ_NEXT(head, elm, field, type) \
+ ((elm)->field.scqe_next == SH_PTR_TO_OFF(elm, head) ? \
+ (void *)head : SH_CIRCLEQ_NEXTP(elm, field, type))
+
+#define SH_CIRCLEQ_PREVP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.scqe_prev))
+
+#define SH_CIRCLEQ_PREV(head, elm, field, type) \
+ ((elm)->field.scqe_prev == SH_PTR_TO_OFF(elm, head) ? \
+ (void *)head : SH_CIRCLEQ_PREVP(elm, field, type))
+
+#define SH_CIRCLEQ_INIT(head) { \
+ (head)->scqh_first = 0; \
+ (head)->scqh_last = 0; \
+}
+
+#define SH_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field, type) do { \
+ (elm)->field.scqe_prev = SH_PTR_TO_OFF(elm, listelm); \
+ (elm)->field.scqe_next = (listelm)->field.scqe_next + \
+ (elm)->field.scqe_prev; \
+ if (SH_CIRCLEQ_NEXTP(listelm, field, type) == (void *)head) \
+ (head)->scqh_last = SH_PTR_TO_OFF(head, elm); \
+ else \
+ SH_CIRCLEQ_NEXTP(listelm, \
+ field, type)->field.scqe_prev = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_NEXTP(listelm, \
+ field, type), elm); \
+ (listelm)->field.scqe_next = -(elm)->field.scqe_prev; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field, type) do { \
+ (elm)->field.scqe_next = SH_PTR_TO_OFF(elm, listelm); \
+ (elm)->field.scqe_prev = (elm)->field.scqe_next - \
+ SH_CIRCLEQ_PREVP(listelm, field, type)->field.scqe_next;\
+ if (SH_CIRCLEQ_PREVP(listelm, field, type) == (void *)(head)) \
+ (head)->scqh_first = SH_PTR_TO_OFF(head, elm); \
+ else \
+ SH_CIRCLEQ_PREVP(listelm, \
+ field, type)->field.scqe_next = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_PREVP(listelm, \
+ field, type), elm); \
+ (listelm)->field.scqe_prev = -(elm)->field.scqe_next; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_HEAD(head, elm, field, type) do { \
+ (elm)->field.scqe_prev = SH_PTR_TO_OFF(elm, head); \
+ (elm)->field.scqe_next = (head)->scqh_first + \
+ (elm)->field.scqe_prev; \
+ if ((head)->scqh_last == 0) \
+ (head)->scqh_last = -(elm)->field.scqe_prev; \
+ else \
+ SH_CIRCLEQ_FIRSTP(head, type)->field.scqe_prev = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_FIRSTP(head, type), elm); \
+ (head)->scqh_first = -(elm)->field.scqe_prev; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_TAIL(head, elm, field, type) do { \
+ (elm)->field.scqe_next = SH_PTR_TO_OFF(elm, head); \
+ (elm)->field.scqe_prev = (head)->scqh_last + \
+ (elm)->field.scqe_next; \
+ if ((head)->scqh_first == 0) \
+ (head)->scqh_first = -(elm)->field.scqe_next; \
+ else \
+ SH_CIRCLEQ_LASTP(head, type)->field.scqe_next = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_LASTP(head, type), elm); \
+ (head)->scqh_last = -(elm)->field.scqe_next; \
+} while (0)
+
+#define SH_CIRCLEQ_REMOVE(head, elm, field, type) do { \
+ if (SH_CIRCLEQ_NEXTP(elm, field, type) == (void *)(head)) \
+ (head)->scqh_last += (elm)->field.scqe_prev; \
+ else \
+ SH_CIRCLEQ_NEXTP(elm, field, type)->field.scqe_prev += \
+ (elm)->field.scqe_prev; \
+ if (SH_CIRCLEQ_PREVP(elm, field, type) == (void *)(head)) \
+ (head)->scqh_first += (elm)->field.scqe_next; \
+ else \
+ SH_CIRCLEQ_PREVP(elm, field, type)->field.scqe_next += \
+ (elm)->field.scqe_next; \
+} while (0)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* !_SYS_SHQUEUE_H_ */
diff --git a/bdb/include/tcl_db.h b/bdb/include/tcl_db.h
new file mode 100644
index 00000000000..254006c2f6d
--- /dev/null
+++ b/bdb/include/tcl_db.h
@@ -0,0 +1,219 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: tcl_db.h,v 11.9 2000/12/12 17:43:56 bostic Exp $
+ */
+
+#define MSG_SIZE 100 /* Message size */
+
+enum INFOTYPE {
+ I_ENV, I_DB, I_DBC, I_TXN, I_MP, I_PG, I_LOCK, I_NDBM, I_MUTEX };
+
+#define MAX_ID 8 /* Maximum number of sub-id's we need */
+
+#define DBTCL_DBM 1
+#define DBTCL_NDBM 2
+
+typedef struct _mutex_entry {
+ union {
+ struct {
+ MUTEX real_m;
+ u_int32_t real_val;
+ } r;
+ /*
+ * This is here to make sure that each of the mutex structures
+ * are 16-byte aligned, which is required on HP architectures.
+ * The db_mutex_t structure might be >32 bytes itself, or the
+ * real_val might push it over the 32 byte boundary. The best
+ * we can do is use a 48 byte boundary.
+ */
+ char c[48];
+ } u;
+} _MUTEX_ENTRY;
+
+#define m u.r.real_m
+#define val u.r.real_val
+
+typedef struct _mutex_data {
+ DB_ENV *env;
+ REGINFO reginfo;
+ _MUTEX_ENTRY *marray;
+ size_t size;
+ u_int32_t n_mutex;
+} _MUTEX_DATA;
+
+/*
+ * Why use a home grown package over the Tcl_Hash functions?
+ *
+ * We could have implemented the stuff below without maintaining our
+ * own list manipulation, efficiently hashing it with the available
+ * Tcl functions (Tcl_CreateHashEntry, Tcl_GetHashValue, etc). I chose
+ * not to do so for these reasons:
+ *
+ * We still need the information below. Using the hashing only removes
+ * us from needing the next/prev pointers. We still need the structure
+ * itself because we need more than one value associated with a widget.
+ * We need to keep track of parent pointers for sub-widgets (like cursors)
+ * so we can correctly close. We need to keep track of individual widget's
+ * id counters for any sub-widgets they may have. We need to be able to
+ * associate the name/client data outside the scope of the widget.
+ *
+ * So, is it better to use the hashing rather than
+ * the linear list we have now? I decided against it for the simple reason
+ * that to access the structure would require two calls. The first is
+ * Tcl_FindHashEntry(table, key) and then, once we have the entry, we'd
+ * have to do Tcl_GetHashValue(entry) to get the pointer of the structure.
+ *
+ * I believe the number of simultaneous DB widgets in existence at one time
+ * is not going to be that large (more than several dozen) such that
+ * linearly searching the list is not going to impact performance in a
+ * noticeable way. Should performance be impacted due to the size of the
+ * info list, then perhaps it is time to revisit this decision.
+ */
+typedef struct dbtcl_info {
+ LIST_ENTRY(dbtcl_info) entries;
+ Tcl_Interp *i_interp;
+ char *i_name;
+ enum INFOTYPE i_type;
+ union infop {
+ DB_ENV *envp;
+ void *anyp;
+ DB *dbp;
+ DBC *dbcp;
+ DB_TXN *txnp;
+ DB_MPOOLFILE *mp;
+ DB_LOCK *lock;
+ _MUTEX_DATA *mutex;
+#if 0
+ DBM *ndbmp; /* Compatibility */
+#endif
+ } un;
+ union data {
+ int anydata;
+ db_pgno_t pgno;
+ u_int32_t lockid;
+ } und;
+ union data2 {
+ int anydata;
+ size_t pagesz;
+ } und2;
+ DBT i_lockobj;
+ FILE *i_err;
+ char *i_errpfx;
+ struct dbtcl_info *i_parent;
+ int i_otherid[MAX_ID];
+} DBTCL_INFO;
+
+extern int __debug_on, __debug_print, __debug_stop, __debug_test;
+LIST_HEAD(infohead, dbtcl_info) __db_infohead;
+
+#define i_anyp un.anyp
+#define i_pagep un.anyp
+#define i_envp un.envp
+#define i_dbp un.dbp
+#define i_dbcp un.dbcp
+#define i_txnp un.txnp
+#define i_mp un.mp
+#define i_lock un.lock
+#define i_mutex un.mutex
+#if 0
+#define i_ndbm un.ndbmp
+#endif
+
+#define i_data und.anydata
+#define i_pgno und.pgno
+#define i_locker und.lockid
+#define i_data2 und2.anydata
+#define i_pgsz und2.pagesz
+
+#define i_envtxnid i_otherid[0]
+#define i_envmpid i_otherid[1]
+#define i_envlockid i_otherid[2]
+#define i_envmutexid i_otherid[3]
+
+#define i_mppgid i_otherid[0]
+
+#define i_dbdbcid i_otherid[0]
+
+#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((name))
+#define NAME_TO_DB(name) (DB *)_NameToPtr((name))
+#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name))
+#define NAME_TO_TXN(name) (DB_TXN *)_NameToPtr((name))
+#define NAME_TO_MP(name) (DB_MPOOLFILE *)_NameToPtr((name))
+#define NAME_TO_LOCK(name) (DB_LOCK *)_NameToPtr((name))
+
+/*
+ * MAKE_STAT_LIST appends a {name value} pair to a result list
+ * that MUST be named 'res' and be a Tcl_Obj * in the local
+ * function.  This macro also assumes a label "error" to go to
+ * in the event of a Tcl error.  For stat functions this label will
+ * typically precede the call that frees the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_LIST(s,v) \
+do { \
+ result = _SetListElemInt(interp, res, (s), (v)); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
+
+/*
+ * MAKE_STAT_STRLIST appends a {name string} pair to a result list
+ * that MUST be named 'res' and be a Tcl_Obj * in the local
+ * function.  This macro also assumes a label "error" to go to
+ * in the event of a Tcl error.  For stat functions this label will
+ * typically precede the call that frees the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_STRLIST(s,s1) \
+do { \
+ result = _SetListElem(interp, res, (s), strlen(s), \
+ (s1), strlen(s1)); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
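A minimal sketch of the contract these macros assume; the function below is hypothetical and not part of the Tcl glue. It shows the three names the macros rely on: a Tcl_Obj * called res, an int called result, and an "error" label.

	/* Hypothetical wrapper showing the MAKE_STAT_LIST calling convention. */
	static int
	example_stat_list(Tcl_Interp *interp, u_int32_t nkeys, u_int32_t pagesize)
	{
		Tcl_Obj *res;			/* Name required by the macro. */
		int result;			/* Name required by the macro. */

		result = TCL_OK;
		res = Tcl_NewObj();

		MAKE_STAT_LIST("Number of keys", nkeys);
		MAKE_STAT_LIST("Page size", pagesize);

		Tcl_SetObjResult(interp, res);
	error:	/* Jumped to if _SetListElemInt reports a Tcl error. */
		return (result);
	}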
+
+/*
+ * FLAG_CHECK checks that the given flag is not already set.
+ * If it is, it sets up an error message and sets result to TCL_ERROR.
+ */
+#define FLAG_CHECK(flag) \
+do { \
+ if ((flag) != 0) { \
+ Tcl_SetResult(interp, \
+ " Only 1 policy can be specified.\n", \
+ TCL_STATIC); \
+ result = TCL_ERROR; \
+ break; \
+ } \
+} while (0)
+
+/*
+ * FLAG_CHECK2 checks that the given flag is either not yet set or is
+ * set only to the given allowed value.  If it is set to anything else,
+ * it sets up an error message and sets result to TCL_ERROR.
+ */
+#define FLAG_CHECK2(flag,val) \
+do { \
+ if ((flag) != 0 && (flag) != (val)) { \
+ Tcl_SetResult(interp, \
+ " Only 1 policy can be specified.\n", \
+ TCL_STATIC); \
+ result = TCL_ERROR; \
+ break; \
+ } \
+} while (0)
+
+/*
+ * IS_HELP checks whether the argument we failed on is -?, which is a help
+ * option.  If it is, we return TCL_OK (but leave the result set to whatever
+ * Tcl_GetIndexFromObj says, which lists all the valid options).  Otherwise
+ * we return TCL_ERROR.
+ */
+#define IS_HELP(s) \
+ (strcmp(Tcl_GetStringFromObj(s,NULL), "-?") == 0) ? TCL_OK : TCL_ERROR
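These two idioms are typically combined in an option-parsing loop. The sketch below is hypothetical (the function name, option table, and argument layout are assumptions, not taken from the glue code), but it shows how FLAG_CHECK enforces a single policy flag and how IS_HELP turns a failed lookup of -? into a successful return.

	/* Hypothetical option loop using FLAG_CHECK and IS_HELP. */
	static int
	example_parse_policy(Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[],
	    u_int32_t *policyp)
	{
		static char *opts[] = { "-oldest", "-youngest", NULL };
		int i, optindex, result;

		result = TCL_OK;
		*policyp = 0;
		for (i = 2; i < objc; i++) {
			if (Tcl_GetIndexFromObj(interp, objv[i], opts,
			    "option", TCL_EXACT, &optindex) != TCL_OK)
				return (IS_HELP(objv[i]));	/* -? is not an error. */
			switch (optindex) {
			case 0:					/* -oldest */
				FLAG_CHECK(*policyp);		/* One policy only. */
				*policyp = DB_LOCK_OLDEST;
				break;
			case 1:					/* -youngest */
				FLAG_CHECK(*policyp);
				*policyp = DB_LOCK_YOUNGEST;
				break;
			}
		}
		return (result);
	}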
+
+#include "tcl_ext.h"
diff --git a/bdb/include/tcl_ext.h b/bdb/include/tcl_ext.h
new file mode 100644
index 00000000000..9baf7e4fdcf
--- /dev/null
+++ b/bdb/include/tcl_ext.h
@@ -0,0 +1,89 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _tcl_ext_h_
+#define _tcl_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int bdb_HCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+#if DB_DBM_HSEARCH != 0
+int bdb_NdbmOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBM **));
+#endif
+#if DB_DBM_HSEARCH != 0
+int bdb_DbmCommand
+ __P((Tcl_Interp *, int, Tcl_Obj * CONST*, int, DBM *));
+#endif
+int ndbm_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *,
+ DBTCL_INFO *));
+int db_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int dbc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int env_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int tcl_EnvRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DB_ENV *, DBTCL_INFO *));
+int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *,
+ Tcl_Obj *));
+int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+DBTCL_INFO *_NewInfo __P((Tcl_Interp *,
+ void *, char *, enum INFOTYPE));
+void *_NameToPtr __P((CONST char *));
+char *_PtrToName __P((CONST void *));
+DBTCL_INFO *_PtrToInfo __P((CONST void *));
+DBTCL_INFO *_NameToInfo __P((CONST char *));
+void _SetInfoData __P((DBTCL_INFO *, void *));
+void _DeleteInfo __P((DBTCL_INFO *));
+int _SetListElem __P((Tcl_Interp *,
+ Tcl_Obj *, void *, int, void *, int));
+int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, int));
+int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *,
+ db_recno_t, u_char *, int));
+int _GetGlobPrefix __P((char *, char **));
+int _ReturnSetup __P((Tcl_Interp *, int, char *));
+int _ErrorSetup __P((Tcl_Interp *, int, char *));
+void _ErrorFunc __P((CONST char *, char *));
+int _GetLsn __P((Tcl_Interp *, Tcl_Obj *, DB_LSN *));
+void _debug_check __P((void));
+int tcl_LockDetect __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockGet __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockStat __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockVec __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogArchive __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogCompare __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*));
+int tcl_LogFile __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogFlush __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogGet __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogPut __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogRegister __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogStat __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogUnregister __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+void _MpInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+int tcl_MpSync __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_MpTrickle __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_Mp __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int tcl_MpStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+void _TxnInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+int tcl_TxnCheckpoint __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int tcl_Txn __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int tcl_TxnStat __P((Tcl_Interp *, int,
+ Tcl_Obj * CONST*, DB_ENV *));
+int txn_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _tcl_ext_h_ */
diff --git a/bdb/include/txn.h b/bdb/include/txn.h
new file mode 100644
index 00000000000..009a1ca1589
--- /dev/null
+++ b/bdb/include/txn.h
@@ -0,0 +1,150 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: txn.h,v 11.12 2001/01/02 17:23:39 margo Exp $
+ */
+
+#ifndef _TXN_H_
+#define _TXN_H_
+
+#include "xa.h"
+
+struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR;
+struct __db_txnregion; typedef struct __db_txnregion DB_TXNREGION;
+
+/*
+ * !!!
+ * TXN_MINIMUM = (DB_LOCK_MAXID + 1) but this makes compilers complain.
+ */
+#define TXN_MINIMUM 0x80000000
+#define TXN_INVALID 0xffffffff /* Maximum number of txn ids. */
+#define TXN_INVALID_ID 0 /* Invalid transaction ID. */
+
+#define DEF_MAX_TXNS 20 /* Default max transactions. */
+
+/* The structure allocated for every transaction. */
+struct __db_txn {
+ DB_TXNMGR *mgrp; /* Pointer to transaction manager. */
+ DB_TXN *parent; /* Pointer to transaction's parent. */
+ DB_LSN last_lsn; /* Lsn of last log write. */
+ u_int32_t txnid; /* Unique transaction id. */
+ roff_t off; /* Detail structure within region. */
+ TAILQ_ENTRY(__db_txn) links; /* Links transactions off manager. */
+ TAILQ_HEAD(__kids, __db_txn) kids; /* Child transactions. */
+ TAILQ_ENTRY(__db_txn) klinks; /* Links child transactions. */
+ u_int32_t cursors; /* Number of cursors open for txn */
+
+#define TXN_CHILDCOMMIT 0x01 /* Transaction that has committed. */
+#define TXN_MALLOC 0x02 /* Structure allocated by TXN system. */
+#define TXN_NOSYNC 0x04 /* Do not sync on prepare and commit. */
+#define TXN_NOWAIT 0x08 /* Do not wait on locks. */
+#define TXN_SYNC 0x10 /* Sync on prepare and commit. */
+ u_int32_t flags;
+};
+
+/*
+ * Internal data maintained in shared memory for each transaction.
+ */
+typedef char DB_XID[XIDDATASIZE];
+
+typedef struct __txn_detail {
+	u_int32_t	txnid;		/* current transaction id;
+					   also used to link the free list */
+ DB_LSN last_lsn; /* last lsn written for this txn */
+ DB_LSN begin_lsn; /* lsn of begin record */
+ roff_t parent; /* Offset of transaction's parent. */
+
+#define TXN_RUNNING 1
+#define TXN_ABORTED 2
+#define TXN_PREPARED 3
+#define TXN_COMMITTED 4
+ u_int32_t status; /* status of the transaction */
+
+ SH_TAILQ_ENTRY links; /* free/active list */
+
+#define TXN_XA_ABORTED 1
+#define TXN_XA_DEADLOCKED 2
+#define TXN_XA_ENDED 3
+#define TXN_XA_PREPARED 4
+#define TXN_XA_STARTED 5
+#define TXN_XA_SUSPENDED 6
+ u_int32_t xa_status; /* XA status */
+
+ /*
+ * XID (xid_t) structure: because these fields are logged, the
+ * sizes have to be explicit.
+ */
+ DB_XID xid; /* XA global transaction id */
+ u_int32_t bqual; /* bqual_length from XID */
+ u_int32_t gtrid; /* gtrid_length from XID */
+ int32_t format; /* XA format */
+} TXN_DETAIL;
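Since the comment notes that these XID fields are logged with explicit sizes, the sketch below shows how an xid_t (declared in xa.h, included above) maps onto the fixed-size TXN_DETAIL fields. The helper name is an assumption for illustration only; it needs <string.h> for memcpy.

	#include <string.h>

	/* Illustrative mapping of an XA xid_t onto the logged TXN_DETAIL fields. */
	static void
	example_store_xid(TXN_DETAIL *td, const XID *xid)
	{
		memcpy(td->xid, xid->data, XIDDATASIZE); /* DB_XID is char[XIDDATASIZE]. */
		td->bqual = (u_int32_t)xid->bqual_length; /* long -> fixed-size field. */
		td->gtrid = (u_int32_t)xid->gtrid_length;
		td->format = (int32_t)xid->formatID;
	}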
+
+/*
+ * DB_TXNMGR --
+ * The transaction manager encapsulates the transaction system.
+ */
+struct __db_txnmgr {
+/*
+ * These fields need to be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the mutex may need
+ * to be stored elsewhere on architectures unable to support mutexes in heap
+ * memory, e.g., HP/UX 9.
+ */
+ MUTEX *mutexp; /* Lock list of active transactions
+ * (including the content of each
+ * TXN_DETAIL structure on the list).
+ */
+ /* List of active transactions. */
+ TAILQ_HEAD(_chain, __db_txn) txn_chain;
+
+/* These fields are never updated after creation, and so not protected. */
+ DB_ENV *dbenv; /* Environment. */
+ REGINFO reginfo; /* Region information. */
+};
+
+/*
+ * DB_TXNREGION --
+ * The primary transaction data structure in the shared memory region.
+ */
+struct __db_txnregion {
+ u_int32_t maxtxns; /* maximum number of active TXNs */
+ u_int32_t last_txnid; /* last transaction id given out */
+ DB_LSN pending_ckp; /* last checkpoint did not finish */
+ DB_LSN last_ckp; /* lsn of the last checkpoint */
+ time_t time_ckp; /* time of last checkpoint */
+ u_int32_t logtype; /* type of logging */
+ u_int32_t locktype; /* lock type */
+ u_int32_t naborts; /* number of aborted TXNs */
+ u_int32_t ncommits; /* number of committed TXNs */
+ u_int32_t nbegins; /* number of begun TXNs */
+ u_int32_t nactive; /* number of active TXNs */
+ u_int32_t maxnactive; /* maximum number of active TXNs */
+ /* active TXN list */
+ SH_TAILQ_HEAD(__active) active_txn;
+};
+
+/*
+ * Make the region large enough to hold N transaction detail structures
+ * plus some space to hold thread handles and the beginning of the shalloc
+ * region.
+ */
+#define TXN_REGION_SIZE(N) \
+ (sizeof(DB_TXNREGION) + N * sizeof(TXN_DETAIL) + 1000)
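As a small illustration of the sizing formula (the exact byte count is platform-dependent, and the function below is hypothetical):

	/* Illustrative only: region size for the default of 20 transactions. */
	static size_t
	example_default_txn_region_size(void)
	{
		/* Expands to sizeof(DB_TXNREGION) + 20 * sizeof(TXN_DETAIL) + 1000. */
		return (TXN_REGION_SIZE(DEF_MAX_TXNS));
	}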
+
+/*
+ * Log record types.
+ */
+#define TXN_COMMIT 1
+#define TXN_PREPARE 2
+
+#include "txn_auto.h"
+#include "txn_ext.h"
+
+#include "xa_ext.h"
+#endif /* !_TXN_H_ */
diff --git a/bdb/include/txn_auto.h b/bdb/include/txn_auto.h
new file mode 100644
index 00000000000..c9cb5cfae4c
--- /dev/null
+++ b/bdb/include/txn_auto.h
@@ -0,0 +1,114 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef txn_AUTO_H
+#define txn_AUTO_H
+
+#define DB_txn_old_regop 6
+typedef struct _txn_old_regop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+} __txn_old_regop_args;
+
+int __txn_old_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_old_regop_read __P((DB_ENV *, void *, __txn_old_regop_args **));
+
+#define DB_txn_regop 10
+typedef struct _txn_regop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t timestamp;
+} __txn_regop_args;
+
+int __txn_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t));
+int __txn_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_regop_read __P((DB_ENV *, void *, __txn_regop_args **));
+
+#define DB_txn_old_ckp 7
+typedef struct _txn_old_ckp_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DB_LSN ckp_lsn;
+ DB_LSN last_ckp;
+} __txn_old_ckp_args;
+
+int __txn_old_ckp_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_old_ckp_read __P((DB_ENV *, void *, __txn_old_ckp_args **));
+
+#define DB_txn_ckp 11
+typedef struct _txn_ckp_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DB_LSN ckp_lsn;
+ DB_LSN last_ckp;
+ int32_t timestamp;
+} __txn_ckp_args;
+
+int __txn_ckp_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, DB_LSN *, int32_t));
+int __txn_ckp_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_ckp_read __P((DB_ENV *, void *, __txn_ckp_args **));
+
+#define DB_txn_xa_regop_old 8
+typedef struct _txn_xa_regop_old_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ DBT xid;
+ int32_t formatID;
+ u_int32_t gtrid;
+ u_int32_t bqual;
+} __txn_xa_regop_old_args;
+
+int __txn_xa_regop_old_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_old_read __P((DB_ENV *, void *, __txn_xa_regop_old_args **));
+
+#define DB_txn_xa_regop 13
+typedef struct _txn_xa_regop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ DBT xid;
+ int32_t formatID;
+ u_int32_t gtrid;
+ u_int32_t bqual;
+ DB_LSN begin_lsn;
+} __txn_xa_regop_args;
+
+int __txn_xa_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, const DBT *, int32_t, u_int32_t, u_int32_t, DB_LSN *));
+int __txn_xa_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_read __P((DB_ENV *, void *, __txn_xa_regop_args **));
+
+#define DB_txn_child_old 9
+typedef struct _txn_child_old_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ u_int32_t parent;
+} __txn_child_old_args;
+
+int __txn_child_old_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_old_read __P((DB_ENV *, void *, __txn_child_old_args **));
+
+#define DB_txn_child 12
+typedef struct _txn_child_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t child;
+ DB_LSN c_lsn;
+} __txn_child_args;
+
+int __txn_child_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, DB_LSN *));
+int __txn_child_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_read __P((DB_ENV *, void *, __txn_child_args **));
+int __txn_init_print __P((DB_ENV *));
+int __txn_init_recover __P((DB_ENV *));
+#endif
diff --git a/bdb/include/txn_ext.h b/bdb/include/txn_ext.h
new file mode 100644
index 00000000000..ee6922d701c
--- /dev/null
+++ b/bdb/include/txn_ext.h
@@ -0,0 +1,24 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _txn_ext_h_
+#define _txn_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __txn_xa_begin __P((DB_ENV *, DB_TXN *));
+int __txn_end __P((DB_TXN *, int));
+int __txn_activekids __P((DB_ENV *, u_int32_t, DB_TXN *));
+int __txn_regop_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_ckp_recover
+	__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+void __txn_dbenv_create __P((DB_ENV *));
+int __txn_open __P((DB_ENV *));
+int __txn_close __P((DB_ENV *));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _txn_ext_h_ */
diff --git a/bdb/include/xa.h b/bdb/include/xa.h
new file mode 100644
index 00000000000..ce46179263a
--- /dev/null
+++ b/bdb/include/xa.h
@@ -0,0 +1,179 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: xa.h,v 11.3 2000/02/14 02:59:55 bostic Exp $
+ */
+/*
+ * Start of xa.h header
+ *
+ * Define a symbol to prevent multiple inclusions of this header file
+ */
+#ifndef XA_H
+#define XA_H
+
+/*
+ * Transaction branch identification: XID and NULLXID:
+ */
+#define XIDDATASIZE 128 /* size in bytes */
+#define MAXGTRIDSIZE 64 /* maximum size in bytes of gtrid */
+#define MAXBQUALSIZE 64 /* maximum size in bytes of bqual */
+
+struct xid_t {
+ long formatID; /* format identifier */
+ long gtrid_length; /* value from 1 through 64 */
+ long bqual_length; /* value from 1 through 64 */
+ char data[XIDDATASIZE];
+};
+typedef struct xid_t XID;
+/*
+ * A value of -1 in formatID means that the XID is null.
+ */
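A tiny helper (not part of this header, name assumed) making the null-XID convention explicit:

	/* Illustrative check for the null-XID convention described above. */
	static int
	example_xid_is_null(const XID *xidp)
	{
		return (xidp->formatID == -1);
	}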
+
+/*
+ * Declarations of routines by which RMs call TMs:
+ */
+extern int ax_reg __P((int, XID *, long));
+extern int ax_unreg __P((int, long));
+
+/*
+ * XA Switch Data Structure
+ */
+#define RMNAMESZ 32 /* length of resource manager name, */
+ /* including the null terminator */
+#define MAXINFOSIZE 256 /* maximum size in bytes of xa_info */
+ /* strings, including the null
+ terminator */
+struct xa_switch_t {
+ char name[RMNAMESZ]; /* name of resource manager */
+ long flags; /* resource manager specific options */
+ long version; /* must be 0 */
+ int (*xa_open_entry) /* xa_open function pointer */
+ __P((char *, int, long));
+ int (*xa_close_entry) /* xa_close function pointer */
+ __P((char *, int, long));
+ int (*xa_start_entry) /* xa_start function pointer */
+ __P((XID *, int, long));
+ int (*xa_end_entry) /* xa_end function pointer */
+ __P((XID *, int, long));
+ int (*xa_rollback_entry) /* xa_rollback function pointer */
+ __P((XID *, int, long));
+ int (*xa_prepare_entry) /* xa_prepare function pointer */
+ __P((XID *, int, long));
+ int (*xa_commit_entry) /* xa_commit function pointer */
+ __P((XID *, int, long));
+ int (*xa_recover_entry) /* xa_recover function pointer */
+ __P((XID *, long, int, long));
+ int (*xa_forget_entry) /* xa_forget function pointer */
+ __P((XID *, int, long));
+ int (*xa_complete_entry) /* xa_complete function pointer */
+ __P((int *, int *, int, long));
+};
+
+/*
+ * Flag definitions for the RM switch
+ */
+#define TMNOFLAGS 0x00000000L /* no resource manager features
+ selected */
+#define TMREGISTER 0x00000001L /* resource manager dynamically
+ registers */
+#define TMNOMIGRATE 0x00000002L /* resource manager does not support
+ association migration */
+#define TMUSEASYNC 0x00000004L /* resource manager supports
+ asynchronous operations */
+/*
+ * Flag definitions for xa_ and ax_ routines
+ */
+/* use TMNOFLAGS, defined above, when not specifying other flags */
+#define TMASYNC 0x80000000L /* perform routine asynchronously */
+#define TMONEPHASE 0x40000000L /* caller is using one-phase commit
+ optimisation */
+#define TMFAIL 0x20000000L /* dissociates caller and marks
+ transaction branch rollback-only */
+#define TMNOWAIT 0x10000000L /* return if blocking condition
+ exists */
+#define TMRESUME 0x08000000L /* caller is resuming association with
+ suspended transaction branch */
+#define TMSUCCESS 0x04000000L /* dissociate caller from transaction
+ branch */
+#define TMSUSPEND 0x02000000L /* caller is suspending, not ending,
+ association */
+#define TMSTARTRSCAN 0x01000000L /* start a recovery scan */
+#define TMENDRSCAN 0x00800000L /* end a recovery scan */
+#define TMMULTIPLE 0x00400000L /* wait for any asynchronous
+ operation */
+#define TMJOIN 0x00200000L /* caller is joining existing
+ transaction branch */
+#define TMMIGRATE 0x00100000L /* caller intends to perform
+ migration */
+
+/*
+ * ax_() return codes (transaction manager reports to resource manager)
+ */
+#define TM_JOIN 2 /* caller is joining existing
+ transaction branch */
+#define TM_RESUME 1 /* caller is resuming association with
+ suspended transaction branch */
+#define TM_OK 0 /* normal execution */
+#define TMER_TMERR -1 /* an error occurred in the transaction
+ manager */
+#define TMER_INVAL -2 /* invalid arguments were given */
+#define TMER_PROTO -3 /* routine invoked in an improper
+ context */
+
+/*
+ * xa_() return codes (resource manager reports to transaction manager)
+ */
+#define XA_RBBASE 100 /* The inclusive lower bound of the
+ rollback codes */
+#define XA_RBROLLBACK XA_RBBASE /* The rollback was caused by an
+ unspecified reason */
+#define XA_RBCOMMFAIL XA_RBBASE+1 /* The rollback was caused by a
+ communication failure */
+#define XA_RBDEADLOCK XA_RBBASE+2 /* A deadlock was detected */
+#define XA_RBINTEGRITY XA_RBBASE+3 /* A condition that violates the
+ integrity of the resources was
+ detected */
+#define XA_RBOTHER XA_RBBASE+4 /* The resource manager rolled back the
+ transaction branch for a reason not
+ on this list */
+#define XA_RBPROTO XA_RBBASE+5 /* A protocol error occurred in the
+ resource manager */
+#define XA_RBTIMEOUT XA_RBBASE+6 /* A transaction branch took too long */
+#define XA_RBTRANSIENT XA_RBBASE+7 /* May retry the transaction branch */
+#define XA_RBEND XA_RBTRANSIENT /* The inclusive upper bound of the
+ rollback codes */
+#define XA_NOMIGRATE 9 /* resumption must occur where
+ suspension occurred */
+#define XA_HEURHAZ 8 /* the transaction branch may have
+ been heuristically completed */
+#define XA_HEURCOM 7 /* the transaction branch has been
+ heuristically committed */
+#define XA_HEURRB 6 /* the transaction branch has been
+ heuristically rolled back */
+#define XA_HEURMIX 5 /* the transaction branch has been
+ heuristically committed and rolled
+ back */
+#define XA_RETRY 4 /* routine returned with no effect and
+ may be re-issued */
+#define XA_RDONLY 3 /* the transaction branch was read-only
+ and has been committed */
+#define XA_OK 0 /* normal execution */
+#define XAER_ASYNC -2 /* asynchronous operation already
+ outstanding */
+#define XAER_RMERR -3 /* a resource manager error occurred in
+ the transaction branch */
+#define XAER_NOTA -4 /* the XID is not valid */
+#define XAER_INVAL -5 /* invalid arguments were given */
+#define XAER_PROTO -6 /* routine invoked in an improper
+ context */
+#define XAER_RMFAIL -7 /* resource manager unavailable */
+#define XAER_DUPID -8 /* the XID already exists */
+#define XAER_OUTSIDE -9 /* resource manager doing work outside
+ transaction */
+#endif /* ifndef XA_H */
+/*
+ * End of xa.h header
+ */
diff --git a/bdb/include/xa_ext.h b/bdb/include/xa_ext.h
new file mode 100644
index 00000000000..cc16ba18337
--- /dev/null
+++ b/bdb/include/xa_ext.h
@@ -0,0 +1,17 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _xa_ext_h_
+#define _xa_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __db_xa_create __P((DB *));
+int __db_rmid_to_env __P((int rmid, DB_ENV **envp));
+int __db_xid_to_txn __P((DB_ENV *, XID *, size_t *));
+int __db_map_rmid __P((int, DB_ENV *));
+int __db_unmap_rmid __P((int));
+int __db_map_xid __P((DB_ENV *, XID *, size_t));
+void __db_unmap_xid __P((DB_ENV *, XID *, size_t));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _xa_ext_h_ */
diff --git a/bdb/java/src/com/sleepycat/db/Db.java b/bdb/java/src/com/sleepycat/db/Db.java
new file mode 100644
index 00000000000..de11e28414a
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/Db.java
@@ -0,0 +1,710 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: Db.java,v 11.38 2000/12/31 19:26:22 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+import java.io.OutputStream;
+import java.io.FileNotFoundException;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class Db
+{
+ // All constant and flag values used with Db* classes are defined here.
+
+ // Collectively, these constants are known by the name
+ // "DBTYPE" in the documentation.
+ //
+ public static final int DB_BTREE = 1; // B+tree
+ public static final int DB_HASH = 2; // Extended Linear Hashing.
+ public static final int DB_RECNO = 3; // Fixed and variable-length records.
+ public static final int DB_QUEUE = 4; // Queue
+ public static final int DB_UNKNOWN = 5; // Figure it out on open.
+
+ // Flags understood by DbEnv()
+ //
+ // Note: DB_CXX_NO_EXCEPTIONS will have no effect in Java.
+ //
+ public static final int DB_CXX_NO_EXCEPTIONS; // C++: return error values
+ public static final int DB_CLIENT; // Open for a client environment.
+
+ // Flags understood by Db()
+ //
+ public static final int DB_XA_CREATE; // Open in an XA environment.
+
+ // Flags understood by Db.open(), DbEnv.open().
+ //
+ public static final int DB_CREATE; // O_CREAT: create file as necessary.
+ public static final int DB_NOMMAP; // Don't mmap underlying file.
+ public static final int DB_THREAD; // Free-thread DB package handles.
+
+ // Flags understood by only DbEnv.open().
+ //
+ public static final int DB_LOCKDOWN; // Lock memory into physical core.
+ public static final int DB_PRIVATE; // DB_ENV is process local.
+
+ //
+ // Flags understood by DbEnv.txn_begin().
+ //
+ public static final int DB_TXN_NOWAIT; // Do not wait for locks in this TXN.
+ public static final int DB_TXN_SYNC; // Always sync log on commit.
+
+ // Flags understood by DbEnv.set_flags().
+ //
+ public static final int DB_CDB_ALLDB; // In CDB, lock across environment.
+
+ //
+ // Flags understood by Db.open().
+ //
+ public static final int DB_EXCL; // Exclusive open (O_EXCL).
+ public static final int DB_RDONLY; // Read-only (O_RDONLY).
+ public static final int DB_TRUNCATE; // Discard existing DB.
+ public static final int DB_UPGRADE; // Upgrade if necessary.
+
+ //
+ // DB (user visible) error return codes.
+ //
+ public static final int DB_INCOMPLETE = -30999; // Sync didn't finish.
+ public static final int DB_KEYEMPTY = -30998; // The key/data pair was deleted or
+ // was never created by the user.
+ public static final int DB_KEYEXIST = -30997; // The key/data pair already exists.
+ public static final int DB_LOCK_DEADLOCK = -30996; // Locker killed to resolve deadlock.
+ public static final int DB_LOCK_NOTGRANTED = -30995; // Lock unavailable, no-wait set.
+ public static final int DB_NOSERVER = -30994; // Server panic return.
+ public static final int DB_NOSERVER_HOME = -30993; // Bad home sent to server.
+ public static final int DB_NOSERVER_ID = -30992; // Bad ID sent to server.
+ public static final int DB_NOTFOUND = -30991; // Key/data pair not found (EOF).
+ public static final int DB_OLD_VERSION = -30990; // Out-of-date version.
+ public static final int DB_RUNRECOVERY = -30989; // Panic return.
+ public static final int DB_VERIFY_BAD = -30988; // Verify failed; bad format.
+
+ //
+ // Flags used by DbEnv.open and DbEnv.remove.
+ //
+ public static final int DB_FORCE; // Force (anything).
+ public static final int DB_INIT_CDB; // Concurrent Access Methods.
+ public static final int DB_INIT_LOCK; // Initialize locking.
+ public static final int DB_INIT_LOG; // Initialize logging.
+ public static final int DB_INIT_MPOOL; // Initialize mpool.
+ public static final int DB_INIT_TXN; // Initialize transactions.
+ public static final int DB_JOINENV; // Initialize all subsystems present.
+ public static final int DB_RECOVER; // Run normal recovery.
+ public static final int DB_RECOVER_FATAL; // Run catastrophic recovery.
+ public static final int DB_SYSTEM_MEM; // Use system-backed memory.
+ public static final int DB_TXN_NOSYNC; // Do not sync log on commit.
+ public static final int DB_USE_ENVIRON; // Use the environment.
+ public static final int DB_USE_ENVIRON_ROOT; // Use the environment if root.
+
+ //
+ // Operations values to the tx_recover() function.
+ //
+ public static final int DB_TXN_BACKWARD_ROLL = 1;
+ public static final int DB_TXN_FORWARD_ROLL = 2;
+ public static final int DB_TXN_OPENFILES = 3;
+ public static final int DB_TXN_REDO = 4;
+ public static final int DB_TXN_UNDO = 5;
+
+ //
+ // Verbose flags; used for DbEnv.set_verbose
+ //
+ public static final int DB_VERB_CHKPOINT; // List checkpoints.
+ public static final int DB_VERB_DEADLOCK; // Deadlock detection information.
+ public static final int DB_VERB_RECOVERY; // Recovery information.
+ public static final int DB_VERB_WAITSFOR; // Dump waits-for table.
+
+ //
+ // Deadlock detector modes; used in the DBENV structure to configure the
+ // locking subsystem.
+ //
+ public static final int DB_LOCK_NORUN;
+ public static final int DB_LOCK_DEFAULT;
+ public static final int DB_LOCK_OLDEST;
+ public static final int DB_LOCK_RANDOM;
+ public static final int DB_LOCK_YOUNGEST;
+
+ //
+ // Flags understood by only Db.set_flags.
+ //
+ public static final int DB_DUP; // Btree, Hash: duplicate keys.
+ public static final int DB_DUPSORT; // Btree, Hash: duplicate keys.
+ public static final int DB_RECNUM; // Btree: record numbers.
+ public static final int DB_RENUMBER; // Recno: renumber on insert/delete.
+ public static final int DB_REVSPLITOFF;// Btree: turn off reverse splits.
+ public static final int DB_SNAPSHOT; // Recno: snapshot the input.
+
+ //
+ // Flags understood by only Db.join
+ //
+ public static final int DB_JOIN_NOSORT;// Don't try to optimize join.
+
+ //
+ // Flags understood by only Db.verify
+ //
+ public static final int DB_NOORDERCHK; // Skip order check; subdb w/ user func
+ public static final int DB_ORDERCHKONLY;// Only perform an order check on subdb
+ public static final int DB_SALVAGE; // Salvage what looks like data.
+ public static final int DB_AGGRESSIVE; // Salvage anything which might be data.
+
+ // Collectively, these constants are known by the name
+ // "db_lockmode_t" in the documentation.
+ //
+ public static final int DB_LOCK_NG = 0; // Not granted.
+ public static final int DB_LOCK_READ = 1; // Shared/read.
+ public static final int DB_LOCK_WRITE = 2; // Exclusive/write.
+ public static final int DB_LOCK_IWRITE = 3; // Intent exclusive/write.
+ public static final int DB_LOCK_IREAD = 4; // Intent to share/read.
+ public static final int DB_LOCK_IWR = 5; // Intent to read and write.
+
+ // Collectively, these constants are known by the name
+ // "db_lockop_t" in the documentation.
+ //
+ public static final int DB_LOCK_DUMP = 0; // Display held locks.
+ public static final int DB_LOCK_GET = 1; // Get the lock.
+ /* Not visible to API: DB_LOCK_INHERIT = 2 // Pass locks to parent. */
+ public static final int DB_LOCK_PUT = 3; // Release the lock.
+ public static final int DB_LOCK_PUT_ALL = 4;// Release locker's locks.
+ public static final int DB_LOCK_PUT_OBJ = 5;// Release locker's locks on obj.
+
+ // Flag values for DbLock.vec()
+ public static final int DB_LOCK_NOWAIT; // Don't wait on unavailable lock.
+
+ // Flag values for DbLock.detect()
+ public static final int DB_LOCK_CONFLICT; // Run on any conflict.
+
+ //
+ // Flag values for DbLog.archive()
+ //
+ public static final int DB_ARCH_ABS; // Absolute pathnames.
+ public static final int DB_ARCH_DATA; // Data files.
+ public static final int DB_ARCH_LOG; // Log files.
+
+ //
+ // DB access method and cursor operation values.
+ // Each value is an operation code to which
+ // additional bit flags are added.
+ //
+ public static final int DB_AFTER; // Dbc.put()
+ public static final int DB_APPEND; // Db.put()
+ public static final int DB_BEFORE; // Dbc.put()
+ public static final int DB_CACHED_COUNTS; // Db.stat()
+ public static final int DB_CHECKPOINT; // DbLog.put(), DbLog.get()
+ public static final int DB_CONSUME; // Db.get()
+ public static final int DB_CONSUME_WAIT; // Db.get()
+ public static final int DB_CURLSN; // DbLog.put()
+ public static final int DB_CURRENT; // Dbc.get(), Dbc.put(), DbLog.get()
+ public static final int DB_FIRST; // Dbc.get(), DbLog.get()
+ public static final int DB_FLUSH; // DbLog.put()
+ public static final int DB_GET_BOTH; // Db.get(), Dbc.get()
+ public static final int DB_GET_RECNO; // Dbc.get()
+ public static final int DB_JOIN_ITEM; // Dbc.get()
+ public static final int DB_KEYFIRST; // Dbc.put()
+ public static final int DB_KEYLAST; // Dbc.put()
+ public static final int DB_LAST; // Dbc.get(), DbLog.get()
+ public static final int DB_NEXT; // Dbc.get(), DbLog.get()
+ public static final int DB_NEXT_DUP; // Dbc.get()
+ public static final int DB_NEXT_NODUP; // Dbc.get()
+ public static final int DB_NODUPDATA; // Don't permit duplicated data
+ public static final int DB_NOOVERWRITE;// Db.put()
+ public static final int DB_NOSYNC; // Db.close()
+ public static final int DB_POSITION; // Dbc.dup()
+ public static final int DB_PREV; // Dbc.get(), DbLog.get()
+ public static final int DB_PREV_NODUP; // Dbc.get()
+ public static final int DB_RECORDCOUNT;// Db.stat()
+ public static final int DB_SET; // Dbc.get(), DbLog.get()
+ public static final int DB_SET_RANGE; // Dbc.get()
+ public static final int DB_SET_RECNO; // Dbc.get()
+ public static final int DB_WRITECURSOR;// Db.cursor()
+
+ // Other flags that can be added to an operation codes above.
+ //
+ public static final int DB_RMW; // Acquire write flag immediately.
+
+ // Collectively, these values are used for Dbt flags
+ //
+ // Return in allocated memory.
+ public static final int DB_DBT_MALLOC;
+
+ // Partial put/get.
+ public static final int DB_DBT_PARTIAL;
+
+ // Return in realloc'd memory.
+ public static final int DB_DBT_REALLOC;
+
+ // Return in user's memory.
+ public static final int DB_DBT_USERMEM;
+
+ // Note: the env can be null
+ //
+ public Db(DbEnv env, int flags)
+ throws DbException
+ {
+ constructor_env_ = env;
+ _init(env, flags);
+ if (env == null) {
+ dbenv_ = new DbEnv(this);
+ }
+ else {
+ dbenv_ = env;
+ }
+ dbenv_._add_db(this);
+ }
+
+ //
+ // Our parent DbEnv is notifying us that the environment is closing.
+ //
+ /*package*/ void _notify_dbenv_close()
+ {
+ dbenv_ = null;
+ _notify_internal();
+ }
+
+ private native void _init(DbEnv env, int flags)
+ throws DbException;
+
+ private native void _notify_internal();
+
+ // methods
+ //
+
+ public synchronized int close(int flags)
+ throws DbException
+ {
+ int err;
+
+ dbenv_._remove_db(this);
+ err = _close(flags);
+ if (constructor_env_ == null) {
+ dbenv_._notify_db_close();
+ }
+ return err;
+ }
+
+ public native int _close(int flags)
+ throws DbException;
+
+ public native Dbc cursor(DbTxn txnid, int flags)
+ throws DbException;
+
+ public native int del(DbTxn txnid, Dbt key, int flags)
+ throws DbException;
+
+ public native void err(int errcode, String message);
+
+ public native void errx(String message);
+
+ public native int fd()
+ throws DbException;
+
+ // overrides Object.finalize
+ protected void finalize()
+ throws Throwable
+ {
+ _finalize(dbenv_.errcall_, dbenv_.errpfx_);
+ }
+
+ protected native void _finalize(DbErrcall errcall, String errpfx)
+ throws Throwable;
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int get(DbTxn txnid, Dbt key, Dbt data, int flags)
+ throws DbException;
+
+ public native boolean get_byteswapped();
+
+ public native /*DBTYPE*/ int get_type();
+
+ public native Dbc join(Dbc curslist[], int flags)
+ throws DbException;
+
+ public native void key_range(DbTxn txn, Dbt key,
+ DbKeyRange range, int flags)
+ throws DbException;
+
+ public synchronized void open(String file, String database,
+ /*DBTYPE*/ int type,
+ int flags, int mode)
+ throws DbException, FileNotFoundException
+ {
+ _open(file, database, type, flags, mode);
+ }
+
+ // (Internal)
+ public native void _open(String file, String database,
+ /*DBTYPE*/ int type,
+ int flags, int mode)
+ throws DbException, FileNotFoundException;
+
+
+ // returns: 0, DB_KEYEXIST, or throws error
+ public native int put(DbTxn txnid, Dbt key, Dbt data, int flags)
+ throws DbException;
+
+ public synchronized native void rename(String file, String database,
+ String newname, int flags)
+ throws DbException, FileNotFoundException;
+
+ public synchronized native void remove(String file, String database,
+ int flags)
+ throws DbException, FileNotFoundException;
+
+ // Comparison function.
+ public void set_append_recno(DbAppendRecno append_recno)
+ throws DbException
+ {
+ append_recno_ = append_recno;
+ append_recno_changed(append_recno);
+ }
+
+ // (Internal)
+ private native void append_recno_changed(DbAppendRecno append_recno)
+ throws DbException;
+
+ // Comparison function.
+ public void set_bt_compare(DbBtreeCompare bt_compare)
+ throws DbException
+ {
+ bt_compare_ = bt_compare;
+ bt_compare_changed(bt_compare);
+ }
+
+ // (Internal)
+ private native void bt_compare_changed(DbBtreeCompare bt_compare)
+ throws DbException;
+
+ // Maximum keys per page.
+ public native void set_bt_maxkey(int maxkey)
+ throws DbException;
+
+ // Minimum keys per page.
+ public native void set_bt_minkey(int minkey)
+ throws DbException;
+
+ // Prefix function.
+ public void set_bt_prefix(DbBtreePrefix bt_prefix)
+ throws DbException
+ {
+ bt_prefix_ = bt_prefix;
+ bt_prefix_changed(bt_prefix);
+ }
+
+ // (Internal)
+ private native void bt_prefix_changed(DbBtreePrefix bt_prefix)
+ throws DbException;
+
+ // Set cache size
+ public native void set_cachesize(int gbytes, int bytes, int ncaches)
+ throws DbException;
+
+ // Duplication resolution
+ public void set_dup_compare(DbDupCompare dup_compare)
+ throws DbException
+ {
+ dup_compare_ = dup_compare;
+ dup_compare_changed(dup_compare);
+ }
+
+ // (Internal)
+ private native void dup_compare_changed(DbDupCompare dup_compare)
+ throws DbException;
+
+ // Error message callback.
+ public void set_errcall(DbErrcall errcall)
+ {
+ if (dbenv_ != null)
+ dbenv_.set_errcall(errcall);
+ }
+
+ // Error stream.
+ public void set_error_stream(OutputStream s)
+ {
+ DbOutputStreamErrcall errcall = new DbOutputStreamErrcall(s);
+ set_errcall(errcall);
+ }
+
+ // Error message prefix.
+ public void set_errpfx(String errpfx)
+ {
+ if (dbenv_ != null)
+ dbenv_.set_errpfx(errpfx);
+ }
+
+
+ // Feedback
+ public void set_feedback(DbFeedback feedback)
+ throws DbException
+ {
+ feedback_ = feedback;
+ feedback_changed(feedback);
+ }
+
+ // (Internal)
+ private native void feedback_changed(DbFeedback feedback)
+ throws DbException;
+
+ // Flags.
+ public native void set_flags(/*u_int32_t*/ int flags);
+
+ // Fill factor.
+ public native void set_h_ffactor(/*unsigned*/ int h_ffactor);
+
+ // Hash function.
+ public void set_h_hash(DbHash h_hash)
+ throws DbException
+ {
+ h_hash_ = h_hash;
+ hash_changed(h_hash);
+ }
+
+ // (Internal)
+ private native void hash_changed(DbHash hash)
+ throws DbException;
+
+ // Number of elements.
+ public native void set_h_nelem(/*unsigned*/ int h_nelem);
+
+ // Byte order.
+ public native void set_lorder(int lorder);
+
+ // Underlying page size.
+ public native void set_pagesize(/*size_t*/ long pagesize);
+
+ // Variable-length delimiting byte.
+ public native void set_re_delim(int re_delim);
+
+ // Length for fixed-length records.
+ public native void set_re_len(/*u_int32_t*/ int re_len);
+
+ // Fixed-length padding byte.
+ public native void set_re_pad(int re_pad);
+
+ // Source file name.
+ public native void set_re_source(String re_source);
+
+ // Extent size of Queue
+ public native void set_q_extentsize(/*u_int32_t*/ int extent_size);
+
+ // returns a DbBtreeStat or DbHashStat
+ public native Object stat(int flags)
+ throws DbException;
+
+ public native int sync(int flags)
+ throws DbException;
+
+ public native void upgrade(String name, int flags)
+ throws DbException;
+
+ public native void verify(String name, String subdb,
+ OutputStream outstr, int flags)
+ throws DbException;
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // private data
+ //
+ private long private_dbobj_ = 0;
+ private long private_info_ = 0;
+ private DbEnv dbenv_ = null;
+ private DbEnv constructor_env_ = null;
+ private DbFeedback feedback_ = null;
+ private DbAppendRecno append_recno_ = null;
+ private DbBtreeCompare bt_compare_ = null;
+ private DbBtreePrefix bt_prefix_ = null;
+ private DbDupCompare dup_compare_ = null;
+ private DbHash h_hash_ = null;
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // static methods and data that implement
+ // loading the native library and doing any
+ // extra sanity checks on startup.
+ //
+ private static boolean already_loaded_ = false;
+
+ public static void load_db()
+ {
+ if (already_loaded_)
+ return;
+
+ // An alternate library name can be specified via a property.
+ //
+ String overrideLibname = System.getProperty("sleepycat.db.libname");
+ if (overrideLibname != null) {
+ System.loadLibrary(overrideLibname);
+ }
+ else {
+ String os = System.getProperty("os.name");
+ if (os != null && os.startsWith("Windows")) {
+ // library name is "libdb_java30.dll" (for example) on Win/*
+ System.loadLibrary("libdb_java" +
+ DbConstants.DB_VERSION_MAJOR +
+ DbConstants.DB_VERSION_MINOR);
+ }
+ else {
+ // library name is "libdb_java-3.0.so" (for example) on UNIX
+ // Note: "db_java" isn't good enough;
+ // some Unixes require us to use the explicit SONAME.
+ System.loadLibrary("db_java-" +
+ DbConstants.DB_VERSION_MAJOR + "." +
+ DbConstants.DB_VERSION_MINOR);
+ }
+ }
+
+ already_loaded_ = true;
+ }
+
+ static private native void one_time_init();
+
+ static private void check_constant(int c1, int c2)
+ {
+ if (c1 != c2) {
+ System.err.println("Db: constant mismatch");
+ System.exit(1);
+ }
+ }
+
+ static {
+ Db.load_db();
+
+		// Note: constant values are stored in DbConstants, which
+		// is automatically generated.  Initializing the constants in
+		// static code insulates users of the binary library from
+		// changes to the constant values.
+ //
+ DB_CXX_NO_EXCEPTIONS = DbConstants.DB_CXX_NO_EXCEPTIONS;
+ DB_CLIENT = DbConstants.DB_CLIENT;
+ DB_XA_CREATE = DbConstants.DB_XA_CREATE;
+
+ DB_CREATE = DbConstants.DB_CREATE;
+ DB_NOMMAP = DbConstants.DB_NOMMAP;
+ DB_THREAD = DbConstants.DB_THREAD;
+
+ DB_LOCKDOWN = DbConstants.DB_LOCKDOWN;
+ DB_PRIVATE = DbConstants.DB_PRIVATE;
+ DB_TXN_NOWAIT = DbConstants.DB_TXN_NOWAIT;
+ DB_TXN_SYNC = DbConstants.DB_TXN_SYNC;
+ DB_CDB_ALLDB = DbConstants.DB_CDB_ALLDB;
+
+ DB_EXCL = DbConstants.DB_EXCL;
+ DB_RDONLY = DbConstants.DB_RDONLY;
+ DB_TRUNCATE = DbConstants.DB_TRUNCATE;
+ DB_UPGRADE = DbConstants.DB_UPGRADE;
+
+		// These constants are initialized at their declarations and
+		// only checked against DbConstants here.  Having compile-time
+		// constant initializers allows them to be used as case
+		// values in switch statements.
+ //
+ check_constant(DB_INCOMPLETE, DbConstants.DB_INCOMPLETE);
+ check_constant(DB_KEYEMPTY, DbConstants.DB_KEYEMPTY);
+ check_constant(DB_KEYEXIST, DbConstants.DB_KEYEXIST);
+ check_constant(DB_LOCK_DEADLOCK, DbConstants.DB_LOCK_DEADLOCK);
+ check_constant(DB_LOCK_NOTGRANTED, DbConstants.DB_LOCK_NOTGRANTED);
+ check_constant(DB_NOSERVER, DbConstants.DB_NOSERVER);
+ check_constant(DB_NOSERVER_HOME, DbConstants.DB_NOSERVER_HOME);
+ check_constant(DB_NOSERVER_ID, DbConstants.DB_NOSERVER_ID);
+ check_constant(DB_NOTFOUND, DbConstants.DB_NOTFOUND);
+ check_constant(DB_OLD_VERSION, DbConstants.DB_OLD_VERSION);
+ check_constant(DB_RUNRECOVERY, DbConstants.DB_RUNRECOVERY);
+ check_constant(DB_VERIFY_BAD, DbConstants.DB_VERIFY_BAD);
+ check_constant(DB_TXN_BACKWARD_ROLL, DbConstants.DB_TXN_BACKWARD_ROLL);
+ check_constant(DB_TXN_FORWARD_ROLL, DbConstants.DB_TXN_FORWARD_ROLL);
+ check_constant(DB_TXN_OPENFILES, DbConstants.DB_TXN_OPENFILES);
+ check_constant(DB_TXN_REDO, DbConstants.DB_TXN_REDO);
+ check_constant(DB_TXN_UNDO, DbConstants.DB_TXN_UNDO);
+
+ DB_FORCE = DbConstants.DB_FORCE;
+ DB_INIT_CDB = DbConstants.DB_INIT_CDB;
+ DB_INIT_LOCK = DbConstants.DB_INIT_LOCK;
+ DB_INIT_LOG = DbConstants.DB_INIT_LOG;
+ DB_INIT_MPOOL = DbConstants.DB_INIT_MPOOL;
+ DB_INIT_TXN = DbConstants.DB_INIT_TXN;
+ DB_JOINENV = DbConstants.DB_JOINENV;
+ DB_RECOVER = DbConstants.DB_RECOVER;
+ DB_RECOVER_FATAL = DbConstants.DB_RECOVER_FATAL;
+ DB_SYSTEM_MEM = DbConstants.DB_SYSTEM_MEM;
+ DB_TXN_NOSYNC = DbConstants.DB_TXN_NOSYNC;
+ DB_USE_ENVIRON = DbConstants.DB_USE_ENVIRON;
+ DB_USE_ENVIRON_ROOT = DbConstants.DB_USE_ENVIRON_ROOT;
+
+ DB_VERB_CHKPOINT = DbConstants.DB_VERB_CHKPOINT;
+ DB_VERB_DEADLOCK = DbConstants.DB_VERB_DEADLOCK;
+ DB_VERB_RECOVERY = DbConstants.DB_VERB_RECOVERY;
+ DB_VERB_WAITSFOR = DbConstants.DB_VERB_WAITSFOR;
+
+ DB_LOCK_NORUN = DbConstants.DB_LOCK_NORUN;
+ DB_LOCK_DEFAULT = DbConstants.DB_LOCK_DEFAULT;
+ DB_LOCK_OLDEST = DbConstants.DB_LOCK_OLDEST;
+ DB_LOCK_RANDOM = DbConstants.DB_LOCK_RANDOM;
+ DB_LOCK_YOUNGEST = DbConstants.DB_LOCK_YOUNGEST;
+
+ DB_DUP = DbConstants.DB_DUP;
+ DB_DUPSORT = DbConstants.DB_DUPSORT;
+ DB_RECNUM = DbConstants.DB_RECNUM;
+ DB_RENUMBER = DbConstants.DB_RENUMBER;
+ DB_REVSPLITOFF = DbConstants.DB_REVSPLITOFF;
+ DB_SNAPSHOT = DbConstants.DB_SNAPSHOT;
+
+ DB_JOIN_NOSORT = DbConstants.DB_JOIN_NOSORT;
+
+ DB_NOORDERCHK = DbConstants.DB_NOORDERCHK;
+ DB_ORDERCHKONLY = DbConstants.DB_ORDERCHKONLY;
+ DB_SALVAGE = DbConstants.DB_SALVAGE;
+ DB_AGGRESSIVE = DbConstants.DB_AGGRESSIVE;
+
+ DB_LOCK_NOWAIT = DbConstants.DB_LOCK_NOWAIT;
+ DB_LOCK_CONFLICT = DbConstants.DB_LOCK_CONFLICT;
+
+ DB_ARCH_ABS = DbConstants.DB_ARCH_ABS;
+ DB_ARCH_DATA = DbConstants.DB_ARCH_DATA;
+ DB_ARCH_LOG = DbConstants.DB_ARCH_LOG;
+
+ DB_AFTER = DbConstants.DB_AFTER;
+ DB_APPEND = DbConstants.DB_APPEND;
+ DB_BEFORE = DbConstants.DB_BEFORE;
+ DB_CACHED_COUNTS = DbConstants.DB_CACHED_COUNTS;
+ DB_CHECKPOINT = DbConstants.DB_CHECKPOINT;
+ DB_CONSUME = DbConstants.DB_CONSUME;
+ DB_CONSUME_WAIT = DbConstants.DB_CONSUME_WAIT;
+ DB_CURLSN = DbConstants.DB_CURLSN;
+ DB_CURRENT = DbConstants.DB_CURRENT;
+ DB_FIRST = DbConstants.DB_FIRST;
+ DB_FLUSH = DbConstants.DB_FLUSH;
+ DB_GET_BOTH = DbConstants.DB_GET_BOTH;
+ DB_GET_RECNO = DbConstants.DB_GET_RECNO;
+ DB_JOIN_ITEM = DbConstants.DB_JOIN_ITEM;
+ DB_KEYFIRST = DbConstants.DB_KEYFIRST;
+ DB_KEYLAST = DbConstants.DB_KEYLAST;
+ DB_LAST = DbConstants.DB_LAST;
+ DB_NEXT = DbConstants.DB_NEXT;
+ DB_NEXT_DUP = DbConstants.DB_NEXT_DUP;
+ DB_NEXT_NODUP = DbConstants.DB_NEXT_NODUP;
+ DB_NODUPDATA = DbConstants.DB_NODUPDATA;
+ DB_NOOVERWRITE = DbConstants.DB_NOOVERWRITE;
+ DB_NOSYNC = DbConstants.DB_NOSYNC;
+ DB_POSITION = DbConstants.DB_POSITION;
+ DB_PREV = DbConstants.DB_PREV;
+ DB_PREV_NODUP = DbConstants.DB_PREV_NODUP;
+ DB_RECORDCOUNT = DbConstants.DB_RECORDCOUNT;
+ DB_RMW = DbConstants.DB_RMW;
+ DB_SET = DbConstants.DB_SET;
+ DB_SET_RANGE = DbConstants.DB_SET_RANGE;
+ DB_SET_RECNO = DbConstants.DB_SET_RECNO;
+ DB_WRITECURSOR = DbConstants.DB_WRITECURSOR;
+
+ DB_DBT_MALLOC = DbConstants.DB_DBT_MALLOC;
+ DB_DBT_PARTIAL = DbConstants.DB_DBT_PARTIAL;
+ DB_DBT_REALLOC = DbConstants.DB_DBT_REALLOC;
+ DB_DBT_USERMEM = DbConstants.DB_DBT_USERMEM;
+
+ one_time_init();
+ }
+}
+
+// end of Db.java
diff --git a/bdb/java/src/com/sleepycat/db/DbAppendRecno.java b/bdb/java/src/com/sleepycat/db/DbAppendRecno.java
new file mode 100644
index 00000000000..ffe40e95f9e
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbAppendRecno.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbAppendRecno.java,v 11.1 2000/07/31 20:28:30 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by Db.set_append_recno()
+ *
+ */
+public interface DbAppendRecno
+{
+ public abstract void db_append_recno(Db db, Dbt data, int recno)
+ throws DbException;
+}
+
+// end of DbAppendRecno.java
diff --git a/bdb/java/src/com/sleepycat/db/DbBtreeCompare.java b/bdb/java/src/com/sleepycat/db/DbBtreeCompare.java
new file mode 100644
index 00000000000..2e5306af232
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbBtreeCompare.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbBtreeCompare.java,v 11.2 2000/07/04 20:53:19 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by DbEnv.set_bt_compare()
+ *
+ */
+public interface DbBtreeCompare
+{
+ public abstract int bt_compare(Db db, Dbt dbt1, Dbt dbt2);
+}
+
+// end of DbBtreeCompare.java
diff --git a/bdb/java/src/com/sleepycat/db/DbBtreePrefix.java b/bdb/java/src/com/sleepycat/db/DbBtreePrefix.java
new file mode 100644
index 00000000000..27e63054339
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbBtreePrefix.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbBtreePrefix.java,v 11.2 2000/07/04 20:53:19 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by DbEnv.set_bt_prefix()
+ *
+ */
+public interface DbBtreePrefix
+{
+ public abstract int bt_prefix(Db db, Dbt dbt1, Dbt dbt2);
+}
+
+// end of DbBtreePrefix.java
diff --git a/bdb/java/src/com/sleepycat/db/DbBtreeStat.java b/bdb/java/src/com/sleepycat/db/DbBtreeStat.java
new file mode 100644
index 00000000000..8dea8da107c
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbBtreeStat.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbBtreeStat.java,v 11.5 2000/05/04 02:54:55 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * Db.stat() method.
+ */
+public class DbBtreeStat
+{
+ public int bt_magic; // Magic number.
+ public int bt_version; // Version number.
+ public int bt_metaflags; // Meta-data flags.
+ public int bt_nkeys; // Number of unique keys.
+ public int bt_ndata; // Number of data items.
+ public int bt_pagesize; // Page size.
+ public int bt_maxkey; // Maxkey value.
+ public int bt_minkey; // Minkey value.
+ public int bt_re_len; // Fixed-length record length.
+ public int bt_re_pad; // Fixed-length record pad.
+ public int bt_levels; // Tree levels.
+ public int bt_int_pg; // Internal pages.
+ public int bt_leaf_pg; // Leaf pages.
+ public int bt_dup_pg; // Duplicate pages.
+ public int bt_over_pg; // Overflow pages.
+ public int bt_free; // Pages on the free list.
+ public int bt_int_pgfree; // Bytes free in internal pages.
+ public int bt_leaf_pgfree; // Bytes free in leaf pages.
+ public int bt_dup_pgfree; // Bytes free in duplicate pages.
+ public int bt_over_pgfree; // Bytes free in overflow pages.
+}
+
+// end of DbBtreeStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbConstants.java b/bdb/java/src/com/sleepycat/db/DbConstants.java
new file mode 100644
index 00000000000..491b1fce2e8
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbConstants.java
@@ -0,0 +1,217 @@
+// DO NOT EDIT: automatically built by dist/s_java.
+
+package com.sleepycat.db;
+
+public class DbConstants
+{
+ public static final int DB_MAX_PAGES = 0xffffffff;
+ public static final int DB_MAX_RECORDS = 0xffffffff;
+ public static final int DB_DBT_ISSET = 0x001;
+ public static final int DB_DBT_MALLOC = 0x002;
+ public static final int DB_DBT_PARTIAL = 0x004;
+ public static final int DB_DBT_REALLOC = 0x008;
+ public static final int DB_DBT_USERMEM = 0x010;
+ public static final int DB_DBT_DUPOK = 0x020;
+ public static final int DB_CREATE = 0x000001;
+ public static final int DB_CXX_NO_EXCEPTIONS = 0x000002;
+ public static final int DB_FORCE = 0x000004;
+ public static final int DB_NOMMAP = 0x000008;
+ public static final int DB_RDONLY = 0x000010;
+ public static final int DB_RECOVER = 0x000020;
+ public static final int DB_THREAD = 0x000040;
+ public static final int DB_TXN_NOSYNC = 0x000080;
+ public static final int DB_USE_ENVIRON = 0x000100;
+ public static final int DB_USE_ENVIRON_ROOT = 0x000200;
+ public static final int DB_CLIENT = 0x000400;
+ public static final int DB_XA_CREATE = 0x000400;
+ public static final int DB_INIT_CDB = 0x000400;
+ public static final int DB_INIT_LOCK = 0x000800;
+ public static final int DB_INIT_LOG = 0x001000;
+ public static final int DB_INIT_MPOOL = 0x002000;
+ public static final int DB_INIT_TXN = 0x004000;
+ public static final int DB_JOINENV = 0x008000;
+ public static final int DB_LOCKDOWN = 0x010000;
+ public static final int DB_PRIVATE = 0x020000;
+ public static final int DB_RECOVER_FATAL = 0x040000;
+ public static final int DB_SYSTEM_MEM = 0x080000;
+ public static final int DB_EXCL = 0x000400;
+ public static final int DB_FCNTL_LOCKING = 0x000800;
+ public static final int DB_ODDFILESIZE = 0x001000;
+ public static final int DB_RDWRMASTER = 0x002000;
+ public static final int DB_TRUNCATE = 0x004000;
+ public static final int DB_EXTENT = 0x008000;
+ public static final int DB_TXN_NOWAIT = 0x000400;
+ public static final int DB_TXN_SYNC = 0x000800;
+ public static final int DB_CDB_ALLDB = 0x000400;
+ public static final int DB_UPGRADE = 0x000400;
+ public static final int DB_VERIFY = 0x000800;
+ public static final int DB_DUP = 0x0001;
+ public static final int DB_DUPSORT = 0x0002;
+ public static final int DB_RECNUM = 0x0004;
+ public static final int DB_RENUMBER = 0x0008;
+ public static final int DB_REVSPLITOFF = 0x0010;
+ public static final int DB_SNAPSHOT = 0x0020;
+ public static final int DB_JOIN_NOSORT = 0x0001;
+ public static final int DB_AGGRESSIVE = 0x0001;
+ public static final int DB_NOORDERCHK = 0x0002;
+ public static final int DB_ORDERCHKONLY = 0x0004;
+ public static final int DB_PR_PAGE = 0x0008;
+ public static final int DB_PR_HEADERS = 0x0010;
+ public static final int DB_PR_RECOVERYTEST = 0x0020;
+ public static final int DB_SALVAGE = 0x0040;
+ public static final int DB_VRFY_FLAGMASK = 0xffff;
+ public static final int DB_LOCK_NORUN = 0;
+ public static final int DB_LOCK_DEFAULT = 1;
+ public static final int DB_LOCK_OLDEST = 2;
+ public static final int DB_LOCK_RANDOM = 3;
+ public static final int DB_LOCK_YOUNGEST = 4;
+ public static final int DB_REGION_MAGIC = 0x120897;
+ public static final int DB_VERB_CHKPOINT = 0x0001;
+ public static final int DB_VERB_DEADLOCK = 0x0002;
+ public static final int DB_VERB_RECOVERY = 0x0004;
+ public static final int DB_VERB_WAITSFOR = 0x0008;
+ public static final int DB_TEST_PREOPEN = 1;
+ public static final int DB_TEST_POSTOPEN = 2;
+ public static final int DB_TEST_POSTLOGMETA = 3;
+ public static final int DB_TEST_POSTLOG = 4;
+ public static final int DB_TEST_POSTSYNC = 5;
+ public static final int DB_TEST_PRERENAME = 6;
+ public static final int DB_TEST_POSTRENAME = 7;
+ public static final int DB_ENV_CDB = 0x00001;
+ public static final int DB_ENV_CDB_ALLDB = 0x00002;
+ public static final int DB_ENV_CREATE = 0x00004;
+ public static final int DB_ENV_DBLOCAL = 0x00008;
+ public static final int DB_ENV_LOCKDOWN = 0x00010;
+ public static final int DB_ENV_NOMMAP = 0x00020;
+ public static final int DB_ENV_OPEN_CALLED = 0x00040;
+ public static final int DB_ENV_PRIVATE = 0x00080;
+ public static final int DB_ENV_RPCCLIENT = 0x00100;
+ public static final int DB_ENV_STANDALONE = 0x00200;
+ public static final int DB_ENV_SYSTEM_MEM = 0x00400;
+ public static final int DB_ENV_THREAD = 0x00800;
+ public static final int DB_ENV_TXN_NOSYNC = 0x01000;
+ public static final int DB_ENV_USER_ALLOC = 0x02000;
+ public static final int DB_BTREEVERSION = 8;
+ public static final int DB_BTREEOLDVER = 6;
+ public static final int DB_BTREEMAGIC = 0x053162;
+ public static final int DB_HASHVERSION = 7;
+ public static final int DB_HASHOLDVER = 4;
+ public static final int DB_HASHMAGIC = 0x061561;
+ public static final int DB_QAMVERSION = 3;
+ public static final int DB_QAMOLDVER = 1;
+ public static final int DB_QAMMAGIC = 0x042253;
+ public static final int DB_LOGVERSION = 3;
+ public static final int DB_LOGOLDVER = 3;
+ public static final int DB_LOGMAGIC = 0x040988;
+ public static final int DB_AFTER = 1;
+ public static final int DB_APPEND = 2;
+ public static final int DB_BEFORE = 3;
+ public static final int DB_CACHED_COUNTS = 4;
+ public static final int DB_CHECKPOINT = 5;
+ public static final int DB_CONSUME = 6;
+ public static final int DB_CONSUME_WAIT = 7;
+ public static final int DB_CURLSN = 8;
+ public static final int DB_CURRENT = 9;
+ public static final int DB_FIRST = 10;
+ public static final int DB_FLUSH = 11;
+ public static final int DB_GET_BOTH = 12;
+ public static final int DB_GET_BOTHC = 13;
+ public static final int DB_GET_RECNO = 14;
+ public static final int DB_JOIN_ITEM = 15;
+ public static final int DB_KEYFIRST = 16;
+ public static final int DB_KEYLAST = 17;
+ public static final int DB_LAST = 18;
+ public static final int DB_NEXT = 19;
+ public static final int DB_NEXT_DUP = 20;
+ public static final int DB_NEXT_NODUP = 21;
+ public static final int DB_NODUPDATA = 22;
+ public static final int DB_NOOVERWRITE = 23;
+ public static final int DB_NOSYNC = 24;
+ public static final int DB_POSITION = 25;
+ public static final int DB_POSITIONI = 26;
+ public static final int DB_PREV = 27;
+ public static final int DB_PREV_NODUP = 28;
+ public static final int DB_RECORDCOUNT = 29;
+ public static final int DB_SET = 30;
+ public static final int DB_SET_RANGE = 31;
+ public static final int DB_SET_RECNO = 32;
+ public static final int DB_WRITECURSOR = 33;
+ public static final int DB_WRITELOCK = 34;
+ public static final int DB_OPFLAGS_MASK = 0x000000ff;
+ public static final int DB_RMW = 0x80000000;
+ public static final int DB_INCOMPLETE = -30999;
+ public static final int DB_KEYEMPTY = -30998;
+ public static final int DB_KEYEXIST = -30997;
+ public static final int DB_LOCK_DEADLOCK = -30996;
+ public static final int DB_LOCK_NOTGRANTED = -30995;
+ public static final int DB_NOSERVER = -30994;
+ public static final int DB_NOSERVER_HOME = -30993;
+ public static final int DB_NOSERVER_ID = -30992;
+ public static final int DB_NOTFOUND = -30991;
+ public static final int DB_OLD_VERSION = -30990;
+ public static final int DB_RUNRECOVERY = -30989;
+ public static final int DB_VERIFY_BAD = -30988;
+ public static final int DB_ALREADY_ABORTED = -30899;
+ public static final int DB_DELETED = -30898;
+ public static final int DB_JAVA_CALLBACK = -30897;
+ public static final int DB_NEEDSPLIT = -30896;
+ public static final int DB_SWAPBYTES = -30895;
+ public static final int DB_TXN_CKP = -30894;
+ public static final int DB_VERIFY_FATAL = -30893;
+ public static final int DB_FILE_ID_LEN = 20;
+ public static final int DB_LOGFILEID_INVALID = -1;
+ public static final int DB_OK_BTREE = 0x01;
+ public static final int DB_OK_HASH = 0x02;
+ public static final int DB_OK_QUEUE = 0x04;
+ public static final int DB_OK_RECNO = 0x08;
+ public static final int DB_AM_DISCARD = 0x00001;
+ public static final int DB_AM_DUP = 0x00002;
+ public static final int DB_AM_DUPSORT = 0x00004;
+ public static final int DB_AM_INMEM = 0x00008;
+ public static final int DB_AM_PGDEF = 0x00010;
+ public static final int DB_AM_RDONLY = 0x00020;
+ public static final int DB_AM_RECOVER = 0x00040;
+ public static final int DB_AM_SUBDB = 0x00080;
+ public static final int DB_AM_SWAP = 0x00100;
+ public static final int DB_AM_TXN = 0x00200;
+ public static final int DB_AM_VERIFYING = 0x00400;
+ public static final int DB_BT_RECNUM = 0x00800;
+ public static final int DB_BT_REVSPLIT = 0x01000;
+ public static final int DB_DBM_ERROR = 0x02000;
+ public static final int DB_OPEN_CALLED = 0x04000;
+ public static final int DB_RE_DELIMITER = 0x08000;
+ public static final int DB_RE_FIXEDLEN = 0x10000;
+ public static final int DB_RE_PAD = 0x20000;
+ public static final int DB_RE_RENUMBER = 0x40000;
+ public static final int DB_RE_SNAPSHOT = 0x80000;
+ public static final int DB_RECORD_LOCK = 1;
+ public static final int DB_PAGE_LOCK = 2;
+ public static final int DB_LOCKVERSION = 1;
+ public static final int DB_LOCK_NOWAIT = 0x01;
+ public static final int DB_LOCK_RECORD = 0x02;
+ public static final int DB_LOCK_UPGRADE = 0x04;
+ public static final int DB_LOCK_SWITCH = 0x08;
+ public static final int DB_LOCK_CONFLICT = 0x01;
+ public static final int DB_LOCK_RIW_N = 7;
+ public static final int DB_ARCH_ABS = 0x001;
+ public static final int DB_ARCH_DATA = 0x002;
+ public static final int DB_ARCH_LOG = 0x004;
+ public static final int DB_MPOOL_CREATE = 0x001;
+ public static final int DB_MPOOL_LAST = 0x002;
+ public static final int DB_MPOOL_NEW = 0x004;
+ public static final int DB_MPOOL_NEW_GROUP = 0x008;
+ public static final int DB_MPOOL_EXTENT = 0x010;
+ public static final int DB_MPOOL_CLEAN = 0x001;
+ public static final int DB_MPOOL_DIRTY = 0x002;
+ public static final int DB_MPOOL_DISCARD = 0x004;
+ public static final int DB_TXNVERSION = 1;
+ public static final int DB_TXN_BACKWARD_ROLL = 1;
+ public static final int DB_TXN_FORWARD_ROLL = 2;
+ public static final int DB_TXN_OPENFILES = 3;
+ public static final int DB_TXN_REDO = 4;
+ public static final int DB_TXN_UNDO = 5;
+ public static final int DB_DBM_HSEARCH = 0;
+ public static final int DB_VERSION_MAJOR = 3;
+ public static final int DB_VERSION_MINOR = 2;
+ public static final int DB_VERSION_PATCH = 9;
+}
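
The constants above split into two groups: bit-flag values that are combined with bitwise OR when passed to methods such as open() and put(), and negative error codes that are either returned directly or carried inside a DbException. A minimal sketch of both conventions follows; it assumes Db.get() mirrors the (txn, key, data, flags) signature of Db.put() used in AccessExample later in this diff, and that DB_THREAD is among the flag constants defined earlier in Db.java.

    import com.sleepycat.db.*;
    import java.io.FileNotFoundException;

    class ConstantUsageSketch
    {
        public static void main(String[] args)
            throws DbException, FileNotFoundException
        {
            Db db = new Db(null, 0);
            // Flag constants are bit values, so they are OR'd together.
            db.open("demo.db", null, Db.DB_BTREE,
                Db.DB_CREATE | Db.DB_THREAD, 0644);

            // "Expected" failures such as DB_NOTFOUND come back as negative
            // return values rather than as exceptions.
            Dbt key = new Dbt("some key".getBytes());
            Dbt data = new Dbt();
            data.set_flags(Db.DB_DBT_MALLOC);
            if (db.get(null, key, data, 0) == Db.DB_NOTFOUND)
                System.out.println("key not present");

            db.close(0);
        }
    }
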
diff --git a/bdb/java/src/com/sleepycat/db/DbDeadlockException.java b/bdb/java/src/com/sleepycat/db/DbDeadlockException.java
new file mode 100644
index 00000000000..beab2ad62fa
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbDeadlockException.java
@@ -0,0 +1,28 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbDeadlockException.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+public class DbDeadlockException extends DbException
+{
+ // methods
+ //
+
+ public DbDeadlockException(String s)
+ {
+ super(s);
+ }
+
+ public DbDeadlockException(String s, int errno)
+ {
+ super(s, errno);
+ }
+}
+
+// end of DbDeadlockException.java
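
DbDeadlockException is the exception form of the DB_LOCK_DEADLOCK code defined above; the usual response is to abort the losing transaction and retry. A hedged sketch of that pattern, assuming a transactional DbEnv named env and a Db named db whose put() takes the transaction as its first argument, as in AccessExample later in this diff:

    import com.sleepycat.db.*;

    class DeadlockRetrySketch
    {
        static void putWithRetry(DbEnv env, Db db, Dbt key, Dbt data)
            throws DbException
        {
            for (int attempt = 0; attempt < 5; attempt++) {
                DbTxn txn = env.txn_begin(null, 0);
                try {
                    db.put(txn, key, data, 0);
                    txn.commit(0);
                    return;
                }
                catch (DbDeadlockException dde) {
                    txn.abort();        // deadlock victim: drop locks and retry
                }
                catch (DbException dbe) {
                    txn.abort();        // any other failure: give up
                    throw dbe;
                }
            }
            throw new DbException("putWithRetry: too many deadlocks");
        }
    }
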
diff --git a/bdb/java/src/com/sleepycat/db/DbDupCompare.java b/bdb/java/src/com/sleepycat/db/DbDupCompare.java
new file mode 100644
index 00000000000..3d4b5a736f8
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbDupCompare.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbDupCompare.java,v 11.2 2000/07/04 20:53:19 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by DbEnv.set_dup_compare()
+ *
+ */
+public interface DbDupCompare
+{
+ public abstract int dup_compare(Db db, Dbt dbt1, Dbt dbt2);
+}
+
+// end of DbDupCompare.java
diff --git a/bdb/java/src/com/sleepycat/db/DbEnv.java b/bdb/java/src/com/sleepycat/db/DbEnv.java
new file mode 100644
index 00000000000..6e9ce7ae337
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbEnv.java
@@ -0,0 +1,392 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbEnv.java,v 11.25 2001/01/04 14:23:30 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+import java.io.OutputStream;
+import java.io.FileNotFoundException;
+import java.util.Date;
+import java.util.Enumeration;
+import java.util.Vector;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbEnv
+{
+ // methods
+ //
+
+ //
+ // After using this constructor, set any parameters via
+ // the set_* access methods below, and finally open
+ // the environment by calling open().
+ //
+ public DbEnv(int flags)
+ {
+ constructor_flags_ = flags;
+ _init(errstream_, constructor_flags_);
+ }
+
+ //
+ // This constructor is purposely not public.
+ // It is used internally to create a DbEnv wrapper
+ // when an underlying environment already exists.
+ //
+ /*package*/ DbEnv(Db db)
+ {
+ _init_using_db(errstream_, db);
+ }
+
+ //
+ // When a Db is created, it is kept in a private list,
+ // so that Db's can be notified when the environment
+ // is closed. This allows us to detect and guard
+ // against the following situation:
+ // DbEnv env = new DbEnv(0);
+ // Db db = new Db(0);
+ // env.close();
+ // db.close();
+ //
+ // This *is* a programming error, but not protecting
+ // against it will crash the VM.
+ //
+ /*package*/ void _add_db(Db db)
+ {
+ dblist_.addElement(db);
+ }
+
+ //
+ // Remove from the private list of Db's.
+ //
+ /*package*/ void _remove_db(Db db)
+ {
+ dblist_.removeElement(db);
+ }
+
+ //
+ // Iterate all the Db's in the list, and
+ // notify them that the environment is closing,
+ // so they can clean up.
+ //
+ /*package*/ void _notify_dbs()
+ {
+ Enumeration dbs = dblist_.elements();
+ while (dbs.hasMoreElements()) {
+ Db db = (Db)dbs.nextElement();
+ db._notify_dbenv_close();
+ }
+ dblist_.removeAllElements();
+ }
+
+ // close discards any internal memory.
+ // After using close, the DbEnv can be reopened.
+ //
+ public synchronized void close(int flags)
+ throws DbException
+ {
+ _notify_dbs();
+ _close(flags);
+ }
+
+ // (Internal)
+ private native void _close(int flags)
+ throws DbException;
+
+ public native void err(int errcode, String message);
+
+ public native void errx(String message);
+
+ // overrides Object.finalize
+ protected void finalize()
+ throws Throwable
+ {
+ _notify_dbs();
+ _finalize(errcall_, errpfx_);
+ }
+
+ // (Internal)
+ protected native void _finalize(DbErrcall errcall, String errpfx)
+ throws Throwable;
+
+ // (Internal)
+ private native void _init(DbErrcall errcall, int flags);
+
+ // (Internal)
+ private native void _init_using_db(DbErrcall errcall, Db db);
+
+ /*package*/ native void _notify_db_close();
+
+ public native void open(String db_home, int flags, int mode)
+ throws DbException, FileNotFoundException;
+
+ // remove removes any files and discards any internal memory
+ // (i.e., it implicitly does a close if the environment is open).
+ // After using remove, the DbEnv can no longer be used;
+ // create another one if needed.
+ //
+ public native synchronized void remove(String db_home, int flags)
+ throws DbException, FileNotFoundException;
+
+ ////////////////////////////////////////////////////////////////
+ // simple get/set access methods
+ //
+ // If you are calling set_* methods, you must use the
+ // single-argument constructor and then call open().
+
+ public native void set_cachesize(int gbytes, int bytes, int ncaches)
+ throws DbException;
+
+ // Error message callback.
+ public void set_errcall(DbErrcall errcall)
+ {
+ errcall_ = errcall;
+ _set_errcall(errcall);
+ }
+
+ public native void _set_errcall(DbErrcall errcall);
+
+ // Error stream.
+ public void set_error_stream(OutputStream s)
+ {
+ DbOutputStreamErrcall errcall = new DbOutputStreamErrcall(s);
+ set_errcall(errcall);
+ }
+
+ // Error message prefix.
+ public void set_errpfx(String errpfx)
+ {
+ errpfx_ = errpfx;
+ _set_errpfx(errpfx);
+ }
+
+ private native void _set_errpfx(String errpfx);
+
+ // Feedback
+ public void set_feedback(DbFeedback feedback)
+ throws DbException
+ {
+ feedback_ = feedback;
+ feedback_changed(feedback);
+ }
+
+ // (Internal)
+ private native void feedback_changed(DbFeedback feedback)
+ throws DbException;
+
+ // Generate debugging messages.
+ public native void set_verbose(int which, int onoff)
+ throws DbException;
+
+ public native void set_data_dir(String data_dir)
+ throws DbException;
+
+ // Log buffer size.
+ public native void set_lg_bsize(/*u_int32_t*/ int lg_max)
+ throws DbException;
+
+ // Log directory.
+ public native void set_lg_dir(String lg_dir)
+ throws DbException;
+
+ // Maximum log file size.
+ public native void set_lg_max(/*u_int32_t*/ int lg_max)
+ throws DbException;
+
+ // Two dimensional conflict matrix.
+ public native void set_lk_conflicts(byte[][] lk_conflicts)
+ throws DbException;
+
+ // Deadlock detect on every conflict.
+ public native void set_lk_detect(/*u_int32_t*/ int lk_detect)
+ throws DbException;
+
+ /**
+ * @deprecated DB 3.2.6, see the online documentation.
+ */
+ // Maximum number of locks.
+ public native void set_lk_max(/*unsigned*/ int lk_max)
+ throws DbException;
+
+ // Maximum number of lockers.
+ public native void set_lk_max_lockers(/*unsigned*/ int lk_max_lockers)
+ throws DbException;
+
+ // Maximum number of locks.
+ public native void set_lk_max_locks(/*unsigned*/ int lk_max_locks)
+ throws DbException;
+
+ // Maximum number of locked objects.
+ public native void set_lk_max_objects(/*unsigned*/ int lk_max_objects)
+ throws DbException;
+
+ // Maximum file size for mmap.
+ public native void set_mp_mmapsize(/*size_t*/ long mmapsize)
+ throws DbException;
+
+ public native void set_mutexlocks(int mutexlocks)
+ throws DbException;
+
+ public native static void set_pageyield(int pageyield)
+ throws DbException;
+
+ public native static void set_panicstate(int panicstate)
+ throws DbException;
+
+ public void set_recovery_init(DbRecoveryInit recovery_init)
+ throws DbException
+ {
+ recovery_init_ = recovery_init;
+ recovery_init_changed(recovery_init);
+ }
+
+ // (Internal)
+ private native void recovery_init_changed(DbRecoveryInit recovery_init)
+ throws DbException;
+
+ public native static void set_region_init(int region_init)
+ throws DbException;
+
+ public native void set_flags(int flags, int onoff)
+ throws DbException;
+
+ public native void set_server(String host, long cl_timeout,
+ long sv_timeout, int flags)
+ throws DbException;
+
+ public native void set_shm_key(long shm_key)
+ throws DbException;
+
+ public native static void set_tas_spins(int tas_spins)
+ throws DbException;
+
+ public native void set_tmp_dir(String tmp_dir)
+ throws DbException;
+
+ // Transaction recovery callback.
+ public void set_tx_recover(DbTxnRecover tx_recover)
+ throws DbException
+ {
+ tx_recover_ = tx_recover;
+ tx_recover_changed(tx_recover);
+ }
+
+ // (Internal)
+ private native void tx_recover_changed(DbTxnRecover tx_recover)
+ throws DbException;
+
+ // Maximum number of transactions.
+ public native void set_tx_max(/*unsigned*/ int tx_max)
+ throws DbException;
+
+ // Note: only the seconds (not milliseconds) of the timestamp
+ // are used in this API.
+ public void set_tx_timestamp(Date timestamp)
+ throws DbException
+ {
+ _set_tx_timestamp(timestamp.getTime()/1000);
+ }
+
+ // (Internal)
+ private native void _set_tx_timestamp(long seconds)
+ throws DbException;
+
+ // Versioning information
+ public native static int get_version_major();
+ public native static int get_version_minor();
+ public native static int get_version_patch();
+ public native static String get_version_string();
+
+ // Convert DB error codes to strings
+ public native static String strerror(int errcode);
+
+ public native int lock_detect(int flags, int atype)
+ throws DbException;
+
+ public native DbLock lock_get(/*u_int32_t*/ int locker,
+ int flags,
+ Dbt obj,
+ /*db_lockmode_t*/ int lock_mode)
+ throws DbException;
+
+ public native /*u_int32_t*/ int lock_id()
+ throws DbException;
+
+ public native DbLockStat lock_stat()
+ throws DbException;
+
+ public native String[] log_archive(int flags)
+ throws DbException;
+
+ public native static int log_compare(DbLsn lsn0, DbLsn lsn1);
+
+ public native String log_file(DbLsn lsn)
+ throws DbException;
+
+ public native void log_flush(DbLsn lsn)
+ throws DbException;
+
+ public native void log_get(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+
+ public native void log_put(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+
+ public native DbLogStat log_stat()
+ throws DbException;
+
+ public native void log_register(Db dbp, String name)
+ throws DbException;
+
+ public native void log_unregister(Db dbp)
+ throws DbException;
+
+ public native DbMpoolStat memp_stat()
+ throws DbException;
+
+ public native DbMpoolFStat[] memp_fstat()
+ throws DbException;
+
+ public native int memp_trickle(int pct)
+ throws DbException;
+
+ public native DbTxn txn_begin(DbTxn pid, int flags)
+ throws DbException;
+
+ public native int txn_checkpoint(int kbyte, int min, int flags)
+ throws DbException;
+
+
+ public native DbTxnStat txn_stat()
+ throws DbException;
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // private data
+ //
+ private long private_dbobj_ = 0;
+ private long private_info_ = 0;
+ private int constructor_flags_ = 0;
+ private Vector dblist_ = new Vector(); // Db's that are open
+ private DbFeedback feedback_ = null;
+ private DbRecoveryInit recovery_init_ = null;
+ private DbTxnRecover tx_recover_ = null;
+ private DbOutputStreamErrcall errstream_ =
+ new DbOutputStreamErrcall(System.err);
+ /*package*/ DbErrcall errcall_ = errstream_;
+ /*package*/ String errpfx_;
+
+ static {
+ Db.load_db();
+ }
+
+}
+
+// end of DbEnv.java
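
Taken together, the comments above describe a fixed lifecycle: construct the DbEnv, configure it through the set_* methods, open() it, and close() it when finished. A condensed sketch of that sequence (essentially a stripped-down EnvExample, which appears later in this diff); it assumes the home directory already exists:

    import com.sleepycat.db.*;
    import java.io.FileNotFoundException;

    class EnvLifecycleSketch
    {
        public static void main(String[] args)
            throws DbException, FileNotFoundException
        {
            DbEnv dbenv = new DbEnv(0);               // construct first...
            dbenv.set_errpfx("EnvLifecycleSketch");
            dbenv.set_error_stream(System.err);
            dbenv.set_cachesize(0, 64 * 1024, 0);     // ...then configure...
            dbenv.open("/tmp/database",               // ...then open.
                Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
            dbenv.close(0);                           // release the environment
        }
    }
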
diff --git a/bdb/java/src/com/sleepycat/db/DbEnvFeedback.java b/bdb/java/src/com/sleepycat/db/DbEnvFeedback.java
new file mode 100644
index 00000000000..9eec2b819f6
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbEnvFeedback.java
@@ -0,0 +1,19 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbEnvFeedback.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+public interface DbEnvFeedback
+{
+ // methods
+ //
+ public abstract void feedback(DbEnv env, int opcode, int pct);
+}
+
+// end of DbEnvFeedback.java
diff --git a/bdb/java/src/com/sleepycat/db/DbErrcall.java b/bdb/java/src/com/sleepycat/db/DbErrcall.java
new file mode 100644
index 00000000000..62d3a3e08b3
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbErrcall.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbErrcall.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public interface DbErrcall
+{
+ // methods
+ //
+ public abstract void errcall(String prefix, String buffer);
+}
+
+// end of DbErrcall.java
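
DbErrcall is the callback form of error reporting; DbEnv.set_errcall() (shown above) installs it, and the package-private DbOutputStreamErrcall later in this diff is the default implementation. A minimal custom implementation, assuming an already constructed DbEnv named dbenv:

    import com.sleepycat.db.*;

    class PrefixedErrcall implements DbErrcall
    {
        public void errcall(String prefix, String buffer)
        {
            // prefix is whatever was set with set_errpfx(), and may be null.
            System.err.println("[bdb] "
                + (prefix == null ? "" : prefix + ": ") + buffer);
        }
    }

    // Usage: dbenv.set_errcall(new PrefixedErrcall());
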
diff --git a/bdb/java/src/com/sleepycat/db/DbException.java b/bdb/java/src/com/sleepycat/db/DbException.java
new file mode 100644
index 00000000000..ed4d020b677
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbException.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbException.java,v 11.4 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbException extends Exception
+{
+ // methods
+ //
+
+ public DbException(String s)
+ {
+ super(s);
+ }
+
+ public DbException(String s, int errno)
+ {
+ super(s);
+ this.errno_ = errno;
+ }
+
+ public String toString()
+ {
+ String s = super.toString();
+ if (errno_ == 0)
+ return s;
+ else
+ return s + ": " + DbEnv.strerror(errno_);
+
+ }
+
+ // get/set methods
+ //
+
+ public int get_errno()
+ {
+ return errno_;
+ }
+
+ // private data
+ //
+
+ private int errno_ = 0;
+}
+
+// end of DbException.java
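
Because DbException carries the underlying errno, callers can either rely on toString(), which appends DbEnv.strerror(errno), or branch on get_errno() against the error constants defined in Db. A small sketch:

    import com.sleepycat.db.*;

    class ErrnoHandlingSketch
    {
        static void report(DbException dbe)
        {
            if (dbe.get_errno() == Db.DB_RUNRECOVERY)
                System.err.println("environment damaged, recovery required");
            else
                System.err.println("operation failed: "
                    + DbEnv.strerror(dbe.get_errno()));
        }
    }
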
diff --git a/bdb/java/src/com/sleepycat/db/DbFeedback.java b/bdb/java/src/com/sleepycat/db/DbFeedback.java
new file mode 100644
index 00000000000..d932d951a6f
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbFeedback.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbFeedback.java,v 11.4 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public interface DbFeedback
+{
+ // methods
+ //
+ public abstract void feedback(Db db, int opcode, int pct);
+}
+
+// end of DbFeedback.java
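
DbFeedback is the progress callback registered with set_feedback(); the DbEnv variant of that method appears earlier in this diff. A minimal implementation that simply prints the reported completion percentage:

    import com.sleepycat.db.*;

    class ProgressFeedback implements DbFeedback
    {
        public void feedback(Db db, int opcode, int pct)
        {
            System.out.println("operation " + opcode + ": " + pct + "% complete");
        }
    }

    // Usage: dbenv.set_feedback(new ProgressFeedback());
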
diff --git a/bdb/java/src/com/sleepycat/db/DbHash.java b/bdb/java/src/com/sleepycat/db/DbHash.java
new file mode 100644
index 00000000000..a72c2070b59
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbHash.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbHash.java,v 11.1 2000/07/04 20:53:19 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used when supplying an application-specific
+ * hash function for a Db.
+ *
+ */
+public interface DbHash
+{
+ public abstract int hash(Db db, byte[] data, int len);
+}
+
+// end of DbHash.java
diff --git a/bdb/java/src/com/sleepycat/db/DbHashStat.java b/bdb/java/src/com/sleepycat/db/DbHashStat.java
new file mode 100644
index 00000000000..62154344732
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbHashStat.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbHashStat.java,v 11.6 2000/05/04 02:54:55 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * Db.stat() method.
+ */
+public class DbHashStat
+{
+ public int hash_magic; // Magic number.
+ public int hash_version; // Version number.
+ public int hash_metaflags; // Metadata flags.
+ public int hash_nkeys; // Number of unique keys.
+ public int hash_ndata; // Number of data items.
+ public int hash_pagesize; // Page size.
+ public int hash_nelem; // Original nelem specified.
+ public int hash_ffactor; // Fill factor specified at create.
+ public int hash_buckets; // Number of hash buckets.
+ public int hash_free; // Pages on the free list.
+ public int hash_bfree; // Bytes free on bucket pages.
+ public int hash_bigpages; // Number of big key/data pages.
+ public int hash_big_bfree; // Bytes free on big item pages.
+ public int hash_overflows; // Number of overflow pages.
+ public int hash_ovfl_free; // Bytes free on ovfl pages.
+ public int hash_dup; // Number of dup pages.
+ public int hash_dup_free; // Bytes free on duplicate pages.
+}
+
+// end of DbHashStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbKeyRange.java b/bdb/java/src/com/sleepycat/db/DbKeyRange.java
new file mode 100644
index 00000000000..6a86afd9109
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbKeyRange.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbKeyRange.java,v 1.1 2000/04/12 15:07:02 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbKeyRange
+{
+ public double less;
+ public double equal;
+ public double greater;
+}
+
+// end of DbKeyRange.java
diff --git a/bdb/java/src/com/sleepycat/db/DbLock.java b/bdb/java/src/com/sleepycat/db/DbLock.java
new file mode 100644
index 00000000000..bc467913e92
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbLock.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbLock.java,v 11.4 2000/05/25 04:18:13 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbLock
+{
+ protected native void finalize()
+ throws Throwable;
+
+ // methods
+ //
+ public native void put(DbEnv env)
+ throws DbException;
+
+ // get/set methods
+ //
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbLock.java
diff --git a/bdb/java/src/com/sleepycat/db/DbLockStat.java b/bdb/java/src/com/sleepycat/db/DbLockStat.java
new file mode 100644
index 00000000000..f23f2ad5d49
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbLockStat.java
@@ -0,0 +1,30 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbLockStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * DbEnv.lock_stat() method.
+ */
+public class DbLockStat
+{
+ public int st_maxlocks; // Maximum number of locks in table.
+ public int st_nmodes; // Number of lock modes.
+ public int st_nlockers; // Number of lockers.
+ public int st_nconflicts; // Number of lock conflicts.
+ public int st_nrequests; // Number of lock gets.
+ public int st_nreleases; // Number of lock puts.
+ public int st_ndeadlocks; // Number of lock deadlocks.
+ public int st_region_wait; // Region lock granted after wait.
+ public int st_region_nowait; // Region lock granted without wait.
+ public int st_regsize; // Region size.
+}
+
+// end of DbLockStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbLogStat.java b/bdb/java/src/com/sleepycat/db/DbLogStat.java
new file mode 100644
index 00000000000..d708f1c4148
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbLogStat.java
@@ -0,0 +1,35 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbLogStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * DbEnv.log_stat() method.
+ */
+public class DbLogStat
+{
+ public int st_magic; // Log file magic number.
+ public int st_version; // Log file version number.
+ public int st_mode; // Log file mode.
+ public int st_lg_max; // Maximum log file size.
+ public int st_w_bytes; // Bytes to log.
+ public int st_w_mbytes; // Megabytes to log.
+ public int st_wc_bytes; // Bytes to log since checkpoint.
+ public int st_wc_mbytes; // Megabytes to log since checkpoint.
+ public int st_wcount; // Total writes to the log.
+ public int st_scount; // Total syncs to the log.
+ public int st_region_wait; // Region lock granted after wait.
+ public int st_region_nowait; // Region lock granted without wait.
+ public int st_cur_file; // Current log file number.
+ public int st_cur_offset; // Current log file offset.
+ public int st_regsize; // Region size.
+}
+
+// end of DbLogStat.java
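
DbLogStat is what DbEnv.log_stat() returns; together with log_archive() and the DB_ARCH_* flags defined above it gives a quick view of the logging subsystem. A small reporting sketch, assuming an open transactional DbEnv named dbenv:

    import com.sleepycat.db.*;

    class LogStatSketch
    {
        static void report(DbEnv dbenv) throws DbException
        {
            DbLogStat stat = dbenv.log_stat();
            System.out.println("log cursor: file " + stat.st_cur_file
                + ", offset " + stat.st_cur_offset);

            // Absolute paths of log files no longer needed for recovery.
            String[] logs = dbenv.log_archive(Db.DB_ARCH_ABS);
            if (logs != null)
                for (int i = 0; i < logs.length; i++)
                    System.out.println("archivable: " + logs[i]);
        }
    }
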
diff --git a/bdb/java/src/com/sleepycat/db/DbLsn.java b/bdb/java/src/com/sleepycat/db/DbLsn.java
new file mode 100644
index 00000000000..ff36ac61c99
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbLsn.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbLsn.java,v 11.5 2000/09/11 16:21:37 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbLsn
+{
+ // methods
+ //
+ public DbLsn()
+ {
+ init_lsn();
+ }
+
+ protected native void finalize()
+ throws Throwable;
+
+ private native void init_lsn();
+
+ // get/set methods
+ //
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbLsn.java
diff --git a/bdb/java/src/com/sleepycat/db/DbMemoryException.java b/bdb/java/src/com/sleepycat/db/DbMemoryException.java
new file mode 100644
index 00000000000..67a29a1f16f
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbMemoryException.java
@@ -0,0 +1,28 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbMemoryException.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+public class DbMemoryException extends DbException
+{
+ // methods
+ //
+
+ public DbMemoryException(String s)
+ {
+ super(s);
+ }
+
+ public DbMemoryException(String s, int errno)
+ {
+ super(s, errno);
+ }
+}
+
+// end of DbMemoryException.java
diff --git a/bdb/java/src/com/sleepycat/db/DbMpoolFStat.java b/bdb/java/src/com/sleepycat/db/DbMpoolFStat.java
new file mode 100644
index 00000000000..44497b3bf74
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbMpoolFStat.java
@@ -0,0 +1,28 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbMpoolFStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * DbEnv.memp_fstat() method.
+ */
+public class DbMpoolFStat
+{
+ public String file_name; // File name.
+ public int st_pagesize; // Page size.
+ public int st_cache_hit; // Pages found in the cache.
+ public int st_cache_miss; // Pages not found in the cache.
+ public int st_map; // Pages from mapped files.
+ public int st_page_create; // Pages created in the cache.
+ public int st_page_in; // Pages read in.
+ public int st_page_out; // Pages written out.
+}
+
+// end of DbMpoolFStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbMpoolStat.java b/bdb/java/src/com/sleepycat/db/DbMpoolStat.java
new file mode 100644
index 00000000000..8a6d75e367b
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbMpoolStat.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbMpoolStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * DbEnv.memp_stat() method.
+ */
+public class DbMpoolStat
+{
+ /**
+ * @deprecated As of Berkeley DB 2.8.2, the mpool cache size is no longer reported here.
+ */
+ public int st_cachesize; // Cache size.
+ public int st_cache_hit; // Pages found in the cache.
+ public int st_cache_miss; // Pages not found in the cache.
+ public int st_map; // Pages from mapped files.
+ public int st_page_create; // Pages created in the cache.
+ public int st_page_in; // Pages read in.
+ public int st_page_out; // Pages written out.
+ public int st_ro_evict; // Clean pages forced from the cache.
+ public int st_rw_evict; // Dirty pages forced from the cache.
+ public int st_hash_buckets; // Number of hash buckets.
+ public int st_hash_searches; // Total hash chain searches.
+ public int st_hash_longest; // Longest hash chain searched.
+ public int st_hash_examined; // Total hash entries searched.
+ public int st_page_clean; // Clean pages.
+ public int st_page_dirty; // Dirty pages.
+ public int st_page_trickle; // Pages written by memp_trickle.
+ public int st_region_wait; // Region lock granted after wait.
+ public int st_region_nowait; // Region lock granted without wait.
+ public int st_regsize; // Region size.
+}
+
+// end of DbMpoolStat.java
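
These counters are returned by DbEnv.memp_stat(), and memp_trickle() (also declared earlier) keeps a percentage of the cache clean. A small sketch that reports the cache hit rate, assuming an open DbEnv named dbenv:

    import com.sleepycat.db.*;

    class CacheStatSketch
    {
        static void report(DbEnv dbenv) throws DbException
        {
            DbMpoolStat stat = dbenv.memp_stat();
            int total = stat.st_cache_hit + stat.st_cache_miss;
            if (total > 0)
                System.out.println("cache hit rate: "
                    + (100.0 * stat.st_cache_hit / total) + "%");

            int written = dbenv.memp_trickle(10);   // aim for ~10% clean pages
            System.out.println("memp_trickle wrote " + written + " pages");
        }
    }
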
diff --git a/bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java b/bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java
new file mode 100644
index 00000000000..4f962d9a334
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java
@@ -0,0 +1,58 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbOutputStreamErrcall.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+import java.io.OutputStream;
+import java.io.IOException;
+
+/**
+ *
+ * @author Donald D. Anderson
+ *
+ * This class is not public, as it is only used internally
+ * by Db and DbEnv to implement a default error handler.
+ */
+
+/*package*/ class DbOutputStreamErrcall implements DbErrcall
+{
+ DbOutputStreamErrcall(OutputStream stream)
+ {
+ this.stream_ = stream;
+ }
+
+ // errcall implements DbErrcall
+ //
+ public void errcall(String prefix, String buffer)
+ {
+ try {
+ if (prefix != null) {
+ stream_.write(prefix.getBytes());
+ stream_.write((new String(": ")).getBytes());
+ }
+ stream_.write(buffer.getBytes());
+ stream_.write((new String("\n")).getBytes());
+ }
+ catch (IOException e) {
+
+ // well, we tried.
+ // Do our best to report the problem by other means.
+ //
+ System.err.println("DbOutputStreamErrcall Exception: " + e);
+ if (prefix != null)
+ System.err.print(prefix + ": ");
+ System.err.println(buffer + "\n");
+ }
+ }
+
+ // private data
+ //
+ private OutputStream stream_;
+}
+
+// end of DbOutputStreamErrcall.java
diff --git a/bdb/java/src/com/sleepycat/db/DbQueueStat.java b/bdb/java/src/com/sleepycat/db/DbQueueStat.java
new file mode 100644
index 00000000000..652878b1adb
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbQueueStat.java
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbQueueStat.java,v 11.5 2000/11/07 18:45:27 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * Db.stat() method.
+ */
+public class DbQueueStat
+{
+ public int qs_magic; // Magic number.
+ public int qs_version; // Version number.
+ public int qs_metaflags; // Metadata flags.
+ public int qs_nkeys; // Number of unique keys.
+ public int qs_ndata; // Number of data items.
+ public int qs_pagesize; // Page size.
+ public int qs_pages; // Data pages.
+ public int qs_re_len; // Fixed-length record length.
+ public int qs_re_pad; // Fixed-length record pad.
+ public int qs_pgfree; // Bytes free in data pages.
+ public int qs_first_recno; // First undeleted record.
+ public int qs_cur_recno; // Last allocated record number.
+}
+
+// end of DbQueueStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbRecoveryInit.java b/bdb/java/src/com/sleepycat/db/DbRecoveryInit.java
new file mode 100644
index 00000000000..b32eebcaa6c
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbRecoveryInit.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbRecoveryInit.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public interface DbRecoveryInit
+{
+ // methods
+ //
+ public abstract void recovery_init(DbEnv dbenv);
+}
+
+// end of DbRecoveryInit.java
diff --git a/bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java b/bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java
new file mode 100644
index 00000000000..78736b6ed1e
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbRunRecoveryException.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbRunRecoveryException extends DbException
+{
+ // methods
+ //
+
+ public DbRunRecoveryException(String s)
+ {
+ super(s);
+ }
+
+ public DbRunRecoveryException(String s, int errno)
+ {
+ super(s, errno);
+ }
+}
+
+// end of DbRunRecoveryException.java
diff --git a/bdb/java/src/com/sleepycat/db/DbTxn.java b/bdb/java/src/com/sleepycat/db/DbTxn.java
new file mode 100644
index 00000000000..201ff94c8f3
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbTxn.java
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbTxn.java,v 11.5 2000/05/25 04:18:13 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbTxn
+{
+ // methods
+ //
+ public native void abort()
+ throws DbException;
+
+ public native void commit(int flags)
+ throws DbException;
+
+ public native /*u_int32_t*/ int id()
+ throws DbException;
+
+ public native void prepare()
+ throws DbException;
+
+ protected native void finalize()
+ throws Throwable;
+
+ // get/set methods
+ //
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbTxn.java
diff --git a/bdb/java/src/com/sleepycat/db/DbTxnRecover.java b/bdb/java/src/com/sleepycat/db/DbTxnRecover.java
new file mode 100644
index 00000000000..ee47935941d
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbTxnRecover.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbTxnRecover.java,v 11.1 2000/06/29 18:08:17 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by DbEnv.set_tx_recover()
+ *
+ */
+public interface DbTxnRecover
+{
+ // The value of recops is one of the Db.DB_TXN_* constants
+ public abstract int tx_recover(DbEnv env, Dbt dbt, DbLsn lsn, int recops);
+}
+
+// end of DbTxnRecover.java
diff --git a/bdb/java/src/com/sleepycat/db/DbTxnStat.java b/bdb/java/src/com/sleepycat/db/DbTxnStat.java
new file mode 100644
index 00000000000..e72addb00b1
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbTxnStat.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbTxnStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * DbEnv.txn_stat() method.
+ */
+public class DbTxnStat
+{
+ public static class Active {
+ public int txnid; // Transaction ID
+ public int parentid; // Transaction ID of parent
+ public DbLsn lsn; // Lsn of the begin record
+ };
+
+ public DbLsn st_last_ckp; // lsn of the last checkpoint
+ public DbLsn st_pending_ckp; // last checkpoint did not finish
+ public long st_time_ckp; // time of last checkpoint (UNIX secs)
+ public int st_last_txnid; // last transaction id given out
+ public int st_maxtxns; // maximum number of active txns
+ public int st_naborts; // number of aborted transactions
+ public int st_nbegins; // number of begun transactions
+ public int st_ncommits; // number of committed transactions
+ public int st_nactive; // number of active transactions
+ public int st_maxnactive; // maximum active transactions
+ public Active st_txnarray[]; // array of active transactions
+ public int st_region_wait; // Region lock granted after wait.
+ public int st_region_nowait; // Region lock granted without wait.
+ public int st_regsize; // Region size.
+}
+
+// end of DbTxnStat.java
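
DbTxnStat (with its nested Active entries) is returned by DbEnv.txn_stat(); combined with txn_checkpoint(), both declared earlier in this diff, it gives a simple health report for the transaction subsystem. A small sketch, assuming an open transactional DbEnv named dbenv:

    import com.sleepycat.db.*;

    class TxnStatSketch
    {
        static void report(DbEnv dbenv) throws DbException
        {
            dbenv.txn_checkpoint(0, 0, 0);          // force a checkpoint now

            DbTxnStat stat = dbenv.txn_stat();
            System.out.println(stat.st_ncommits + " commits, "
                + stat.st_naborts + " aborts, "
                + stat.st_nactive + " active");
            if (stat.st_txnarray != null)
                for (int i = 0; i < stat.st_txnarray.length; i++)
                    System.out.println("active txn id: "
                        + stat.st_txnarray[i].txnid);
        }
    }
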
diff --git a/bdb/java/src/com/sleepycat/db/Dbc.java b/bdb/java/src/com/sleepycat/db/Dbc.java
new file mode 100644
index 00000000000..b097cbad802
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/Dbc.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: Dbc.java,v 11.5 2000/05/25 04:18:13 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class Dbc
+{
+ // methods
+ //
+ public native void close()
+ throws DbException;
+
+ public native int count(int flags)
+ throws DbException;
+
+ // returns: 0, DB_KEYEMPTY, or throws error
+ public native int del(int flags)
+ throws DbException;
+
+ public native Dbc dup(int flags)
+ throws DbException;
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int get(Dbt key, Dbt data, int flags)
+ throws DbException;
+
+ // returns: 0, DB_KEYEXIST, or throws error
+ public native int put(Dbt key, Dbt data, int flags)
+ throws DbException;
+
+ protected native void finalize()
+ throws Throwable;
+
+ // get/set methods
+ //
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of Dbc.java
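
The return-value convention documented above (0 on success, Db.DB_NOTFOUND when a cursor runs off the end) is what drives the traversal loops in the examples below. A minimal scan, assuming an open Db named db:

    import com.sleepycat.db.*;

    class CursorScanSketch
    {
        static void dump(Db db) throws DbException
        {
            Dbc cursor = db.cursor(null, 0);
            Dbt key = new Dbt();
            Dbt data = new Dbt();
            key.set_flags(Db.DB_DBT_MALLOC);    // library allocates returned bytes
            data.set_flags(Db.DB_DBT_MALLOC);

            while (cursor.get(key, data, Db.DB_NEXT) == 0)
                System.out.println(
                    new String(key.get_data(), 0, key.get_size()) + " : "
                    + new String(data.get_data(), 0, data.get_size()));

            cursor.close();
        }
    }
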
diff --git a/bdb/java/src/com/sleepycat/db/Dbt.java b/bdb/java/src/com/sleepycat/db/Dbt.java
new file mode 100644
index 00000000000..bbb478cd542
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/Dbt.java
@@ -0,0 +1,110 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: Dbt.java,v 11.6 2000/06/16 03:34:01 dda Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class Dbt
+{
+ // methods
+ //
+
+ protected native void finalize()
+ throws Throwable;
+
+ // get/set methods
+ //
+
+ // key/data
+
+ public void set_data(byte[] data)
+ {
+ // internal_set_data is separated from set_data in case
+ // we want to have set_data automatically set some other
+ // fields (size, etc.) someday.
+ //
+ internal_set_data(data);
+ }
+
+ public native byte[] get_data();
+ private native void internal_set_data(byte[] data);
+
+ // These are not in the original DB interface,
+ // but they can be used to get/set the offset
+ // into the attached byte array.
+ //
+ public native void set_offset(int off);
+ public native int get_offset();
+
+ // key/data length
+ public native /*u_int32_t*/ int get_size();
+ public native void set_size(/*u_int32_t*/ int size);
+
+ // RO: length of user buffer.
+ public native /*u_int32_t*/ int get_ulen();
+ public native void set_ulen(/*u_int32_t*/ int ulen);
+
+ // RO: get/put record length.
+ public native /*u_int32_t*/ int get_dlen();
+ public native void set_dlen(/*u_int32_t*/ int dlen);
+
+ // RO: get/put record offset.
+ public native /*u_int32_t*/ int get_doff();
+ public native void set_doff(/*u_int32_t*/ int doff);
+
+ // flags
+ public native /*u_int32_t*/ int get_flags();
+ public native void set_flags(/*u_int32_t*/ int flags);
+
+ // These are not in the original DB interface.
+ // They can be used to set the recno key for a Dbt.
+ // Note: you must set the data field to an array of
+ // at least four bytes before calling either of these.
+ //
+ public native void set_recno_key_data(int recno);
+ public native int get_recno_key_data();
+
+ public Dbt(byte[] data)
+ {
+ init();
+ internal_set_data(data);
+ if (data != null)
+ set_size(data.length);
+ }
+
+ public Dbt(byte[] data, int off, int len)
+ {
+ this(data);
+ set_ulen(len);
+ set_offset(off);
+ }
+
+ public Dbt()
+ {
+ init();
+ }
+
+ // private methods
+ //
+ private native void init();
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+
+// end of Dbt.java
diff --git a/bdb/java/src/com/sleepycat/examples/AccessExample.java b/bdb/java/src/com/sleepycat/examples/AccessExample.java
new file mode 100644
index 00000000000..f3a98c2c7d5
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/examples/AccessExample.java
@@ -0,0 +1,186 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: AccessExample.java,v 11.5 2000/12/13 07:09:42 krinsky Exp $
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+class AccessExample
+{
+ private static final String FileName = "access.db";
+
+ public AccessExample()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: AccessExample\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ AccessExample app = new AccessExample();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("AccessExample: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("AccessExample: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("AccessExample");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/bdb/java/src/com/sleepycat/examples/BtRecExample.java b/bdb/java/src/com/sleepycat/examples/BtRecExample.java
new file mode 100644
index 00000000000..5101f676a0b
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/examples/BtRecExample.java
@@ -0,0 +1,348 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: BtRecExample.java,v 11.6 2000/02/19 20:58:02 bostic Exp $
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class BtRecExample
+{
+ static final String progname = "BtRecExample"; // Program name.
+ static final String database = "access.db";
+ static final String wordlist = "../test/wordlist";
+
+ BtRecExample(BufferedReader reader)
+ throws DbException, IOException, FileNotFoundException
+ {
+ int ret;
+
+ // Remove the previous database.
+ File f = new File(database);
+ f.delete();
+
+ dbp = new Db(null, 0);
+
+ dbp.set_error_stream(System.err);
+ dbp.set_errpfx(progname);
+ dbp.set_pagesize(1024); // 1K page sizes.
+
+ dbp.set_flags(Db.DB_RECNUM); // Record numbers.
+ dbp.open(database, null, Db.DB_BTREE, Db.DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the word
+ // preceded by its record number, and the data is the same, but
+ // in reverse order.
+ //
+
+ for (int cnt = 1; cnt <= 1000; ++cnt) {
+ String numstr = String.valueOf(cnt);
+ while (numstr.length() < 4)
+ numstr = "0" + numstr;
+ String buf = numstr + '_' + reader.readLine();
+ StringBuffer rbuf = new StringBuffer(buf).reverse();
+
+ StringDbt key = new StringDbt(buf);
+ StringDbt data = new StringDbt(rbuf.toString());
+
+ if ((ret = dbp.put(null, key, data, Db.DB_NOOVERWRITE)) != 0) {
+ if (ret != Db.DB_KEYEXIST)
+ throw new DbException("Db.put failed", ret);
+ }
+ }
+ }
+
+ void run()
+ throws DbException
+ {
+ int recno;
+ int ret;
+
+ // Acquire a cursor for the database.
+ dbcp = dbp.cursor(null, 0);
+
+ //
+ // Prompt the user for a record number, then retrieve and display
+ // that record.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ // Get a record number.
+ String line = askForLine(reader, System.out, "recno #> ");
+ if (line == null)
+ break;
+
+ try {
+ recno = Integer.parseInt(line);
+ }
+ catch (NumberFormatException nfe) {
+ System.err.println("Bad record number: " + nfe);
+ continue;
+ }
+
+ //
+ // Start with a fresh key each time; the dbcp.get() routine returns
+ // the key and data pair, not just the key!
+ //
+ RecnoStringDbt key = new RecnoStringDbt(recno, 100);
+ RecnoStringDbt data = new RecnoStringDbt(100);
+
+ if ((ret = dbcp.get(key, data, Db.DB_SET_RECNO)) != 0) {
+ throw new DbException("Dbc.get failed", ret);
+ }
+
+ // Display the key and data.
+ show("k/d\t", key, data);
+
+ // Move the cursor a record forward.
+ if ((ret = dbcp.get(key, data, Db.DB_NEXT)) != 0) {
+ throw new DbException("Dbc.get failed", ret);
+ }
+
+ // Display the key and data.
+ show("next\t", key, data);
+
+ RecnoStringDbt datano = new RecnoStringDbt(100);
+
+ //
+ // Retrieve the record number for the following record into
+ // local memory.
+ //
+ if ((ret = dbcp.get(key, datano, Db.DB_GET_RECNO)) != 0) {
+ if (ret != Db.DB_NOTFOUND && ret != Db.DB_KEYEMPTY) {
+ throw new DbException("Dbc.get failed", ret);
+ }
+ }
+ else {
+ recno = datano.getRecno();
+ System.out.println("retrieved recno: " + recno);
+ }
+ }
+
+ dbcp.close();
+ dbcp = null;
+ }
+
+ //
+ // Print out the number of records in the database.
+ //
+ void stats()
+ throws DbException
+ {
+ DbBtreeStat statp;
+
+ statp = (DbBtreeStat)dbp.stat(0);
+ System.out.println(progname + ": database contains " +
+ statp.bt_ndata + " records");
+ }
+
+ void show(String msg, RecnoStringDbt key, RecnoStringDbt data)
+ throws DbException
+ {
+ System.out.println(msg + key.getString() + ": " + data.getString());
+ }
+
+ public void shutdown()
+ throws DbException
+ {
+ if (dbcp != null) {
+ dbcp.close();
+ dbcp = null;
+ }
+ if (dbp != null) {
+ dbp.close(0);
+ dbp = null;
+ }
+ }
+
+ public static void main(String argv[])
+ {
+
+ try {
+ // Open the word database.
+ FileReader freader = new FileReader(wordlist);
+
+ BtRecExample app = new BtRecExample(new BufferedReader(freader));
+
+ // Close the word database.
+ freader.close();
+ freader = null;
+
+ app.stats();
+ app.run();
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println(progname + ": unexpected open error " + fnfe);
+ System.exit (1);
+ }
+ catch (IOException ioe) {
+ System.err.println(progname + ": open " + wordlist + ": " + ioe);
+ System.exit (1);
+ }
+ catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(dbe.get_errno());
+ }
+
+ System.exit(0);
+ }
+
+ void
+ usage()
+ {
+ System.err.println("usage: " + progname);
+ System.exit(1);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ private Dbc dbcp;
+ private Db dbp;
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings.
+ // We've declared it as a static inner class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt(byte[] arr)
+ {
+ set_flags(Db.DB_DBT_USERMEM);
+ set_data(arr);
+ set_size(arr.length);
+ }
+
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ // must set ulen because sometimes a string is returned
+ set_ulen(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+
+ // Here's an example of how you can extend a Dbt to store
+ // (potentially) both recno's and strings in the same
+ // structure.
+ //
+ static /*inner*/
+ class RecnoStringDbt extends Dbt
+ {
+ RecnoStringDbt(int maxsize)
+ {
+ this(0, maxsize); // let other constructor do most of the work
+ }
+
+ RecnoStringDbt(int value, int maxsize)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[maxsize];
+ set_data(arr); // use our local array for data
+ set_ulen(maxsize); // size of return storage
+ setRecno(value);
+ }
+
+ RecnoStringDbt(String value, int maxsize)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[maxsize];
+ set_data(arr); // use our local array for data
+ set_ulen(maxsize); // size of return storage
+ setString(value);
+ }
+
+ void setRecno(int value)
+ {
+ set_recno_key_data(value);
+ set_size(arr.length);
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ int getRecno()
+ {
+ return get_recno_key_data();
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ byte arr[];
+ }
+}
diff --git a/bdb/java/src/com/sleepycat/examples/EnvExample.java b/bdb/java/src/com/sleepycat/examples/EnvExample.java
new file mode 100644
index 00000000000..f1b855836c5
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/examples/EnvExample.java
@@ -0,0 +1,128 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: EnvExample.java,v 11.7 2000/09/25 13:16:51 dda Exp $
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+import java.io.OutputStream;
+
+/*
+ * An example of a program using DbEnv to configure its DB
+ * environment.
+ *
+ * For comparison purposes, this example uses a similar structure
+ * as examples/ex_env.c and examples_cxx/EnvExample.cpp.
+ */
+public class EnvExample
+{
+ private static final String progname = "EnvExample";
+ private static final String DATABASE_HOME = "/tmp/database";
+
+ private static void db_application()
+ throws DbException
+ {
+ // Do something interesting...
+ // Your application goes here.
+ }
+
+ private static void db_setup(String home, String data_dir,
+ OutputStream errs)
+ throws DbException, FileNotFoundException
+ {
+ //
+ // Create an environment object and initialize it for error
+ // reporting.
+ //
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.set_error_stream(errs);
+ dbenv.set_errpfx(progname);
+
+ //
+ // We want to specify the shared memory buffer pool cachesize,
+ // but everything else is the default.
+ //
+ dbenv.set_cachesize(0, 64 * 1024, 0);
+
+ // Databases are in a subdirectory.
+ dbenv.set_data_dir(data_dir);
+
+ // Open the environment with full transactional support.
+ //
+ // open() will throw a DbException if there is an error.
+ //
+ // open is declared to throw a FileNotFoundException, which normally
+ // shouldn't occur with the DB_CREATE option.
+ //
+ dbenv.open(DATABASE_HOME,
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+ Db.DB_INIT_MPOOL | Db.DB_INIT_TXN, 0);
+
+ try {
+
+ // Start your application.
+ db_application();
+
+ }
+ finally {
+
+ // Close the environment. Doing this in the
+ // finally block ensures it is done, even if
+ // an error is thrown.
+ //
+ dbenv.close(0);
+ }
+ }
+
+ private static void db_teardown(String home, String data_dir,
+ OutputStream errs)
+ throws DbException, FileNotFoundException
+ {
+ // Remove the shared database regions.
+
+ DbEnv dbenv = new DbEnv(0);
+
+ dbenv.set_error_stream(errs);
+ dbenv.set_errpfx(progname);
+ dbenv.set_data_dir(data_dir);
+ dbenv.remove(home, 0);
+ }
+
+ public static void main(String[] args)
+ {
+ //
+ // All of the shared database files live in /tmp/database,
+ // but data files live in /database.
+ //
+ // In the C/C++ versions of this example the configuration is
+ // passed as a two-element array with config[1] set to NULL.
+ // This is not necessary in Java; a single String suffices.
+ //
+ String home = DATABASE_HOME;
+ String config = "/database/files";
+
+ try {
+ System.out.println("Setup env");
+ db_setup(home, config, System.err);
+
+ System.out.println("Teardown env");
+ db_teardown(home, config, System.err);
+ }
+ catch (DbException dbe) {
+ System.err.println(progname + ": environment open: " + dbe.toString());
+ System.exit (1);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println(progname +
+ ": unexpected open environment error " + fnfe);
+ System.exit (1);
+ }
+ }
+
+}
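
EnvExample's db_setup() shows the full transactional open; the open/close discipline it relies on can be reduced to a few lines. A sketch under the same API (not part of the patch; the class and method names are illustrative, and only a memory-pool environment is opened here):

    import com.sleepycat.db.*;
    import java.io.FileNotFoundException;

    class MiniEnv
    {
        static void openAndClose(String home)
            throws DbException, FileNotFoundException
        {
            DbEnv dbenv = new DbEnv(0);
            dbenv.set_error_stream(System.err);
            dbenv.set_errpfx("MiniEnv");

            // A subset of the flags EnvExample uses: enough for a
            // cache-only (non-transactional) environment.
            dbenv.open(home, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
            try {
                // ... application work against dbenv goes here ...
            }
            finally {
                dbenv.close(0);    // always release the environment
            }
        }
    }
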
diff --git a/bdb/java/src/com/sleepycat/examples/LockExample.java b/bdb/java/src/com/sleepycat/examples/LockExample.java
new file mode 100644
index 00000000000..33b7d0538ce
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/examples/LockExample.java
@@ -0,0 +1,235 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: LockExample.java,v 11.5 2001/01/04 14:23:30 dda Exp $
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Vector;
+
+//
+// An example of a program using DbLock and related classes.
+//
+class LockExample extends DbEnv
+{
+ private static final String progname = "LockExample";
+ private static final String LOCK_HOME = "TESTDIR";
+
+ public LockExample(String home, int maxlocks, boolean do_unlink)
+ throws DbException, FileNotFoundException
+ {
+ super(0);
+ if (do_unlink) {
+ remove(home, Db.DB_FORCE);
+ }
+ else {
+ set_error_stream(System.err);
+ set_errpfx("LockExample");
+ if (maxlocks != 0)
+ set_lk_max_locks(maxlocks);
+ open(home, Db.DB_CREATE|Db.DB_INIT_LOCK, 0);
+ }
+ }
+
+ // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException
+ {
+ long held;
+ int len = 0, locker;
+ int ret;
+ boolean did_get = false;
+ int lockid = 0;
+ InputStreamReader in = new InputStreamReader(System.in);
+ Vector locks = new Vector();
+
+ //
+ // Accept lock requests.
+ //
+ locker = lock_id();
+ for (held = 0;;) {
+ String opbuf = askForLine(in, System.out,
+ "Operation get/release [get]> ");
+ if (opbuf == null)
+ break;
+
+ try {
+ if (opbuf.equals("get")) {
+ // Acquire a lock.
+ String objbuf = askForLine(in, System.out,
+ "input object (text string) to lock> ");
+ if (objbuf == null)
+ break;
+
+ String lockbuf;
+ do {
+ lockbuf = askForLine(in, System.out,
+ "lock type read/write [read]> ");
+ if (lockbuf == null)
+ break;
+ len = lockbuf.length();
+ } while (len >= 1 &&
+ !lockbuf.equals("read") &&
+ !lockbuf.equals("write"));
+
+ int lock_type;
+ if (len <= 1 || lockbuf.equals("read"))
+ lock_type = Db.DB_LOCK_READ;
+ else
+ lock_type = Db.DB_LOCK_WRITE;
+
+ Dbt dbt = new Dbt(objbuf.getBytes());
+
+ DbLock lock;
+ did_get = true;
+ lock = lock_get(locker, Db.DB_LOCK_NOWAIT,
+ dbt, lock_type);
+ lockid = locks.size();
+ locks.addElement(lock);
+ } else {
+ // Release a lock.
+ String objbuf;
+ objbuf = askForLine(in, System.out,
+ "input lock to release> ");
+ if (objbuf == null)
+ break;
+
+ lockid = Integer.parseInt(objbuf, 16);
+ if (lockid < 0 || lockid >= locks.size()) {
+ System.out.println("Lock #" + lockid + " out of range");
+ continue;
+ }
+ did_get = false;
+ DbLock lock = (DbLock)locks.elementAt(lockid);
+ lock.put(this);
+ }
+ System.out.println("Lock #" + lockid + " " +
+ (did_get ? "granted" : "released"));
+ held += did_get ? 1 : -1;
+ }
+ catch (DbException dbe) {
+ switch (dbe.get_errno()) {
+ case Db.DB_LOCK_NOTGRANTED:
+ System.out.println("Lock not granted");
+ break;
+ case Db.DB_LOCK_DEADLOCK:
+ System.err.println("LockExample: lock_" +
+ (did_get ? "get" : "put") +
+ ": returned DEADLOCK");
+ break;
+ default:
+ System.err.println("LockExample: lock_get: " + dbe.toString());
+ }
+ }
+ }
+ System.out.println();
+ System.out.println("Closing lock region " + String.valueOf(held) +
+ " locks held");
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: LockExample [-u] [-h home] [-m maxlocks]");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ String home = LOCK_HOME;
+ boolean do_unlink = false;
+ int maxlocks = 0;
+
+ for (int i = 0; i < argv.length; ++i) {
+ if (argv[i].equals("-h")) {
+ if (++i >= argv.length)
+ usage();
+ home = argv[i];
+ }
+ else if (argv[i].equals("-m")) {
+ if (++i >= argv.length)
+ usage();
+
+ try {
+ maxlocks = Integer.parseInt(argv[i]);
+ }
+ catch (NumberFormatException nfe) {
+ usage();
+ }
+ }
+ else if (argv[i].equals("-u")) {
+ do_unlink = true;
+ }
+ else {
+ usage();
+ }
+ }
+
+ try {
+ if (do_unlink) {
+ // Create an environment that immediately
+ // removes all files.
+ LockExample tmp = new LockExample(home, maxlocks, do_unlink);
+ }
+
+ LockExample app = new LockExample(home, maxlocks, do_unlink);
+ app.run();
+ app.close(0);
+ }
+ catch (DbException dbe) {
+ System.err.println(progname + ": " + dbe.toString());
+ }
+ catch (Throwable t) {
+ System.err.println(progname + ": " + t.toString());
+ }
+ System.out.println("LockExample completed");
+ }
+}
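
LockExample drives the lock manager interactively; stripped of the prompting, the cycle it exercises is lock_id() to allocate a locker, lock_get() to acquire, and DbLock.put() to release. A sketch of that cycle (not part of the patch; `dbenv` and `name` are assumed, and with DB_LOCK_NOWAIT a lock_get() that cannot be granted immediately throws DbException with errno DB_LOCK_NOTGRANTED, as the example's catch block shows):

    import com.sleepycat.db.*;

    class MiniLock
    {
        // `dbenv` must have been opened with Db.DB_INIT_LOCK, as in
        // LockExample's constructor.
        static void readLocked(DbEnv dbenv, String name)
            throws DbException
        {
            int locker = dbenv.lock_id();                // allocate a locker id
            Dbt obj = new Dbt(name.getBytes());          // the object being locked
            DbLock lock = dbenv.lock_get(locker, Db.DB_LOCK_NOWAIT,
                                         obj, Db.DB_LOCK_READ);
            try {
                // ... read the resource protected by `name` ...
            }
            finally {
                lock.put(dbenv);                         // release the lock
            }
        }
    }
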
diff --git a/bdb/java/src/com/sleepycat/examples/TpcbExample.java b/bdb/java/src/com/sleepycat/examples/TpcbExample.java
new file mode 100644
index 00000000000..29a90790801
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/examples/TpcbExample.java
@@ -0,0 +1,831 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TpcbExample.java,v 11.9 2000/04/01 15:52:15 dda Exp $
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.Random;
+import java.util.GregorianCalendar;
+import java.math.BigDecimal;
+
+//
+// This program implements a basic TPC/B driver program. To create the
+// TPC/B database, run with the -i (init) flag. The number of records
+// with which to populate the account, history, branch, and teller tables
+// is specified by the a, s, b, and t flags respectively. To run a TPC/B
+// test, use the n flag to indicate a number of transactions to run (note
+// that you can run many of these processes in parallel to simulate a
+// multiuser test run).
+//
+class TpcbExample extends DbEnv
+{
+ public static final int TELLERS_PER_BRANCH = 10;
+ public static final int ACCOUNTS_PER_TELLER = 10000;
+ public static final int HISTORY_PER_BRANCH = 2592000;
+
+ //
+ // The default configuration that adheres to TPCB scaling rules requires
+ // nearly 3 GB of space. To avoid requiring that much space for testing,
+ // we set the parameters much lower. If you want to run a valid 10 TPS
+ // configuration, uncomment the VALID_SCALING configuration below.
+ //
+
+ // VALID_SCALING configuration
+ /*
+ public static final int ACCOUNTS = 1000000;
+ public static final int BRANCHES = 10;
+ public static final int TELLERS = 100;
+ public static final int HISTORY = 25920000;
+ */
+
+ // TINY configuration
+ /*
+ public static final int ACCOUNTS = 1000;
+ public static final int BRANCHES = 10;
+ public static final int TELLERS = 100;
+ public static final int HISTORY = 10000;
+ */
+
+ // Default configuration
+ public static final int ACCOUNTS = 100000;
+ public static final int BRANCHES = 10;
+ public static final int TELLERS = 100;
+ public static final int HISTORY = 259200;
+
+ public static final int HISTORY_LEN = 100;
+ public static final int RECLEN = 100;
+ public static final int BEGID = 1000000;
+
+ // used by random_id()
+ public static final int ACCOUNT = 0;
+ public static final int BRANCH = 1;
+ public static final int TELLER = 2;
+
+ private static boolean verbose = false;
+ private static final String progname = "TpcbExample"; // Program name.
+
+ public TpcbExample(String home, int cachesize,
+ boolean initializing, int flags)
+ throws DbException, FileNotFoundException
+ {
+ super(0);
+ set_error_stream(System.err);
+ set_errpfx(progname);
+ set_cachesize(0, cachesize == 0 ? 4 * 1024 * 1024 : cachesize, 0);
+
+ int local_flags = flags | Db.DB_CREATE;
+ if (initializing)
+ local_flags |= Db.DB_INIT_MPOOL;
+ else
+ local_flags |= Db.DB_INIT_TXN | Db.DB_INIT_LOCK |
+ Db.DB_INIT_LOG | Db.DB_INIT_MPOOL;
+
+ open(home, local_flags, 0); // may throw DbException
+ }
+
+ //
+ // Initialize the database to the specified number of accounts, branches,
+ // history records, and tellers.
+ //
+ // Note: num_h was unused in the original ex_tpcb.c example.
+ //
+ public void
+ populate(int num_a, int num_b, int num_h, int num_t)
+ {
+ Db dbp = null;
+
+ int err;
+ int balance, idnum;
+ int end_anum, end_bnum, end_tnum;
+ int start_anum, start_bnum, start_tnum;
+ int h_nelem;
+
+ idnum = BEGID;
+ balance = 500000;
+
+ h_nelem = num_a;
+
+ try {
+ dbp = new Db(this, 0);
+ dbp.set_h_nelem(h_nelem);
+ dbp.open("account", null,
+ Db.DB_HASH, Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ }
+ // can be DbException or FileNotFoundException
+ catch (Exception e1) {
+ errExit(e1, "Open of account file failed");
+ }
+
+ start_anum = idnum;
+ populateTable(dbp, idnum, balance, h_nelem, "account");
+ idnum += h_nelem;
+ end_anum = idnum - 1;
+ try {
+ dbp.close(0);
+ }
+ catch (DbException e2) {
+ errExit(e2, "Account file close failed");
+ }
+
+ if (verbose)
+ System.out.println("Populated accounts: "
+ + String.valueOf(start_anum) + " - " + String.valueOf(end_anum));
+
+ //
+ // Since the number of branches is very small, we want to use very
+ // small pages and only 1 key per page. This is the poor-man's way
+ // of getting key locking instead of page locking.
+ //
+ h_nelem = (int)num_b;
+
+ try {
+ dbp = new Db(this, 0);
+
+ dbp.set_h_nelem(h_nelem);
+ dbp.set_h_ffactor(1);
+ dbp.set_pagesize(512);
+
+ dbp.open("branch", null,
+ Db.DB_HASH, Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ }
+ // can be DbException or FileNotFoundException
+ catch (Exception e3) {
+ errExit(e3, "Branch file create failed");
+ }
+ start_bnum = idnum;
+ populateTable(dbp, idnum, balance, h_nelem, "branch");
+ idnum += h_nelem;
+ end_bnum = idnum - 1;
+
+ try {
+ dbp.close(0);
+ }
+ catch (DbException dbe4) {
+ errExit(dbe4, "Close of branch file failed");
+ }
+
+ if (verbose)
+ System.out.println("Populated branches: "
+ + String.valueOf(start_bnum) + " - " + String.valueOf(end_bnum));
+
+ //
+ // In the case of tellers, we also want small pages, but we'll let
+ // the fill factor dynamically adjust itself.
+ //
+ h_nelem = (int)num_t;
+
+ try {
+
+ dbp = new Db(this, 0);
+
+ dbp.set_h_nelem(h_nelem);
+ dbp.set_h_ffactor(0);
+ dbp.set_pagesize(512);
+
+ dbp.open("teller", null,
+ Db.DB_HASH, Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ }
+ // can be DbException or FileNotFoundException
+ catch (Exception e5) {
+ errExit(e5, "Teller file create failed");
+ }
+
+ start_tnum = idnum;
+ populateTable(dbp, idnum, balance, h_nelem, "teller");
+ idnum += h_nelem;
+ end_tnum = idnum - 1;
+
+ try {
+ dbp.close(0);
+ }
+ catch (DbException e6) {
+ errExit(e6, "Close of teller file failed");
+ }
+
+ if (verbose)
+ System.out.println("Populated tellers: "
+ + String.valueOf(start_tnum) + " - " + String.valueOf(end_tnum));
+
+ try {
+ dbp = new Db(this, 0);
+ dbp.set_re_len(HISTORY_LEN);
+ dbp.open("history", null,
+ Db.DB_RECNO, Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ }
+ // can be DbException or FileNotFoundException
+ catch (Exception e7) {
+ errExit(e7, "Create of history file failed");
+ }
+
+ populateHistory(dbp, num_h, num_a, num_b, num_t);
+
+ try {
+ dbp.close(0);
+ }
+ catch (DbException e8) {
+ errExit(e8, "Close of history file failed");
+ }
+ }
+
+ public void
+ populateTable(Db dbp,
+ int start_id, int balance,
+ int nrecs, String msg)
+ {
+ Defrec drec = new Defrec();
+
+ Dbt kdbt = new Dbt(drec.data);
+ kdbt.set_size(4); // sizeof(int)
+ Dbt ddbt = new Dbt(drec.data);
+ ddbt.set_size(drec.data.length); // uses whole array
+
+ try {
+ for (int i = 0; i < nrecs; i++) {
+ kdbt.set_recno_key_data(start_id + (int)i);
+ drec.set_balance(balance);
+ dbp.put(null, kdbt, ddbt, Db.DB_NOOVERWRITE);
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("Failure initializing " + msg + " file: " +
+ dbe.toString());
+ System.exit(1);
+ }
+ }
+
+ public void
+ populateHistory(Db dbp, int nrecs,
+ int anum, int bnum, int tnum)
+ {
+ Histrec hrec = new Histrec();
+ hrec.set_amount(10);
+
+ byte arr[] = new byte[4]; // sizeof(int)
+ int i;
+ Dbt kdbt = new Dbt(arr);
+ kdbt.set_size(arr.length);
+ Dbt ddbt = new Dbt(hrec.data);
+ ddbt.set_size(hrec.data.length);
+
+ try {
+ for (i = 1; i <= nrecs; i++) {
+ kdbt.set_recno_key_data(i);
+
+ hrec.set_aid(random_id(ACCOUNT, anum, bnum, tnum));
+ hrec.set_bid(random_id(BRANCH, anum, bnum, tnum));
+ hrec.set_tid(random_id(TELLER, anum, bnum, tnum));
+
+ dbp.put(null, kdbt, ddbt, Db.DB_APPEND);
+ }
+ }
+ catch (DbException dbe) {
+ errExit(dbe, "Failure initializing history file");
+ }
+ }
+
+ static Random rand = new Random();
+
+ public static int
+ random_int(int lo, int hi)
+ {
+ int ret;
+ int t;
+
+ t = rand.nextInt();
+ if (t < 0)
+ t = -t;
+ ret = (int)(((double)t / ((double)(Integer.MAX_VALUE) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ return (ret);
+ }
+
+ public static int
+ random_id(int type, int accounts, int branches, int tellers)
+ {
+ int min, max, num;
+
+ max = min = BEGID;
+ num = accounts;
+ switch(type) {
+ case TELLER:
+ min += branches;
+ num = tellers;
+ // Fallthrough
+ case BRANCH:
+ if (type == BRANCH)
+ num = branches;
+ min += accounts;
+ // Fallthrough
+ case ACCOUNT:
+ max = min + num - 1;
+ }
+ return (random_int(min, max));
+ }
+
+ public void
+ run(int n, int accounts, int branches, int tellers)
+ {
+ Db adb = null;
+ Db bdb = null;
+ Db hdb = null;
+ Db tdb = null;
+ double gtps, itps;
+ int failed, ifailed, ret, txns;
+ long starttime, curtime, lasttime;
+
+ //
+ // Open the database files.
+ //
+ int err;
+ try {
+ adb = new Db(this, 0);
+ adb.open("account", null, Db.DB_UNKNOWN, 0, 0);
+ bdb = new Db(this, 0);
+ bdb.open("branch", null, Db.DB_UNKNOWN, 0, 0);
+ tdb = new Db(this, 0);
+ tdb.open("teller", null, Db.DB_UNKNOWN, 0, 0);
+ hdb = new Db(this, 0);
+ hdb.open("history", null, Db.DB_UNKNOWN, 0, 0);
+ }
+ catch (DbException dbe) {
+ errExit(dbe, "Open of db files failed");
+ }
+ catch (FileNotFoundException fnfe) {
+ errExit(fnfe, "Open of db files failed, missing file");
+ }
+
+ txns = failed = ifailed = 0;
+ starttime = (new Date()).getTime();
+ lasttime = starttime;
+ while (n-- > 0) {
+ txns++;
+ ret = txn(adb, bdb, tdb, hdb, accounts, branches, tellers);
+ if (ret != 0) {
+ failed++;
+ ifailed++;
+ }
+ if (n % 5000 == 0) {
+ curtime = (new Date()).getTime();
+ gtps = (double)(txns - failed) /
+ ((curtime - starttime) / 1000.0);
+ itps = (double)(5000 - ifailed) /
+ ((curtime - lasttime) / 1000.0);
+ System.out.print(String.valueOf(txns) + " txns " +
+ String.valueOf(failed) + " failed ");
+ System.out.println(showRounded(gtps, 2) + " TPS (gross) " +
+ showRounded(itps, 2) + " TPS (interval)");
+ lasttime = curtime;
+ ifailed = 0;
+ }
+ }
+
+ try {
+ adb.close(0);
+ bdb.close(0);
+ tdb.close(0);
+ hdb.close(0);
+ }
+ catch (DbException dbe2) {
+ errExit(dbe2, "Close of db files failed");
+ }
+
+ System.out.println((long)txns + " transactions begun "
+ + String.valueOf(failed) + " failed");
+
+ }
+
+ //
+ // XXX Figure out the appropriate way to pick out IDs.
+ //
+ public int
+ txn(Db adb, Db bdb, Db tdb, Db hdb,
+ int anum, int bnum, int tnum)
+ {
+ Dbc acurs = null;
+ Dbc bcurs = null;
+ Dbc hcurs = null;
+ Dbc tcurs = null;
+ DbTxn t = null;
+
+ Defrec rec = new Defrec();
+ Histrec hrec = new Histrec();
+ int account, branch, teller;
+
+ Dbt d_dbt = new Dbt();
+ Dbt d_histdbt = new Dbt();
+ Dbt k_dbt = new Dbt();
+ Dbt k_histdbt = new Dbt();
+
+ account = random_id(ACCOUNT, anum, bnum, tnum);
+ branch = random_id(BRANCH, anum, bnum, tnum);
+ teller = random_id(TELLER, anum, bnum, tnum);
+
+ // The history key will not actually be retrieved,
+ // but it does need to be set to something.
+ byte hist_key[] = new byte[4];
+ k_histdbt.set_data(hist_key);
+ k_histdbt.set_size(4 /* == sizeof(int)*/);
+
+ byte key_bytes[] = new byte[4];
+ k_dbt.set_data(key_bytes);
+ k_dbt.set_size(4 /* == sizeof(int)*/);
+
+ d_dbt.set_flags(Db.DB_DBT_USERMEM);
+ d_dbt.set_data(rec.data);
+ d_dbt.set_ulen(rec.length());
+
+ hrec.set_aid(account);
+ hrec.set_bid(branch);
+ hrec.set_tid(teller);
+ hrec.set_amount(10);
+ // Request 0 bytes since we're just positioning.
+ d_histdbt.set_flags(Db.DB_DBT_PARTIAL);
+
+ // START TIMING
+
+ try {
+ t = txn_begin(null, 0);
+
+ acurs = adb.cursor(t, 0);
+ bcurs = bdb.cursor(t, 0);
+ tcurs = tdb.cursor(t, 0);
+ hcurs = hdb.cursor(t, 0);
+
+ // Account record
+ k_dbt.set_recno_key_data(account);
+ if (acurs.get(k_dbt, d_dbt, Db.DB_SET) != 0)
+ throw new TpcbException("acurs get failed");
+ rec.set_balance(rec.get_balance() + 10);
+ acurs.put(k_dbt, d_dbt, Db.DB_CURRENT);
+
+ // Branch record
+ k_dbt.set_recno_key_data(branch);
+ if (bcurs.get(k_dbt, d_dbt, Db.DB_SET) != 0)
+ throw new TpcbException("bcurs get failed");
+ rec.set_balance(rec.get_balance() + 10);
+ bcurs.put(k_dbt, d_dbt, Db.DB_CURRENT);
+
+ // Teller record
+ k_dbt.set_recno_key_data(teller);
+ if (tcurs.get(k_dbt, d_dbt, Db.DB_SET) != 0)
+ throw new TpcbException("tcurs get failed");
+ rec.set_balance(rec.get_balance() + 10);
+ tcurs.put(k_dbt, d_dbt, Db.DB_CURRENT);
+
+ // History record
+ d_histdbt.set_flags(0);
+ d_histdbt.set_data(hrec.data);
+ d_histdbt.set_ulen(hrec.length());
+ if (hdb.put(t, k_histdbt, d_histdbt, Db.DB_APPEND) != 0)
+ throw(new DbException("put failed"));
+
+ acurs.close();
+ bcurs.close();
+ tcurs.close();
+ hcurs.close();
+
+ t.commit(0);
+
+ // END TIMING
+ return (0);
+
+ }
+ catch (Exception e) {
+ try {
+ if (acurs != null)
+ acurs.close();
+ if (bcurs != null)
+ bcurs.close();
+ if (tcurs != null)
+ tcurs.close();
+ if (hcurs != null)
+ hcurs.close();
+ if (t != null)
+ t.abort();
+ }
+ catch (DbException dbe) {
+ // not much we can do here.
+ }
+
+ if (verbose) {
+ System.out.println("Transaction A=" + String.valueOf(account)
+ + " B=" + String.valueOf(branch)
+ + " T=" + String.valueOf(teller) + " failed");
+ System.out.println("Reason: " + e.toString());
+ }
+ return (-1);
+ }
+ }
+
+ static void errExit(Exception err, String s)
+ {
+ System.err.print(progname + ": ");
+ if (s != null) {
+ System.err.print(s + ": ");
+ }
+ System.err.println(err.toString());
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ long seed;
+ int accounts, branches, tellers, history;
+ boolean iflag, txn_no_sync;
+ int mpool, ntxns;
+ String home, endarg;
+
+ home = "TESTDIR";
+ accounts = branches = history = tellers = 0;
+ txn_no_sync = false;
+ mpool = ntxns = 0;
+ verbose = false;
+ iflag = false;
+ seed = (new GregorianCalendar()).get(Calendar.SECOND);
+
+ for (int i = 0; i < argv.length; ++i)
+ {
+ if (argv[i].equals("-a")) {
+ // Number of account records
+ if ((accounts = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-b")) {
+ // Number of branch records
+ if ((branches = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-c")) {
+ // Cachesize in bytes
+ if ((mpool = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-f")) {
+ // Fast mode: no txn sync.
+ txn_no_sync = true;
+ }
+ else if (argv[i].equals("-h")) {
+ // DB home.
+ home = argv[++i];
+ }
+ else if (argv[i].equals("-i")) {
+ // Initialize the test.
+ iflag = true;
+ }
+ else if (argv[i].equals("-n")) {
+ // Number of transactions
+ if ((ntxns = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-S")) {
+ // Random number seed.
+ seed = Long.parseLong(argv[++i]);
+ if (seed <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-s")) {
+ // Number of history records
+ if ((history = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-t")) {
+ // Number of teller records
+ if ((tellers = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-v")) {
+ // Verbose option.
+ verbose = true;
+ }
+ else
+ {
+ usage();
+ }
+ }
+
+ rand.setSeed((int)seed);
+
+ TpcbExample app = null;
+
+ // Initialize the database environment.
+ // Must be done within a try block.
+ //
+ try {
+ app = new TpcbExample(home, mpool, iflag,
+ txn_no_sync ? Db.DB_TXN_NOSYNC : 0);
+ }
+ catch (Exception e1) {
+ errExit(e1, "initializing environment failed");
+ }
+
+ accounts = accounts == 0 ? ACCOUNTS : accounts;
+ branches = branches == 0 ? BRANCHES : branches;
+ tellers = tellers == 0 ? TELLERS : tellers;
+ history = history == 0 ? HISTORY : history;
+
+ if (verbose)
+ System.out.println((long)accounts + " Accounts "
+ + String.valueOf(branches) + " Branches "
+ + String.valueOf(tellers) + " Tellers "
+ + String.valueOf(history) + " History");
+
+ if (iflag) {
+ if (ntxns != 0)
+ usage();
+ app.populate(accounts, branches, history, tellers);
+ }
+ else {
+ if (ntxns == 0)
+ usage();
+ app.run(ntxns, accounts, branches, tellers);
+ }
+
+ // Shut down the application.
+
+ try {
+ app.close(0);
+ }
+ catch (DbException dbe2) {
+ errExit(dbe2, "appexit failed");
+ }
+
+ System.exit(0);
+ }
+
+ private static void invarg(String str)
+ {
+ System.err.println("TpcbExample: invalid argument: " + str);
+ System.exit(1);
+ }
+
+ private static void usage()
+ {
+ System.err.println(
+ "usage: TpcbExample [-fiv] [-a accounts] [-b branches]\n" +
+ " [-c cachesize] [-h home] [-n transactions ]\n" +
+ " [-S seed] [-s history] [-t tellers]");
+ System.exit(1);
+ }
+
+ // round 'd' to 'scale' digits, and return result as string
+ private String showRounded(double d, int scale)
+ {
+ return new BigDecimal(d).
+ setScale(scale, BigDecimal.ROUND_HALF_DOWN).toString();
+ }
+
+ // The byte order is our choice.
+ //
+ static long get_int_in_array(byte[] array, int offset)
+ {
+ return
+ ((0xff & array[offset+0]) << 0) |
+ ((0xff & array[offset+1]) << 8) |
+ ((0xff & array[offset+2]) << 16) |
+ ((0xff & array[offset+3]) << 24);
+ }
+
+ // Note: Value needs to be long to avoid sign extension
+ static void set_int_in_array(byte[] array, int offset, long value)
+ {
+ array[offset+0] = (byte)((value >> 0) & 0x0ff);
+ array[offset+1] = (byte)((value >> 8) & 0x0ff);
+ array[offset+2] = (byte)((value >> 16) & 0x0ff);
+ array[offset+3] = (byte)((value >> 24) & 0x0ff);
+ }
+
+};
+
+// Simulate the following C struct:
+// struct Defrec {
+// u_int32_t id;
+// u_int32_t balance;
+// u_int8_t pad[RECLEN - sizeof(int) - sizeof(int)];
+// };
+
+class Defrec
+{
+ public Defrec()
+ {
+ data = new byte[TpcbExample.RECLEN];
+ }
+
+ public int length()
+ {
+ return TpcbExample.RECLEN;
+ }
+
+ public long get_id()
+ {
+ return TpcbExample.get_int_in_array(data, 0);
+ }
+
+ public void set_id(long value)
+ {
+ TpcbExample.set_int_in_array(data, 0, value);
+ }
+
+ public long get_balance()
+ {
+ return TpcbExample.get_int_in_array(data, 4);
+ }
+
+ public void set_balance(long value)
+ {
+ TpcbExample.set_int_in_array(data, 4, value);
+ }
+
+ static {
+ Defrec d = new Defrec();
+ d.set_balance(500000);
+ }
+
+ public byte[] data;
+}
+
+// Simulate the following C struct:
+// struct Histrec {
+// u_int32_t aid;
+// u_int32_t bid;
+// u_int32_t tid;
+// u_int32_t amount;
+// u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
+// };
+
+class Histrec
+{
+ public Histrec()
+ {
+ data = new byte[TpcbExample.RECLEN];
+ }
+
+ public int length()
+ {
+ return TpcbExample.RECLEN;
+ }
+
+ public long get_aid()
+ {
+ return TpcbExample.get_int_in_array(data, 0);
+ }
+
+ public void set_aid(long value)
+ {
+ TpcbExample.set_int_in_array(data, 0, value);
+ }
+
+ public long get_bid()
+ {
+ return TpcbExample.get_int_in_array(data, 4);
+ }
+
+ public void set_bid(long value)
+ {
+ TpcbExample.set_int_in_array(data, 4, value);
+ }
+
+ public long get_tid()
+ {
+ return TpcbExample.get_int_in_array(data, 8);
+ }
+
+ public void set_tid(long value)
+ {
+ TpcbExample.set_int_in_array(data, 8, value);
+ }
+
+ public long get_amount()
+ {
+ return TpcbExample.get_int_in_array(data, 12);
+ }
+
+ public void set_amount(long value)
+ {
+ TpcbExample.set_int_in_array(data, 12, value);
+ }
+
+ public byte[] data;
+}
+
+class TpcbException extends Exception
+{
+ TpcbException()
+ {
+ super();
+ }
+
+ TpcbException(String s)
+ {
+ super(s);
+ }
+}
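
Defrec and Histrec above flatten their fields into fixed-length byte arrays with the little-endian helpers set_int_in_array()/get_int_in_array(). A round-trip sketch (not part of the patch; it must live in the same package because the helpers are package-private, and values are kept below 2^31 since the getter does 32-bit arithmetic before widening):

    package com.sleepycat.examples;

    class PackSketch
    {
        public static void main(String[] args)
        {
            // Pack the starting balance at offset 4, exactly as Defrec does,
            // then read it back with the matching helper.
            byte[] buf = new byte[TpcbExample.RECLEN];
            TpcbExample.set_int_in_array(buf, 4, 500000);
            long balance = TpcbExample.get_int_in_array(buf, 4);
            System.out.println("balance = " + balance);   // prints: balance = 500000
        }
    }
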
diff --git a/bdb/libdb_java/checkapi.prl b/bdb/libdb_java/checkapi.prl
new file mode 100644
index 00000000000..25882c056cc
--- /dev/null
+++ b/bdb/libdb_java/checkapi.prl
@@ -0,0 +1,132 @@
+#
+# Released to public domain by Donald Anderson dda@world.std.com
+# No warranties.
+#
+# Perl script to check for matching of JNI interfaces to implementation.
+# We check all .cpp arguments and .h arguments and make sure that for
+# each .h declaration (marked by JNIEXPORT keyword), there is a .cpp
+# definition for the same function (also marked by JNIEXPORT keyword),
+# and vice versa. Definitions and declarations are determined solely
+# by whether they are in a .h or .cpp file - we don't do any further
+# analysis.
+#
+# Some additions made to help with Berkeley DB sources:
+#
+# Berkeley DB Java sources use JAVADB_*_ACCESS #defines
+# to quickly define routine access functions.
+
+foreach $file (<@ARGV>) { # glob allows direct use from Win* makefiles
+ open (FILE, $file) || die "$file: cannot open\n";
+ $dot_h = 0;
+ if ($file =~ /.*[hH]$/) {
+ $dot_h = 1;
+ }
+ $in_def = 0;
+nextline:
+ while (<FILE>) {
+ chop;
+ if (/JNIEXPORT/ || /^JAVADB_.*_ACCESS/) {
+ $in_def = 1;
+ $def = "";
+ }
+ if ($in_def == 1) {
+ $def .= $_;
+ }
+ if (/\)/) {
+ $line = "";
+ $in_def = 0;
+ if ($def eq "") {
+ next nextline;
+ }
+ $_ = $def;
+ # remove comments
+ s@/\*[^*]*\*/@@g;
+ s@[ ][ ]*@ @g;
+ s@JNIEnv *\* *@JNIEnv @g;
+ s@([,*()]) @\1@g;
+ s@ ([,*()])@\1@g;
+
+ s/JAVADB_WO_ACCESS_METHOD/JAVADB_WO_ACCESS/;
+
+ if (/^JAVADB_.*_ACCESS/) {
+ s@ *@ @g;
+ s@_ACCESS_STRING\(([^,]*),@_ACCESS(\1,jstring,@;
+ s@_ACCESS_BEFORE_APPINIT@_ACCESS@;
+ s@_ACCESS\(@,normal,@;
+ s@JAVADB_@@;
+ s@\)@,@;
+ @vars = split(/,/);
+ $get = 0;
+ $set = 0;
+ if (@vars[0] eq "RW") {
+ $get = 1;
+ $set = 1;
+ }
+ if (@vars[0] eq "RO") {
+ $get = 1;
+ }
+ if (@vars[0] eq "WO") {
+ $set = 1;
+ }
+ if ($get == 0 && $set == 0) {
+ print "Invalid use of JAVADB_ macro\n";
+ }
+ if ($set == 1) {
+ $line = "JNIEXPORT void JNICALL Java_com_sleepycat_db_@vars[2]_set_1@vars[4](JNIEnv,jobject,@vars[3])";
+ }
+ if ($get == 1) {
+ $line2 = "JNIEXPORT @vars[3] JNICALL Java_com_sleepycat_db_@vars[2]_get_1@vars[4](JNIEnv,jobject)";
+ }
+ }
+ else {
+ s@([,(][a-zA-Z0-9_]*) [a-zA-Z0-9_]*@\1@g;
+ s@;$@@g;
+ $line = $_;
+ }
+
+ $def = "";
+
+ if ($line ne "") {
+ if ($lines{$line} eq "") {
+ $lines{$line} = 0;
+ }
+ if ($dot_h == 1) {
+ $lines{$line} += 1;
+ }
+ else {
+ $lines{$line} -= 1;
+ }
+ $line = "";
+ }
+ if ($line2 ne "") {
+ if ($lines{$line2} eq "") {
+ $lines{$line2} = 0;
+ }
+ if ($dot_h == 1) {
+ $lines{$line2} += 1;
+ }
+ else {
+ $lines{$line2} -= 1;
+ }
+ $line2 = "";
+ }
+ }
+ }
+ close (FILE);
+}
+
+$status = 0;
+foreach $key (sort keys %lines) {
+ if ($lines{$key} != 0) {
+ if ($lines{$key} > 0) {
+ print "Missing .cpp implementation: $key\n";
+ $status = 1;
+ }
+ else {
+ print "Missing .h declaration: $key\n";
+ $status = 1;
+ }
+ }
+}
+
+exit ($status);
diff --git a/bdb/libdb_java/com_sleepycat_db_Db.h b/bdb/libdb_java/com_sleepycat_db_Db.h
new file mode 100644
index 00000000000..d9e1f1cbbc7
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_Db.h
@@ -0,0 +1,349 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Db */
+
+#ifndef _Included_com_sleepycat_db_Db
+#define _Included_com_sleepycat_db_Db
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _init
+ * Signature: (Lcom/sleepycat/db/DbEnv;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _notify_internal
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _close
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: cursor
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: del
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: err
+ * Signature: (ILjava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
+ (JNIEnv *, jobject, jint, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: errx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: fd
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _finalize
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
+ (JNIEnv *, jobject, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get_byteswapped
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get_type
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: join
+ * Signature: ([Lcom/sleepycat/db/Dbc;I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
+ (JNIEnv *, jobject, jobjectArray, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: key_range
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/DbKeyRange;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _open
+ * Signature: (Ljava/lang/String;Ljava/lang/String;III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
+ (JNIEnv *, jobject, jstring, jstring, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: put
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: rename
+ * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_rename
+ (JNIEnv *, jobject, jstring, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: remove
+ * Signature: (Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_remove
+ (JNIEnv *, jobject, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: append_recno_changed
+ * Signature: (Lcom/sleepycat/db/DbAppendRecno;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: bt_compare_changed
+ * Signature: (Lcom/sleepycat/db/DbBtreeCompare;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_bt_maxkey
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1bt_1maxkey
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_bt_minkey
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1bt_1minkey
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: bt_prefix_changed
+ * Signature: (Lcom/sleepycat/db/DbBtreePrefix;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_cachesize
+ * Signature: (III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cachesize
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: dup_compare_changed
+ * Signature: (Lcom/sleepycat/db/DbDupCompare;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: feedback_changed
+ * Signature: (Lcom/sleepycat/db/DbFeedback;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_flags
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1flags
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_h_ffactor
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1h_1ffactor
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: hash_changed
+ * Signature: (Lcom/sleepycat/db/DbHash;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_h_nelem
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1h_1nelem
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_lorder
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1lorder
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_pagesize
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1pagesize
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_delim
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1delim
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_len
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1len
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_pad
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1pad
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_source
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1source
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_q_extentsize
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1q_1extentsize
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: stat
+ * Signature: (I)Ljava/lang/Object;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: sync
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_sync
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: upgrade
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: verify
+ * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/io/OutputStream;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
+ (JNIEnv *, jobject, jstring, jstring, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: one_time_init
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
+ (JNIEnv *, jclass);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
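
These machine-generated prototypes follow the standard JNI naming convention: each C symbol is Java_ plus the fully qualified class name with '.' replaced by '_', and any '_' in the Java method name escaped as "_1". A sketch of the Java-side native declarations a few of them correspond to (illustrative only; the actual Db.java elsewhere in this patch declares many more methods and may differ in modifiers and wrapping):

    // Illustrative excerpt only -- not the Db.java shipped in this patch.
    package com.sleepycat.db;

    public class Db
    {
        public native int sync(int flags);           // Java_com_sleepycat_db_Db_sync
        public native void set_re_len(int len);      // ..._Db_set_1re_1len
        public native boolean get_byteswapped();     // ..._Db_get_1byteswapped
    }
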
diff --git a/bdb/libdb_java/com_sleepycat_db_DbEnv.h b/bdb/libdb_java/com_sleepycat_db_DbEnv.h
new file mode 100644
index 00000000000..4168ea9abe2
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_DbEnv.h
@@ -0,0 +1,509 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbEnv */
+
+#ifndef _Included_com_sleepycat_db_DbEnv
+#define _Included_com_sleepycat_db_DbEnv
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _close
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: err
+ * Signature: (ILjava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
+ (JNIEnv *, jobject, jint, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: errx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _finalize
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
+ (JNIEnv *, jobject, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _init
+ * Signature: (Lcom/sleepycat/db/DbErrcall;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _init_using_db
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Lcom/sleepycat/db/Db;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
+ (JNIEnv *, jobject, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _notify_db_close
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: open
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: remove
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_cachesize
+ * Signature: (III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1cachesize
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_errcall
+ * Signature: (Lcom/sleepycat/db/DbErrcall;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_errpfx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: feedback_changed
+ * Signature: (Lcom/sleepycat/db/DbFeedback;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_verbose
+ * Signature: (II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1verbose
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_data_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1data_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_bsize
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1bsize
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_conflicts
+ * Signature: ([[B)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
+ (JNIEnv *, jobject, jobjectArray);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_detect
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1detect
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_lockers
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1lockers
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_locks
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1locks
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_objects
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1objects
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_mp_mmapsize
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1mp_1mmapsize
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_mutexlocks
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1mutexlocks
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_pageyield
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1pageyield
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_panicstate
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1panicstate
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: recovery_init_changed
+ * Signature: (Lcom/sleepycat/db/DbRecoveryInit;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_recovery_1init_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_region_init
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1region_1init
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_flags
+ * Signature: (II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1flags
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_server
+ * Signature: (Ljava/lang/String;JJI)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1server
+ (JNIEnv *, jobject, jstring, jlong, jlong, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_shm_key
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1shm_1key
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tas_spins
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tas_1spins
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tmp_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tmp_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: tx_recover_changed
+ * Signature: (Lcom/sleepycat/db/DbTxnRecover;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_tx_1recover_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tx_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tx_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_tx_timestamp
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_major
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_minor
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1minor
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_patch
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1patch
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_string
+ * Signature: ()Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1string
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: strerror
+ * Signature: (I)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_detect
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_get
+ * Signature: (IILcom/sleepycat/db/Dbt;I)Lcom/sleepycat/db/DbLock;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
+ (JNIEnv *, jobject, jint, jint, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_id
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_stat
+ * Signature: ()Lcom/sleepycat/db/DbLockStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_archive
+ * Signature: (I)[Ljava/lang/String;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_compare
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/DbLsn;)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
+ (JNIEnv *, jclass, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_file
+ * Signature: (Lcom/sleepycat/db/DbLsn;)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_flush
+ * Signature: (Lcom/sleepycat/db/DbLsn;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1flush
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_get
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1get
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_put
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_stat
+ * Signature: ()Lcom/sleepycat/db/DbLogStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_register
+ * Signature: (Lcom/sleepycat/db/Db;Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1register
+ (JNIEnv *, jobject, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_unregister
+ * Signature: (Lcom/sleepycat/db/Db;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1unregister
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_stat
+ * Signature: ()Lcom/sleepycat/db/DbMpoolStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_fstat
+ * Signature: ()[Lcom/sleepycat/db/DbMpoolFStat;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_trickle
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_begin
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)Lcom/sleepycat/db/DbTxn;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_checkpoint
+ * Signature: (III)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_txn_1checkpoint
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_stat
+ * Signature: ()Lcom/sleepycat/db/DbTxnStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/bdb/libdb_java/com_sleepycat_db_DbLock.h b/bdb/libdb_java/com_sleepycat_db_DbLock.h
new file mode 100644
index 00000000000..8a1c135bb3b
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_DbLock.h
@@ -0,0 +1,29 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLock */
+
+#ifndef _Included_com_sleepycat_db_DbLock
+#define _Included_com_sleepycat_db_DbLock
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLock
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbLock
+ * Method: put
+ * Signature: (Lcom/sleepycat/db/DbEnv;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_put
+ (JNIEnv *, jobject, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/bdb/libdb_java/com_sleepycat_db_DbLsn.h b/bdb/libdb_java/com_sleepycat_db_DbLsn.h
new file mode 100644
index 00000000000..093eaf372b5
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_DbLsn.h
@@ -0,0 +1,29 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLsn */
+
+#ifndef _Included_com_sleepycat_db_DbLsn
+#define _Included_com_sleepycat_db_DbLsn
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLsn
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbLsn
+ * Method: init_lsn
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/bdb/libdb_java/com_sleepycat_db_DbTxn.h b/bdb/libdb_java/com_sleepycat_db_DbTxn.h
new file mode 100644
index 00000000000..4dcf47405c0
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_DbTxn.h
@@ -0,0 +1,53 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbTxn */
+
+#ifndef _Included_com_sleepycat_db_DbTxn
+#define _Included_com_sleepycat_db_DbTxn
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: abort
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_abort
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: commit
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_commit
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: id
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: prepare
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
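
DbTxn.h, together with DbEnv.txn_begin() declared earlier, defines the begin/commit/abort surface that TpcbExample.txn() builds on. A reduced sketch of that discipline (not part of the patch; `dbenv` is assumed to be opened with DB_INIT_TXN and the related subsystems, as TpcbExample does):

    import com.sleepycat.db.*;

    class TxnSketch
    {
        static void doWork(DbEnv dbenv)
            throws DbException
        {
            DbTxn t = dbenv.txn_begin(null, 0);
            try {
                // ... transactional reads and writes ...
                t.commit(0);
            }
            catch (DbException dbe) {
                t.abort();        // undo everything done under `t`
                throw dbe;
            }
        }
    }
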
diff --git a/bdb/libdb_java/com_sleepycat_db_Dbc.h b/bdb/libdb_java/com_sleepycat_db_Dbc.h
new file mode 100644
index 00000000000..e62679c6f66
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_Dbc.h
@@ -0,0 +1,69 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Dbc */
+
+#ifndef _Included_com_sleepycat_db_Dbc
+#define _Included_com_sleepycat_db_Dbc
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: close
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_close
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: count
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: del
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_del
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: dup
+ * Signature: (I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: put
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/bdb/libdb_java/com_sleepycat_db_Dbt.h b/bdb/libdb_java/com_sleepycat_db_Dbt.h
new file mode 100644
index 00000000000..cdb58c682c9
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_Dbt.h
@@ -0,0 +1,157 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Dbt */
+
+#ifndef _Included_com_sleepycat_db_Dbt
+#define _Included_com_sleepycat_db_Dbt
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: get_data
+ * Signature: ()[B
+ */
+JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_get_1data
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: internal_set_data
+ * Signature: ([B)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_internal_1set_1data
+ (JNIEnv *, jobject, jbyteArray);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: set_offset
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1offset
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: get_offset
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1offset
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: get_size
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1size
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: set_size
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1size
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: get_ulen
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1ulen
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: set_ulen
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1ulen
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: get_dlen
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1dlen
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: set_dlen
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1dlen
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: get_doff
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1doff
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: set_doff
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1doff
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: get_flags
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1flags
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: set_flags
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1flags
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: set_recno_key_data
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1recno_1key_1data
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: get_recno_key_data
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1recno_1key_1data
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: init
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
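The Dbt accessors above map one-for-one onto the fields of the native DBT structure (data, size, ulen, dlen, doff, flags). A minimal C-side sketch of how those fields combine for a partial read into a caller-supplied buffer (illustrative only; assumes an open DB handle):

#include <string.h>
#include <db.h>

/* Sketch: read at most 64 bytes starting at offset 16 of the record,
 * using the fields behind Dbt.set_ulen/set_dlen/set_doff. */
int
partial_get(DB *db, DBT *key)
{
	DBT data;
	char buf[64];

	memset(&data, 0, sizeof(data));
	data.data = buf;
	data.ulen = sizeof(buf);	/* capacity of the user buffer */
	data.doff = 16;			/* offset into the stored data */
	data.dlen = sizeof(buf);	/* number of bytes requested */
	data.flags = DB_DBT_USERMEM | DB_DBT_PARTIAL;
	return (db->get(db, NULL, key, &data, 0));
}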
diff --git a/bdb/libdb_java/java_Db.c b/bdb/libdb_java/java_Db.c
new file mode 100644
index 00000000000..5b01e5068d6
--- /dev/null
+++ b/bdb/libdb_java/java_Db.c
@@ -0,0 +1,964 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_Db.c,v 11.34 2000/11/30 00:58:38 ubell Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "db_int.h"
+#include "db_page.h"
+#include "db_ext.h"
+#include "java_util.h"
+#include "com_sleepycat_db_Db.h"
+
+/* This struct is used in Db.verify and its callback */
+struct verify_callback_struct {
+ JNIEnv *env;
+ jobject streamobj;
+ jbyteArray bytes;
+ int nbytes;
+ jmethodID writemid;
+};
+
+JAVADB_WO_ACCESS_METHOD(Db, jint, flags, DB, flags)
+JAVADB_WO_ACCESS_METHOD(Db, jint, h_1ffactor, DB, h_ffactor)
+JAVADB_WO_ACCESS_METHOD(Db, jint, h_1nelem, DB, h_nelem)
+JAVADB_WO_ACCESS_METHOD(Db, jint, lorder, DB, lorder)
+JAVADB_WO_ACCESS_METHOD(Db, jint, re_1delim, DB, re_delim)
+JAVADB_WO_ACCESS_METHOD(Db, jint, re_1len, DB, re_len)
+JAVADB_WO_ACCESS_METHOD(Db, jint, re_1pad, DB, re_pad)
+JAVADB_WO_ACCESS_METHOD(Db, jint, q_1extentsize, DB, q_extentsize)
+JAVADB_WO_ACCESS_METHOD(Db, jint, bt_1maxkey, DB, bt_maxkey)
+JAVADB_WO_ACCESS_METHOD(Db, jint, bt_1minkey, DB, bt_minkey)
+
+/* This only gets called once ever, at the beginning of execution
+ * and can be used to initialize unchanging methodIds, fieldIds, etc.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
+ (JNIEnv *jnienv, /*Db.class*/ jclass jthisclass)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthisclass, NULL);
+}
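The hook above exists so that costly JNI lookups can be performed once per class load instead of on every call. A hedged sketch of that caching pattern with a hypothetical class and field (names are illustrative, not part of this patch):

#include <jni.h>

/* Sketch: cache a field ID during one-time class initialization so
 * later native methods can skip the GetFieldID lookup. */
static jfieldID example_fid;		/* hypothetical cached ID */

JNIEXPORT void JNICALL Java_com_example_Example_one_1time_1init
    (JNIEnv *jnienv, jclass jthisclass)
{
	example_fid = (*jnienv)->GetFieldID(jnienv, jthisclass,
	    "someIntField", "I");	/* illustrative field name */
}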
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbEnv*/ jobject jdbenv, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jdbenv);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ DB_ASSERT(dbinfo == NULL);
+
+ err = db_create(&db, dbenv, flags);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DB, jthis, db);
+ dbinfo = dbji_construct(jnienv, flags);
+ set_private_info(jnienv, name_DB, jthis, dbinfo);
+ db->cj_internal = dbinfo;
+ }
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ JAVADB_API_BEGIN(db, jthis);
+
+ /* Null out the private data to indicate the DB is invalid.
+ * We do this in advance to help guard against multithreading
+ * issues.
+ */
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+
+ err = db->close(db, flags);
+ if (err != DB_INCOMPLETE)
+ verify_return(jnienv, err, 0);
+ dbji_dealloc(dbinfo, jnienv);
+
+ /* don't call JAVADB_API_END - db cannot be used */
+ return (err);
+}
+
+/* We are being notified that the parent DbEnv has closed.
+ * Zero out the pointer to the DB, since it is no longer
+ * valid, to prevent mistakes. The user will get a null
+ * pointer exception if they try to use this Db again.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbAppendRecno*/ jobject jcallback)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ JAVADB_API_BEGIN(db, jthis);
+ dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbji_set_append_recno_object(dbinfo, jnienv, db, jcallback);
+ JAVADB_API_END(db);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbBtreeCompare*/ jobject jbtcompare)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ JAVADB_API_BEGIN(db, jthis);
+ dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbji_set_bt_compare_object(dbinfo, jnienv, db, jbtcompare);
+ JAVADB_API_END(db);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbBtreePrefix*/ jobject jbtprefix)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ JAVADB_API_BEGIN(db, jthis);
+ dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbji_set_bt_prefix_object(dbinfo, jnienv, db, jbtprefix);
+ JAVADB_API_END(db);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid, jint flags)
+{
+ int err;
+ DBC *dbc;
+ DB *db = get_DB(jnienv, jthis);
+ DB_TXN *dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (!verify_non_null(jnienv, db))
+ return (NULL);
+ err = db->cursor(db, dbtxnid, &dbc, flags);
+ verify_return(jnienv, err, 0);
+ return (get_Dbc(jnienv, dbc));
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, jint dbflags)
+{
+ int err;
+ DB_TXN *dbtxnid;
+ DB *db;
+ JDBT dbkey;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ JAVADB_API_BEGIN(db, jthis);
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (jdbt_lock(&dbkey, jnienv, key, inOp) != 0)
+ goto out;
+
+ err = db->del(db, dbtxnid, &dbkey.dbt->dbt, dbflags);
+ if (err != DB_NOTFOUND) {
+ verify_return(jnienv, err, 0);
+ }
+
+ out:
+ jdbt_unlock(&dbkey, jnienv);
+ JAVADB_API_END(db);
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbDupCompare*/ jobject jdupcompare)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ JAVADB_API_BEGIN(db, jthis);
+ dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbji_set_dup_compare_object(dbinfo, jnienv, db, jdupcompare);
+ JAVADB_API_END(db);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jint ecode, jstring msg)
+{
+ DB *db;
+ JSTR msg_string;
+
+ if (jstr_lock(&msg_string, jnienv, msg) != 0)
+ goto out;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out;
+
+ JAVADB_API_BEGIN(db, jthis);
+ db->err(db, ecode, msg_string.string);
+ JAVADB_API_END(db);
+
+ out:
+ jstr_unlock(&msg_string, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring msg)
+{
+ JSTR msg_string;
+ DB *db = get_DB(jnienv, jthis);
+
+ if (jstr_lock(&msg_string, jnienv, msg) != 0)
+ goto out;
+ if (!verify_non_null(jnienv, db))
+ goto out;
+
+ JAVADB_API_BEGIN(db, jthis);
+ db->errx(db, msg_string.string);
+ JAVADB_API_END(db);
+
+ out:
+ jstr_unlock(&msg_string, jnienv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ int err;
+ int return_value = 0;
+ DB *db = get_DB(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ JAVADB_API_BEGIN(db, jthis);
+ err = db->fd(db, &return_value);
+ verify_return(jnienv, err, 0);
+ JAVADB_API_END(db);
+
+ return (return_value);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbFeedback*/ jobject jfeedback)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ JAVADB_API_BEGIN(db, jthis);
+ dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbji_set_feedback_object(dbinfo, jnienv, db, jfeedback);
+ JAVADB_API_END(db);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err, op_flags, retry;
+ DB *db;
+ OpKind keyop, dataop;
+ DB_TXN *dbtxnid;
+ JDBT dbkey, dbdata;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out3;
+
+ JAVADB_API_BEGIN(db, jthis);
+
+ /* Depending on flags, the key may be input/output. */
+ keyop = inOp;
+ dataop = outOp;
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH) {
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (jdbt_lock(&dbkey, jnienv, key, keyop) != 0)
+ goto out2;
+ if (jdbt_lock(&dbdata, jnienv, data, dataop) != 0)
+ goto out1;
+ for (retry = 0; retry < 3; retry++) {
+ err = db->get(db, dbtxnid, &dbkey.dbt->dbt, &dbdata.dbt->dbt, flags);
+
+ /* If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!jdbt_realloc(&dbdata, jnienv))
+ break;
+ }
+ if (err != DB_NOTFOUND) {
+ verify_return(jnienv, err, 0);
+ }
+ out1:
+ jdbt_unlock(&dbdata, jnienv);
+ out2:
+ jdbt_unlock(&dbkey, jnienv);
+ out3:
+ JAVADB_API_END(db);
+ return (err);
+}
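The retry loop above reflects a general pattern with caller-managed buffers: when DB_DBT_USERMEM is set and the buffer is too small, get() fails with ENOMEM and the DBT's size field reports the length actually needed. A native-level sketch of the same idea (illustrative; the caller is assumed to pass a DBT describing its current buffer):

#include <errno.h>
#include <stdlib.h>
#include <db.h>

/* Sketch: grow a user-supplied buffer until db->get succeeds. */
int
get_growing(DB *db, DBT *key, DBT *data)
{
	void *p;
	int ret;

	data->flags = DB_DBT_USERMEM;
	while ((ret = db->get(db, NULL, key, data, 0)) == ENOMEM) {
		/* data->size now holds the length the record needs. */
		if ((p = realloc(data->data, data->size)) == NULL)
			return (ENOMEM);
		data->data = p;
		data->ulen = data->size;
	}
	return (ret);
}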
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbHash*/ jobject jhash)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ JAVADB_API_BEGIN(db, jthis);
+ dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbji_set_h_hash_object(dbinfo, jnienv, db, jhash);
+ JAVADB_API_END(db);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*Dbc[]*/ jobjectArray curslist,
+ jint flags)
+{
+ int err;
+ DB *db = get_DB(jnienv, jthis);
+ int count = (*jnienv)->GetArrayLength(jnienv, curslist);
+ DBC **newlist = (DBC **)malloc(sizeof(DBC *) * (count+1));
+ DBC *dbc;
+ int i;
+
+ /* Convert the java array of Dbc's to a C array of DBC's. */
+ for (i=0; i<count; i++) {
+ jobject jobj = (*jnienv)->GetObjectArrayElement(jnienv, curslist, i);
+ if (jobj == 0) {
+ /*
+ * An embedded null in the array is treated
+ * as an endpoint.
+ */
+ newlist[i] = 0;
+ break;
+ }
+ else {
+ newlist[i] = get_DBC(jnienv, jobj);
+ }
+ }
+ newlist[count] = 0;
+
+ if (!verify_non_null(jnienv, db))
+ return (NULL);
+ JAVADB_API_BEGIN(db, jthis);
+
+ err = db->join(db, newlist, &dbc, flags);
+ free(newlist);
+ verify_return(jnienv, err, 0);
+
+ JAVADB_API_END(db);
+ return (get_Dbc(jnienv, dbc));
+}
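Db.join converts the Java Dbc[] into the NULL-terminated DBC array that DB->join expects. A brief sketch of driving the same call directly from C, assuming two secondary-index cursors have already been positioned (illustrative only):

#include <string.h>
#include <db.h>

/* Sketch: join two positioned cursors over a primary database. */
int
join_two(DB *primary, DBC *c0, DBC *c1)
{
	DBC *curslist[3], *join_curs;
	DBT key, data;
	int ret;

	curslist[0] = c0;
	curslist[1] = c1;
	curslist[2] = NULL;		/* the list must be NULL-terminated */
	if ((ret = primary->join(primary, curslist, &join_curs, 0)) != 0)
		return (ret);
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	while ((ret = join_curs->c_get(join_curs, &key, &data, 0)) == 0)
		;			/* each key/data pair is a match */
	(void)join_curs->c_close(join_curs);
	return (ret == DB_NOTFOUND ? 0 : ret);
}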
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject jtxn,
+ /*Dbt*/ jobject jkey, jobject /*DbKeyRange*/ range, jint flags)
+{
+ int err;
+ DB *db = get_DB(jnienv, jthis);
+ DB_TXN *txn = get_DB_TXN(jnienv, jtxn);
+ JDBT dbkey;
+ DB_KEY_RANGE result;
+ jfieldID fid;
+ jclass krclass;
+
+ if (!verify_non_null(jnienv, db))
+ return;
+ JAVADB_API_BEGIN(db, jthis);
+ if (!verify_non_null(jnienv, range))
+ return;
+ if (jdbt_lock(&dbkey, jnienv, jkey, inOp) != 0)
+ goto out;
+ err = db->key_range(db, txn, &dbkey.dbt->dbt, &result, flags);
+ if (verify_return(jnienv, err, 0)) {
+ /* fill in the values of the DbKeyRange structure */
+ krclass = get_class(jnienv, "DbKeyRange");
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "less", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.less);
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "equal", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.equal);
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "greater", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.greater);
+ }
+ out:
+ jdbt_unlock(&dbkey, jnienv);
+ JAVADB_API_END(db);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err;
+ DB *db;
+ DB_TXN *dbtxnid;
+ JDBT dbkey, dbdata;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (!verify_non_null(jnienv, db))
+ return (0); /* error will be thrown, retval doesn't matter */
+ JAVADB_API_BEGIN(db, jthis);
+
+ if (jdbt_lock(&dbkey, jnienv, key, inOp) != 0)
+ goto out2;
+ if (jdbt_lock(&dbdata, jnienv, data, inOp) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, db))
+ goto out1;
+ err = db->put(db, dbtxnid, &dbkey.dbt->dbt, &dbdata.dbt->dbt, flags);
+ if (err != DB_KEYEXIST) {
+ verify_return(jnienv, err, 0);
+ }
+ out1:
+ jdbt_unlock(&dbdata, jnienv);
+ out2:
+ jdbt_unlock(&dbkey, jnienv);
+ JAVADB_API_END(db);
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_rename
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring file,
+ jstring database, jstring newname, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+ JSTR j_file;
+ JSTR j_database;
+ JSTR j_newname;
+
+ db = get_DB(jnienv, jthis);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+ JAVADB_API_BEGIN(db, jthis);
+ if (jstr_lock(&j_file, jnienv, file) != 0)
+ goto out3;
+ if (jstr_lock(&j_database, jnienv, database) != 0)
+ goto out2;
+ if (jstr_lock(&j_newname, jnienv, newname) != 0)
+ goto out1;
+
+ err = db->rename(db, j_file.string, j_database.string,
+ j_newname.string, flags);
+
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+ dbji_dealloc(dbinfo, jnienv);
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+
+ out1:
+ jstr_unlock(&j_newname, jnienv);
+ out2:
+ jstr_unlock(&j_database, jnienv);
+ out3:
+ jstr_unlock(&j_file, jnienv);
+ /* don't call JAVADB_API_END - db cannot be used */
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_remove
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring file,
+ jstring database, jint flags)
+{
+ int err;
+ DB *db = get_DB(jnienv, jthis);
+ DB_JAVAINFO *dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ JSTR j_file;
+ JSTR j_database;
+
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);

+ if (!verify_non_null(jnienv, db))
+ return;
+ JAVADB_API_BEGIN(db, jthis);
+ if (jstr_lock(&j_file, jnienv, file) != 0)
+ goto out2;
+ if (jstr_lock(&j_database, jnienv, database) != 0)
+ goto out1;
+ err = db->remove(db, j_file.string, j_database.string, flags);
+
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+ dbji_dealloc(dbinfo, jnienv);
+
+ out1:
+ jstr_unlock(&j_database, jnienv);
+ out2:
+ jstr_unlock(&j_file, jnienv);
+ /* don't call JAVADB_API_END - db cannot be used */
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_Db_set_1pagesize
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jlong value)
+{
+ int err;
+ DB *db;
+
+ db = get_DB(jnienv, jthis);
+ if (verify_non_null(jnienv, db)) {
+ JAVADB_API_BEGIN(db, jthis);
+ err = db->set_pagesize(db, (u_int32_t)value);
+ verify_return(jnienv, err, 0);
+ JAVADB_API_END(db);
+ }
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_Db_set_1cachesize
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jint gbytes, jint bytes,
+ jint ncaches)
+{
+ int err;
+ DB *db;
+
+ db = get_DB(jnienv, jthis);
+ if (verify_non_null(jnienv, db)) {
+ JAVADB_API_BEGIN(db, jthis);
+ err = db->set_cachesize(db, gbytes, bytes, ncaches);
+ verify_return(jnienv, err, 0);
+ JAVADB_API_END(db);
+ }
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_Db_set_1re_1source
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring re_source)
+{
+ int err;
+ DB *db;
+
+ db = get_DB(jnienv, jthis);
+ if (verify_non_null(jnienv, db)) {
+ JAVADB_API_BEGIN(db, jthis);
+
+ /* XXX does the string from get_c_string ever get freed? */
+ if (re_source != NULL)
+ err = db->set_re_source(db, get_c_string(jnienv, re_source));
+ else
+ err = db->set_re_source(db, 0);
+
+ verify_return(jnienv, err, 0);
+ JAVADB_API_END(db);
+ }
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DB *db = get_DB(jnienv, jthis);
+ jobject retval = NULL;
+ jclass dbclass;
+ void *statp = 0;
+ DB_BTREE_STAT *bstp;
+ DB_HASH_STAT *hstp;
+ DB_QUEUE_STAT *qstp;
+
+ if (!verify_non_null(jnienv, db))
+ return (NULL);
+
+ JAVADB_API_BEGIN(db, jthis);
+
+ err = db->stat(db, &statp, NULL, flags);
+ if (verify_return(jnienv, err, 0)) {
+ DBTYPE dbtype = db->get_type(db);
+ switch (dbtype) {
+
+ /* Btree and recno share the same stat structure */
+ case DB_BTREE:
+ case DB_RECNO:
+ bstp = (DB_BTREE_STAT *)statp;
+ retval = create_default_object(jnienv,
+ name_DB_BTREE_STAT);
+ dbclass = get_class(jnienv, name_DB_BTREE_STAT);
+
+ /* Set the individual fields */
+ set_int_field(jnienv, dbclass, retval,
+ "bt_magic", bstp->bt_magic);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_version", bstp->bt_version);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_metaflags", bstp->bt_metaflags);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_nkeys", bstp->bt_nkeys);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_ndata", bstp->bt_ndata);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_pagesize", bstp->bt_pagesize);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_maxkey", bstp->bt_maxkey);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_minkey", bstp->bt_minkey);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_re_len", bstp->bt_re_len);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_re_pad", bstp->bt_re_pad);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_levels", bstp->bt_levels);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_int_pg", bstp->bt_int_pg);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_leaf_pg", bstp->bt_leaf_pg);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_dup_pg", bstp->bt_dup_pg);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_over_pg", bstp->bt_over_pg);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_free", bstp->bt_free);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_int_pgfree", bstp->bt_int_pgfree);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_leaf_pgfree", bstp->bt_leaf_pgfree);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_dup_pgfree", bstp->bt_dup_pgfree);
+ set_int_field(jnienv, dbclass, retval,
+ "bt_over_pgfree", bstp->bt_over_pgfree);
+
+ break;
+
+ /* Hash stat structure */
+ case DB_HASH:
+ hstp = (DB_HASH_STAT *)statp;
+ retval = create_default_object(jnienv,
+ name_DB_HASH_STAT);
+ dbclass = get_class(jnienv, name_DB_HASH_STAT);
+
+ /* Set the individual fields */
+ set_int_field(jnienv, dbclass, retval,
+ "hash_magic", hstp->hash_magic);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_version", hstp->hash_version);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_metaflags", hstp->hash_metaflags);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_nkeys", hstp->hash_nkeys);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_ndata", hstp->hash_ndata);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_pagesize", hstp->hash_pagesize);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_nelem", hstp->hash_nelem);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_ffactor", hstp->hash_ffactor);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_buckets", hstp->hash_buckets);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_free", hstp->hash_free);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_bfree", hstp->hash_bfree);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_bigpages", hstp->hash_bigpages);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_big_bfree", hstp->hash_big_bfree);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_overflows", hstp->hash_overflows);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_ovfl_free", hstp->hash_ovfl_free);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_dup", hstp->hash_dup);
+ set_int_field(jnienv, dbclass, retval,
+ "hash_dup_free", hstp->hash_dup_free);
+
+ break;
+
+ case DB_QUEUE:
+ qstp = (DB_QUEUE_STAT *)statp;
+ retval = create_default_object(jnienv,
+ name_DB_QUEUE_STAT);
+ dbclass = get_class(jnienv, name_DB_QUEUE_STAT);
+
+ /* Set the individual fields */
+ set_int_field(jnienv, dbclass, retval,
+ "qs_magic", qstp->qs_magic);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_version", qstp->qs_version);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_metaflags", qstp->qs_metaflags);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_nkeys", qstp->qs_nkeys);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_ndata", qstp->qs_ndata);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_pagesize", qstp->qs_pagesize);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_pages", qstp->qs_pages);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_re_len", qstp->qs_re_len);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_re_pad", qstp->qs_re_pad);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_pgfree", qstp->qs_pgfree);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_first_recno", qstp->qs_first_recno);
+ set_int_field(jnienv, dbclass, retval,
+ "qs_cur_recno", qstp->qs_cur_recno);
+ break;
+
+ /* That's all the database types we're aware of! */
+ default:
+ report_exception(jnienv,
+		    "Db.stat not implemented for types "
+ "other than HASH, BTREE and RECNO",
+ EINVAL, 0);
+ break;
+ }
+ free(statp);
+ }
+ JAVADB_API_END(db);
+ return (retval);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_sync
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jint flags)
+{
+ int err;
+ DB *db = get_DB(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, db))
+ return (0);
+ JAVADB_API_BEGIN(db, jthis);
+ err = db->sync(db, flags);
+ if (err != DB_INCOMPLETE)
+ verify_return(jnienv, err, 0);
+ JAVADB_API_END(db);
+ return (err);
+}
+
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ DB *db;
+ jboolean retval;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ JAVADB_API_BEGIN(db, jthis);
+ retval = db->get_byteswapped(db) ? 1 : 0;
+ JAVADB_API_END(db);
+ return (retval);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ DB *db;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ return ((jint)db->type);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring file, jstring database,
+ jint type, jint flags, jint mode)
+{
+ int err;
+ DB *db;
+ JSTR dbfile;
+ JSTR dbdatabase;
+
+ /* Java is assumed to be threaded. */
+ flags |= DB_THREAD;
+
+ db = get_DB(jnienv, jthis);
+ if (jstr_lock(&dbfile, jnienv, file) != 0)
+ goto out2;
+ if (jstr_lock(&dbdatabase, jnienv, database) != 0)
+ goto out1;
+ if (verify_non_null(jnienv, db)) {
+ JAVADB_API_BEGIN(db, jthis);
+ err = db->open(db, dbfile.string, dbdatabase.string,
+ (DBTYPE)type, flags, mode);
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+ JAVADB_API_END(db);
+ }
+ out1:
+ jstr_unlock(&dbdatabase, jnienv);
+ out2:
+ jstr_unlock(&dbfile, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring name,
+ jint flags)
+{
+ int err;
+ DB *db = get_DB(jnienv, jthis);
+ JSTR j_name;
+
+ if (verify_non_null(jnienv, db)) {
+ JAVADB_API_BEGIN(db, jthis);
+ if (jstr_lock(&j_name, jnienv, name) != 0)
+ goto out;
+ err = db->upgrade(db, j_name.string, flags);
+ verify_return(jnienv, err, 0);
+ JAVADB_API_END(db);
+ }
+ out:
+ jstr_unlock(&j_name, jnienv);
+}
+
+static int java_verify_callback(void *handle, const void *str_arg)
+{
+ char *str;
+ struct verify_callback_struct *vc;
+ int len;
+ jthrowable except;
+ JNIEnv *jnienv;
+
+ str = (char *)str_arg;
+ vc = (struct verify_callback_struct *)handle;
+ jnienv = vc->env;
+ len = strlen(str)+1;
+ if (len > vc->nbytes) {
+ vc->nbytes = len;
+ vc->bytes = (*jnienv)->NewByteArray(jnienv, len);
+ }
+ (*jnienv)->SetByteArrayRegion(jnienv, vc->bytes, 0, len, (jbyte*)str);
+ (*jnienv)->CallVoidMethod(jnienv, vc->streamobj,
+ vc->writemid, vc->bytes, 0, len-1);
+
+ if ((except = (*jnienv)->ExceptionOccurred(jnienv)) != NULL)
+ return (EIO);
+
+ return (0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring name,
+ jstring subdb, jobject stream, jint flags)
+{
+ int err;
+ DB *db;
+ JSTR j_name;
+ JSTR j_subdb;
+ struct verify_callback_struct vcs;
+ jclass streamclass;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+ JAVADB_API_BEGIN(db, jthis);
+
+ if (jstr_lock(&j_name, jnienv, name) != 0)
+ goto out2;
+ if (jstr_lock(&j_subdb, jnienv, subdb) != 0)
+ goto out1;
+
+ /* set up everything we need for the callbacks */
+ vcs.env = jnienv;
+ vcs.streamobj = stream;
+ vcs.nbytes = 100;
+ vcs.bytes = (*jnienv)->NewByteArray(jnienv, vcs.nbytes);
+
+ /* get the method ID for OutputStream.write(byte[], int, int); */
+ streamclass = (*jnienv)->FindClass(jnienv, "java/io/OutputStream");
+ vcs.writemid = (*jnienv)->GetMethodID(jnienv, streamclass,
+ "write", "([BII)V");
+
+ /* invoke verify - this will invoke the callback repeatedly. */
+ err = __db_verify_internal(db, j_name.string, j_subdb.string,
+ &vcs, java_verify_callback, flags);
+ verify_return(jnienv, err, 0);
+
+out1:
+ jstr_unlock(&j_subdb, jnienv);
+out2:
+ jstr_unlock(&j_name, jnienv);
+ JAVADB_API_END(db);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
+ (JNIEnv *jnienv, jobject jthis,
+ jobject /*DbErrcall*/ errcall, jstring errpfx)
+{
+ DB_JAVAINFO *dbinfo;
+ DB *db;
+
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ db = get_DB(jnienv, jthis);
+ DB_ASSERT(dbinfo != NULL);
+
+ /* Note: We can never be sure if the underlying DB is attached to
+ * a DB_ENV that was already closed. Sure, that's a user error,
+ * but it shouldn't crash the VM. Therefore, we cannot just
+ * automatically close if the handle indicates we are not yet
+ * closed. The best we can do is detect this and report it.
+ */
+ if (db != NULL) {
+ /* If this error occurs, this object was never closed. */
+ report_errcall(jnienv, errcall, errpfx,
+ "Db.finalize: open Db object destroyed");
+ }
+
+ /* Shouldn't see this object again, but just in case */
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+ set_private_info(jnienv, name_DB, jthis, 0);
+
+ dbji_destroy(dbinfo, jnienv);
+}
diff --git a/bdb/libdb_java/java_DbEnv.c b/bdb/libdb_java/java_DbEnv.c
new file mode 100644
index 00000000000..ff9207dd2c8
--- /dev/null
+++ b/bdb/libdb_java/java_DbEnv.c
@@ -0,0 +1,1300 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbEnv.c,v 11.37 2001/01/11 18:19:52 bostic Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbEnv.h"
+
+/* We keep these lined up, and alphabetical by field name,
+ * for comparison with C++'s list.
+ */
+JAVADB_WO_ACCESS_STRING(DbEnv, data_1dir, DB_ENV, data_dir)
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lg_1bsize, DB_ENV, lg_bsize)
+JAVADB_WO_ACCESS_STRING(DbEnv, lg_1dir, DB_ENV, lg_dir)
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lg_1max, DB_ENV, lg_max)
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1detect, DB_ENV, lk_detect)
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max, DB_ENV, lk_max)
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max_1locks, DB_ENV, lk_max_locks)
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max_1lockers, DB_ENV, lk_max_lockers)
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max_1objects, DB_ENV, lk_max_objects)
+/* mp_mmapsize is declared below, it needs an extra cast */
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, mutexlocks, DB_ENV, mutexlocks)
+JAVADB_WO_ACCESS_STRING(DbEnv, tmp_1dir, DB_ENV, tmp_dir)
+JAVADB_WO_ACCESS_METHOD(DbEnv, jint, tx_1max, DB_ENV, tx_max)
+
+static void DbEnv_errcall_callback(const char *prefix, char *message)
+{
+ JNIEnv *jnienv;
+ DB_ENV_JAVAINFO *envinfo = (DB_ENV_JAVAINFO *)prefix;
+ jstring pre;
+
+ /* Note: these error cases are "impossible", and would
+ * normally warrant an exception. However, without
+ * a jnienv, we cannot throw an exception...
+ * We don't want to trap or exit, since the point of
+ * this facility is for the user to completely control
+ * error situations.
+ */
+ if (envinfo == NULL) {
+ /* Something is *really* wrong here, the
+ * prefix is set in every environment created.
+ */
+ fprintf(stderr, "Error callback failed!\n");
+ fprintf(stderr, "error: %s\n", message);
+ return;
+ }
+
+ /* Should always succeed... */
+ jnienv = dbjie_get_jnienv(envinfo);
+
+ if (jnienv == NULL) {
+
+ /* But just in case... */
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ fprintf(stderr, "error: %s\n", message);
+ return;
+ }
+
+ pre = dbjie_get_errpfx(envinfo, jnienv);
+ report_errcall(jnienv, dbjie_get_errcall(envinfo), pre, message);
+}
+
+static void DbEnv_initialize(JNIEnv *jnienv, DB_ENV *dbenv,
+ /*DbEnv*/ jobject jenv,
+ /*DbErrcall*/ jobject jerrcall,
+ int is_dbopen)
+{
+ DB_ENV_JAVAINFO *envinfo;
+
+ envinfo = get_DB_ENV_JAVAINFO(jnienv, jenv);
+ DB_ASSERT(envinfo == NULL);
+ envinfo = dbjie_construct(jnienv, jerrcall, is_dbopen);
+ set_private_info(jnienv, name_DB_ENV, jenv, envinfo);
+ dbenv->set_errpfx(dbenv, (const char*)envinfo);
+ dbenv->set_errcall(dbenv, DbEnv_errcall_callback);
+ dbenv->cj_internal = envinfo;
+ set_private_dbobj(jnienv, name_DB_ENV, jenv, dbenv);
+}
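DbEnv_initialize repurposes the error-prefix slot to carry the per-environment DB_ENV_JAVAINFO pointer into the C error callback, which then recovers the JNIEnv and the Java error handler from it. A minimal sketch of that context-through-errpfx technique with a plain C struct (names are illustrative, not part of this patch):

#include <stdio.h>
#include <db.h>

struct my_ctx {				/* hypothetical per-env context */
	const char *tag;
};

static void
my_errcall(const char *prefix, char *msg)
{
	/* The "prefix" is really the context pointer installed below. */
	const struct my_ctx *ctx = (const struct my_ctx *)prefix;

	fprintf(stderr, "[%s] %s\n", ctx->tag, msg);
}

static void
install_errcall(DB_ENV *dbenv, struct my_ctx *ctx)
{
	dbenv->set_errpfx(dbenv, (const char *)ctx);
	dbenv->set_errcall(dbenv, my_errcall);
}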
+
+/* This is called when this DbEnv was made on behalf of a Db
+ * created directly (without a parent DbEnv), and the Db is
+ * being closed. We'll zero out the pointer to the DB_ENV,
+ * since it is no longer valid, to prevent mistakes.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (dbenvinfo != NULL)
+ dbjie_dealloc(dbenvinfo, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbFeedback*/ jobject jfeedback)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ dbjie_set_feedback_object(dbenvinfo, jnienv, dbenv, jfeedback);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject /*DbErrcall*/ jerrcall,
+ jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+
+ err = db_env_create(&dbenv, flags);
+ if (verify_return(jnienv, err, 0))
+ DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject /*DbErrcall*/ jerrcall,
+ /*Db*/ jobject jdb)
+{
+ DB_ENV *dbenv;
+ DB *db;
+
+ db = get_DB(jnienv, jdb);
+ dbenv = db->dbenv;
+ DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 1);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring db_home,
+ jint flags, jint mode)
+{
+ int err;
+ DB_ENV *dbenv;
+ JSTR j_home;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ if (jstr_lock(&j_home, jnienv, db_home) != 0)
+ goto out;
+
+ /* Java is assumed to be threaded. */
+ flags |= DB_THREAD;
+
+ err = dbenv->open(dbenv, j_home.string, flags, mode);
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+ out:
+ jstr_unlock(&j_home, jnienv);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring db_home, jint flags)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+ JSTR j_home;
+ int err = 0;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ if (jstr_lock(&j_home, jnienv, db_home) != 0)
+ goto out;
+
+ err = dbenv->remove(dbenv, j_home.string, flags);
+ set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+
+ if (dbenvinfo != NULL)
+ dbjie_dealloc(dbenvinfo, jnienv);
+
+ verify_return(jnienv, err, 0);
+ out:
+ jstr_unlock(&j_home, jnienv);
+ /* don't call JAVADB_ENV_API_END - env cannot be used */
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ err = dbenv->close(dbenv, flags);
+ set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+
+ if (dbenvinfo != NULL)
+ dbjie_dealloc(dbenvinfo, jnienv);
+
+ /* Throw an exception if the close failed. */
+ verify_return(jnienv, err, 0);
+
+ /* don't call JAVADB_ENV_API_END - env cannot be used */
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint ecode, jstring msg)
+{
+ JSTR msg_string;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ if (jstr_lock(&msg_string, jnienv, msg) != 0)
+ goto out;
+
+ dbenv->err(dbenv, ecode, msg_string.string);
+ out:
+ jstr_unlock(&msg_string, jnienv);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring msg)
+{
+ JSTR msg_string;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ if (jstr_lock(&msg_string, jnienv, msg) != 0)
+ goto out;
+
+ dbenv->errx(dbenv, msg_string.string);
+ out:
+ jstr_unlock(&msg_string, jnienv);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+/*static*/
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
+ (JNIEnv *jnienv, jclass jthis_class, jint ecode)
+{
+ const char *message;
+
+ COMPQUIET(jthis_class, NULL);
+ message = db_strerror(ecode);
+ return (get_java_string(jnienv, message));
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1cachesize
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint gbytes, jint bytes,
+ jint ncaches)
+{
+ DB_ENV *dbenv;
+ int err;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (verify_non_null(jnienv, dbenv)) {
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = dbenv->set_cachesize(dbenv, gbytes, bytes, ncaches);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1flags
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags, jint onoff)
+{
+ DB_ENV *dbenv;
+ int err;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (verify_non_null(jnienv, dbenv)) {
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = dbenv->set_flags(dbenv, flags, onoff);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1mp_1mmapsize
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jlong value)
+{
+ DB_ENV *dbenv;
+ int err;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (verify_non_null(jnienv, dbenv)) {
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = dbenv->set_mp_mmapsize(dbenv, (size_t)value);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+/*static*/
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1pageyield
+ (JNIEnv *jnienv, jclass jthis_class, jint value)
+{
+ int err;
+
+ COMPQUIET(jthis_class, NULL);
+ err = db_env_set_pageyield(value);
+ verify_return(jnienv, err, 0);
+}
+
+/*static*/
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1panicstate
+ (JNIEnv *jnienv, jclass jthis_class, jint value)
+{
+ int err;
+
+ COMPQUIET(jthis_class, NULL);
+ err = db_env_set_panicstate(value);
+ verify_return(jnienv, err, 0);
+}
+
+/*static*/
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1region_1init
+ (JNIEnv *jnienv, jclass jthis_class, jint value)
+{
+ int err;
+
+ COMPQUIET(jthis_class, NULL);
+ err = db_env_set_region_init(value);
+ verify_return(jnienv, err, 0);
+}
+
+/*static*/
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1tas_1spins
+ (JNIEnv *jnienv, jclass jthis_class, jint value)
+{
+ int err;
+
+ COMPQUIET(jthis_class, NULL);
+ err = db_env_set_tas_spins(value);
+ verify_return(jnienv, err, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_recovery_1init_1changed
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbRecoveryInit*/ jobject jrecoveryinit)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv) ||
+	    !verify_non_null(jnienv, dbenvinfo))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ dbjie_set_recovery_init_object(dbenvinfo, jnienv, dbenv, jrecoveryinit);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobjectArray array)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+ int err;
+ jsize i, len;
+ unsigned char *newarr;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ len = (*jnienv)->GetArrayLength(jnienv, array);
+
+ newarr = (unsigned char *)malloc(sizeof(unsigned char) * len * len);
+
+ for (i=0; i<len; i++) {
+ jobject subArray =
+ (*jnienv)->GetObjectArrayElement(jnienv, array, i);
+ (*jnienv)->GetByteArrayRegion(jnienv, (jbyteArray)subArray,
+ 0, len,
+ (jbyte *)&newarr[i*len]);
+ }
+ dbjie_set_conflict(dbenvinfo, newarr);
+ err = dbenv->set_lk_conflicts(dbenv, newarr, len);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+}
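set_lk_conflicts flattens the Java byte[][] into a single len*len array, i.e. the row-major conflict matrix the native call expects. A small sketch with a hypothetical two-mode scheme (mode 0 = no lock, mode 1 = exclusive; illustrative only):

#include <db.h>

/* Sketch: conflicts[requested][held] != 0 means the request blocks. */
static int
set_simple_conflicts(DB_ENV *dbenv)
{
	static unsigned char conflicts[2][2] = {
		/* held:      none  excl */
		/* none */  {    0,    0 },
		/* excl */  {    0,    1 },
	};

	return (dbenv->set_lk_conflicts(dbenv,
	    (unsigned char *)conflicts, 2));
}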
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1server
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring jhost, jlong tsec,
+ jlong ssec, jint flags)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ const char *host = (*jnienv)->GetStringUTFChars(jnienv, jhost, NULL);
+
+ if (verify_non_null(jnienv, dbenv)) {
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = dbenv->set_server(dbenv, (char *)host,
+ (long)tsec, (long)ssec, flags);
+
+ /* Throw an exception if the call failed. */
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1shm_1key
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jlong shm_key)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+
+ if (verify_non_null(jnienv, dbenv)) {
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = dbenv->set_shm_key(dbenv, (long)shm_key);
+
+ /* Throw an exception if the call failed. */
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jlong seconds)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ time_t time = seconds;
+
+ if (verify_non_null(jnienv, dbenv)) {
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = dbenv->set_tx_timestamp(dbenv, &time);
+
+ /* Throw an exception if the call failed. */
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1verbose
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint which, jint onoff)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+
+ if (verify_non_null(jnienv, dbenv)) {
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = dbenv->set_verbose(dbenv, which, onoff);
+
+ /* Throw an exception if the call failed. */
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
+ (JNIEnv * jnienv, jclass this_class)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(this_class, NULL);
+
+ return (DB_VERSION_MAJOR);
+}
+
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1minor
+ (JNIEnv * jnienv, jclass this_class)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(this_class, NULL);
+
+ return (DB_VERSION_MINOR);
+}
+
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1patch
+ (JNIEnv * jnienv, jclass this_class)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(this_class, NULL);
+
+ return (DB_VERSION_PATCH);
+}
+
+/*static*/
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1string
+ (JNIEnv *jnienv, jclass this_class)
+{
+ COMPQUIET(this_class, NULL);
+
+ return ((*jnienv)->NewStringUTF(jnienv, DB_VERSION_STRING));
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ int err;
+ u_int32_t id;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (-1);
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = lock_id(dbenv, &id);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ return (id);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ DB_LOCK_STAT *statp = NULL;
+ jobject retval = NULL;
+ jclass dbclass;
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ /* We cannot use the default allocator (on Win* platforms anyway)
+ * because it often causes problems when we free storage
+ * in a DLL that was allocated in another DLL. Using
+ * our own allocator (ours just calls malloc!) ensures
+ * that there is no mismatch.
+ */
+ err = lock_stat(dbenv, &statp, NULL);
+ if (verify_return(jnienv, err, 0)) {
+ retval = create_default_object(jnienv, name_DB_LOCK_STAT);
+ dbclass = get_class(jnienv, name_DB_LOCK_STAT);
+
+ /* Set the individual fields */
+ set_int_field(jnienv, dbclass, retval,
+ "st_maxlocks", statp->st_maxlocks);
+ set_int_field(jnienv, dbclass, retval,
+ "st_nmodes", statp->st_nmodes);
+ set_int_field(jnienv, dbclass, retval,
+ "st_nlockers", statp->st_nlockers);
+ set_int_field(jnienv, dbclass, retval,
+ "st_nconflicts", statp->st_nconflicts);
+ set_int_field(jnienv, dbclass, retval,
+ "st_nrequests", statp->st_nrequests);
+ set_int_field(jnienv, dbclass, retval,
+ "st_nreleases", statp->st_nreleases);
+ set_int_field(jnienv, dbclass, retval,
+ "st_ndeadlocks", statp->st_ndeadlocks);
+ set_int_field(jnienv, dbclass, retval,
+ "st_region_wait", statp->st_region_wait);
+ set_int_field(jnienv, dbclass, retval,
+ "st_region_nowait", statp->st_region_nowait);
+ set_int_field(jnienv, dbclass, retval,
+ "st_regsize", statp->st_regsize);
+
+ free(statp);
+ }
+ JAVADB_ENV_API_END(dbenv);
+ return (retval);
+}
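The allocator note above recurs in all of these stat wrappers: a statistics block must be freed by the same heap that allocated it, which matters when the library and the JNI layer live in different DLLs. A sketch of handing the three-argument lock_stat an explicit malloc-style allocator so the caller controls that heap (the meaning of the third argument is an assumption inferred from the NULL passed above):

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

static void *
my_alloc(size_t n)			/* same heap as the free() below */
{
	return (malloc(n));
}

static int
print_nlockers(DB_ENV *dbenv)
{
	DB_LOCK_STAT *sp;
	int ret;

	if ((ret = lock_stat(dbenv, &sp, my_alloc)) != 0)
		return (ret);
	printf("lockers: %lu\n", (unsigned long)sp->st_nlockers);
	free(sp);			/* freed from the matching heap */
	return (0);
}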
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint atype, jint flags)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ int aborted;
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (0);
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = lock_detect(dbenv, atype, flags, &aborted);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ return (aborted);
+}
+
+JNIEXPORT /*DbLock*/ jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*u_int32_t*/ jint locker,
+ jint flags, /*const Dbt*/ jobject obj, /*db_lockmode_t*/ jint lock_mode)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_LOCK *dblock;
+ JDBT dbobj;
+ /*DbLock*/ jobject retval;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ dblock = (DB_LOCK*)malloc(sizeof(DB_LOCK));
+ memset(dblock, 0, sizeof(DB_LOCK));
+ err = 0;
+ retval = NULL;
+ if (jdbt_lock(&dbobj, jnienv, obj, inOp) != 0)
+ goto out;
+
+ err = lock_get(dbenv, locker, flags, &dbobj.dbt->dbt,
+ (db_lockmode_t)lock_mode, dblock);
+ if (verify_return(jnienv, err, 0)) {
+ retval = create_default_object(jnienv, name_DB_LOCK);
+ set_private_dbobj(jnienv, name_DB_LOCK, retval, dblock);
+ }
+ out:
+ jdbt_unlock(&dbobj, jnienv);
+ JAVADB_ENV_API_END(dbenv);
+ return (retval);
+}
+
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err, len, i;
+ char** ret;
+ jclass stringClass;
+ jobjectArray strarray;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ strarray = NULL;
+ if (!verify_non_null(jnienv, dbenv))
+ return (0);
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = log_archive(dbenv, &ret, flags, 0);
+ if (!verify_return(jnienv, err, 0))
+ return (0);
+
+ if (ret != NULL) {
+ len = 0;
+ while (ret[len] != NULL)
+ len++;
+ stringClass = (*jnienv)->FindClass(jnienv, "java/lang/String");
+ strarray = (*jnienv)->NewObjectArray(jnienv, len,
+ stringClass, 0);
+ for (i=0; i<len; i++) {
+ jstring str = (*jnienv)->NewStringUTF(jnienv, ret[i]);
+ (*jnienv)->SetObjectArrayElement(jnienv, strarray,
+ i, str);
+ }
+ }
+ JAVADB_ENV_API_END(dbenv);
+ return (strarray);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
+ (JNIEnv *jnienv, jclass jthis_class,
+ /*DbLsn*/ jobject lsn0, /*DbLsn*/ jobject lsn1)
+{
+ DB_LSN *dblsn0;
+ DB_LSN *dblsn1;
+
+ COMPQUIET(jthis_class, NULL);
+ dblsn0 = get_DB_LSN(jnienv, lsn0);
+ dblsn1 = get_DB_LSN(jnienv, lsn1);
+
+ return (log_compare(dblsn0, dblsn1));
+}
+
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ DB_LSN *dblsn = get_DB_LSN(jnienv, lsn);
+ char filename[FILENAME_MAX+1] = "";
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ err = log_file(dbenv, dblsn, filename, FILENAME_MAX);
+ verify_return(jnienv, err, 0);
+ filename[FILENAME_MAX] = '\0'; /* just to be sure */
+ JAVADB_ENV_API_END(dbenv);
+ return (get_java_string(jnienv, filename));
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1flush
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ DB_LSN *dblsn = get_DB_LSN(jnienv, lsn);
+
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ err = log_flush(dbenv, dblsn);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1get
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn,
+ /*DbDbt*/ jobject data, jint flags)
+{
+ int err, retry;
+ DB_ENV *dbenv;
+ DB_LSN *dblsn;
+ JDBT dbdata;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dblsn = get_DB_LSN(jnienv, lsn);
+
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ if (jdbt_lock(&dbdata, jnienv, data, outOp) != 0)
+ goto out;
+
+ for (retry = 0; retry < 3; retry++) {
+ err = log_get(dbenv, dblsn, &dbdata.dbt->dbt, flags);
+ /* If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!jdbt_realloc(&dbdata, jnienv))
+ break;
+ }
+
+ verify_return(jnienv, err, 0);
+
+ out:
+ jdbt_unlock(&dbdata, jnienv);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn,
+ /*DbDbt*/ jobject data, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_LSN *dblsn;
+ JDBT dbdata;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dblsn = get_DB_LSN(jnienv, lsn);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ if (jdbt_lock(&dbdata, jnienv, data, inOp) != 0)
+ goto out;
+
+ err = log_put(dbenv, dblsn, &dbdata.dbt->dbt, flags);
+ verify_return(jnienv, err, 0);
+ out:
+ jdbt_unlock(&dbdata, jnienv);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1register
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*Db*/ jobject dbp,
+ jstring name)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB *dbdb;
+ JSTR dbname;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbdb = get_DB(jnienv, dbp);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ if (jstr_lock(&dbname, jnienv, name) != 0)
+ goto out;
+
+ err = log_register(dbenv, dbdb, dbname.string);
+ verify_return(jnienv, err, 0);
+ out:
+ jstr_unlock(&dbname, jnienv);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1unregister
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*Db*/ jobject dbp)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB *dbdb;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbdb = get_DB(jnienv, dbp);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ err = log_unregister(dbenv, dbdb);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_LOG_STAT *statp;
+ jobject retval;
+ jclass dbclass;
+
+ retval = NULL;
+ statp = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ /* We cannot use the default allocator (on Win* platforms anyway)
+ * because it often causes problems when we free storage
+ * in a DLL that was allocated in another DLL. Using
+ * our own allocator (ours just calls malloc!) ensures
+ * that there is no mismatch.
+ */
+ err = log_stat(dbenv, &statp, NULL);
+ if (verify_return(jnienv, err, 0)) {
+ retval = create_default_object(jnienv, name_DB_LOG_STAT);
+ dbclass = get_class(jnienv, name_DB_LOG_STAT);
+
+ /* Set the individual fields */
+ set_int_field(jnienv, dbclass, retval,
+ "st_magic", statp->st_magic);
+ set_int_field(jnienv, dbclass, retval,
+ "st_version", statp->st_version);
+ set_int_field(jnienv, dbclass, retval,
+ "st_mode", statp->st_mode);
+ set_int_field(jnienv, dbclass, retval,
+ "st_lg_max", statp->st_lg_max);
+ set_int_field(jnienv, dbclass, retval,
+ "st_w_bytes", statp->st_w_bytes);
+ set_int_field(jnienv, dbclass, retval,
+ "st_w_mbytes", statp->st_w_mbytes);
+ set_int_field(jnienv, dbclass, retval,
+ "st_wc_bytes", statp->st_wc_bytes);
+ set_int_field(jnienv, dbclass, retval,
+ "st_wc_mbytes", statp->st_wc_mbytes);
+ set_int_field(jnienv, dbclass, retval,
+ "st_wcount", statp->st_wcount);
+ set_int_field(jnienv, dbclass, retval,
+ "st_scount", statp->st_scount);
+ set_int_field(jnienv, dbclass, retval,
+ "st_region_wait", statp->st_region_wait);
+ set_int_field(jnienv, dbclass, retval,
+ "st_region_nowait", statp->st_region_nowait);
+ set_int_field(jnienv, dbclass, retval,
+ "st_cur_file", statp->st_cur_file);
+ set_int_field(jnienv, dbclass, retval,
+ "st_cur_offset", statp->st_cur_offset);
+ set_int_field(jnienv, dbclass, retval,
+ "st_regsize", statp->st_regsize);
+
+ free(statp);
+ }
+ JAVADB_ENV_API_END(dbenv);
+ return (retval);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ int err;
+ jclass dbclass;
+ DB_ENV *dbenv;
+ DB_MPOOL_STAT *statp;
+ jobject retval;
+
+ retval = NULL;
+ statp = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ /* We cannot use the default allocator (on Win* platforms anyway)
+ * because it often causes problems when we free storage
+ * in a DLL that was allocated in another DLL. Using
+ * our own allocator (ours just calls malloc!) ensures
+ * that there is no mismatch.
+ */
+ err = memp_stat(dbenv, &statp, 0, NULL);
+ if (verify_return(jnienv, err, 0)) {
+ retval = create_default_object(jnienv, name_DB_MPOOL_STAT);
+ dbclass = get_class(jnienv, name_DB_MPOOL_STAT);
+
+ set_int_field(jnienv, dbclass, retval, "st_cachesize", 0);
+ set_int_field(jnienv, dbclass, retval,
+ "st_cache_hit", statp->st_cache_hit);
+ set_int_field(jnienv, dbclass, retval,
+ "st_cache_miss", statp->st_cache_miss);
+ set_int_field(jnienv, dbclass, retval,
+ "st_map", statp->st_map);
+ set_int_field(jnienv, dbclass, retval,
+ "st_page_create", statp->st_page_create);
+ set_int_field(jnienv, dbclass, retval,
+ "st_page_in", statp->st_page_in);
+ set_int_field(jnienv, dbclass, retval,
+ "st_page_out", statp->st_page_out);
+ set_int_field(jnienv, dbclass, retval,
+ "st_ro_evict", statp->st_ro_evict);
+ set_int_field(jnienv, dbclass, retval,
+ "st_rw_evict", statp->st_rw_evict);
+ set_int_field(jnienv, dbclass, retval,
+ "st_hash_buckets", statp->st_hash_buckets);
+ set_int_field(jnienv, dbclass, retval,
+ "st_hash_searches", statp->st_hash_searches);
+ set_int_field(jnienv, dbclass, retval,
+ "st_hash_longest", statp->st_hash_longest);
+ set_int_field(jnienv, dbclass, retval,
+ "st_hash_examined", statp->st_hash_examined);
+ set_int_field(jnienv, dbclass, retval,
+ "st_page_clean", statp->st_page_clean);
+ set_int_field(jnienv, dbclass, retval,
+ "st_page_dirty", statp->st_page_dirty);
+ set_int_field(jnienv, dbclass, retval,
+ "st_page_trickle", statp->st_page_trickle);
+ set_int_field(jnienv, dbclass, retval,
+ "st_region_wait", statp->st_region_wait);
+ set_int_field(jnienv, dbclass, retval,
+ "st_region_nowait", statp->st_region_nowait);
+ set_int_field(jnienv, dbclass, retval,
+ "st_regsize", statp->st_regsize);
+
+ free(statp);
+ }
+ JAVADB_ENV_API_END(dbenv);
+ return (retval);
+}
+
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ int err, i, len;
+ jclass fstat_class;
+ DB_ENV *dbenv;
+ DB_MPOOL_FSTAT **fstatp;
+ jobjectArray retval;
+ jfieldID filename_id;
+ jstring jfilename;
+
+ fstatp = NULL;
+ retval = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ /* We cannot use the default allocator (on Win* platforms anyway)
+ * because it often causes problems when we free storage
+ * in a DLL that was allocated in another DLL. Using
+ * our own allocator (ours just calls malloc!) ensures
+ * that there is no mismatch.
+ */
+ err = memp_stat(dbenv, 0, &fstatp, NULL);
+ if (verify_return(jnienv, err, 0)) {
+ len = 0;
+ while (fstatp[len])
+ len++;
+ fstat_class = get_class(jnienv, name_DB_MPOOL_FSTAT);
+ retval = (*jnienv)->NewObjectArray(jnienv, len,
+ fstat_class, 0);
+ for (i=0; i<len; i++) {
+ jobject obj = create_default_object(jnienv,
+ name_DB_MPOOL_FSTAT);
+ (*jnienv)->SetObjectArrayElement(jnienv, retval,
+ i, obj);
+
+ /* Set the string field. */
+ filename_id =
+ (*jnienv)->GetFieldID(jnienv, fstat_class,
+ "file_name",
+ string_signature);
+ jfilename =
+ get_java_string(jnienv, fstatp[i]->file_name);
+ (*jnienv)->SetObjectField(jnienv, obj,
+ filename_id, jfilename);
+
+ set_int_field(jnienv, fstat_class, obj,
+ "st_pagesize", fstatp[i]->st_pagesize);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_cache_hit", fstatp[i]->st_cache_hit);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_cache_miss", fstatp[i]->st_cache_miss);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_map", fstatp[i]->st_map);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_page_create", fstatp[i]->st_page_create);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_page_in", fstatp[i]->st_page_in);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_page_out", fstatp[i]->st_page_out);
+ free(fstatp[i]);
+ }
+ free(fstatp);
+ }
+ JAVADB_ENV_API_END(dbenv);
+ return (retval);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint pct)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ int result = 0;
+
+ if (verify_non_null(jnienv, dbenv)) {
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = memp_trickle(dbenv, pct, &result);
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ }
+ return (result);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject pid, jint flags)
+{
+ int err;
+ DB_TXN *dbpid, *result;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (0);
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ dbpid = get_DB_TXN(jnienv, pid);
+ result = 0;
+
+ err = txn_begin(dbenv, dbpid, &result, flags);
+ if (!verify_return(jnienv, err, 0))
+ return (0);
+ JAVADB_ENV_API_END(dbenv);
+ return (get_DbTxn(jnienv, result));
+}
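+
+/* Illustrative Java-side transaction bracket built on the wrapper
+ * above and on DbTxn.commit (implemented in java_DbTxn.c below);
+ * dbenv is an assumed, already-opened DbEnv.  Sketch only:
+ *
+ *	DbTxn txn = dbenv.txn_begin(null, 0);
+ *	... transactional work ...
+ *	txn.commit(0);
+ */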
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_txn_1checkpoint
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint kbyte, jint min, jint flags)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (0);
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ err = txn_checkpoint(dbenv, kbyte, min, flags);
+ if (err != DB_INCOMPLETE)
+ verify_return(jnienv, err, 0);
+ JAVADB_ENV_API_END(dbenv);
+ return (err);
+}
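+
+/* DB_INCOMPLETE is deliberately handed back to the caller rather than
+ * thrown, so Java code can retry until the checkpoint finishes.  A
+ * sketch, assuming the usual Db.DB_INCOMPLETE constant:
+ *
+ *	int ret;
+ *	do {
+ *		ret = dbenv.txn_checkpoint(0, 0, 0);
+ *	} while (ret == Db.DB_INCOMPLETE);
+ */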
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_tx_1recover_1changed
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbFeedback*/ jobject jtxrecover)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ dbjie_set_tx_recover_object(dbenvinfo, jnienv, dbenv, jtxrecover);
+ JAVADB_ENV_API_END(dbenv);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_TXN_STAT *statp;
+ jobject retval, obj;
+ jclass dbclass, active_class;
+ char active_signature[512];
+ jfieldID arrid;
+ jobjectArray actives;
+ unsigned int i;
+
+ retval = NULL;
+ statp = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+
+ /* We cannot use the default allocator (on Win* platforms anyway)
+ * because it often causes problems when we free storage
+ * in a DLL that was allocated in another DLL. Using
+ * our own allocator (ours just calls malloc!) ensures
+ * that there is no mismatch.
+ */
+ err = txn_stat(dbenv, &statp, NULL);
+ if (verify_return(jnienv, err, 0)) {
+ retval = create_default_object(jnienv, name_DB_TXN_STAT);
+ dbclass = get_class(jnienv, name_DB_TXN_STAT);
+
+ /* Set the individual fields */
+
+ set_lsn_field(jnienv, dbclass, retval,
+ "st_last_ckp", statp->st_last_ckp);
+ set_lsn_field(jnienv, dbclass, retval,
+ "st_pending_ckp", statp->st_pending_ckp);
+ set_long_field(jnienv, dbclass, retval,
+ "st_time_ckp", statp->st_time_ckp);
+ set_int_field(jnienv, dbclass, retval,
+ "st_last_txnid", statp->st_last_txnid);
+ set_int_field(jnienv, dbclass, retval,
+ "st_maxtxns", statp->st_maxtxns);
+ set_int_field(jnienv, dbclass, retval,
+ "st_naborts", statp->st_naborts);
+ set_int_field(jnienv, dbclass, retval,
+ "st_nbegins", statp->st_nbegins);
+ set_int_field(jnienv, dbclass, retval,
+ "st_ncommits", statp->st_ncommits);
+ set_int_field(jnienv, dbclass, retval,
+ "st_nactive", statp->st_nactive);
+ set_int_field(jnienv, dbclass, retval,
+ "st_maxnactive", statp->st_maxnactive);
+
+ active_class = get_class(jnienv, name_DB_TXN_STAT_ACTIVE);
+ actives =
+ (*jnienv)->NewObjectArray(jnienv, statp->st_nactive,
+ active_class, 0);
+
+ /* Set the st_txnarray field. This is a little more involved
+ * than other fields, since the type is an array, so none
+ * of our utility functions help.
+ */
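+ /* For illustration: DB_PACKAGE_NAME presumably expands to
+ * "com/sleepycat/db/" (the package used in the method signatures
+ * elsewhere in this file), so the string assembled below is the JNI
+ * descriptor for an array of the class named by
+ * name_DB_TXN_STAT_ACTIVE, i.e. roughly "[Lcom/sleepycat/db/...;".
+ */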
+ strncpy(active_signature, "[L", sizeof(active_signature));
+ strncat(active_signature, DB_PACKAGE_NAME,
+ sizeof(active_signature) - strlen(active_signature) - 1);
+ strncat(active_signature, name_DB_TXN_STAT_ACTIVE,
+ sizeof(active_signature) - strlen(active_signature) - 1);
+ strncat(active_signature, ";",
+ sizeof(active_signature) - strlen(active_signature) - 1);
+
+ arrid = (*jnienv)->GetFieldID(jnienv, dbclass, "st_txnarray",
+ active_signature);
+ (*jnienv)->SetObjectField(jnienv, retval, arrid, actives);
+
+ /* Now fill in the elements of st_txnarray. */
+ for (i=0; i<statp->st_nactive; i++) {
+ obj = create_default_object(jnienv, name_DB_TXN_STAT_ACTIVE);
+ (*jnienv)->SetObjectArrayElement(jnienv, actives, i, obj);
+
+ set_int_field(jnienv, active_class, obj,
+ "txnid", statp->st_txnarray[i].txnid);
+ set_int_field(jnienv, active_class, obj,
+ "parentid", statp->st_txnarray[i].parentid);
+ set_lsn_field(jnienv, active_class, obj,
+ "lsn", statp->st_txnarray[i].lsn);
+ }
+ set_int_field(jnienv, dbclass, retval,
+ "st_region_wait", statp->st_region_wait);
+ set_int_field(jnienv, dbclass, retval,
+ "st_region_nowait", statp->st_region_nowait);
+ set_int_field(jnienv, dbclass, retval,
+ "st_regsize", statp->st_regsize);
+
+ free(statp);
+ }
+ JAVADB_ENV_API_END(dbenv);
+ return (retval);
+}
+
+/* See discussion on errpfx, errcall in DB_ENV_JAVAINFO */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject errcall)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+
+ if (verify_non_null(jnienv, dbenv) &&
+ verify_non_null(jnienv, dbenvinfo)) {
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ dbjie_set_errcall(dbenvinfo, jnienv, errcall);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring str)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+
+ if (verify_non_null(jnienv, dbenv) &&
+ verify_non_null(jnienv, dbenvinfo)) {
+
+ JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ dbjie_set_errpfx(dbenvinfo, jnienv, str);
+ JAVADB_ENV_API_END(dbenv);
+ }
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis,
+ jobject /*DbErrcall*/ errcall, jstring errpfx)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *envinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ envinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ DB_ASSERT(envinfo != NULL);
+
+ /* Note: We detect unclosed DbEnvs and report them.
+ */
+ if (dbenv != NULL && envinfo != NULL && !dbjie_is_dbopen(envinfo)) {
+
+ /* If this error occurs, this object was never closed. */
+ report_errcall(jnienv, errcall, errpfx,
+ "DbEnv.finalize: open DbEnv object destroyed");
+ }
+
+ /* Shouldn't see this object again, but just in case */
+ set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+ set_private_info(jnienv, name_DB_ENV, jthis, 0);
+
+ dbjie_destroy(envinfo, jnienv);
+}
diff --git a/bdb/libdb_java/java_DbLock.c b/bdb/libdb_java/java_DbLock.c
new file mode 100644
index 00000000000..287ca6622e5
--- /dev/null
+++ b/bdb/libdb_java/java_DbLock.c
@@ -0,0 +1,55 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbLock.c,v 11.4 2000/11/30 00:58:39 ubell Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLock.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_put
+ (JNIEnv *jnienv, jobject jthis, /*DbEnv*/ jobject env)
+{
+ int err;
+ DB_LOCK *dblock = get_DB_LOCK(jnienv, jthis);
+ DB_ENV *dbenv = get_DB_ENV(jnienv, env);
+
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ if (!verify_non_null(jnienv, dblock))
+ return;
+
+ err = lock_put(dbenv, dblock);
+ if (verify_return(jnienv, err, 0)) {
+ /* After a successful put, the DbLock can no longer
+ * be used, so we release the storage related to it
+ * (allocated in DbEnv.lock_get() or lock_tget()).
+ */
+ free(dblock);
+
+ set_private_dbobj(jnienv, name_DB_LOCK, jthis, 0);
+ }
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DB_LOCK *dblock = get_DB_LOCK(jnienv, jthis);
+ if (dblock) {
+ /* Free any data related to DB_LOCK here */
+ free(dblock);
+ }
+ set_private_dbobj(jnienv, name_DB_LOCK, jthis, 0); /* paranoia */
+}
diff --git a/bdb/libdb_java/java_DbLsn.c b/bdb/libdb_java/java_DbLsn.c
new file mode 100644
index 00000000000..8f26f2ecb58
--- /dev/null
+++ b/bdb/libdb_java/java_DbLsn.c
@@ -0,0 +1,43 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbLsn.c,v 11.5 2000/11/30 00:58:39 ubell Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h> /* needed for FILENAME_MAX */
+
+#include "db.h"
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLsn.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
+ (JNIEnv *jnienv, /*DbLsn*/ jobject jthis)
+{
+ /* Note: the DB_LSN object stored in the private_dbobj_
+ * is allocated in get_DbLsn().
+ */
+
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis, NULL);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DB_LSN *dblsn;
+
+ dblsn = get_DB_LSN(jnienv, jthis);
+ if (dblsn) {
+ free(dblsn);
+ }
+}
diff --git a/bdb/libdb_java/java_DbTxn.c b/bdb/libdb_java/java_DbTxn.c
new file mode 100644
index 00000000000..67c2599a6fc
--- /dev/null
+++ b/bdb/libdb_java/java_DbTxn.c
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbTxn.c,v 11.3 2000/09/18 18:32:25 dda Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbTxn.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_abort
+ (JNIEnv *jnienv, jobject jthis)
+{
+ int err;
+ DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbtxn))
+ return;
+
+ err = txn_abort(dbtxn);
+ verify_return(jnienv, err, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_commit
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbtxn))
+ return;
+
+ err = txn_commit(dbtxn, flags);
+ verify_return(jnienv, err, 0);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
+ (JNIEnv *jnienv, jobject jthis)
+{
+ int retval = 0;
+ DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbtxn))
+ return (-1);
+
+ /* No error to check for from txn_id */
+ retval = txn_id(dbtxn);
+ return (retval);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
+ (JNIEnv *jnienv, jobject jthis)
+{
+ int err;
+ DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbtxn))
+ return;
+
+ err = txn_prepare(dbtxn);
+ verify_return(jnienv, err, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
+ if (dbtxn) {
+ /* Free any data related to DB_TXN here
+ * Note: we don't make a policy of doing
+ * a commit or abort here. The txnmgr
+ * should be closed, and DB will clean up.
+ */
+ }
+}
diff --git a/bdb/libdb_java/java_Dbc.c b/bdb/libdb_java/java_Dbc.c
new file mode 100644
index 00000000000..f1d0acdec85
--- /dev/null
+++ b/bdb/libdb_java/java_Dbc.c
@@ -0,0 +1,196 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_Dbc.c,v 11.10 2000/10/25 19:54:55 dda Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db.h"
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_Dbc.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_close
+ (JNIEnv *jnienv, jobject jthis)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dbc))
+ return;
+ err = dbc->c_close(dbc);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DBC, jthis, 0);
+ }
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+ db_recno_t count;
+
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ err = dbc->c_count(dbc, &count, flags);
+ verify_return(jnienv, err, 0);
+ return (count);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_del
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ err = dbc->c_del(dbc, flags);
+ if (err != DB_KEYEMPTY) {
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+ DBC *dbc_ret = NULL;
+
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ err = dbc->c_dup(dbc, &dbc_ret, flags);
+ if (!verify_return(jnienv, err, 0))
+ return (0);
+
+ return (get_Dbc(jnienv, dbc_ret));
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry, op_flags;
+ DBC *dbc;
+ JDBT dbkey, dbdata;
+ OpKind keyop, dataop;
+
+ /* Depending on flags, the user may be supplying the key,
+ * or else we may have to retrieve it.
+ */
+ err = 0;
+ keyop = outOp;
+ dataop = outOp;
+
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET) {
+ keyop = inOp;
+ }
+ else if (op_flags == DB_SET_RANGE ||
+ op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH) {
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbc = get_DBC(jnienv, jthis);
+ if (jdbt_lock(&dbkey, jnienv, key, keyop) != 0)
+ goto out2;
+ if (jdbt_lock(&dbdata, jnienv, data, dataop) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+
+ for (retry = 0; retry < 3; retry++) {
+ err = dbc->c_get(dbc, &dbkey.dbt->dbt, &dbdata.dbt->dbt, flags);
+
+ /* If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!jdbt_realloc(&dbkey, jnienv) && !jdbt_realloc(&dbdata, jnienv))
+ break;
+ }
+ if (err != DB_NOTFOUND) {
+ verify_return(jnienv, err, 0);
+ }
+ out1:
+ jdbt_unlock(&dbdata, jnienv);
+ out2:
+ jdbt_unlock(&dbkey, jnienv);
+ return (err);
+}
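+
+/* Illustrative Java-side cursor loop using the get wrapper above.
+ * Class, method and flag names are the usual com.sleepycat.db ones and
+ * "cursor" is an assumed Dbc; this is a sketch, not code from this
+ * file.  Note that DB_NOTFOUND is returned, not thrown, so the loop
+ * ends cleanly:
+ *
+ *	Dbt key = new Dbt();
+ *	Dbt data = new Dbt();
+ *	key.set_flags(Db.DB_DBT_MALLOC);
+ *	data.set_flags(Db.DB_DBT_MALLOC);
+ *	while (cursor.get(key, data, Db.DB_NEXT) == 0) {
+ *		// examine key.get_data() and data.get_data()
+ *	}
+ */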
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err;
+ DBC *dbc;
+ JDBT dbkey, dbdata;
+
+ err = 0;
+ dbc = get_DBC(jnienv, jthis);
+ if (jdbt_lock(&dbkey, jnienv, key, inOp) != 0)
+ goto out2;
+ if (jdbt_lock(&dbdata, jnienv, data, inOp) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+ err = dbc->c_put(dbc, &dbkey.dbt->dbt, &dbdata.dbt->dbt, flags);
+ if (err != DB_KEYEXIST) {
+ verify_return(jnienv, err, 0);
+ }
+ out1:
+ jdbt_unlock(&dbdata, jnienv);
+ out2:
+ jdbt_unlock(&dbkey, jnienv);
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ /* Free any data related to DBC here.
+ * If we ever have java-only data embedded in the DBC
+ * and need to do this, we'll have to track Dbc's
+ * according to which Db owns them, just as
+ * we track Db's according to which DbEnv owns them.
+ * That's necessary to avoid double freeing that
+ * comes about when closes interact with GC.
+ */
+
+#ifdef DIAGNOSTIC
+ DBC *dbc;
+
+ dbc = get_DBC(jnienv, jthis);
+ if (dbc != NULL)
+ fprintf(stderr, "Java API: Dbc has not been closed\n");
+#else
+
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis, NULL);
+
+#endif
+}
diff --git a/bdb/libdb_java/java_Dbt.c b/bdb/libdb_java/java_Dbt.c
new file mode 100644
index 00000000000..0e094da6a2d
--- /dev/null
+++ b/bdb/libdb_java/java_Dbt.c
@@ -0,0 +1,176 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_Dbt.c,v 11.10 2000/10/25 19:54:55 dda Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "java_util.h"
+#include "com_sleepycat_db_Dbt.h"
+
+JAVADB_RW_ACCESS(Dbt, jint, size, DBT, size)
+JAVADB_RW_ACCESS(Dbt, jint, ulen, DBT, ulen)
+JAVADB_RW_ACCESS(Dbt, jint, dlen, DBT, dlen)
+JAVADB_RW_ACCESS(Dbt, jint, doff, DBT, doff)
+JAVADB_RW_ACCESS(Dbt, jint, flags, DBT, flags)
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *dbtji;
+
+ dbtji = dbjit_construct();
+ set_private_dbobj(jnienv, name_DBT, jthis, dbtji);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_internal_1set_1data
+ (JNIEnv *jnienv, jobject jthis, jbyteArray array)
+{
+ DBT_JAVAINFO *db_this;
+
+ db_this = get_DBT_JAVAINFO(jnienv, jthis);
+ if (verify_non_null(jnienv, db_this)) {
+
+ /* If we previously allocated an array for java,
+ * must release reference.
+ */
+ dbjit_release(db_this, jnienv);
+
+ /* Make the array a global ref,
+ * it won't be GC'd till we release it.
+ */
+ if (array)
+ array = (jbyteArray)NEW_GLOBAL_REF(jnienv, array);
+ db_this->array_ = array;
+ }
+}
+
+JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_get_1data
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *db_this;
+ jbyteArray arr;
+ int len;
+
+ db_this = get_DBT_JAVAINFO(jnienv, jthis);
+ if (verify_non_null(jnienv, db_this)) {
+ /* XXX this will copy the data on each call to get_data,
+ * even if it is unchanged.
+ */
+ if (db_this->create_array_ != 0) {
+ /* XXX we should reuse the existing array if we can */
+ len = db_this->dbt.size;
+ if (db_this->array_ != NULL)
+ DELETE_GLOBAL_REF(jnienv, db_this->array_);
+ arr = (*jnienv)->NewByteArray(jnienv, len);
+ db_this->array_ =
+ (jbyteArray)NEW_GLOBAL_REF(jnienv, arr);
+ (*jnienv)->SetByteArrayRegion(jnienv, arr, 0, len,
+ db_this->dbt.data);
+ }
+ return (db_this->array_);
+ }
+ return (0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1offset
+ (JNIEnv *jnienv, jobject jthis, jint offset)
+{
+ DBT_JAVAINFO *db_this;
+
+ db_this = get_DBT_JAVAINFO(jnienv, jthis);
+ if (verify_non_null(jnienv, db_this)) {
+ db_this->offset_ = offset;
+ }
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1offset
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *db_this;
+
+ db_this = get_DBT_JAVAINFO(jnienv, jthis);
+ if (verify_non_null(jnienv, db_this)) {
+ return db_this->offset_;
+ }
+ return (0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1recno_1key_1data(JNIEnv *jnienv, jobject jthis, jint value)
+{
+ JDBT jdbt;
+
+ if (jdbt_lock(&jdbt, jnienv, jthis, inOp) != 0)
+ goto out;
+
+ if (!jdbt.dbt->dbt.data ||
+ jdbt.java_array_len_ < sizeof(db_recno_t)) {
+ char buf[200];
+ sprintf(buf, "set_recno_key_data error: %p %p %d %d",
+ &jdbt.dbt->dbt, jdbt.dbt->dbt.data,
+ jdbt.dbt->dbt.ulen, sizeof(db_recno_t));
+ report_exception(jnienv, buf, 0, 0);
+ }
+ else {
+ *(db_recno_t*)(jdbt.dbt->dbt.data) = value;
+ }
+ out:
+ jdbt_unlock(&jdbt, jnienv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1recno_1key_1data(JNIEnv *jnienv, jobject jthis)
+{
+ jint ret;
+ JDBT jdbt;
+
+ ret = 0;
+
+ /* Although this is kind of like "retrieve", we don't support
+ * DB_DBT_MALLOC for this operation, so we tell jdbt_lock
+ * that this is not a retrieve.
+ */
+ if (jdbt_lock(&jdbt, jnienv, jthis, inOp) != 0)
+ goto out;
+
+ if (!jdbt.dbt->dbt.data ||
+ jdbt.java_array_len_ < sizeof(db_recno_t)) {
+ char buf[200];
+ sprintf(buf, "get_recno_key_data error: %p %p %d %d",
+ &jdbt.dbt->dbt, jdbt.dbt->dbt.data,
+ jdbt.dbt->dbt.ulen, sizeof(db_recno_t));
+ report_exception(jnienv, buf, 0, 0);
+ }
+ else {
+ ret = *(db_recno_t*)(jdbt.dbt->dbt.data);
+ }
+ out:
+ jdbt_unlock(&jdbt, jnienv);
+ return (ret);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *dbtji;
+
+ dbtji = get_DBT_JAVAINFO(jnienv, jthis);
+ if (dbtji) {
+ /* Free any data related to DBT here */
+ dbjit_release(dbtji, jnienv);
+
+ /* Extra paranoia */
+ memset(dbtji, 0, sizeof(DBT_JAVAINFO));
+ free(dbtji);
+ }
+}
diff --git a/bdb/libdb_java/java_info.c b/bdb/libdb_java/java_info.c
new file mode 100644
index 00000000000..ccd469fa256
--- /dev/null
+++ b/bdb/libdb_java/java_info.c
@@ -0,0 +1,1001 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_info.c,v 11.18 2000/10/28 13:09:39 dda Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "db_int.h"
+#include "java_util.h"
+
+/****************************************************************
+ *
+ * Callback functions
+ *
+ */
+
+static void Db_feedback_callback(DB *db, int opcode, int percent)
+{
+ DB_JAVAINFO *dbinfo;
+
+ DB_ASSERT(db != NULL);
+ dbinfo = (DB_JAVAINFO *)db->cj_internal;
+ dbji_call_feedback(dbinfo, db, dbinfo->jdbref_, opcode, percent);
+}
+
+static int Db_append_recno_callback(DB *db, DBT *dbt, db_recno_t recno)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->cj_internal;
+ return (dbji_call_append_recno(dbinfo, db, dbinfo->jdbref_, dbt, recno));
+}
+
+static int Db_bt_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->cj_internal;
+ return (dbji_call_bt_compare(dbinfo, db, dbinfo->jdbref_, dbt1, dbt2));
+}
+
+static size_t Db_bt_prefix_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->cj_internal;
+ return (dbji_call_bt_prefix(dbinfo, db, dbinfo->jdbref_, dbt1, dbt2));
+}
+
+static int Db_dup_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->cj_internal;
+ return (dbji_call_dup_compare(dbinfo, db, dbinfo->jdbref_, dbt1, dbt2));
+}
+
+static u_int32_t Db_h_hash_callback(DB *db, const void *data, u_int32_t len)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->cj_internal;
+ return (dbji_call_h_hash(dbinfo, db, dbinfo->jdbref_, data, len));
+}
+
+static void DbEnv_feedback_callback(DB_ENV *dbenv, int opcode, int percent)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ DB_ASSERT(dbenv != NULL);
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->cj_internal;
+ dbjie_call_feedback(dbinfo, dbenv, dbinfo->jenvref_, opcode, percent);
+}
+
+static int DbEnv_recovery_init_callback(DB_ENV *dbenv)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->cj_internal;
+ return (dbjie_call_recovery_init(dbinfo, dbenv, dbinfo->jenvref_));
+}
+
+static int DbEnv_tx_recover_callback(DB_ENV *dbenv, DBT *dbt,
+ DB_LSN *lsn, db_recops recops)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ DB_ASSERT(dbenv != NULL);
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->cj_internal;
+ return dbjie_call_tx_recover(dbinfo, dbenv, dbinfo->jenvref_, dbt,
+ lsn, recops);
+}
+
+/****************************************************************
+ *
+ * Implementation of class DBT_javainfo
+ *
+ */
+DBT_JAVAINFO *
+dbjit_construct()
+{
+ DBT_JAVAINFO *dbjit;
+
+ dbjit = (DBT_JAVAINFO *)malloc(sizeof(DBT_JAVAINFO));
+ memset(dbjit, 0, sizeof(DBT_JAVAINFO));
+ return (dbjit);
+}
+
+void dbjit_destroy(DBT_JAVAINFO *dbjit)
+{
+ /* Sanity check:
+ * We cannot delete the global ref because we don't have a JNIEnv.
+ */
+ if (dbjit->array_ != NULL) {
+ fprintf(stderr, "object is not freed\n");
+ }
+
+ /* Extra paranoia */
+ memset(dbjit, 0, sizeof(DBT_JAVAINFO));
+ free(dbjit);
+}
+
+void dbjit_release(DBT_JAVAINFO *dbjit, JNIEnv *jnienv)
+{
+ if (dbjit->array_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjit->array_);
+ dbjit->array_ = NULL;
+ }
+}
+
+/****************************************************************
+ *
+ * Implementation of class DB_ENV_JAVAINFO
+ *
+ */
+
+/* create/initialize an object */
+DB_ENV_JAVAINFO *
+dbjie_construct(JNIEnv *jnienv,
+ jobject default_errcall,
+ int is_dbopen)
+{
+ DB_ENV_JAVAINFO *dbjie;
+
+ dbjie = (DB_ENV_JAVAINFO *)malloc(sizeof(DB_ENV_JAVAINFO));
+ memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
+ dbjie->is_dbopen_ = is_dbopen;
+
+ if ((*jnienv)->GetJavaVM(jnienv, &dbjie->javavm_) != 0) {
+ free(dbjie);
+ report_exception(jnienv, "cannot get Java VM", 0, 0);
+ return (NULL);
+ }
+
+ /* The default error call just prints to the 'System.err'
+ * stream. If the user sets the errcall to null, we'll
+ * want to have a reference to set it back to.
+ *
+ * Why do we always set db_errcall to our own callback?
+ * Because it makes the interaction between setting the
+ * error prefix, error stream, and user's error callback
+ * that much easier.
+ */
+ dbjie->default_errcall_ = NEW_GLOBAL_REF(jnienv, default_errcall);
+ dbjie->errcall_ = NEW_GLOBAL_REF(jnienv, default_errcall);
+ return (dbjie);
+}
+
+/* release all objects held by this one */
+void dbjie_dealloc(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ if (dbjie->recovery_init_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->recovery_init_);
+ dbjie->recovery_init_ = NULL;
+ }
+ if (dbjie->feedback_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->feedback_);
+ dbjie->feedback_ = NULL;
+ }
+ if (dbjie->tx_recover_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->tx_recover_);
+ dbjie->tx_recover_ = NULL;
+ }
+ if (dbjie->errcall_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->errcall_);
+ dbjie->errcall_ = NULL;
+ }
+ if (dbjie->default_errcall_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->default_errcall_);
+ dbjie->default_errcall_ = NULL;
+ }
+
+ if (dbjie->conflict_ != NULL) {
+ free(dbjie->conflict_);
+ dbjie->conflict_ = NULL;
+ }
+ if (dbjie->errpfx_ != NULL) {
+ free(dbjie->errpfx_);
+ dbjie->errpfx_ = NULL;
+ }
+}
+
+/* free this object, releasing anything allocated on its behalf */
+void dbjie_destroy(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ dbjie_dealloc(dbjie, jnienv);
+
+ /* Extra paranoia */
+ memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
+ free(dbjie);
+}
+
+/* Attach to the current thread that is running and
+ * return that. We use the java virtual machine
+ * that we saved in the constructor.
+ */
+JNIEnv *
+dbjie_get_jnienv(DB_ENV_JAVAINFO *dbjie)
+{
+ /* Note:
+ * Different versions of the JNI disagree on the signature
+ * for AttachCurrentThread. The most recent documentation
+ * seems to say that (JNIEnv **) is correct, but newer
+ * JNIs seem to use (void **), oddly enough.
+ */
+#ifdef JNI_VERSION_1_2
+ void *attachret = 0;
+#else
+ JNIEnv *attachret = 0;
+#endif
+
+ /* This should always succeed, as we are called via
+ * some Java activity. I think therefore I am (a thread).
+ */
+ if ((*dbjie->javavm_)->AttachCurrentThread(dbjie->javavm_, &attachret, 0) != 0)
+ return (0);
+
+ return ((JNIEnv *)attachret);
+}
+
+jstring
+dbjie_get_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ return (get_java_string(jnienv, dbjie->errpfx_));
+}
+
+void
+dbjie_set_errcall(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jobject new_errcall)
+{
+ /* If the new_errcall is null, we'll set the error call
+ * to the default one.
+ */
+ if (new_errcall == NULL)
+ new_errcall = dbjie->default_errcall_;
+
+ DELETE_GLOBAL_REF(jnienv, dbjie->errcall_);
+ dbjie->errcall_ = NEW_GLOBAL_REF(jnienv, new_errcall);
+}
+
+void
+dbjie_set_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jstring errpfx)
+{
+ if (dbjie->errpfx_ != NULL)
+ free(dbjie->errpfx_);
+
+ if (errpfx)
+ dbjie->errpfx_ = get_c_string(jnienv, errpfx);
+ else
+ dbjie->errpfx_ = NULL;
+}
+
+void
+dbjie_set_conflict(DB_ENV_JAVAINFO *dbjie, unsigned char *newarr)
+{
+ if (dbjie->conflict_)
+ free(dbjie->conflict_);
+ dbjie->conflict_ = newarr;
+}
+
+void dbjie_set_feedback_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject jfeedback)
+{
+ int err;
+
+ if (dbjie->feedback_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->feedback_);
+ }
+ if (jfeedback == NULL) {
+ if ((err = dbenv->set_feedback(dbenv, NULL)) != 0)
+ report_exception(jnienv, "set_feedback failed",
+ err, 0);
+ }
+ else {
+ if ((err = dbenv->set_feedback(dbenv,
+ DbEnv_feedback_callback)) != 0)
+ report_exception(jnienv, "set_feedback failed",
+ err, 0);
+ }
+
+ dbjie->feedback_ = NEW_GLOBAL_REF(jnienv, jfeedback);
+}
+
+void dbjie_call_feedback(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
+ int opcode, int percent)
+{
+ JNIEnv *jnienv;
+ jclass feedback_class;
+ jmethodID id;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return;
+ }
+
+ feedback_class = get_class(jnienv, name_DbEnvFeedback);
+ id = (*jnienv)->GetMethodID(jnienv, feedback_class,
+ "feedback",
+ "(Lcom/sleepycat/db/DbEnv;II)V");
+ if (!id) {
+ fprintf(stderr, "Cannot find callback class\n");
+ return;
+ }
+
+ (*jnienv)->CallVoidMethod(jnienv, dbjie->feedback_, id,
+ jenv, (jint)opcode, (jint)percent);
+}
+
+void dbjie_set_recovery_init_object(DB_ENV_JAVAINFO *dbjie,
+ JNIEnv *jnienv, DB_ENV *dbenv,
+ jobject jrecovery_init)
+{
+ int err;
+
+ if (dbjie->recovery_init_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->recovery_init_);
+ }
+ if (jrecovery_init == NULL) {
+ if ((err = dbenv->set_recovery_init(dbenv, NULL)) != 0)
+ report_exception(jnienv, "set_recovery_init failed",
+ err, 0);
+ }
+ else {
+ if ((err = dbenv->set_recovery_init(dbenv,
+ DbEnv_recovery_init_callback)) != 0)
+ report_exception(jnienv, "set_recovery_init failed",
+ err, 0);
+ }
+
+ dbjie->recovery_init_ = NEW_GLOBAL_REF(jnienv, jrecovery_init);
+}
+
+int dbjie_call_recovery_init(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv,
+ jobject jenv)
+{
+ JNIEnv *jnienv;
+ jclass recovery_init_class;
+ jmethodID id;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (EINVAL);
+ }
+
+ recovery_init_class = get_class(jnienv, name_DbRecoveryInit);
+ id = (*jnienv)->GetMethodID(jnienv, recovery_init_class,
+ "recovery_init",
+ "(Lcom/sleepycat/db/DbEnv;)V");
+ if (!id) {
+ fprintf(stderr, "Cannot find callback class\n");
+ return (EINVAL);
+ }
+ return (*jnienv)->CallIntMethod(jnienv, dbjie->recovery_init_,
+ id, jenv);
+}
+
+void dbjie_set_tx_recover_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject jtx_recover)
+{
+ int err;
+
+ if (dbjie->tx_recover_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->tx_recover_);
+ }
+ if (jtx_recover == NULL) {
+ if ((err = dbenv->set_tx_recover(dbenv, NULL)) != 0)
+ report_exception(jnienv, "set_tx_recover failed",
+ err, 0);
+ }
+ else {
+ if ((err = dbenv->set_tx_recover(dbenv,
+ DbEnv_tx_recover_callback)) != 0)
+ report_exception(jnienv, "set_tx_recover failed",
+ err, 0);
+ }
+
+ dbjie->tx_recover_ = NEW_GLOBAL_REF(jnienv, jtx_recover);
+}
+
+int dbjie_call_tx_recover(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
+ DBT *dbt, DB_LSN *lsn, int recops)
+{
+ JNIEnv *jnienv;
+ jclass tx_recover_class;
+ jmethodID id;
+ jobject jdbt;
+ jobject jlsn;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ tx_recover_class = get_class(jnienv, name_DbTxnRecover);
+ id = (*jnienv)->GetMethodID(jnienv, tx_recover_class,
+ "tx_recover",
+ "(Lcom/sleepycat/db/DbEnv;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/DbLsn;"
+ "I)I");
+ if (!id) {
+ fprintf(stderr, "Cannot find callback class\n");
+ return (0);
+ }
+
+ if (dbt == NULL)
+ jdbt = NULL;
+ else
+ jdbt = get_Dbt(jnienv, dbt);
+
+ if (lsn == NULL)
+ jlsn = NULL;
+ else
+ jlsn = get_DbLsn(jnienv, *lsn);
+
+ return (*jnienv)->CallIntMethod(jnienv, dbjie->tx_recover_, id, jenv,
+ jdbt, jlsn, recops);
+}
+
+jobject dbjie_get_errcall(DB_ENV_JAVAINFO *dbjie)
+{
+ return (dbjie->errcall_);
+}
+
+int dbjie_is_dbopen(DB_ENV_JAVAINFO *dbjie)
+{
+ return (dbjie->is_dbopen_);
+}
+
+/****************************************************************
+ *
+ * Implementation of class DB_JAVAINFO
+ *
+ */
+
+DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jint flags)
+{
+ DB_JAVAINFO *dbji;
+
+ dbji = (DB_JAVAINFO *)malloc(sizeof(DB_JAVAINFO));
+ memset(dbji, 0, sizeof(DB_JAVAINFO));
+
+ if ((*jnienv)->GetJavaVM(jnienv, &dbji->javavm_) != 0) {
+ report_exception(jnienv, "cannot get Java VM", 0, 0);
+ free(dbji);
+ return (NULL);
+ }
+ dbji->construct_flags_ = flags;
+ return (dbji);
+}
+
+void
+dbji_dealloc(DB_JAVAINFO *dbji, JNIEnv *jnienv)
+{
+ if (dbji->append_recno_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->append_recno_);
+ dbji->append_recno_ = NULL;
+ }
+ if (dbji->bt_compare_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_compare_);
+ dbji->bt_compare_ = NULL;
+ }
+ if (dbji->bt_prefix_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix_);
+ dbji->bt_prefix_ = NULL;
+ }
+ if (dbji->dup_compare_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->dup_compare_);
+ dbji->dup_compare_ = NULL;
+ }
+ if (dbji->feedback_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->feedback_);
+ dbji->feedback_ = NULL;
+ }
+ if (dbji->h_hash_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->h_hash_);
+ dbji->h_hash_ = NULL;
+ }
+}
+
+void
+dbji_destroy(DB_JAVAINFO *dbji, JNIEnv *jnienv)
+{
+ dbji_dealloc(dbji, jnienv);
+ free(dbji);
+}
+
+JNIEnv *dbji_get_jnienv(DB_JAVAINFO *dbji)
+{
+ /* Note:
+ * Different versions of the JNI disagree on the signature
+ * for AttachCurrentThread. The most recent documentation
+ * seems to say that (JNIEnv **) is correct, but newer
+ * JNIs seem to use (void **), oddly enough.
+ */
+#ifdef JNI_VERSION_1_2
+ void *attachret = 0;
+#else
+ JNIEnv *attachret = 0;
+#endif
+
+ /* This should always succeed, as we are called via
+ * some Java activity. I think therefore I am (a thread).
+ */
+ if ((*dbji->javavm_)->AttachCurrentThread(dbji->javavm_, &attachret, 0) != 0)
+ return (0);
+
+ return ((JNIEnv *)attachret);
+}
+
+jint dbji_get_flags(DB_JAVAINFO *dbji)
+{
+ return (dbji->construct_flags_);
+}
+
+void dbji_set_feedback_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jfeedback)
+{
+ jclass feedback_class;
+
+ if (dbji->feedback_method_id_ == NULL) {
+ feedback_class = get_class(jnienv, name_DbFeedback);
+ dbji->feedback_method_id_ =
+ (*jnienv)->GetMethodID(jnienv, feedback_class,
+ "feedback",
+ "(Lcom/sleepycat/db/Db;II)V");
+ if (dbji->feedback_method_id_ == NULL) {
+ /* XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->feedback_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->feedback_);
+ }
+ if (jfeedback == NULL) {
+ db->set_feedback(db, NULL);
+ }
+ else {
+ db->set_feedback(db, Db_feedback_callback);
+ }
+
+ dbji->feedback_ = NEW_GLOBAL_REF(jnienv, jfeedback);
+}
+
+void dbji_call_feedback(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ int opcode, int percent)
+{
+ JNIEnv *jnienv;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return;
+ }
+
+ DB_ASSERT(dbji->feedback_method_id_ != NULL);
+ (*jnienv)->CallVoidMethod(jnienv, dbji->feedback_,
+ dbji->feedback_method_id_,
+ jdb, (jint)opcode, (jint)percent);
+}
+
+void dbji_set_append_recno_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jcallback)
+{
+ jclass append_recno_class;
+
+ if (dbji->append_recno_method_id_ == NULL) {
+ append_recno_class = get_class(jnienv, name_DbAppendRecno);
+ dbji->append_recno_method_id_ =
+ (*jnienv)->GetMethodID(jnienv, append_recno_class,
+ "db_append_recno",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;I)V");
+ if (dbji->append_recno_method_id_ == NULL) {
+ /* XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->append_recno_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->append_recno_);
+ }
+ if (jcallback == NULL) {
+ db->set_append_recno(db, NULL);
+ }
+ else {
+ db->set_append_recno(db, Db_append_recno_callback);
+ }
+
+ dbji->append_recno_ = NEW_GLOBAL_REF(jnienv, jcallback);
+}
+
+extern int dbji_call_append_recno(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ DBT *dbt, jint recno)
+{
+ JNIEnv *jnienv;
+ jobject jdbt;
+ DBT_JAVAINFO *dbtji;
+ jbyteArray arr;
+ unsigned int arraylen;
+ unsigned char *data;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ /* XXX
+ * We should have a pool of Dbt objects used for this purpose
+ * instead of creating new ones each time. Because of
+ * multithreading, we may need an arbitrary number (more than two).
+ * We might also have byte arrays that grow as needed,
+ * so we don't need to allocate those either.
+ *
+ * Note, we do not set the 'create_array_' flag as on other
+ * callbacks as we are creating the array here.
+ */
+ jdbt = create_default_object(jnienv, name_DBT);
+ dbtji = get_DBT_JAVAINFO(jnienv, jdbt);
+ memcpy(&dbtji->dbt, dbt, sizeof(DBT));
+ dbtji->dbt.data = NULL;
+ arr = (*jnienv)->NewByteArray(jnienv, dbt->size);
+ (*jnienv)->SetByteArrayRegion(jnienv, arr, 0, dbt->size,
+ (jbyte *)dbt->data);
+ dbtji->array_ = (jbyteArray)NEW_GLOBAL_REF(jnienv, arr);
+
+ DB_ASSERT(dbji->append_recno_method_id_ != NULL);
+ (*jnienv)->CallVoidMethod(jnienv, dbji->append_recno_,
+ dbji->append_recno_method_id_,
+ jdb, jdbt, recno);
+
+ /* The underlying C API requires that an errno be returned
+ * on error. Java users know nothing of errnos, so we
+ * allow them to throw exceptions instead. We leave the
+ * exception in place and return DB_JAVA_CALLBACK to the C API
+ * that called us. Eventually the DB->get will fail and
+ * when java prepares to throw an exception in
+ * report_exception(), this will be spotted as a special case,
+ * and the original exception will be preserved.
+ *
+ * Note: we have sometimes noticed strange behavior with
+ * exceptions under Linux 1.1.7 JVM. (i.e. multiple calls
+ * to ExceptionOccurred() may report different results).
+ * Currently we don't know of any problems related to this
+ * in our code, but if it pops up in the future, users are
+ * encouraged to get a more recent JVM.
+ */
+ if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
+ return (DB_JAVA_CALLBACK);
+
+ if (dbtji->array_ == NULL) {
+ report_exception(jnienv, "Dbt.data is null", 0, 0);
+ return (EFAULT);
+ }
+
+ arraylen = (*jnienv)->GetArrayLength(jnienv, dbtji->array_);
+ if (dbtji->offset_ < 0 ) {
+ report_exception(jnienv, "Dbt.offset illegal", 0, 0);
+ return (EFAULT);
+ }
+ if (dbt->ulen + dbtji->offset_ > arraylen) {
+ report_exception(jnienv,
+ "Dbt.ulen + Dbt.offset greater than array length", 0, 0);
+ return (EFAULT);
+ }
+
+ data = (*jnienv)->GetByteArrayElements(jnienv, dbtji->array_,
+ (jboolean *)0);
+ dbt->data = data + dbtji->offset_;
+ return (0);
+}
+
+void dbji_set_bt_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jcompare)
+{
+ jclass bt_compare_class;
+
+ if (dbji->bt_compare_method_id_ == NULL) {
+ bt_compare_class = get_class(jnienv, name_DbBtreeCompare);
+ dbji->bt_compare_method_id_ =
+ (*jnienv)->GetMethodID(jnienv, bt_compare_class,
+ "bt_compare",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->bt_compare_method_id_ == NULL) {
+ /* XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->bt_compare_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_compare_);
+ }
+ if (jcompare == NULL) {
+ db->set_bt_compare(db, NULL);
+ }
+ else {
+ db->set_bt_compare(db, Db_bt_compare_callback);
+ }
+
+ dbji->bt_compare_ = NEW_GLOBAL_REF(jnienv, jcompare);
+}
+
+int dbji_call_bt_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+ DBT_JAVAINFO *dbtji1, *dbtji2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ /* XXX
+ * We should have a pool of Dbt objects used for this purpose
+ * instead of creating new ones each time. Because of
+ * multithreading, we may need an arbitrary number (more than two).
+ * We might also have byte arrays that grow as needed,
+ * so we don't need to allocate those either.
+ */
+ jdbt1 = create_default_object(jnienv, name_DBT);
+ jdbt2 = create_default_object(jnienv, name_DBT);
+ dbtji1 = get_DBT_JAVAINFO(jnienv, jdbt1);
+ memcpy(&dbtji1->dbt, dbt1, sizeof(DBT));
+ dbtji1->create_array_ = 1;
+ dbtji2 = get_DBT_JAVAINFO(jnienv, jdbt2);
+ memcpy(&dbtji2->dbt, dbt2, sizeof(DBT));
+ dbtji2->create_array_ = 1;
+
+ DB_ASSERT(dbji->bt_compare_method_id_ != NULL);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->bt_compare_,
+ dbji->bt_compare_method_id_,
+ jdb, jdbt1, jdbt2);
+}
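+
+/* Illustrative Java-side comparator supplying the bt_compare method
+ * looked up in dbji_set_bt_compare_object above.  The method signature
+ * is taken from the GetMethodID call; whether DbBtreeCompare is an
+ * interface or a class is not shown in this file.  Sketch only:
+ *
+ *	public class LexicalCompare implements DbBtreeCompare {
+ *		public int bt_compare(Db db, Dbt d1, Dbt d2) {
+ *			byte[] a = d1.get_data(), b = d2.get_data();
+ *			int n = Math.min(a.length, b.length);
+ *			for (int i = 0; i < n; i++)
+ *				if (a[i] != b[i])
+ *					return (a[i] & 0xff) - (b[i] & 0xff);
+ *			return a.length - b.length;
+ *		}
+ *	}
+ */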
+
+void dbji_set_bt_prefix_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jprefix)
+{
+ jclass bt_prefix_class;
+
+ if (dbji->bt_prefix_method_id_ == NULL) {
+ bt_prefix_class = get_class(jnienv, name_DbBtreePrefix);
+ dbji->bt_prefix_method_id_ =
+ (*jnienv)->GetMethodID(jnienv, bt_prefix_class,
+ "bt_prefix",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->bt_prefix_method_id_ == NULL) {
+ /* XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->bt_prefix_ != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix_);
+ }
+ if (jprefix == NULL) {
+ db->set_bt_prefix(db, NULL);
+ }
+ else {
+ db->set_bt_prefix(db, Db_bt_prefix_callback);
+ }
+
+ dbji->bt_prefix_ = NEW_GLOBAL_REF(jnienv, jprefix);
+}
+
+size_t dbji_call_bt_prefix(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+ DBT_JAVAINFO *dbtji1, *dbtji2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ /* XXX
+ * We should have a pool of Dbt objects used for this purpose
+ * instead of creating new ones each time. Because of
+ * multithreading, we may need an arbitrary number (more than two).
+ * We might also have byte arrays that grow as needed,
+ * so we don't need to allocate those either.
+ */
+ jdbt1 = create_default_object(jnienv, name_DBT);
+ jdbt2 = create_default_object(jnienv, name_DBT);
+ dbtji1 = get_DBT_JAVAINFO(jnienv, jdbt1);
+ memcpy(&dbtji1->dbt, dbt1, sizeof(DBT));
+ dbtji1->create_array_ = 1;
+ dbtji2 = get_DBT_JAVAINFO(jnienv, jdbt2);
+ memcpy(&dbtji2->dbt, dbt2, sizeof(DBT));
+ dbtji2->create_array_ = 1;
+
+ DB_ASSERT(dbji->bt_prefix_method_id_ != NULL);
+ return (size_t)(*jnienv)->CallIntMethod(jnienv, dbji->bt_prefix_,
+ dbji->bt_prefix_method_id_,
+ jdb, jdbt1, jdbt2);
+}
+
+void dbji_set_dup_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jcompare)
+{
+ jclass dup_compare_class;
+
+ if (dbji->dup_compare_method_id_ == NULL) {
+ dup_compare_class = get_class(jnienv, name_DbDupCompare);
+ dbji->dup_compare_method_id_ =
+ (*jnienv)->GetMethodID(jnienv, dup_compare_class,
+ "dup_compare",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->dup_compare_method_id_ == NULL) {
+ /* XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->dup_compare_ != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbji->dup_compare_);
+
+ if (jcompare == NULL)
+ db->set_dup_compare(db, NULL);
+ else
+ db->set_dup_compare(db, Db_dup_compare_callback);
+
+ dbji->dup_compare_ = NEW_GLOBAL_REF(jnienv, jcompare);
+}
+
+int dbji_call_dup_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+ DBT_JAVAINFO *dbtji1, *dbtji2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ /* XXX
+ * We should have a pool of Dbt objects used for this purpose
+ * instead of creating new ones each time. Because of
+ * multithreading, we may need an arbitrary number (more than two).
+ * We might also have byte arrays that grow as needed,
+ * so we don't need to allocate those either.
+ */
+ jdbt1 = create_default_object(jnienv, name_DBT);
+ jdbt2 = create_default_object(jnienv, name_DBT);
+ dbtji1 = get_DBT_JAVAINFO(jnienv, jdbt1);
+ memcpy(&dbtji1->dbt, dbt1, sizeof(DBT));
+ dbtji1->create_array_ = 1;
+ dbtji2 = get_DBT_JAVAINFO(jnienv, jdbt2);
+ memcpy(&dbtji2->dbt, dbt2, sizeof(DBT));
+ dbtji2->create_array_ = 1;
+
+ DB_ASSERT(dbji->dup_compare_method_id_ != NULL);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->dup_compare_,
+ dbji->dup_compare_method_id_,
+ jdb, jdbt1, jdbt2);
+}
+
+void dbji_set_h_hash_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jhash)
+{
+ jclass h_hash_class;
+
+ if (dbji->h_hash_method_id_ == NULL) {
+ h_hash_class = get_class(jnienv, name_DbHash);
+ dbji->h_hash_method_id_ =
+ (*jnienv)->GetMethodID(jnienv, h_hash_class,
+ "hash",
+ "(Lcom/sleepycat/db/Db;"
+ "[BI)I");
+ if (dbji->h_hash_method_id_ == NULL) {
+ /* XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->h_hash_ != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbji->h_hash_);
+
+ if (jhash == NULL)
+ db->set_h_hash(db, NULL);
+ else
+ db->set_h_hash(db, Db_h_hash_callback);
+
+ dbji->h_hash_ = NEW_GLOBAL_REF(jnienv, jhash);
+}
+
+int dbji_call_h_hash(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const void *data, int len)
+{
+ JNIEnv *jnienv;
+ jbyteArray jarray;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ DB_ASSERT(dbji->h_hash_method_id_ != NULL);
+
+ jarray = (*jnienv)->NewByteArray(jnienv, len);
+ (*jnienv)->SetByteArrayRegion(jnienv, jarray, 0, len, (void *)data);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->h_hash_,
+ dbji->h_hash_method_id_,
+ jdb, jarray, len);
+}
diff --git a/bdb/libdb_java/java_info.h b/bdb/libdb_java/java_info.h
new file mode 100644
index 00000000000..69032be80e6
--- /dev/null
+++ b/bdb/libdb_java/java_info.h
@@ -0,0 +1,200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: java_info.h,v 11.17 2000/07/31 20:28:30 dda Exp $
+ */
+
+#ifndef _JAVA_INFO_H_
+#define _JAVA_INFO_H_
+
+/*
+ * "Info" classes for Java implementation of Berkeley DB API.
+ * These classes hold extra information for which there is
+ * no room or counterpart in the base classes used in the C API.
+ * In the case of a DBT, the DBT_javainfo class is stored in the
+ * 'private' variable of the java Dbt, and the DBT_javainfo is subclassed
+ * from a DBT. In the case of DB and DB_ENV, the appropriate
+ * info objects are pointed to by the DB and DB_ENV objects.
+ * This is convenient to implement callbacks.
+ */
+
+/****************************************************************
+ *
+ * Declaration of class DBT_javainfo
+ *
+ * A DBT_javainfo is created whenever a Dbt (java) object is created,
+ * and a pointer to it is stored in its private info storage.
+ * It is subclassed from DBT, because we must retain some extra
+ * information in it while it is in use. In particular, when
+ * a java array is associated with it, we need to keep a Globally
+ * Locked reference to it so it is not GC'd. This reference is
+ * destroyed when the Dbt is GC'd.
+ */
+typedef struct _dbt_javainfo
+{
+ DBT dbt;
+ DB *db_; /* associated DB */
+ jobject dbtref_; /* the java Dbt object */
+ jbyteArray array_;
+ int offset_;
+ int create_array_; /* flag to create the array as needed */
+}
+DBT_JAVAINFO; /* used with all 'dbtji' functions */
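+
+/* A rough sketch of the pinning described above; the real code is in
+ * java_Dbt.c (Dbt.internal_set_data and dbjit_release):
+ *
+ *	dbtji->array_ = (jbyteArray)NEW_GLOBAL_REF(jnienv, array);
+ *	...
+ *	DELETE_GLOBAL_REF(jnienv, dbtji->array_);
+ */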
+
+extern DBT_JAVAINFO *dbjit_construct();
+extern void dbjit_release(DBT_JAVAINFO *dbjit, JNIEnv *jnienv);
+
+/****************************************************************
+ *
+ * Declaration of class DB_ENV_JAVAINFO
+ *
+ * A DB_ENV_JAVAINFO is allocated and stuffed into the cj_internal
+ * and the db_errpfx for every DB_ENV created. It holds a
+ * little extra info that is needed to support callbacks.
+ *
+ * There's a bit of trickery here, because we have built this
+ * above a layer that has a C function callback that gets
+ * invoked when an error occurs. One of the C callback's arguments
+ * is the prefix from the DB_ENV, but since we stuffed a pointer
+ * to our own DB_ENV_JAVAINFO into the prefix, we get that object as an
+ * argument to the C callback. Thus, the C callback can have
+ * access to much more than just the prefix, and it needs that
+ * to call back into the Java environment.
+ *
+ * The DB_ENV_JAVAINFO object holds a copy of the Java Virtual Machine,
+ * which is needed to attach to the current running thread
+ * whenever we need to make a callback. (This is more reliable
+ * than our previous approach, which was to save the thread
+ * that created the DbEnv). It also has the Java callback object,
+ * as well as a 'default' callback object that is used when the
+ * caller sets the callback to null. It also has the original
+ * error prefix, since we overwrote the one in the DB_ENV.
+ * There are also fields that are unrelated to the handling
+ * of callbacks, but are convenient to attach to a DB_ENV.
+ *
+ * Note: We assume that the Java layer is the only one
+ * fiddling with the contents of db_errpfx, db_errcall, cj_internal
+ * for a DB_ENV that was created via Java. Since the Java layer should
+ * have the only pointer to such a DB_ENV, this should be true.
+ */
+typedef struct _db_env_javainfo
+{
+ JavaVM *javavm_;
+ int is_dbopen_;
+ char *errpfx_;
+ jobject jdbref_; /* temporary reference */
+ jobject jenvref_; /* temporary reference */
+ jobject default_errcall_; /* global reference */
+ jobject errcall_; /* global reference */
+ jobject feedback_; /* global reference */
+ jobject tx_recover_; /* global reference */
+ jobject recovery_init_; /* global reference */
+ unsigned char *conflict_;
+}
+DB_ENV_JAVAINFO; /* used with all 'dbjie' functions */
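+
+/* A minimal sketch of the trick described above, with a hypothetical
+ * callback name (the real C error callback is registered elsewhere in
+ * the Java layer):
+ *
+ *	static void errcall_sketch(const char *prefix, char *msg)
+ *	{
+ *		// the "prefix" DB passes us really points at our info object
+ *		DB_ENV_JAVAINFO *envinfo = (DB_ENV_JAVAINFO *)prefix;
+ *		JNIEnv *jnienv = dbjie_get_jnienv(envinfo);
+ *		... deliver msg to envinfo->errcall_, prepending the
+ *		    saved envinfo->errpfx_ ...
+ *	}
+ */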
+
+/* create/initialize an object */
+extern DB_ENV_JAVAINFO *dbjie_construct(JNIEnv *jnienv,
+ jobject default_errcall,
+ int is_dbopen);
+
+/* release all objects held by this one */
+extern void dbjie_dealloc(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+
+/* free this object, releasing anything allocated on its behalf */
+extern void dbjie_destroy(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+
+/* This gets the environment for the current thread */
+extern JNIEnv *dbjie_get_jnienv(DB_ENV_JAVAINFO *);
+
+extern void dbjie_set_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ jstring errpfx);
+extern jstring dbjie_get_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+extern void dbjie_set_errcall(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ jobject new_errcall);
+extern void dbjie_set_conflict(DB_ENV_JAVAINFO *, unsigned char *v);
+extern void dbjie_set_feedback_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern void dbjie_call_feedback(DB_ENV_JAVAINFO *, DB_ENV *dbenv, jobject jenv,
+ int opcode, int percent);
+extern void dbjie_set_recovery_init_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern int dbjie_call_recovery_init(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
+ jobject jenv);
+extern void dbjie_set_tx_recover_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern int dbjie_call_tx_recover(DB_ENV_JAVAINFO *,
+ DB_ENV *dbenv, jobject jenv,
+ DBT *dbt, DB_LSN *lsn, int recops);
+extern jobject dbjie_get_errcall(DB_ENV_JAVAINFO *);
+extern int dbjie_is_dbopen(DB_ENV_JAVAINFO *);
+
+/****************************************************************
+ *
+ * Declaration of class DB_JAVAINFO
+ *
+ * A DB_JAVAINFO is allocated and stuffed into the cj_internal field
+ * for every DB created. It holds a little extra info that is needed
+ * to support callbacks.
+ *
+ * Note: We assume that the Java layer is the only one
+ * fiddling with the contents of cj_internal
+ * for a DB that was created via Java. Since the Java layer should
+ * have the only pointer to such a DB, this should be true.
+ */
+typedef struct _db_javainfo
+{
+ JavaVM *javavm_;
+ jobject jdbref_; /* temporary reference during callback */
+ jobject feedback_; /* global reference */
+ jobject append_recno_; /* global reference */
+ jobject bt_compare_; /* global reference */
+ jobject bt_prefix_; /* global reference */
+ jobject dup_compare_; /* global reference */
+ jobject h_hash_; /* global reference */
+ jmethodID feedback_method_id_;
+ jmethodID append_recno_method_id_;
+ jmethodID bt_compare_method_id_;
+ jmethodID bt_prefix_method_id_;
+ jmethodID dup_compare_method_id_;
+ jmethodID h_hash_method_id_;
+ jint construct_flags_;
+} DB_JAVAINFO;
+
+/* create/initialize an object */
+extern DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jint flags);
+
+/* release all objects held by this one */
+extern void dbji_dealloc(DB_JAVAINFO *, JNIEnv *jnienv);
+
+/* free this object, releasing anything allocated on its behalf */
+extern void dbji_destroy(DB_JAVAINFO *, JNIEnv *jnienv);
+
+/* This gets the environment for the current thread */
+extern JNIEnv *dbji_get_jnienv(DB_JAVAINFO *);
+extern jint dbji_get_flags(DB_JAVAINFO *);
+
+extern void dbji_set_feedback_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern void dbji_call_feedback(DB_JAVAINFO *, DB *db, jobject jdb,
+ int opcode, int percent);
+
+extern void dbji_set_append_recno_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_append_recno(DB_JAVAINFO *, DB *db, jobject jdb,
+ DBT *dbt, jint recno);
+extern void dbji_set_bt_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_bt_compare(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_bt_prefix_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern size_t dbji_call_bt_prefix(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_dup_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_dup_compare(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_h_hash_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_h_hash(DB_JAVAINFO *, DB *db, jobject jdb,
+ const void *data, int len);
+
+#endif /* !_JAVA_INFO_H_ */
diff --git a/bdb/libdb_java/java_locked.c b/bdb/libdb_java/java_locked.c
new file mode 100644
index 00000000000..a5603df5d60
--- /dev/null
+++ b/bdb/libdb_java/java_locked.c
@@ -0,0 +1,294 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_locked.c,v 11.11 2000/10/25 19:54:55 dda Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "java_util.h"
+
+/****************************************************************
+ *
+ * Implementation of class LockedDBT
+ *
+ */
+int
+jdbt_lock(JDBT *jdbt, JNIEnv *jnienv, jobject obj, OpKind kind)
+{
+ DBT *dbt;
+
+ jdbt->obj_ = obj;
+ jdbt->do_realloc_ = 0;
+ jdbt->kind_ = kind;
+ jdbt->java_array_len_= 0;
+ jdbt->java_data_ = 0;
+ jdbt->before_data_ = 0;
+ jdbt->has_error_ = 0;
+ jdbt->dbt = (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj);
+
+ if (!verify_non_null(jnienv, jdbt->dbt)) {
+ jdbt->has_error_ = 1;
+ return (EINVAL);
+ }
+ dbt = &jdbt->dbt->dbt;
+
+ if (kind == outOp &&
+ (dbt->flags & (DB_DBT_USERMEM | DB_DBT_MALLOC | DB_DBT_REALLOC)) == 0) {
+ report_exception(jnienv,
+ "Dbt.flags must be set to Db.DB_DBT_USERMEM, "
+ "Db.DB_DBT_MALLOC or Db.DB_DBT_REALLOC",
+ 0, 0);
+ jdbt->has_error_ = 1;
+ return (EINVAL);
+ }
+
+ /* If this is requested to be realloc, we cannot use the
+ * underlying realloc, because the array we will pass in
+ * is not allocated by us, but by the Java VM, so it cannot
+ * be successfully realloced.  We simulate the reallocation
+ * by using USERMEM and reallocating the java array when an
+ * ENOMEM error occurs.  We change the flags during the operation,
+ * and they are reset when the operation completes (in
+ * jdbt_unlock, the "destructor" counterpart of jdbt_lock).
+ */
+ if ((dbt->flags & DB_DBT_REALLOC) != 0) {
+ dbt->flags &= ~DB_DBT_REALLOC;
+ dbt->flags |= DB_DBT_USERMEM;
+ jdbt->do_realloc_ = 1;
+ }
+
+ if ((dbt->flags & DB_DBT_USERMEM) || kind != outOp) {
+
+ /* If writing with DB_DBT_USERMEM/REALLOC
+ * or it's a set (or get/set) operation,
+ * then the data should point to a java array.
+ * Note that outOp means data is coming out of the database
+ * (it's a get). inOp means data is going into the database
+ * (either a put, or a key input).
+ */
+ if (!jdbt->dbt->array_) {
+ report_exception(jnienv, "Dbt.data is null", 0, 0);
+ jdbt->has_error_ = 1;
+ return (EINVAL);
+ }
+
+ /* Verify other parameters */
+ jdbt->java_array_len_ = (*jnienv)->GetArrayLength(jnienv, jdbt->dbt->array_);
+ if (jdbt->dbt->offset_ < 0 ) {
+ report_exception(jnienv, "Dbt.offset illegal", 0, 0);
+ jdbt->has_error_ = 1;
+ return (EINVAL);
+ }
+ if (dbt->ulen + jdbt->dbt->offset_ > jdbt->java_array_len_) {
+ report_exception(jnienv,
+ "Dbt.ulen + Dbt.offset greater than array length", 0, 0);
+ jdbt->has_error_ = 1;
+ return (EINVAL);
+ }
+
+ jdbt->java_data_ = (*jnienv)->GetByteArrayElements(jnienv, jdbt->dbt->array_,
+ (jboolean *)0);
+ dbt->data = jdbt->before_data_ = jdbt->java_data_ + jdbt->dbt->offset_;
+ }
+ else {
+
+ /* If writing with DB_DBT_MALLOC, then the data is
+ * allocated by DB.
+ */
+ dbt->data = jdbt->before_data_ = 0;
+ }
+ return (0);
+}
+
+/* jdbt_unlock (the LockedDBT "destructor") is called when the java handler
+ * returns to the user, since that's when the JDBT objects go out of scope.
+ * Since it is thus called after any call to the underlying database,
+ * it copies any information from temporary structures back into
+ * user-accessible arrays, and of course must free memory and remove
+ * references.
+ */
+void
+jdbt_unlock(JDBT *jdbt, JNIEnv *jnienv)
+{
+ DBT *dbt;
+
+ dbt = &jdbt->dbt->dbt;
+
+ /* Fix up the flags if we changed them. */
+ if (jdbt->do_realloc_) {
+ dbt->flags &= ~DB_DBT_USERMEM;
+ dbt->flags |= DB_DBT_REALLOC;
+ }
+
+ if ((dbt->flags & (DB_DBT_USERMEM | DB_DBT_REALLOC)) ||
+ jdbt->kind_ == inOp) {
+
+ /* If writing with DB_DBT_USERMEM/REALLOC or it's a set
+ * (or get/set) operation, then the data may be already in
+ * the java array, in which case, we just need to release it.
+ * If DB didn't put it in the array (indicated by the
+ * dbt->data changing), we need to do that
+ */
+ if (jdbt->before_data_ != jdbt->java_data_) {
+ (*jnienv)->SetByteArrayRegion(jnienv,
+ jdbt->dbt->array_,
+ jdbt->dbt->offset_,
+ dbt->ulen,
+ jdbt->before_data_);
+ }
+ (*jnienv)->ReleaseByteArrayElements(jnienv, jdbt->dbt->array_, jdbt->java_data_, 0);
+ dbt->data = 0;
+ }
+ if ((dbt->flags & DB_DBT_MALLOC) && jdbt->kind_ != inOp) {
+
+ /* If writing with DB_DBT_MALLOC, then the data was allocated
+ * by DB. If dbt->data is zero, it means an error occurred
+ * (and should have been already reported).
+ */
+ if (dbt->data) {
+
+ /* Release any old references. */
+ dbjit_release(jdbt->dbt, jnienv);
+
+ /* In the case of SET_RANGE, the key is inOutOp
+ * and when not found, its data will be left as
+ * its original value. Only copy and free it
+ * here if it has been allocated by DB
+ * (dbt->data has changed).
+ */
+ if (dbt->data != jdbt->before_data_) {
+ jdbt->dbt->array_ = (jbyteArray)
+ NEW_GLOBAL_REF(jnienv,
+ (*jnienv)->NewByteArray(jnienv,
+ dbt->size));
+ jdbt->dbt->offset_ = 0;
+ (*jnienv)->SetByteArrayRegion(jnienv,
+ jdbt->dbt->array_, 0, dbt->size,
+ (jbyte *)dbt->data);
+ free(dbt->data);
+ dbt->data = 0;
+ }
+ }
+ }
+}
+
+/* Realloc the java array to receive data if the DBT was marked
+ * for realloc, and the last operation set the size field to an
+ * amount greater than ulen.
+ */
+int jdbt_realloc(JDBT *jdbt, JNIEnv *jnienv)
+{
+ DBT *dbt;
+
+ dbt = &jdbt->dbt->dbt;
+
+ if (!jdbt->do_realloc_ || jdbt->has_error_ || dbt->size <= dbt->ulen)
+ return (0);
+
+ (*jnienv)->ReleaseByteArrayElements(jnienv, jdbt->dbt->array_, jdbt->java_data_, 0);
+ dbjit_release(jdbt->dbt, jnienv);
+
+ /* We allocate a new array of the needed size.
+ * We'll set the offset to 0, as the old offset
+ * really doesn't make any sense.
+ */
+ jdbt->java_array_len_ = dbt->ulen = dbt->size;
+ jdbt->dbt->offset_ = 0;
+ jdbt->dbt->array_ = (jbyteArray)
+ NEW_GLOBAL_REF(jnienv, (*jnienv)->NewByteArray(jnienv, dbt->size));
+
+ jdbt->java_data_ = (*jnienv)->GetByteArrayElements(jnienv,
+ jdbt->dbt->array_,
+ (jboolean *)0);
+ dbt->data = jdbt->before_data_ = jdbt->java_data_;
+ return (1);
+}
+
+/****************************************************************
+ *
+ * Implementation of class JSTR
+ *
+ */
+int
+jstr_lock(JSTR *js, JNIEnv *jnienv, jstring jstr)
+{
+ js->jstr_ = jstr;
+
+ if (jstr == 0)
+ js->string = 0;
+ else
+ js->string = (*jnienv)->GetStringUTFChars(jnienv, jstr,
+ (jboolean *)0);
+ return (0);
+}
+
+void jstr_unlock(JSTR *js, JNIEnv *jnienv)
+{
+ if (js->jstr_)
+ (*jnienv)->ReleaseStringUTFChars(jnienv, js->jstr_, js->string);
+}
+
+/****************************************************************
+ *
+ * Implementation of class JSTRARRAY
+ *
+ */
+int
+jstrarray_lock(JSTRARRAY *jsa, JNIEnv *jnienv, jobjectArray arr)
+{
+ int i;
+
+ jsa->arr_ = arr;
+ jsa->array = 0;
+
+ if (arr != 0) {
+ int count = (*jnienv)->GetArrayLength(jnienv, arr);
+ const char **new_array =
+ (const char **)malloc((sizeof(const char *))*(count+1));
+ for (i=0; i<count; i++) {
+ jstring jstr = (jstring)(*jnienv)->GetObjectArrayElement(jnienv, arr, i);
+ if (jstr == 0) {
+ /*
+ * An embedded null in the string array
+ * is treated as an endpoint.
+ */
+ new_array[i] = 0;
+ break;
+ }
+ else {
+ new_array[i] =
+ (*jnienv)->GetStringUTFChars(jnienv, jstr, (jboolean *)0);
+ }
+ }
+ new_array[count] = 0;
+ jsa->array = new_array;
+ }
+ return (0);
+}
+
+void jstrarray_unlock(JSTRARRAY *jsa, JNIEnv *jnienv)
+{
+ int i;
+ jstring jstr;
+
+ if (jsa->arr_) {
+ int count = (*jnienv)->GetArrayLength(jnienv, jsa->arr_);
+ for (i=0; i<count; i++) {
+ if (jsa->array[i] == 0)
+ break;
+ jstr = (jstring)(*jnienv)->GetObjectArrayElement(jnienv, jsa->arr_, i);
+ (*jnienv)->ReleaseStringUTFChars(jnienv, jstr, jsa->array[i]);
+ }
+ free((void*)jsa->array);
+ }
+}
diff --git a/bdb/libdb_java/java_locked.h b/bdb/libdb_java/java_locked.h
new file mode 100644
index 00000000000..9b88cdd0619
--- /dev/null
+++ b/bdb/libdb_java/java_locked.h
@@ -0,0 +1,98 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: java_locked.h,v 11.9 2000/10/25 19:54:55 dda Exp $
+ */
+
+#ifndef _JAVA_LOCKED_H_
+#define _JAVA_LOCKED_H_
+
+/*
+ * Used internally by LockedDBT constructor.
+ */
+typedef enum _OpKind {
+ inOp, /* setting data in database (passing data in) */
+ outOp, /* getting data from database to user memory */
+ inOutOp /* both getting/setting data */
+} OpKind;
+
+/****************************************************************
+ *
+ * Declaration of JDBT
+ *
+ * A JDBT object exists during a single native call to the DB API.
+ * jdbt_lock() (its "constructor") temporarily converts any java array
+ * found in the DBT_JAVAINFO to actual bytes in memory that remain
+ * locked in place.  These bytes are used during the call to the
+ * underlying DB C layer, and are released and/or copied back by
+ * jdbt_unlock() (its "destructor").  Thus, a JDBT must be declared
+ * as a stack object to function properly; see the usage sketch
+ * below the declarations.
+ */
+typedef struct _jdbt
+{
+ /* these are accessed externally to jdbt_ functions */
+ DBT_JAVAINFO *dbt;
+ unsigned int java_array_len_;
+
+ /* these are used internally by jdbt_ functions */
+ jobject obj_;
+ jbyte *java_data_;
+ jbyte *before_data_;
+ int has_error_;
+ int do_realloc_;
+ OpKind kind_;
+} JDBT;
+
+extern int jdbt_lock(JDBT *, JNIEnv *jnienv, jobject obj, OpKind kind);
+extern void jdbt_unlock(JDBT *, JNIEnv *jnienv); /* this unlocks and frees the memory */
+extern int jdbt_realloc(JDBT *, JNIEnv *jnienv); /* returns 1 if reallocation took place */
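+
+/* A minimal usage sketch (the function name and the error handling below
+ * are hypothetical and illustrative only).  A JDBT pairs a jdbt_lock()
+ * with a jdbt_unlock() on the same stack frame, bracketing the call into
+ * the DB C API:
+ *
+ *	int example_get(JNIEnv *jnienv, DB *db, jobject jkey, jobject jdata)
+ *	{
+ *		JDBT key, data;
+ *		int err;
+ *
+ *		if ((err = jdbt_lock(&key, jnienv, jkey, inOp)) != 0)
+ *			return (err);
+ *		if ((err = jdbt_lock(&data, jnienv, jdata, outOp)) != 0) {
+ *			jdbt_unlock(&key, jnienv);
+ *			return (err);
+ *		}
+ *		err = db->get(db, NULL, &key.dbt->dbt, &data.dbt->dbt, 0);
+ *		jdbt_unlock(&data, jnienv);
+ *		jdbt_unlock(&key, jnienv);
+ *		return (err);
+ *	}
+ */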
+
+/****************************************************************
+ *
+ * Declaration of JSTR
+ *
+ * A JSTR exists temporarily to convert a java jstring object
+ * to a char *. Because the memory for the char * string is
+ * managed by the JVM, it must be released when we are done
+ * looking at it.  Typically, jstr_lock() is called at the
+ * beginning of a function for each jstring object, and jstr_unlock()
+ * is called at the end of the function for each JSTR; see the
+ * usage sketch below the declarations.
+ */
+typedef struct _jstr
+{
+ /* this is accessed externally to jstr_ functions */
+ const char *string;
+
+ /* this is used internally by jstr_ functions */
+ jstring jstr_;
+} JSTR;
+
+extern int jstr_lock(JSTR *, JNIEnv *jnienv, jstring jstr);
+extern void jstr_unlock(JSTR *, JNIEnv *jnienv); /* this unlocks and frees mem */
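+
+/* A brief usage sketch (the function name is hypothetical and stdio is
+ * assumed only for illustration):
+ *
+ *	void example_print_name(JNIEnv *jnienv, jstring jname)
+ *	{
+ *		JSTR name;
+ *
+ *		if (jstr_lock(&name, jnienv, jname) != 0)
+ *			return;
+ *		if (name.string != NULL)
+ *			printf("database name: %s\n", name.string);
+ *		jstr_unlock(&name, jnienv);
+ *	}
+ */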
+
+/****************************************************************
+ *
+ * Declaration of JSTRARRAY
+ *
+ * Given a java jobjectArray object (that must be a String[]),
+ * we extract the individual strings and build a NULL-terminated
+ * const char ** array.  When the JSTRARRAY is unlocked, the
+ * individual strings are released; see the sketch below the
+ * declarations.
+ */
+typedef struct _jstrarray
+{
+ /* this is accessed externally to jstrarray_ functions */
+ const char **array;
+
+ /* this is used internally by jstrarray_ functions */
+ jobjectArray arr_;
+} JSTRARRAY;
+
+extern int jstrarray_lock(JSTRARRAY *, JNIEnv *jnienv, jobjectArray arr);
+extern void jstrarray_unlock(JSTRARRAY *, JNIEnv *jnienv); /* this unlocks and frees mem */
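+
+/* The pattern for string arrays is the same, except that the caller gets
+ * a NULL-terminated const char ** to walk (again, the function name and
+ * the use of stdio are illustrative only):
+ *
+ *	void example_print_dirs(JNIEnv *jnienv, jobjectArray jarr)
+ *	{
+ *		JSTRARRAY dirs;
+ *		int i;
+ *
+ *		if (jstrarray_lock(&dirs, jnienv, jarr) != 0)
+ *			return;
+ *		if (dirs.array != NULL)
+ *			for (i = 0; dirs.array[i] != NULL; i++)
+ *				printf("data dir: %s\n", dirs.array[i]);
+ *		jstrarray_unlock(&dirs, jnienv);
+ *	}
+ */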
+
+#endif /* !_JAVA_LOCKED_H_ */
diff --git a/bdb/libdb_java/java_util.c b/bdb/libdb_java/java_util.c
new file mode 100644
index 00000000000..f42ceafbee8
--- /dev/null
+++ b/bdb/libdb_java/java_util.c
@@ -0,0 +1,556 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_util.c,v 11.17 2000/10/28 13:09:39 dda Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "java_util.h"
+
+#ifdef DB_WIN32
+#define sys_errlist _sys_errlist
+#define sys_nerr _sys_nerr
+#endif
+
+const char * const name_DB = "Db";
+const char * const name_DB_BTREE_STAT = "DbBtreeStat";
+const char * const name_DBC = "Dbc";
+const char * const name_DB_DEADLOCK_EX = "DbDeadlockException";
+const char * const name_DB_ENV = "DbEnv";
+const char * const name_DB_EXCEPTION = "DbException";
+const char * const name_DB_HASH_STAT = "DbHashStat";
+const char * const name_DB_LOCK = "DbLock";
+const char * const name_DB_LOCK_STAT = "DbLockStat";
+const char * const name_DB_LOG_STAT = "DbLogStat";
+const char * const name_DB_LSN = "DbLsn";
+const char * const name_DB_MEMORY_EX = "DbMemoryException";
+const char * const name_DB_MPOOL_FSTAT = "DbMpoolFStat";
+const char * const name_DB_MPOOL_STAT = "DbMpoolStat";
+const char * const name_DB_QUEUE_STAT = "DbQueueStat";
+const char * const name_DB_RUNRECOVERY_EX = "DbRunRecoveryException";
+const char * const name_DBT = "Dbt";
+const char * const name_DB_TXN = "DbTxn";
+const char * const name_DB_TXN_STAT = "DbTxnStat";
+const char * const name_DB_TXN_STAT_ACTIVE = "DbTxnStat$Active";
+const char * const name_DbAppendRecno = "DbAppendRecno";
+const char * const name_DbBtreeCompare = "DbBtreeCompare";
+const char * const name_DbBtreePrefix = "DbBtreePrefix";
+const char * const name_DbDupCompare = "DbDupCompare";
+const char * const name_DbEnvFeedback = "DbEnvFeedback";
+const char * const name_DbErrcall = "DbErrcall";
+const char * const name_DbHash = "DbHash";
+const char * const name_DbFeedback = "DbFeedback";
+const char * const name_DbRecoveryInit = "DbRecoveryInit";
+const char * const name_DbTxnRecover = "DbTxnRecover";
+
+const char * const string_signature = "Ljava/lang/String;";
+
+/****************************************************************
+ *
+ * Utility functions used by "glue" functions.
+ *
+ */
+
+/* Get the private data from a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj)
+{
+ jclass dbClass;
+ jfieldID id;
+ long_to_ptr lp;
+
+ if (!obj)
+ return (0);
+
+ dbClass = get_class(jnienv, classname);
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
+ lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
+ return (lp.ptr);
+}
+
+/* Set the private data in a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value)
+{
+ long_to_ptr lp;
+ jclass dbClass;
+ jfieldID id;
+
+ lp.java_long = 0; /* no junk in case sizes mismatch */
+ lp.ptr = value;
+ dbClass = get_class(jnienv, classname);
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
+ (*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
+}
+
+/* Get the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj)
+{
+ jclass dbClass;
+ jfieldID id;
+ long_to_ptr lp;
+
+ if (!obj)
+ return (0);
+
+ dbClass = get_class(jnienv, classname);
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
+ lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
+ return (lp.ptr);
+}
+
+/* Set the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value)
+{
+ long_to_ptr lp;
+ jclass dbClass;
+ jfieldID id;
+
+ lp.java_long = 0; /* no junk in case sizes mismatch */
+ lp.ptr = value;
+ dbClass = get_class(jnienv, classname);
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
+ (*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
+}
+
+/*
+ * Given a non-qualified name (e.g. "foo"), get the class handle
+ * for the fully qualified name (e.g. "com.sleepycat.db.foo")
+ */
+jclass get_class(JNIEnv *jnienv, const char *classname)
+{
+ /* Note: PERFORMANCE: It should be possible to cache jclass's.
+ * If we do a NewGlobalRef on each one, we can keep them
+ * around in a table.  A jclass is a jobject, and since
+ * NewGlobalRef returns a jobject, casting its result back to a
+ * jclass isn't technically right, but it would likely work with
+ * most implementations.  Possibly make it configurable; a sketch
+ * of the idea follows this function.
+ */
+ char fullname[128] = DB_PACKAGE_NAME;
+ strncat(fullname, classname, sizeof(fullname) - strlen(fullname) - 1);
+ return ((*jnienv)->FindClass(jnienv, fullname));
+}
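+
+/* A sketch of the caching idea mentioned above (illustrative only, not
+ * enabled here): look a class up once, pin it with a global reference,
+ * and reuse it on later calls.  A real version would key a small table
+ * by classname and consider thread safety.
+ *
+ *	static jclass cached_dbt_class = NULL;
+ *
+ *	static jclass get_dbt_class_cached(JNIEnv *jnienv)
+ *	{
+ *		if (cached_dbt_class == NULL)
+ *			cached_dbt_class = (jclass)(*jnienv)->NewGlobalRef(
+ *			    jnienv, get_class(jnienv, name_DBT));
+ *		return (cached_dbt_class);
+ *	}
+ */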
+
+/* Set an individual field in a Db* object.
+ * The field must be a DB object type.
+ */
+void set_object_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *object_classname,
+ const char *name_of_field, jobject obj)
+{
+ char signature[512];
+ jfieldID id;
+
+ strncpy(signature, "L", sizeof(signature));
+ strncat(signature, DB_PACKAGE_NAME, sizeof(signature) - strlen(signature) - 1);
+ strncat(signature, object_classname, sizeof(signature) - strlen(signature) - 1);
+ strncat(signature, ";", sizeof(signature) - strlen(signature) - 1);
+
+ id = (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, signature);
+ (*jnienv)->SetObjectField(jnienv, jthis, id, obj);
+}
+
+/* Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_int_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jint value)
+{
+ jfieldID id = (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, "I");
+ (*jnienv)->SetIntField(jnienv, jthis, id, value);
+}
+
+/* Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_long_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jlong value)
+{
+ jfieldID id = (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, "J");
+ (*jnienv)->SetLongField(jnienv, jthis, id, value);
+}
+
+/* Set an individual field in a Db* object.
+ * The field must be a DbLsn type.
+ */
+void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, DB_LSN value)
+{
+ set_object_field(jnienv, class_of_this, jthis, name_DB_LSN,
+ name_of_field, get_DbLsn(jnienv, value));
+}
+
+/* Report an exception back to the java side.
+ */
+void report_exception(JNIEnv *jnienv, const char *text, int err,
+ unsigned long expect_mask)
+{
+ jstring textString;
+ jclass dbexcept;
+ jclass javaexcept;
+ jmethodID constructId;
+ jthrowable obj;
+
+ textString = NULL;
+ dbexcept = NULL;
+ javaexcept = NULL;
+ constructId = NULL;
+ obj = NULL;
+
+ switch (err) {
+ /* DB_JAVA_CALLBACK is returned by dbji_call_append_recno()
+ * (the append_recno callback) when the Java version of the
+ * callback has thrown an exception, and we want to pass the
+ * exception on.  The exception has already been thrown, so we
+ * don't want to throw a new one.
+ */
+ case DB_JAVA_CALLBACK:
+ break;
+ case ENOMEM:
+ dbexcept = get_class(jnienv, name_DB_MEMORY_EX);
+ break;
+ case ENOENT:
+ /* In this case there is a corresponding standard java
+ * exception type that we'll use. First we make sure
+ * that the calling function expected this kind of error;
+ * if not, we give an 'internal error' DbException, since
+ * we must not throw an exception type that isn't
+ * declared in the signature.
+ *
+ * We'll make this a little more general if/when we add
+ * more java standard exceptions.
+ */
+ if ((expect_mask & EXCEPTION_FILE_NOT_FOUND) == 0) {
+ char errstr[1024];
+
+ strncpy(errstr, "internal error: unexpected errno: ",
+ sizeof(errstr));
+ strncat(errstr, text, sizeof(errstr) - strlen(errstr) - 1);
+ textString = get_java_string(jnienv, errstr);
+ dbexcept = get_class(jnienv, name_DB_EXCEPTION);
+ }
+ else {
+ javaexcept =
+ (*jnienv)->FindClass(jnienv, "java/io/FileNotFoundException");
+ }
+ break;
+ case DB_RUNRECOVERY:
+ dbexcept = get_class(jnienv, name_DB_RUNRECOVERY_EX);
+ break;
+ case DB_LOCK_DEADLOCK:
+ dbexcept = get_class(jnienv, name_DB_DEADLOCK_EX);
+ break;
+ default:
+ dbexcept = get_class(jnienv, name_DB_EXCEPTION);
+ break;
+ }
+ if (dbexcept != NULL) {
+ if (textString == NULL)
+ textString = get_java_string(jnienv, text);
+ constructId = (*jnienv)->GetMethodID(jnienv, dbexcept,
+ "<init>",
+ "(Ljava/lang/String;I)V");
+ obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept,
+ constructId, textString,
+ err);
+ (*jnienv)->Throw(jnienv, obj);
+ }
+ else if (javaexcept != NULL) {
+ javaexcept =
+ (*jnienv)->FindClass(jnienv, "java/io/FileNotFoundException");
+ (*jnienv)->ThrowNew(jnienv, javaexcept, text);
+ }
+}
+
+/* Report an error via the errcall mechanism.
+ */
+void report_errcall(JNIEnv *jnienv, jobject errcall,
+ jstring prefix, const char *message)
+{
+ jmethodID id;
+ jclass errcall_class;
+ jstring msg;
+
+ errcall_class = get_class(jnienv, name_DbErrcall);
+ msg = get_java_string(jnienv, message);
+
+ id = (*jnienv)->GetMethodID(jnienv, errcall_class,
+ "errcall",
+ "(Ljava/lang/String;Ljava/lang/String;)V");
+ if (id == NULL) {
+ fprintf(stderr, "Cannot get errcall methodID!\n");
+ fprintf(stderr, "error: %s\n", message);
+ return;
+ }
+
+ (*jnienv)->CallVoidMethod(jnienv, errcall, id, prefix, msg);
+}
+
+/* If the object is null, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_non_null(JNIEnv *jnienv, void *obj)
+{
+ if (obj == NULL) {
+ report_exception(jnienv, "null object", EINVAL, 0);
+ return (0);
+ }
+ return (1);
+}
+
+/* If the error code is non-zero, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_return(JNIEnv *jnienv, int err, unsigned long expect_mask)
+{
+ if (err == 0)
+ return 1;
+
+ report_exception(jnienv, db_strerror(err), err, expect_mask);
+ return 0;
+}
+
+/* Create an object of the given class, calling its default constructor.
+ */
+jobject create_default_object(JNIEnv *jnienv, const char *class_name)
+{
+ jclass dbclass = get_class(jnienv, class_name);
+ jmethodID id = (*jnienv)->GetMethodID(jnienv, dbclass, "<init>", "()V");
+ jobject object = (*jnienv)->NewObject(jnienv, dbclass, id);
+ return (object);
+}
+
+/* Convert a DB object to a Java encapsulation of that object.
+ * Note: This implementation creates a new Java object on each call,
+ * so it is generally useful when a new DB object has just been created.
+ */
+jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj)
+{
+ jobject jo;
+
+ if (!dbobj)
+ return (0);
+
+ jo = create_default_object(jnienv, class_name);
+ set_private_dbobj(jnienv, class_name, jo, dbobj);
+ return (jo);
+}
+
+/* Create a copy of the string
+ */
+char *dup_string(const char *str)
+{
+ int len;
+ char *retval;
+
+ len = strlen(str) + 1;
+ retval = (char *)malloc(sizeof(char)*len);
+ strncpy(retval, str, len);
+ return (retval);
+}
+
+/* Create a java string from the given string
+ */
+jstring get_java_string(JNIEnv *jnienv, const char* string)
+{
+ if (string == 0)
+ return (0);
+ return ((*jnienv)->NewStringUTF(jnienv, string));
+}
+
+/* Create a malloc'ed copy of the java string.
+ * Caller must free it.
+ */
+char *get_c_string(JNIEnv *jnienv, jstring jstr)
+{
+ const jbyte *utf;
+ char *retval;
+
+ utf = (*jnienv)->GetStringUTFChars(jnienv, jstr, NULL);
+ retval = dup_string((const char *)utf);
+ (*jnienv)->ReleaseStringUTFChars(jnienv, jstr, utf);
+ return retval;
+}
+
+/* Convert java objects to the various C pointers they represent.
+ */
+DB *get_DB(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB *)get_private_dbobj(jnienv, name_DB, obj));
+}
+
+DB_BTREE_STAT *get_DB_BTREE_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_BTREE_STAT *)get_private_dbobj(jnienv, name_DB_BTREE_STAT, obj));
+}
+
+DBC *get_DBC(JNIEnv *jnienv, jobject obj)
+{
+ return ((DBC *)get_private_dbobj(jnienv, name_DBC, obj));
+}
+
+DB_ENV *get_DB_ENV(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_ENV *)get_private_dbobj(jnienv, name_DB_ENV, obj));
+}
+
+DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_ENV_JAVAINFO *)get_private_info(jnienv, name_DB_ENV, obj));
+}
+
+DB_HASH_STAT *get_DB_HASH_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_HASH_STAT *)get_private_dbobj(jnienv, name_DB_HASH_STAT, obj));
+}
+
+DB_JAVAINFO *get_DB_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_JAVAINFO *)get_private_info(jnienv, name_DB, obj));
+}
+
+DB_LOCK *get_DB_LOCK(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LOCK *)get_private_dbobj(jnienv, name_DB_LOCK, obj));
+}
+
+DB_LOG_STAT *get_DB_LOG_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LOG_STAT *)get_private_dbobj(jnienv, name_DB_LOG_STAT, obj));
+}
+
+DB_LSN *get_DB_LSN(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LSN *)get_private_dbobj(jnienv, name_DB_LSN, obj));
+}
+
+DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_MPOOL_FSTAT *)get_private_dbobj(jnienv, name_DB_MPOOL_FSTAT, obj));
+}
+
+DB_MPOOL_STAT *get_DB_MPOOL_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_MPOOL_STAT *)get_private_dbobj(jnienv, name_DB_MPOOL_STAT, obj));
+}
+
+DB_QUEUE_STAT *get_DB_QUEUE_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_QUEUE_STAT *)get_private_dbobj(jnienv, name_DB_QUEUE_STAT, obj));
+}
+
+DB_TXN *get_DB_TXN(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_TXN *)get_private_dbobj(jnienv, name_DB_TXN, obj));
+}
+
+DB_TXN_STAT *get_DB_TXN_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_TXN_STAT *)get_private_dbobj(jnienv, name_DB_TXN_STAT, obj));
+}
+
+DBT *get_DBT(JNIEnv *jnienv, jobject obj)
+{
+ DBT_JAVAINFO *ji;
+
+ ji = (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj);
+ if (ji == NULL)
+ return (NULL);
+ else
+ return (&ji->dbt);
+}
+
+DBT_JAVAINFO *get_DBT_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj));
+}
+
+/* Convert C pointers to the various Java objects they represent.
+ */
+jobject get_DbBtreeStat(JNIEnv *jnienv, DB_BTREE_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_BTREE_STAT, dbobj));
+}
+
+jobject get_Dbc(JNIEnv *jnienv, DBC *dbobj)
+{
+ return (convert_object(jnienv, name_DBC, dbobj));
+}
+
+jobject get_DbHashStat(JNIEnv *jnienv, DB_HASH_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_HASH_STAT, dbobj));
+}
+
+jobject get_DbLogStat(JNIEnv *jnienv, DB_LOG_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_LOG_STAT, dbobj));
+}
+
+/* LSNs are different since they are normally
+ * treated as by-value objects.  We actually create
+ * a pointer to the LSN and store that, deleting it
+ * when the LSN is GC'd.
+ */
+jobject get_DbLsn(JNIEnv *jnienv, DB_LSN dbobj)
+{
+ DB_LSN *lsnp = (DB_LSN *)malloc(sizeof(DB_LSN));
+ memset(lsnp, 0, sizeof(DB_LSN));
+ *lsnp = dbobj;
+ return (convert_object(jnienv, name_DB_LSN, lsnp));
+}
+
+jobject get_Dbt(JNIEnv *jnienv, DBT *dbt)
+{
+ return (convert_object(jnienv, name_DBT, dbt));
+}
+
+jobject get_DbMpoolFStat(JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_MPOOL_FSTAT, dbobj));
+}
+
+jobject get_DbMpoolStat(JNIEnv *jnienv, DB_MPOOL_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_MPOOL_STAT, dbobj));
+}
+
+jobject get_DbQueueStat(JNIEnv *jnienv, DB_QUEUE_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_QUEUE_STAT, dbobj));
+}
+
+jobject get_DbTxn(JNIEnv *jnienv, DB_TXN *dbobj)
+{
+ return (convert_object(jnienv, name_DB_TXN, dbobj));
+}
+
+jobject get_DbTxnStat(JNIEnv *jnienv, DB_TXN_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_TXN_STAT, dbobj));
+}
diff --git a/bdb/libdb_java/java_util.h b/bdb/libdb_java/java_util.h
new file mode 100644
index 00000000000..eb47dc67629
--- /dev/null
+++ b/bdb/libdb_java/java_util.h
@@ -0,0 +1,359 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: java_util.h,v 11.22 2001/01/11 18:19:53 bostic Exp $
+ */
+
+#ifndef _JAVA_UTIL_H_
+#define _JAVA_UTIL_H_
+
+#ifdef _MSC_VER
+
+/* These are level 4 warnings that are explicitly disabled.
+ * With Visual C++, by default you do not see above level 3 unless
+ * you use /W4. But we like to compile with the highest level
+ * warnings to catch other errors.
+ *
+ * 4201: nameless struct/union
+ * triggered by standard include file <winnt.h>
+ *
+ * 4244: '=' : convert from '__int64' to 'unsigned int', possible loss of data
+ * results from making size_t data members correspond to jlongs
+ *
+ * 4514: unreferenced inline function has been removed
+ * jni.h defines methods that are not called
+ *
+ * 4127: conditional expression is constant
+ * occurs because of arg in JAVADB_RW_ACCESS_STRING macro
+ */
+#pragma warning(disable: 4244 4201 4514 4127)
+
+#endif
+
+#include "db_config.h"
+#include "db.h"
+#include "java_info.h"
+#include "java_locked.h"
+#include <jni.h>
+#include <string.h> /* needed for memset */
+
+#define DB_PACKAGE_NAME "com/sleepycat/db/"
+
+/* Union to convert longs to pointers (see {get,set}_private_dbobj).
+ */
+typedef union {
+ jlong java_long;
+ void *ptr;
+} long_to_ptr;
+
+/****************************************************************
+ *
+ * Utility functions and definitions used by "glue" functions.
+ *
+ */
+
+#define NOT_IMPLEMENTED(str) \
+ report_exception(jnienv, str /*concatenate*/ ": not implemented", 0, 0)
+
+/* Get, delete a global reference.
+ * Making this operation a function call allows for
+ * easier tracking for debugging. Global references
+ * are mostly grabbed at 'open' and 'close' points,
+ * so there shouldn't be a big performance hit.
+ *
+ * Macro-izing this makes it easier to add debugging code
+ * to track unreleased references.
+ */
+#ifdef DBJAVA_DEBUG
+#include <unistd.h>
+static void wrdebug(const char *str)
+{
+ write(2, str, strlen(str));
+ write(2, "\n", 1);
+}
+
+static jobject debug_new_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
+{
+ wrdebug(s);
+ return (*jnienv)->NewGlobalRef(jnienv, obj);
+}
+
+static void debug_delete_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
+{
+ wrdebug(s);
+ (*jnienv)->DeleteGlobalRef(jnienv, obj);
+}
+
+#define NEW_GLOBAL_REF(jnienv, obj) \
+ debug_new_global_ref(jnienv, obj, "+Ref: " #obj)
+#define DELETE_GLOBAL_REF(jnienv, obj) \
+ debug_delete_global_ref(jnienv, obj, "-Ref: " #obj)
+#else
+#define NEW_GLOBAL_REF(jnienv, obj) (*jnienv)->NewGlobalRef(jnienv, obj)
+#define DELETE_GLOBAL_REF(jnienv, obj) (*jnienv)->DeleteGlobalRef(jnienv, obj)
+#define wrdebug(x)
+#endif
+
+/* Get the private data from a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj);
+
+/* Set the private data in a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value);
+
+/* Get the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj);
+
+/* Set the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value);
+
+/*
+ * Given a non-qualified name (e.g. "foo"), get the class handle
+ * for the fully qualified name (e.g. "com.sleepycat.db.foo")
+ */
+jclass get_class(JNIEnv *jnienv, const char *classname);
+
+/* Set an individual field in a Db* object.
+ * The field must be a DB object type.
+ */
+void set_object_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *object_classname,
+ const char *name_of_field, jobject obj);
+
+/* Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_int_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jint value);
+
+/* Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_long_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jlong value);
+
+/* Set an individual field in a Db* object.
+ * The field must be a DbLsn type.
+ */
+void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, DB_LSN value);
+
+/* Values of expect_mask
+ */
+static const int EXCEPTION_FILE_NOT_FOUND = 0x0001;
+
+/* Report an exception back to the java side.
+ */
+void report_exception(JNIEnv *jnienv, const char *text, int err,
+ unsigned long expect_mask);
+
+/* Report an error via the errcall mechanism.
+ */
+void report_errcall(JNIEnv *jnienv, jobject errcall,
+ jstring prefix, const char *message);
+
+/* If the object is null, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_non_null(JNIEnv *jnienv, void *obj);
+
+/* If the error code is non-zero, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_return(JNIEnv *jnienv, int err, unsigned long expect_mask);
+
+/* Create an object of the given class, calling its default constructor.
+ */
+jobject create_default_object(JNIEnv *jnienv, const char *class_name);
+
+/* Convert a DB object to a Java encapsulation of that object.
+ * Note: This implementation creates a new Java object on each call,
+ * so it is generally useful when a new DB object has just been created.
+ */
+jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj);
+
+/* Create a copy of the string
+ */
+char *dup_string(const char *str);
+
+/* Create a malloc'ed copy of the java string.
+ * Caller must free it.
+ */
+char *get_c_string(JNIEnv *jnienv, jstring jstr);
+
+/* Create a java string from the given string
+ */
+jstring get_java_string(JNIEnv *jnienv, const char* string);
+
+/* Convert java objects to the various C pointers they represent.
+ */
+DB *get_DB (JNIEnv *jnienv, jobject obj);
+DB_BTREE_STAT *get_DB_BTREE_STAT (JNIEnv *jnienv, jobject obj);
+DBC *get_DBC (JNIEnv *jnienv, jobject obj);
+DB_ENV *get_DB_ENV (JNIEnv *jnienv, jobject obj);
+DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO (JNIEnv *jnienv, jobject obj);
+DB_HASH_STAT *get_DB_HASH_STAT (JNIEnv *jnienv, jobject obj);
+DB_JAVAINFO *get_DB_JAVAINFO (JNIEnv *jnienv, jobject obj);
+DB_LOCK *get_DB_LOCK (JNIEnv *jnienv, jobject obj);
+DB_LOG_STAT *get_DB_LOG_STAT (JNIEnv *jnienv, jobject obj);
+DB_LSN *get_DB_LSN (JNIEnv *jnienv, jobject obj);
+DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj);
+DB_MPOOL_STAT *get_DB_MPOOL_STAT (JNIEnv *jnienv, jobject obj);
+DB_QUEUE_STAT *get_DB_QUEUE_STAT (JNIEnv *jnienv, jobject obj);
+DB_TXN *get_DB_TXN (JNIEnv *jnienv, jobject obj);
+DB_TXN_STAT *get_DB_TXN_STAT (JNIEnv *jnienv, jobject obj);
+DBT *get_DBT (JNIEnv *jnienv, jobject obj);
+DBT_JAVAINFO *get_DBT_JAVAINFO (JNIEnv *jnienv, jobject obj);
+
+/* From a C object, create a Java object.
+ */
+jobject get_DbBtreeStat (JNIEnv *jnienv, DB_BTREE_STAT *dbobj);
+jobject get_Dbc (JNIEnv *jnienv, DBC *dbobj);
+jobject get_DbHashStat (JNIEnv *jnienv, DB_HASH_STAT *dbobj);
+jobject get_DbLogStat (JNIEnv *jnienv, DB_LOG_STAT *dbobj);
+jobject get_DbLsn (JNIEnv *jnienv, DB_LSN dbobj);
+jobject get_DbMpoolStat (JNIEnv *jnienv, DB_MPOOL_STAT *dbobj);
+jobject get_DbMpoolFStat (JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj);
+jobject get_DbQueueStat (JNIEnv *jnienv, DB_QUEUE_STAT *dbobj);
+jobject get_Dbt (JNIEnv *jnienv, DBT *dbt);
+jobject get_DbTxn (JNIEnv *jnienv, DB_TXN *dbobj);
+jobject get_DbTxnStat (JNIEnv *jnienv, DB_TXN_STAT *dbobj);
+
+/* The java names of DB classes */
+extern const char * const name_DB;
+extern const char * const name_DB_BTREE_STAT;
+extern const char * const name_DBC;
+extern const char * const name_DB_DEADLOCK_EX;
+extern const char * const name_DB_ENV;
+extern const char * const name_DB_EXCEPTION;
+extern const char * const name_DB_HASH_STAT;
+extern const char * const name_DB_LOCK;
+extern const char * const name_DB_LOCK_STAT;
+extern const char * const name_DB_LOG_STAT;
+extern const char * const name_DB_LSN;
+extern const char * const name_DB_MEMORY_EX;
+extern const char * const name_DB_MPOOL_FSTAT;
+extern const char * const name_DB_MPOOL_STAT;
+extern const char * const name_DB_QUEUE_STAT;
+extern const char * const name_DB_RUNRECOVERY_EX;
+extern const char * const name_DBT;
+extern const char * const name_DB_TXN;
+extern const char * const name_DB_TXN_STAT;
+extern const char * const name_DB_TXN_STAT_ACTIVE;
+extern const char * const name_DbAppendRecno;
+extern const char * const name_DbBtreeCompare;
+extern const char * const name_DbBtreePrefix;
+extern const char * const name_DbDupCompare;
+extern const char * const name_DbEnvFeedback;
+extern const char * const name_DbErrcall;
+extern const char * const name_DbFeedback;
+extern const char * const name_DbHash;
+extern const char * const name_DbRecoveryInit;
+extern const char * const name_DbTxnRecover;
+
+extern const char * const string_signature;
+
+#define JAVADB_RO_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field) \
+JNIEXPORT j_fieldtype JNICALL \
+ Java_com_sleepycat_db_##j_class##_get_1##j_field \
+ (JNIEnv *jnienv, jobject jthis) \
+{ \
+ c_type *db_this = get_##c_type(jnienv, jthis); \
+ \
+ if (verify_non_null(jnienv, db_this)) { \
+ return db_this->c_field; \
+ } \
+ return 0; \
+}
+
+#define JAVADB_WO_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field) \
+JNIEXPORT void JNICALL \
+ Java_com_sleepycat_db_##j_class##_set_1##j_field \
+ (JNIEnv *jnienv, jobject jthis, j_fieldtype value) \
+{ \
+ c_type *db_this = get_##c_type(jnienv, jthis); \
+ \
+ if (verify_non_null(jnienv, db_this)) { \
+ db_this->c_field = value; \
+ } \
+}
+
+/* This is a variant of the JAVADB_WO_ACCESS macro to define a simple set_
+ * method using a C "method" call. These should be used with set_
+ * methods that cannot invoke java 'callbacks' (no set_ method currently
+ * does that).  That assumption allows us to optimize (and simplify)
+ * by not calling the API_BEGIN/END macros; an example instantiation
+ * follows the macro definition.
+ */
+#define JAVADB_WO_ACCESS_METHOD(j_class, j_fieldtype, \
+ j_field, c_type, c_field) \
+JNIEXPORT void JNICALL \
+ Java_com_sleepycat_db_##j_class##_set_1##j_field \
+ (JNIEnv *jnienv, jobject jthis, j_fieldtype value) \
+{ \
+ c_type *db_this; \
+ int err; \
+ \
+ db_this = get_##c_type(jnienv, jthis); \
+ if (verify_non_null(jnienv, db_this)) { \
+ err = db_this->set_##c_field(db_this, value); \
+ verify_return(jnienv, err, 0); \
+ } \
+}
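+
+/* For example, a hypothetical instantiation (the real uses live in the
+ * per-class .c files) such as
+ *
+ *	JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max, DB_ENV, lk_max)
+ *
+ * expands to a JNI entry point named
+ * Java_com_sleepycat_db_DbEnv_set_1lk_1max that fetches the underlying
+ * DB_ENV and forwards the value to DB_ENV->set_lk_max(), reporting any
+ * nonzero return as a DbException via verify_return().
+ */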
+
+#define JAVADB_RW_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field) \
+ JAVADB_RO_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field) \
+ JAVADB_WO_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field)
+
+#define JAVADB_WO_ACCESS_STRING(j_class, j_field, c_type, c_field) \
+JNIEXPORT void JNICALL \
+ Java_com_sleepycat_db_##j_class##_set_1##j_field \
+ (JNIEnv *jnienv, jobject jthis, jstring value) \
+{ \
+ c_type *db_this; \
+ int err; \
+ \
+ db_this = get_##c_type(jnienv, jthis); \
+ if (verify_non_null(jnienv, db_this)) { \
+ err = db_this->set_##c_field(db_this, \
+ (*jnienv)->GetStringUTFChars(jnienv, value, NULL)); \
+ verify_return(jnienv, err, 0); \
+ } \
+}
+
+#define JAVADB_API_BEGIN(db, jthis) \
+ if ((db) != NULL) \
+ ((DB_JAVAINFO*)(db)->cj_internal)->jdbref_ = \
+ ((DB_ENV_JAVAINFO*)((db)->dbenv->cj_internal))->jdbref_ = (jthis)
+
+#define JAVADB_API_END(db) \
+ if ((db) != NULL) \
+ ((DB_JAVAINFO*)(db)->cj_internal)->jdbref_ = \
+ ((DB_ENV_JAVAINFO*)((db)->dbenv->cj_internal))->jdbref_ = 0
+
+#define JAVADB_ENV_API_BEGIN(dbenv, jthis) \
+ if ((dbenv) != NULL) \
+ ((DB_ENV_JAVAINFO*)((dbenv)->cj_internal))->jenvref_ = (jthis)
+
+#define JAVADB_ENV_API_END(dbenv) \
+ if ((dbenv) != NULL) \
+ ((DB_ENV_JAVAINFO*)((dbenv)->cj_internal))->jenvref_ = 0
+
+#endif /* !_JAVA_UTIL_H_ */
diff --git a/bdb/lock/Design b/bdb/lock/Design
new file mode 100644
index 00000000000..ac8f0b02fbf
--- /dev/null
+++ b/bdb/lock/Design
@@ -0,0 +1,293 @@
+# $Id: Design,v 11.3 2000/02/19 20:58:03 bostic Exp $
+
+Synchronization in the Locking Subsystem
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+1. Data structures
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+The lock manager maintains 3 different structures:
+
+Objects (__db_lockobj):
+ Describes an object that is locked. When used with DB, this consists
+ of a __db_ilock (a file identifier and a page number).
+
+Lockers (__db_locker):
+ Identifies a specific locker ID and maintains the head of a list of
+ locks held by a locker (for using during transaction commit/abort).
+
+Locks (__db_lock):
+ Describes a particular object lock held on behalf of a particular
+ locker id.
+
+Objects and Lockers reference Locks.
+
+These structures are organized via two synchronized hash tables. Each
+hash table consists of two physical arrays: the array of actual hash
+buckets and an array of mutexes so we can lock individual buckets, rather
+than the whole table.
+
+One hash table contains Objects and the other hash table contains Lockers.
+Objects contain two lists of locks, waiters and holders: holders currently
+hold a lock on the Object, while waiters are locks waiting to be granted.
+Each Locker heads a singly linked list that connects the Locks held on
+behalf of that specific locker ID.  (A rough structure sketch follows.)
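+
+In rough outline (an illustrative sketch only; the field and type names
+below are simplified, the real definitions live in the lock subsystem's
+header files):
+
+	struct __db_lockobj {			/* a locked object */
+		SH_TAILQ_HEAD(...) holders;	/* granted locks */
+		SH_TAILQ_HEAD(...) waiters;	/* blocked lock requests */
+	};
+
+	struct __db_locker {			/* a locker ID */
+		u_int32_t id;
+		SH_LIST_HEAD(...) heldby;	/* all locks for this ID */
+	};
+
+	struct __db_lock {			/* one lock request */
+		u_int32_t holder;		/* locker ID */
+		db_lockmode_t mode;
+		SH_TAILQ_ENTRY links;		/* object holder/waiter list */
+		SH_LIST_ENTRY locker_links;	/* locker heldby list */
+	};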
+
+In the diagram below:
+
+Locker ID #1 holds a lock on Object #1 (L1) and Object #2 (L5), and is
+waiting on a lock on Object #1 (L3).
+
+Locker ID #2 holds a lock on Object #1 (L2) and is waiting on a lock for
+Object #2 (L7).
+
+Locker ID #3 is waiting for a lock on Object #2 (L6).
+
+ OBJECT -----------------------
+ HASH | |
+ ----|------------- |
+ ________ _______ | | ________ | |
+ | |-->| O1 |--|---|-->| O2 | | |
+ |_______| |_____| | | |______| V |
+ | | W H--->L1->L2 W H--->L5 | holders
+ |_______| | | | | V
+ | | ------->L3 \ ------->L6------>L7 waiters
+ |_______| / \ \
+ . . / \ \
+ . . | \ \
+ . . | \ -----------
+ |_______| | -------------- |
+ | | ____|____ ___|_____ _|______
+ |_______| | | | | | |
+ | | | LID1 | | LID2 | | LID3 |
+ |_______| |_______| |_______| |______|
+ ^ ^ ^
+ | | |
+ ___|________________________|________|___
+ LOCKER | | | | | | | | |
+ HASH | | | | | | | | |
+ | | | | | | | | |
+ |____|____|____|____|____|____|____|____|
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+2. Synchronization
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+There are four types of mutexes in the subsystem.
+
+Object mutexes:
+ These map one-to-one to each bucket in the Object hash table.
+ Holding a mutex on an Object bucket secures all the Objects in
+ that bucket as well as the Lock structures linked from those
+ Objects. All fields in the Locks EXCEPT the Locker links (the
+ links that attach Locks by Locker ID) are protected by these
+ mutexes.
+
+Locker mutexes:
+ These map one-to-one to each bucket in the Locker hash table.
+ Holding a mutex on a Locker bucket secures the Locker structures
+ and the Locker links in the Locks.
+
+Memory mutex:
+ This mutex allows calls to allocate/free memory, i.e. calls to
+ __db_shalloc and __db_shalloc_free, as well as manipulation of
+ the Object, Locker and Lock free lists.
+
+Region mutex:
+ This mutex is currently only used to protect the locker ids.
+ It may also be needed later to provide exclusive access to
+ the region for deadlock detection.
+
+Creating or removing a Lock requires locking both the Object lock and the
+Locker lock (and eventually the shalloc lock to return the item to the
+free list).
+
+The locking hierarchy is as follows:
+
+ The Region mutex may never be acquired after any other mutex.
+
+ The Object mutex may be acquired after the Region mutex.
+
+ The Locker mutex may be acquired after the Region and Object
+ mutexes.
+
+ The Memory mutex may be acquired after any mutex.
+
+So, if both an Object mutex and a Locker mutex are going to be acquired,
+the Object mutex must be acquired first.
+
+The Memory mutex may be acquired after any other mutex, but no other mutexes
+can be acquired once the Memory mutex is held.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+3. The algorithms:
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+The locking subsystem supports four basic operations:
+ Get a Lock (lock_get)
+
+ Release a Lock (lock_put)
+
+ Release all the Locks on a specific Object (lock_vec)
+
+ Release all the Locks for a specific Locker (lock_vec)
+
+Get a lock:
+ Acquire Object bucket mutex.
+ Acquire Locker bucket mutex.
+
+ Acquire Memory mutex.
+ If the Object does not exist
+ Take an Object off the freelist.
+ If the Locker doesn't exist
+ Take a Locker off the freelist.
+ Take a Lock off the free list.
+ Release Memory mutex.
+
+ Add Lock to the Object list.
+ Add Lock to the Locker list.
+ Release Locker bucket mutex
+
+ If the lock cannot be granted
+ Release Object bucket mutex
+ Acquire lock mutex (blocks)
+
+ Acquire Object bucket mutex
+ If lock acquisition did not succeed (e.g, deadlock)
+ Acquire Locker bucket mutex
+ If locker should be destroyed
+ Remove locker from hash table
+ Acquire Memory mutex
+ Return locker to free list
+ Release Memory mutex
+ Release Locker bucket mutex
+
+ If object should be released
+ Acquire Memory mutex
+ Return object to free list
+ Release Memory mutex
+
+ Release Object bucket mutex
+
+Release a lock:
+ Acquire Object bucket mutex.
+ (Requires that we be able to find the Object hash bucket
+ without looking inside the Lock itself.)
+
+ If releasing a single lock and the user provided generation number
+ doesn't match the Lock's generation number, the Lock has been reused
+ and we return failure.
+
+ Enter lock_put_internal:
+ if the Lock is still on the Object's lists:
+ Increment Lock's generation number.
+ Remove Lock from the Object's list (NULL link fields).
+ Promote locks for the Object.
+
+ Enter locker_list_removal
+ Acquire Locker bucket mutex.
+ If Locker doesn't exist:
+ Release Locker bucket mutex
+ Release Object bucket mutex
+ Return error.
+ Else if Locker marked as deleted:
+ dont_release = TRUE
+ Else
+ Remove Lock from Locker list.
+ If Locker has no more locks
+ Remove Locker from table.
+ Acquire Memory mutex.
+ Return Locker to free list
+ Release Memory mutex
+ Release Locker bucket mutex.
+ Exit locker_list_removal
+
+ If (!dont_release)
+ Acquire Memory mutex
+ Return Lock to free list
+ Release Memory mutex
+
+ Exit lock_put_internal
+
+ Release Object bucket mutex
+
+Release all the Locks on a specific Object (lock_vec, DB_PUT_ALL_OBJ):
+
+ Acquire Object bucket mutex.
+
+ For each lock on the waiter list:
+ lock_put_internal
+ For each lock on the holder list:
+ lock_put_internal
+
+ Release Object bucket mutex.
+
+Release all the Locks for a specific Locker (lock_vec, DB_PUT_ALL):
+
+ Acquire Locker bucket mutex.
+ Mark Locker deleted.
+ Release Locker mutex.
+
+ For each lock on the Locker's list:
+ Remove from locker's list
+ (The lock could get put back on the free list in
+ lock_put and then could get reallocated and the
+ act of setting its locker links could clobber us.)
+ Perform "Release a Lock" above: skip locker_list_removal.
+
+ Acquire Locker bucket mutex.
+ Remove Locker
+ Release Locker mutex.
+
+ Acquire Memory mutex
+ Return Locker to free list
+ Release Memory mutex
+
+Deadlock detection (lock_detect):
+
+ For each bucket in Object table
+ Acquire the Object bucket mutex.
+ create waitsfor
+
+ For each bucket in Object table
+ Release the Object mutex.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+FAQ:
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Q: Why do you need generation numbers?
+A: If a lock has been released due to a transaction abort (potentially in a
+ different process), and the lock is then released again by a thread of
+ control unaware of the abort, the lock might already have been
+ re-allocated to a different object.  The generation numbers detect this
+ problem.
+
+ Note, we assume that reads/writes of lock generation numbers are atomic,
+ if they are not, it is theoretically possible that a re-allocated lock
+ could be mistaken for another lock.
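+
+ As an illustrative sketch (not the actual lock.c code), the release
+ path guards against a stale lock handle roughly like this:
+
+	if (lockp->gen != lock->gen) {		/* handle is stale */
+		__db_err(dbenv, __db_lock_invalid, "lock_put");
+		return (EINVAL);
+	}
+
+ where "lock" is the DB_LOCK handle the caller passed in and "lockp" is
+ the shared-memory lock structure it refers to.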
+
+Q: Why is it safe to walk the Locker list without holding any mutexes at
+ all?
+A: Locks are created with both the Object and Locker bucket mutexes held.
+ Once created, they are removed in two ways:
+
+ a) when a specific Lock is released, in which case, the Object and
+ Locker bucket mutexes are again held, and
+
+ b) when all Locks for a specific Locker ID are released.
+
+ In case b), the Locker bucket mutex is held while the Locker chain is
+ marked as "destroyed", which blocks any further access to the Locker
+ chain. Then, each individual Object bucket mutex is acquired when each
+ individual Lock is removed.
+
+Q: What are the implications of doing fine grain locking?
+
+A: Since we no longer globally lock the entire region, lock_vec will no
+ longer be atomic. We still execute the items in a lock_vec in order,
+ so things like lock-coupling still work, but you can't make any
+ guarantees about atomicity.
+
+Q: How do I configure for FINE_GRAIN locking?
+
+A: We currently do not support any automatic configuration for FINE_GRAIN
+ locking.  When we do, we will need to document the atomicity discussion
+ above (it is bug-report #553).
diff --git a/bdb/lock/lock.c b/bdb/lock/lock.c
new file mode 100644
index 00000000000..8d246f7ded3
--- /dev/null
+++ b/bdb/lock/lock.c
@@ -0,0 +1,1439 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock.c,v 11.40 2000/12/19 23:18:58 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "log.h"
+#include "db_am.h"
+#include "txn.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __lock_checklocker __P((DB_LOCKTAB *,
+ struct __db_lock *, u_int32_t, u_int32_t, int *));
+static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t,
+ u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+static int __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
+static int __lock_put_internal __P((DB_LOCKTAB *,
+ struct __db_lock *, u_int32_t, u_int32_t));
+static int __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, int));
+static void __lock_remove_waiter __P((DB_ENV *,
+ DB_LOCKOBJ *, struct __db_lock *, db_status_t));
+
+static const char __db_lock_err[] = "Lock table is out of available %s";
+static const char __db_lock_invalid[] = "%s: Lock is no longer valid";
+static const char __db_locker_invalid[] = "Locker is not valid";
+
+/*
+ * lock_id --
+ * Generate a unique locker id.
+ */
+int
+lock_id(dbenv, idp)
+ DB_ENV *dbenv;
+ u_int32_t *idp;
+{
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_lock_id(dbenv, idp));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ /*
+ * Note that we are letting locker IDs wrap.
+ *
+ * This is potentially dangerous in that it's conceivable that you
+ * could be allocating a new locker id and still have someone using
+ * it. However, the alternatives are that we keep a bitmap of
+ * locker ids or we forbid wrapping. Both are probably bad. The
+ * bitmap of locker ids will take up 64 MB of space. Forbidding
+ * wrapping means that we'll run out of locker IDs after 2 billion.
+ * In order for the wrap bug to fire, we'd need to have something
+ * that stayed open while 2 billion locker ids were used up. Since
+ * we cache cursors it means that something would have to stay open
+ * sufficiently long that we open and close a lot of files and a
+ * lot of cursors within them. Betting that this won't happen seems
+ * to be the lesser of the evils.
+ */
+ LOCKREGION(dbenv, lt);
+ if (region->id >= DB_LOCK_MAXID)
+ region->id = 0;
+ *idp = ++region->id;
+ UNLOCKREGION(dbenv, lt);
+
+ return (0);
+}
+
+/*
+ * Vector lock routine. This function takes a set of operations
+ * and performs them all at once. In addition, lock_vec provides
+ * functionality for lock inheritance, releasing all locks for a
+ * given locker (used during transaction commit/abort), releasing
+ * all locks on a given object, and generating debugging information.
+ */
+int
+lock_vec(dbenv, locker, flags, list, nlist, elistp)
+ DB_ENV *dbenv;
+ u_int32_t locker, flags;
+ int nlist;
+ DB_LOCKREQ *list, **elistp;
+{
+ struct __db_lock *lp, *next_lock;
+ DB_LOCKER *sh_locker, *sh_parent;
+ DB_LOCKOBJ *obj, *sh_obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t lndx, ndx;
+ int did_abort, i, ret, run_dd;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_lock_vec(dbenv, locker,
+ flags, list, nlist, elistp));
+#endif
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "lock_vec", flags, DB_LOCK_NOWAIT)) != 0)
+ return (ret);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ run_dd = 0;
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ for (i = 0, ret = 0; i < nlist && ret == 0; i++)
+ switch (list[i].op) {
+ case DB_LOCK_GET:
+ ret = __lock_get_internal(dbenv->lk_handle,
+ locker, flags,
+ list[i].obj, list[i].mode, &list[i].lock);
+ break;
+ case DB_LOCK_INHERIT:
+
+ /*
+ * Get the committing locker and mark it as deleted.
+ * This allows us to traverse the locker links without
+ * worrying that someone else is deleting locks out
+ * from under us. However, if the locker doesn't
+ * exist, that just means that the child holds no
+ * locks, so inheritance is easy!
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
+ if (ret == 0 && sh_locker != NULL)
+ ret = EACCES;
+ __db_err(dbenv, __db_locker_invalid);
+ break;
+ }
+
+ /* Make sure we are a child transaction. */
+ if (sh_locker->parent_locker == INVALID_ROFF) {
+ __db_err(dbenv, "Not a child transaction");
+ ret = EINVAL;
+ break;
+ }
+ sh_parent = (DB_LOCKER *)
+ R_ADDR(&lt->reginfo, sh_locker->parent_locker);
+ F_SET(sh_locker, DB_LOCKER_DELETED);
+
+ /*
+ * Now, lock the parent locker; move locks from
+ * the committing list to the parent's list.
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if (F_ISSET(sh_parent, DB_LOCKER_DELETED)) {
+ if (ret == 0) {
+ __db_err(dbenv,
+ "Parent locker is not valid");
+ ret = EACCES;
+ }
+ break;
+ }
+
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
+ SH_LIST_REMOVE(lp, locker_links, __db_lock);
+ SH_LIST_INSERT_HEAD(&sh_parent->heldby, lp,
+ locker_links, __db_lock);
+ lp->holder = sh_parent->id;
+
+ /* Get the object associated with this lock. */
+ obj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+
+ (void)__lock_promote(lt, obj,
+ LF_ISSET(DB_LOCK_NOWAITERS));
+ }
+
+ /* Now free the original locker. */
+ ret = __lock_checklocker(lt,
+ NULL, locker, DB_LOCK_IGNOREDEL, NULL);
+ break;
+ case DB_LOCK_PUT:
+ ret =
+ __lock_put_nolock(dbenv, &list[i].lock, &run_dd, 0);
+ break;
+ case DB_LOCK_PUT_ALL:
+ /*
+ * Get the locker and mark it as deleted. This
+ * allows us to traverse the locker links without
+ * worrying that someone else is deleting locks out
+ * from under us. Since the locker may hold no
+ * locks (i.e., you could call abort before you've
+ * done any work), it's perfectly reasonable for there
+ * to be no locker; this is not an error.
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED))
+ /*
+ * If ret is set, then we'll generate an
+ * error. If it's not set, we have nothing
+ * to do.
+ */
+ break;
+ F_SET(sh_locker, DB_LOCKER_DELETED);
+
+ /* Now traverse the locks, releasing each one. */
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
+ SH_LIST_REMOVE(lp, locker_links, __db_lock);
+ sh_obj =
+ (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+ SHOBJECT_LOCK(lt, region, sh_obj, lndx);
+ ret = __lock_put_internal(lt,
+ lp, lndx, DB_LOCK_FREE | DB_LOCK_DOALL);
+ if (ret != 0)
+ break;
+ }
+ ret = __lock_checklocker(lt,
+ NULL, locker, DB_LOCK_IGNOREDEL, NULL);
+ break;
+ case DB_LOCK_PUT_OBJ:
+ /* Remove all the locks associated with an object. */
+ OBJECT_LOCK(lt, region, list[i].obj, ndx);
+ if ((ret = __lock_getobj(lt, list[i].obj,
+ ndx, 0, &sh_obj)) != 0 || sh_obj == NULL) {
+ if (ret == 0)
+ ret = EINVAL;
+ break;
+ }
+
+ /*
+ * Go through both waiters and holders. Don't bother
+ * to run promotion, because everyone is getting
+			 * released.  The waiting processes will still be
+			 * awakened as their waiting locks are released.
+ */
+ for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
+ ret == 0 && lp != NULL;
+ lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock))
+ ret = __lock_put_internal(lt,
+ lp, ndx, DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
+
+ /*
+ * On the last time around, the object will get
+			 * reclaimed by __lock_put_internal, so structure
+			 * the loop carefully to avoid getting bitten.
+ */
+ for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
+ ret == 0 && lp != NULL;
+ lp = next_lock) {
+ next_lock = SH_TAILQ_NEXT(lp, links, __db_lock);
+ ret = __lock_put_internal(lt,
+ lp, ndx, DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
+ }
+ break;
+#ifdef DEBUG
+ case DB_LOCK_DUMP:
+ /* Find the locker. */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0
+ || sh_locker == NULL
+ || F_ISSET(sh_locker, DB_LOCKER_DELETED))
+ break;
+
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) {
+ __lock_printlock(lt, lp, 1);
+ }
+ break;
+#endif
+ default:
+ __db_err(dbenv,
+ "Invalid lock operation: %d", list[i].op);
+ ret = EINVAL;
+ break;
+ }
+
+ if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN) {
+ run_dd = 1;
+ region->need_dd = 0;
+ }
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+ if (run_dd)
+ (void)lock_detect(dbenv, 0, region->detect, &did_abort);
+
+ if (ret != 0 && elistp != NULL)
+ *elistp = &list[i - 1];
+
+ return (ret);
+}
+
+/*
+ * Lock acquisition routines.  There are two library interfaces: lock_get,
+ * the original interface that takes a locker id, and the DB_LOCK_GET
+ * operation of lock_vec.
+ *
+ * All the work for both is done inside __lock_get_internal.
+ */
+int
+lock_get(dbenv, locker, flags, obj, lock_mode, lock)
+ DB_ENV *dbenv;
+ u_int32_t locker, flags;
+ const DBT *obj;
+ db_lockmode_t lock_mode;
+ DB_LOCK *lock;
+{
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_lock_get(dbenv, locker,
+ flags, obj, lock_mode, lock));
+#endif
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+
+ if (IS_RECOVERING(dbenv)) {
+ lock->off = LOCK_INVALID;
+ return (0);
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv,
+ "lock_get", flags,
+ DB_LOCK_NOWAIT | DB_LOCK_UPGRADE | DB_LOCK_SWITCH)) != 0)
+ return (ret);
+
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ ret = __lock_get_internal(dbenv->lk_handle,
+ locker, flags, obj, lock_mode, lock);
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ return (ret);
+}
+
+static int
+__lock_get_internal(lt, locker, flags, obj, lock_mode, lock)
+ DB_LOCKTAB *lt;
+ u_int32_t locker, flags;
+ const DBT *obj;
+ db_lockmode_t lock_mode;
+ DB_LOCK *lock;
+{
+ struct __db_lock *newl, *lp;
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ u_int32_t locker_ndx;
+ int did_abort, freed, ihold, on_locker_list, no_dd, ret;
+
+ no_dd = ret = 0;
+ on_locker_list = 0;
+ region = lt->reginfo.primary;
+ dbenv = lt->dbenv;
+
+ /*
+ * If we are not going to reuse this lock, initialize
+ * the offset to invalid so that if we fail it
+ * will not look like a valid lock.
+ */
+ if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
+ lock->off = LOCK_INVALID;
+
+ /*
+ * Check that the lock mode is valid.
+ */
+ if ((u_int32_t)lock_mode >= region->nmodes) {
+ __db_err(dbenv,
+ "lock_get: invalid lock mode %lu\n", (u_long)lock_mode);
+ return (EINVAL);
+ }
+
+ /* Allocate a new lock. Optimize for the common case of a grant. */
+ region->nrequests++;
+ if ((newl = SH_TAILQ_FIRST(&region->free_locks, __db_lock)) != NULL)
+ SH_TAILQ_REMOVE(&region->free_locks, newl, links, __db_lock);
+ if (newl == NULL) {
+ __db_err(dbenv, __db_lock_err, "locks");
+ return (ENOMEM);
+ }
+ if (++region->nlocks > region->maxnlocks)
+ region->maxnlocks = region->nlocks;
+
+ /* Allocate a new object. */
+ OBJECT_LOCK(lt, region, obj, lock->ndx);
+ if ((ret = __lock_getobj(lt, obj, lock->ndx, 1, &sh_obj)) != 0)
+ goto err;
+
+ /* Get the locker, we may need it to find our parent. */
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ if ((ret =
+ __lock_getlocker(lt, locker, locker_ndx, 1, &sh_locker)) != 0) {
+ /*
+ * XXX: Margo
+ * CLEANUP the object and the lock.
+ */
+ return (ret);
+ }
+
+ /*
+ * Now we have a lock and an object and we need to see if we should
+ * grant the lock. We use a FIFO ordering so we can only grant a
+ * new lock if it does not conflict with anyone on the holders list
+ * OR anyone on the waiters list. The reason that we don't grant if
+ * there's a conflict is that this can lead to starvation (a writer
+ * waiting on a popularly read item will never be granted). The
+ * downside of this is that a waiting reader can prevent an upgrade
+ * from reader to writer, which is not uncommon.
+ *
+ * There is one exception to the no-conflict rule. If a lock is held
+ * by the requesting locker AND the new lock does not conflict with
+ * any other holders, then we grant the lock. The most common place
+ * this happens is when the holder has a WRITE lock and a READ lock
+ * request comes in for the same locker. If we do not grant the read
+ * lock, then we guarantee deadlock.
+ *
+ * In case of conflict, we put the new lock on the end of the waiters
+ * list, unless we are upgrading in which case the locker goes on the
+ * front of the list.
+ */
+ ihold = 0;
+ lp = NULL;
+ if (LF_ISSET(DB_LOCK_SWITCH))
+ goto put_lock;
+
+ for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ if (locker == lp->holder ||
+ __lock_is_parent(lt, lp->holder, sh_locker)) {
+ if (lp->mode == lock_mode &&
+ lp->status == DB_LSTAT_HELD) {
+ if (LF_ISSET(DB_LOCK_UPGRADE))
+ goto upgrade;
+
+ /*
+ * Lock is held, so we can increment the
+ * reference count and return this lock.
+ */
+ lp->refcount++;
+ lock->off = R_OFFSET(&lt->reginfo, lp);
+ lock->gen = lp->gen;
+
+ ret = 0;
+ goto done;
+ } else
+ ihold = 1;
+ } else if (CONFLICTS(lt, region, lp->mode, lock_mode))
+ break;
+ }
+
+ /*
+ * Make the new lock point to the new object, initialize fields.
+ *
+ * This lock is not linked in anywhere, so we can muck with it
+ * without holding any mutexes.
+ */
+put_lock:
+ newl->holder = locker;
+ newl->refcount = 1;
+ newl->mode = lock_mode;
+ newl->obj = SH_PTR_TO_OFF(newl, sh_obj);
+ newl->status = DB_LSTAT_HELD;
+
+ /*
+	 * If we are upgrading, there are two scenarios: either we had
+	 * no conflicts, so we can do the upgrade, or there is a
+	 * conflict and we should wait at the head of the waiters list.
+ */
+ if (LF_ISSET(DB_LOCK_UPGRADE)) {
+ if (lp == NULL)
+ goto upgrade;
+
+ /*
+ * There was a conflict, wait. If this is the first waiter,
+ * add the object to the deadlock detector's list.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_INSERT_HEAD(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+
+ SH_TAILQ_INSERT_HEAD(&sh_obj->waiters, newl, links, __db_lock);
+ goto llist;
+ }
+
+ if (lp == NULL && !ihold)
+ for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ if (CONFLICTS(lt, region, lp->mode, lock_mode) &&
+ locker != lp->holder)
+ break;
+ }
+ if (!LF_ISSET(DB_LOCK_SWITCH) && lp == NULL)
+ SH_TAILQ_INSERT_TAIL(&sh_obj->holders, newl, links);
+ else if (!LF_ISSET(DB_LOCK_NOWAIT)) {
+ /*
+ * If this is the first waiter, add the object to the
+ * deadlock detector's list.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_INSERT_HEAD(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+ SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links);
+ } else {
+ ret = DB_LOCK_NOTGRANTED;
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL
+ && LOCKER_FREEABLE(sh_locker))
+ __lock_freelocker( lt, region, sh_locker, locker_ndx);
+ region->nnowaits++;
+ goto err;
+ }
+
+llist:
+ /*
+ * Now, insert the lock onto its locker's list. If the locker does
+ * not currently hold any locks, there's no reason to run a deadlock
+	 * detector, so save that information.
+ */
+ on_locker_list = 1;
+ no_dd = sh_locker->master_locker == INVALID_ROFF
+ && SH_LIST_FIRST(&sh_locker->child_locker, __db_locker) == NULL
+ && SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL;
+
+ SH_LIST_INSERT_HEAD(&sh_locker->heldby, newl, locker_links, __db_lock);
+
+ if (LF_ISSET(DB_LOCK_SWITCH) || lp != NULL) {
+ if (LF_ISSET(DB_LOCK_SWITCH) &&
+ (ret = __lock_put_nolock(dbenv,
+ lock, &ihold, DB_LOCK_NOWAITERS)) != 0)
+ goto err;
+ /*
+ * This is really a blocker for the thread. It should be
+ * initialized locked, so that when we try to acquire it, we
+ * block.
+ */
+ newl->status = DB_LSTAT_WAITING;
+ region->nconflicts++;
+ if (region->detect == DB_LOCK_NORUN)
+ region->need_dd = 1;
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+ /*
+ * We are about to wait; before waiting, see if the deadlock
+ * detector should be run.
+ */
+ if (region->detect != DB_LOCK_NORUN && !no_dd)
+ (void)lock_detect(dbenv, 0, region->detect, &did_abort);
+
+ MUTEX_LOCK(dbenv, &newl->mutex, dbenv->lockfhp);
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+ if (newl->status != DB_LSTAT_PENDING) {
+ (void)__lock_checklocker(lt,
+ newl, newl->holder, 0, &freed);
+ switch (newl->status) {
+ case DB_LSTAT_ABORTED:
+ on_locker_list = 0;
+ ret = DB_LOCK_DEADLOCK;
+ break;
+ case DB_LSTAT_NOGRANT:
+ ret = DB_LOCK_NOTGRANTED;
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+ goto err;
+ } else if (LF_ISSET(DB_LOCK_UPGRADE)) {
+ /*
+ * The lock that was just granted got put on the
+ * holders list. Since we're upgrading some other
+ * lock, we've got to remove it here.
+ */
+ SH_TAILQ_REMOVE(
+ &sh_obj->holders, newl, links, __db_lock);
+ /*
+			 * Ensure that the lock is not believed to be on
+			 * the object's lists, if we're traversing by locker.
+ */
+ newl->links.stqe_prev = -1;
+ goto upgrade;
+ } else
+ newl->status = DB_LSTAT_HELD;
+ }
+
+ lock->off = R_OFFSET(&lt->reginfo, newl);
+ lock->gen = newl->gen;
+
+ return (0);
+
+upgrade:/*
+ * This was an upgrade, so return the new lock to the free list and
+ * upgrade the mode of the original lock.
+ */
+ ((struct __db_lock *)R_ADDR(&lt->reginfo, lock->off))->mode = lock_mode;
+
+ ret = 0;
+ /* FALLTHROUGH */
+
+done:
+err: newl->status = DB_LSTAT_FREE;
+ if (on_locker_list) {
+ SH_LIST_REMOVE(newl, locker_links, __db_lock);
+ }
+ SH_TAILQ_INSERT_HEAD(&region->free_locks, newl, links, __db_lock);
+ region->nlocks--;
+ return (ret);
+}
+
+/*
+ * Lock release routines.
+ *
+ * The user callable one is lock_put and the three we use internally are
+ * __lock_put_nolock, __lock_put_internal and __lock_downgrade.
+ */
+int
+lock_put(dbenv, lock)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+{
+ DB_LOCKTAB *lt;
+ int ret, run_dd;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_lock_put(dbenv, lock));
+#endif
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+
+ if (IS_RECOVERING(dbenv))
+ return (0);
+
+ lt = dbenv->lk_handle;
+
+ LOCKREGION(dbenv, lt);
+ ret = __lock_put_nolock(dbenv, lock, &run_dd, 0);
+ UNLOCKREGION(dbenv, lt);
+
+ if (ret == 0 && run_dd)
+ (void)lock_detect(dbenv, 0,
+ ((DB_LOCKREGION *)lt->reginfo.primary)->detect, NULL);
+ return (ret);
+}
+
+static int
+__lock_put_nolock(dbenv, lock, runp, flags)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ int *runp;
+ int flags;
+{
+ struct __db_lock *lockp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t locker;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ lockp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ lock->off = LOCK_INVALID;
+ if (lock->gen != lockp->gen) {
+ __db_err(dbenv, __db_lock_invalid, "lock_put");
+ return (EACCES);
+ }
+
+ locker = lockp->holder;
+ ret = __lock_put_internal(lt,
+ lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);
+
+ *runp = 0;
+ if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN) {
+ *runp = 1;
+ region->need_dd = 0;
+ }
+
+ return (ret);
+}
+
+/*
+ * __lock_downgrade --
+ * Used by the concurrent access product to downgrade write locks
+ * back to iwrite locks.
+ *
+ * PUBLIC: int __lock_downgrade __P((DB_ENV *,
+ * PUBLIC: DB_LOCK *, db_lockmode_t, u_int32_t));
+ */
+int
+__lock_downgrade(dbenv, lock, new_mode, flags)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ db_lockmode_t new_mode;
+ u_int32_t flags;
+{
+ struct __db_lock *lockp;
+ DB_LOCKOBJ *obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ PANIC_CHECK(dbenv);
+
+	ret = 0;
+	lt = dbenv->lk_handle;
+	region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+
+ lockp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ if (lock->gen != lockp->gen) {
+ __db_err(dbenv, __db_lock_invalid, "lock_downgrade");
+ ret = EACCES;
+ goto out;
+ }
+
+ lockp->mode = new_mode;
+
+ /* Get the object associated with this lock. */
+ obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+ (void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS));
+
+ ++region->nreleases;
+out: UNLOCKREGION(dbenv, lt);
+
+	return (ret);
+}
+
+static int
+__lock_put_internal(lt, lockp, obj_ndx, flags)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lockp;
+ u_int32_t obj_ndx;
+ u_int32_t flags;
+{
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ int no_reclaim, ret, state_changed;
+
+ region = lt->reginfo.primary;
+ no_reclaim = ret = state_changed = 0;
+
+ if (!OBJ_LINKS_VALID(lockp)) {
+ /*
+ * Someone removed this lock while we were doing a release
+ * by locker id. We are trying to free this lock, but it's
+ * already been done; all we need to do is return it to the
+ * free list.
+ */
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->nlocks--;
+ return (0);
+ }
+
+ if (LF_ISSET(DB_LOCK_DOALL))
+ region->nreleases += lockp->refcount;
+ else
+ region->nreleases++;
+
+ if (!LF_ISSET(DB_LOCK_DOALL) && lockp->refcount > 1) {
+ lockp->refcount--;
+ return (0);
+ }
+
+ /* Increment generation number. */
+ lockp->gen++;
+
+ /* Get the object associated with this lock. */
+ sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+
+ /* Remove this lock from its holders/waitlist. */
+ if (lockp->status != DB_LSTAT_HELD)
+ __lock_remove_waiter(lt->dbenv, sh_obj, lockp, DB_LSTAT_FREE);
+ else {
+ SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
+ lockp->links.stqe_prev = -1;
+ }
+
+ if (LF_ISSET(DB_LOCK_NOPROMOTE))
+ state_changed = 0;
+ else
+ state_changed =
+ __lock_promote(lt, sh_obj, LF_ISSET(DB_LOCK_NOWAITERS));
+
+ if (LF_ISSET(DB_LOCK_UNLINK))
+ ret = __lock_checklocker(lt, lockp, lockp->holder, flags, NULL);
+
+ /* Check if object should be reclaimed. */
+ if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL
+ && SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL) {
+ HASHREMOVE_EL(lt->obj_tab,
+ obj_ndx, __db_lockobj, links, sh_obj);
+ if (sh_obj->lockobj.size > sizeof(sh_obj->objdata))
+ __db_shalloc_free(lt->reginfo.addr,
+ SH_DBT_PTR(&sh_obj->lockobj));
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_objs, sh_obj, links, __db_lockobj);
+ region->nobjects--;
+ state_changed = 1;
+ }
+
+ /* Free lock. */
+ if (!LF_ISSET(DB_LOCK_UNLINK) && LF_ISSET(DB_LOCK_FREE)) {
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->nlocks--;
+ }
+
+ /*
+	 * If we did not promote anyone, we need to run the deadlock
+ * detector again.
+ */
+ if (state_changed == 0)
+ region->need_dd = 1;
+
+ return (ret);
+}
+
+/*
+ * Utility functions; listed alphabetically.
+ */
+
+/*
+ * __lock_checklocker --
+ *	If a locker has no more locks, then we can free the locker.
+ * Return a boolean indicating whether we freed the locker or not.
+ *
+ * Must be called without the locker's lock set.
+ */
+static int
+__lock_checklocker(lt, lockp, locker, flags, freed)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lockp;
+ u_int32_t locker, flags;
+ int *freed;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t indx;
+ int ret;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+ ret = 0;
+
+ if (freed != NULL)
+ *freed = 0;
+
+ LOCKER_LOCK(lt, region, locker, indx);
+
+ /* If the locker's list is NULL, free up the locker. */
+ if ((ret = __lock_getlocker(lt,
+ locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
+ if (ret == 0)
+ ret = EACCES;
+ __db_err(lt->dbenv, __db_locker_invalid);
+ goto freelock;
+ }
+
+ if (F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
+ LF_CLR(DB_LOCK_FREE);
+ if (!LF_ISSET(DB_LOCK_IGNOREDEL))
+ goto freelock;
+ }
+
+ if (LF_ISSET(DB_LOCK_UNLINK))
+ SH_LIST_REMOVE(lockp, locker_links, __db_lock);
+
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL
+ && LOCKER_FREEABLE(sh_locker)) {
+ __lock_freelocker( lt, region, sh_locker, indx);
+ if (freed != NULL)
+ *freed = 1;
+ }
+
+freelock:
+ if (LF_ISSET(DB_LOCK_FREE)) {
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->nlocks--;
+ }
+
+ return (ret);
+}
+
+/*
+ * __lock_addfamilylocker
+ * Put a locker entry in for a child transaction.
+ *
+ * PUBLIC: int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__lock_addfamilylocker(dbenv, pid, id)
+ DB_ENV *dbenv;
+ u_int32_t pid, id;
+{
+ DB_LOCKER *lockerp, *mlockerp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+	/* Get/create the parent locker info. */
+ LOCKER_LOCK(lt, region, pid, ndx);
+ if ((ret = __lock_getlocker(dbenv->lk_handle,
+ pid, ndx, 1, &mlockerp)) != 0)
+ goto err;
+
+ /*
+ * We assume that only one thread can manipulate
+ * a single transaction family.
+ * Therefore the master locker cannot go away while
+ * we manipulate it, nor can another child in the
+ * family be created at the same time.
+ */
+ LOCKER_LOCK(lt, region, id, ndx);
+ if ((ret = __lock_getlocker(dbenv->lk_handle,
+ id, ndx, 1, &lockerp)) != 0)
+ goto err;
+
+ /* Point to our parent. */
+ lockerp->parent_locker = R_OFFSET(&lt->reginfo, mlockerp);
+
+ /* See if this locker is the family master. */
+ if (mlockerp->master_locker == INVALID_ROFF)
+ lockerp->master_locker = R_OFFSET(&lt->reginfo, mlockerp);
+ else {
+ lockerp->master_locker = mlockerp->master_locker;
+ mlockerp = R_ADDR(&lt->reginfo, mlockerp->master_locker);
+ }
+
+ /*
+ * Link the child at the head of the master's list.
+	 * When looking for a deadlock, the guess is that
+	 * the most recent child is the one that's blocked.
+ */
+ SH_LIST_INSERT_HEAD(
+ &mlockerp->child_locker, lockerp, child_link, __db_locker);
+
+err:
+ UNLOCKREGION(dbenv, lt);
+
+ return (ret);
+}
+
+/*
+ * __lock_freefamilylocker
+ * Remove a locker from the hash table and its family.
+ *
+ * This must be called without the locker bucket locked.
+ *
+ * PUBLIC: int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t));
+ */
+int
+__lock_freefamilylocker(lt, locker)
+ DB_LOCKTAB *lt;
+ u_int32_t locker;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t indx;
+ int ret;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+ LOCKER_LOCK(lt, region, locker, indx);
+
+ if ((ret = __lock_getlocker(lt,
+ locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
+ if (ret == 0)
+ ret = EACCES;
+ goto freelock;
+ }
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) != NULL) {
+ ret = EINVAL;
+ __db_err(dbenv, "Freeing locker with locks");
+ goto freelock;
+ }
+
+ /* If this is part of a family, we must fix up its links. */
+ if (sh_locker->master_locker != INVALID_ROFF)
+ SH_LIST_REMOVE(sh_locker, child_link, __db_locker);
+
+ __lock_freelocker(lt, region, sh_locker, indx);
+
+freelock:
+ UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+/*
+ * __lock_freelocker
+ *	Common code for deleting a locker.
+ *
+ * This must be called with the locker bucket locked.
+ *
+ * PUBLIC: void __lock_freelocker __P((DB_LOCKTAB *,
+ * PUBLIC: DB_LOCKREGION *, DB_LOCKER *, u_int32_t));
+ */
+void
+__lock_freelocker(lt, region, sh_locker, indx)
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+ DB_LOCKER *sh_locker;
+ u_int32_t indx;
+
+{
+ HASHREMOVE_EL(
+ lt->locker_tab, indx, __db_locker, links, sh_locker);
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_lockers, sh_locker, links, __db_locker);
+ region->nlockers--;
+}
+
+/*
+ * __lock_getlocker --
+ * Get a locker in the locker hash table. The create parameter
+ * indicates if the locker should be created if it doesn't exist in
+ * the table.
+ *
+ * This must be called with the locker bucket locked.
+ *
+ * PUBLIC: int __lock_getlocker __P((DB_LOCKTAB *,
+ * PUBLIC: u_int32_t, u_int32_t, int, DB_LOCKER **));
+ */
+int
+__lock_getlocker(lt, locker, indx, create, retp)
+ DB_LOCKTAB *lt;
+ u_int32_t locker, indx;
+ int create;
+ DB_LOCKER **retp;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ HASHLOOKUP(lt->locker_tab,
+ indx, __db_locker, links, locker, sh_locker, __lock_locker_cmp);
+
+ /*
+ * If we found the locker, then we can just return it. If
+ * we didn't find the locker, then we need to create it.
+ */
+ if (sh_locker == NULL && create) {
+ /* Create new locker and then insert it into hash table. */
+ if ((sh_locker = SH_TAILQ_FIRST(
+ &region->free_lockers, __db_locker)) == NULL) {
+ __db_err(lt->dbenv, __db_lock_err, "locker entries");
+ return (ENOMEM);
+ }
+ SH_TAILQ_REMOVE(
+ &region->free_lockers, sh_locker, links, __db_locker);
+ if (++region->nlockers > region->maxnlockers)
+ region->maxnlockers = region->nlockers;
+
+ sh_locker->id = locker;
+ sh_locker->dd_id = 0;
+ sh_locker->master_locker = INVALID_ROFF;
+ sh_locker->parent_locker = INVALID_ROFF;
+ SH_LIST_INIT(&sh_locker->child_locker);
+ sh_locker->flags = 0;
+ SH_LIST_INIT(&sh_locker->heldby);
+
+ HASHINSERT(lt->locker_tab, indx, __db_locker, links, sh_locker);
+ }
+
+ *retp = sh_locker;
+ return (0);
+}
+
+/*
+ * __lock_getobj --
+ * Get an object in the object hash table. The create parameter
+ * indicates if the object should be created if it doesn't exist in
+ * the table.
+ *
+ * This must be called with the object bucket locked.
+ *
+ * PUBLIC: int __lock_getobj __P((DB_LOCKTAB *,
+ * PUBLIC: const DBT *, u_int32_t, int, DB_LOCKOBJ **));
+ */
+int
+__lock_getobj(lt, obj, ndx, create, retp)
+ DB_LOCKTAB *lt;
+ const DBT *obj;
+ u_int32_t ndx;
+ int create;
+ DB_LOCKOBJ **retp;
+{
+ DB_ENV *dbenv;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ int ret;
+ void *p;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ /* Look up the object in the hash table. */
+ HASHLOOKUP(lt->obj_tab,
+ ndx, __db_lockobj, links, obj, sh_obj, __lock_cmp);
+
+ /*
+ * If we found the object, then we can just return it. If
+ * we didn't find the object, then we need to create it.
+ */
+ if (sh_obj == NULL && create) {
+ /* Create new object and then insert it into hash table. */
+ if ((sh_obj =
+ SH_TAILQ_FIRST(&region->free_objs, __db_lockobj)) == NULL) {
+ __db_err(lt->dbenv, __db_lock_err, "object entries");
+ ret = ENOMEM;
+ goto err;
+ }
+
+ /*
+ * If we can fit this object in the structure, do so instead
+ * of shalloc-ing space for it.
+ */
+ if (obj->size <= sizeof(sh_obj->objdata))
+ p = sh_obj->objdata;
+ else if ((ret = __db_shalloc(
+ lt->reginfo.addr, obj->size, 0, &p)) != 0) {
+ __db_err(dbenv, "No space for lock object storage");
+ goto err;
+ }
+
+ memcpy(p, obj->data, obj->size);
+
+ SH_TAILQ_REMOVE(
+ &region->free_objs, sh_obj, links, __db_lockobj);
+ if (++region->nobjects > region->maxnobjects)
+ region->maxnobjects = region->nobjects;
+
+ SH_TAILQ_INIT(&sh_obj->waiters);
+ SH_TAILQ_INIT(&sh_obj->holders);
+ sh_obj->lockobj.size = obj->size;
+ sh_obj->lockobj.off = SH_PTR_TO_OFF(&sh_obj->lockobj, p);
+
+ HASHINSERT(lt->obj_tab, ndx, __db_lockobj, links, sh_obj);
+ }
+
+ *retp = sh_obj;
+ return (0);
+
+err: return (ret);
+}
+
+/*
+ * __lock_is_parent --
+ * Given a locker and a transaction, return 1 if the locker is
+ * an ancestor of the designated transaction.  This is used to determine
+ * if we should grant locks that appear to conflict, but don't because
+ * the lock is already held by an ancestor.
+ */
+static int
+__lock_is_parent(lt, locker, sh_locker)
+ DB_LOCKTAB *lt;
+ u_int32_t locker;
+ DB_LOCKER *sh_locker;
+{
+ DB_LOCKER *parent;
+
+ parent = sh_locker;
+ while (parent->parent_locker != INVALID_ROFF) {
+ parent = (DB_LOCKER *)
+ R_ADDR(&lt->reginfo, parent->parent_locker);
+ if (parent->id == locker)
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * __lock_promote --
+ *
+ * Look through the waiters and holders lists and decide which (if any)
+ * locks can be promoted. Promote any that are eligible.
+ *
+ * PUBLIC: int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, int));
+ */
+int
+__lock_promote(lt, obj, not_waiters)
+ DB_LOCKTAB *lt;
+ DB_LOCKOBJ *obj;
+ int not_waiters;
+{
+ struct __db_lock *lp_w, *lp_h, *next_waiter;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t locker_ndx;
+ int had_waiters, state_changed;
+
+ region = lt->reginfo.primary;
+ had_waiters = 0;
+
+ /*
+ * We need to do lock promotion. We also need to determine if we're
+ * going to need to run the deadlock detector again. If we release
+ * locks, and there are waiters, but no one gets promoted, then we
+ * haven't fundamentally changed the lockmgr state, so we may still
+ * have a deadlock and we have to run again. However, if there were
+ * no waiters, or we actually promoted someone, then we are OK and we
+ * don't have to run it immediately.
+ *
+ * During promotion, we look for state changes so we can return this
+ * information to the caller.
+ */
+
+ for (lp_w = SH_TAILQ_FIRST(&obj->waiters, __db_lock),
+ state_changed = lp_w == NULL;
+ lp_w != NULL;
+ lp_w = next_waiter) {
+ had_waiters = 1;
+ next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock);
+ /* Are we switching locks? */
+ if (not_waiters && lp_w->mode == DB_LOCK_WAIT)
+ continue;
+ for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock);
+ lp_h != NULL;
+ lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
+ if (lp_h->holder != lp_w->holder &&
+ CONFLICTS(lt, region, lp_h->mode, lp_w->mode)) {
+
+ LOCKER_LOCK(lt, region, lp_w->holder, locker_ndx);
+ if ((__lock_getlocker(lt, lp_w->holder,
+ locker_ndx, 0, &sh_locker)) != 0) {
+ DB_ASSERT(0);
+ break;
+ }
+ if (!__lock_is_parent(lt,
+ lp_h->holder, sh_locker))
+ break;
+ }
+ }
+ if (lp_h != NULL) /* Found a conflict. */
+ break;
+
+ /* No conflict, promote the waiting lock. */
+ SH_TAILQ_REMOVE(&obj->waiters, lp_w, links, __db_lock);
+ lp_w->status = DB_LSTAT_PENDING;
+ SH_TAILQ_INSERT_TAIL(&obj->holders, lp_w, links);
+
+ /* Wake up waiter. */
+ MUTEX_UNLOCK(lt->dbenv, &lp_w->mutex);
+ state_changed = 1;
+ }
+
+ /*
+	 * If this object had waiters and no longer has any, then we need
+	 * to remove it from the dd_objs list.
+ */
+ if (had_waiters && SH_TAILQ_FIRST(&obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs, obj, dd_links, __db_lockobj);
+ return (state_changed);
+}
+
+/*
+ * __lock_remove_waiter --
+ * Any lock on the waitlist has a process waiting for it. Therefore,
+ * we can't return the lock to the freelist immediately. Instead, we can
+ * remove the lock from the list of waiters, set the status field of the
+ * lock, and then let the process waking up return the lock to the
+ * free list.
+ *
+ * This must be called with the object bucket locked.
+ */
+static void
+__lock_remove_waiter(dbenv, sh_obj, lockp, status)
+ DB_ENV *dbenv;
+ DB_LOCKOBJ *sh_obj;
+ struct __db_lock *lockp;
+ db_status_t status;
+{
+ int do_wakeup;
+
+ do_wakeup = lockp->status == DB_LSTAT_WAITING;
+
+ SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
+ lockp->links.stqe_prev = -1;
+ lockp->status = status;
+
+ /*
+ * Wake whoever is waiting on this lock.
+ *
+ * The MUTEX_UNLOCK macro normally resolves to a single argument,
+ * keep the compiler quiet.
+ */
+ if (do_wakeup)
+ MUTEX_UNLOCK(dbenv, &lockp->mutex);
+}
+
+/*
+ * __lock_printlock --
+ *
+ * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+ */
+void
+__lock_printlock(lt, lp, ispgno)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lp;
+ int ispgno;
+{
+ DB_LOCKOBJ *lockobj;
+ db_pgno_t pgno;
+ u_int32_t *fidp;
+ u_int8_t *ptr, type;
+ const char *mode, *status;
+
+ switch (lp->mode) {
+ case DB_LOCK_IREAD:
+ mode = "IREAD";
+ break;
+ case DB_LOCK_IWR:
+ mode = "IWR";
+ break;
+ case DB_LOCK_IWRITE:
+ mode = "IWRITE";
+ break;
+ case DB_LOCK_NG:
+ mode = "NG";
+ break;
+ case DB_LOCK_READ:
+ mode = "READ";
+ break;
+ case DB_LOCK_WRITE:
+ mode = "WRITE";
+ break;
+ case DB_LOCK_WAIT:
+ mode = "WAIT";
+ break;
+ default:
+ mode = "UNKNOWN";
+ break;
+ }
+ switch (lp->status) {
+ case DB_LSTAT_ABORTED:
+ status = "ABORT";
+ break;
+ case DB_LSTAT_ERR:
+ status = "ERROR";
+ break;
+ case DB_LSTAT_FREE:
+ status = "FREE";
+ break;
+ case DB_LSTAT_HELD:
+ status = "HELD";
+ break;
+ case DB_LSTAT_NOGRANT:
+ status = "NONE";
+ break;
+ case DB_LSTAT_WAITING:
+ status = "WAIT";
+ break;
+ case DB_LSTAT_PENDING:
+ status = "PENDING";
+ break;
+ default:
+ status = "UNKNOWN";
+ break;
+ }
+ printf("\t%lx\t%s\t%lu\t%s\t",
+ (u_long)lp->holder, mode, (u_long)lp->refcount, status);
+
+ lockobj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+ ptr = SH_DBT_PTR(&lockobj->lockobj);
+ if (ispgno && lockobj->lockobj.size == sizeof(struct __db_ilock)) {
+		/* Assume this DBT holds a page/record (__db_ilock) lock. */
+ memcpy(&pgno, ptr, sizeof(db_pgno_t));
+ fidp = (u_int32_t *)(ptr + sizeof(db_pgno_t));
+ type = *(u_int8_t *)(ptr + sizeof(db_pgno_t) + DB_FILE_ID_LEN);
+ printf("%s %lu (%lu %lu %lu %lu %lu)\n",
+ type == DB_PAGE_LOCK ? "page" : "record",
+ (u_long)pgno,
+ (u_long)fidp[0], (u_long)fidp[1], (u_long)fidp[2],
+ (u_long)fidp[3], (u_long)fidp[4]);
+ } else {
+ printf("0x%lx ", (u_long)R_OFFSET(&lt->reginfo, lockobj));
+ __db_pr(ptr, lockobj->lockobj.size);
+ printf("\n");
+ }
+}
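
The following is a minimal usage sketch of the interfaces above, not part of
the original file.  It assumes the lock_id() interface whose tail appears at
the start of this hunk (a DB_ENV * and a u_int32_t * out-parameter), and uses
the DB_LOCKREQ fields (op, obj, mode, lock) that lock_vec() reads; the wrapper
name and the key are hypothetical.

	#include <string.h>

	#include "db.h"

	/*
	 * example_locking --
	 *	Take a write lock on an application key with lock_get()/
	 *	lock_put(), then show the equivalent batch release of every
	 *	lock held by the locker through lock_vec(), as transaction
	 *	commit/abort processing would.
	 */
	int
	example_locking(dbenv)
		DB_ENV *dbenv;
	{
		DBT obj;
		DB_LOCK lock;
		DB_LOCKREQ req;
		u_int32_t locker;
		int ret;

		/* Allocate a locker id (see lock_id() above). */
		if ((ret = lock_id(dbenv, &locker)) != 0)
			return (ret);

		memset(&obj, 0, sizeof(obj));
		obj.data = "example-key";
		obj.size = sizeof("example-key") - 1;

		/* Single lock: fail immediately rather than wait. */
		if ((ret = lock_get(dbenv,
		    locker, DB_LOCK_NOWAIT, &obj, DB_LOCK_WRITE, &lock)) != 0)
			return (ret);
		if ((ret = lock_put(dbenv, &lock)) != 0)
			return (ret);

		/* Batch form: release everything this locker still holds. */
		memset(&req, 0, sizeof(req));
		req.op = DB_LOCK_PUT_ALL;
		return (lock_vec(dbenv, locker, 0, &req, 1, NULL));
	}
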
diff --git a/bdb/lock/lock_conflict.c b/bdb/lock/lock_conflict.c
new file mode 100644
index 00000000000..2d7945fe201
--- /dev/null
+++ b/bdb/lock/lock_conflict.c
@@ -0,0 +1,34 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_conflict.c,v 11.6 2000/12/12 17:38:13 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * The conflict arrays are set up such that the row is the lock you
+ * are holding and the column is the lock that is desired.
+ */
+
+const u_int8_t db_riw_conflicts[] = {
+ /* N S X WT IX IS SIX */
+ /* N */ 0, 0, 0, 0, 0, 0, 0,
+ /* S */ 0, 0, 1, 0, 1, 0, 1,
+ /* X */ 0, 1, 1, 1, 1, 1, 1,
+ /* WT */ 0, 0, 0, 0, 0, 0, 0,
+ /* IX */ 0, 1, 1, 0, 0, 0, 0,
+ /* IS */ 0, 0, 1, 0, 0, 0, 0,
+ /* SIX */ 0, 1, 1, 0, 0, 0, 0
+};
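
A worked example of how lock.c consults this table.  The CONFLICTS() macro
itself is defined in lock.h and is not part of this diff; row-major indexing
(conflicts[held_mode * nmodes + requested_mode]) is assumed here, with the
row/column order shown above (N=0, S=1, X=2, WT=3, IX=4, IS=5, SIX=6) and
nmodes == 7:

	#include <assert.h>

	/* X held, S requested: entry is 1, so the request must wait. */
	assert(db_riw_conflicts[2 * 7 + 1] == 1);

	/* IS held, IX requested: entry is 0, so the lock can be granted. */
	assert(db_riw_conflicts[5 * 7 + 4] == 0);
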
diff --git a/bdb/lock/lock_deadlock.c b/bdb/lock/lock_deadlock.c
new file mode 100644
index 00000000000..1f37db3890e
--- /dev/null
+++ b/bdb/lock/lock_deadlock.c
@@ -0,0 +1,637 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_deadlock.c,v 11.23 2000/12/08 20:15:31 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "txn.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+#define ISSET_MAP(M, N) ((M)[(N) / 32] & (1 << (N) % 32))
+
+#define CLEAR_MAP(M, N) { \
+ u_int32_t __i; \
+ for (__i = 0; __i < (N); __i++) \
+ (M)[__i] = 0; \
+}
+
+#define SET_MAP(M, B) ((M)[(B) / 32] |= (1 << ((B) % 32)))
+#define CLR_MAP(M, B) ((M)[(B) / 32] &= ~(1 << ((B) % 32)))
+
+#define OR_MAP(D, S, N) { \
+ u_int32_t __i; \
+ for (__i = 0; __i < (N); __i++) \
+ D[__i] |= S[__i]; \
+}
+#define BAD_KILLID 0xffffffff
+
+typedef struct {
+ int valid;
+ u_int32_t id;
+ u_int32_t last_lock;
+ u_int32_t last_locker_id;
+ db_pgno_t pgno;
+} locker_info;
+
+static int __dd_abort __P((DB_ENV *, locker_info *));
+static int __dd_build
+ __P((DB_ENV *, u_int32_t **, u_int32_t *, locker_info **));
+static int __dd_find
+ __P((DB_ENV *,u_int32_t *, locker_info *, u_int32_t, u_int32_t ***));
+
+#ifdef DIAGNOSTIC
+static void __dd_debug __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t));
+#endif
+
+int
+lock_detect(dbenv, flags, atype, abortp)
+ DB_ENV *dbenv;
+ u_int32_t flags, atype;
+ int *abortp;
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ locker_info *idmap;
+ u_int32_t *bitmap, **deadp, **free_me, i, killid, nentries, nlockers;
+ int do_pass, ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_lock_detect(dbenv, flags, atype, abortp));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+
+ lt = dbenv->lk_handle;
+ if (abortp != NULL)
+ *abortp = 0;
+
+ /* Validate arguments. */
+ if ((ret =
+ __db_fchk(dbenv, "lock_detect", flags, DB_LOCK_CONFLICT)) != 0)
+ return (ret);
+
+ /* Check if a detector run is necessary. */
+ LOCKREGION(dbenv, lt);
+ if (LF_ISSET(DB_LOCK_CONFLICT)) {
+ /* Make a pass every time a lock waits. */
+ region = lt->reginfo.primary;
+ do_pass = region->need_dd != 0;
+
+ if (!do_pass) {
+ UNLOCKREGION(dbenv, lt);
+ return (0);
+ }
+ }
+
+ /* Build the waits-for bitmap. */
+ ret = __dd_build(dbenv, &bitmap, &nlockers, &idmap);
+ UNLOCKREGION(dbenv, lt);
+ if (ret != 0)
+ return (ret);
+
+ if (nlockers == 0)
+ return (0);
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_WAITSFOR))
+ __dd_debug(dbenv, idmap, bitmap, nlockers);
+#endif
+ /* Find a deadlock. */
+ if ((ret = __dd_find(dbenv, bitmap, idmap, nlockers, &deadp)) != 0)
+ return (ret);
+
+ nentries = ALIGN(nlockers, 32) / 32;
+ killid = BAD_KILLID;
+ free_me = deadp;
+ for (; *deadp != NULL; deadp++) {
+ if (abortp != NULL)
+ ++*abortp;
+ switch (atype) { /* Kill someone. */
+ case DB_LOCK_OLDEST:
+ /*
+ * Find the first bit set in the current
+ * array and then look for a lower tid in
+ * the array.
+ */
+ for (i = 0; i < nlockers; i++)
+ if (ISSET_MAP(*deadp, i)) {
+ killid = i;
+ break;
+
+ }
+ /*
+ * It's conceivable that under XA, the locker could
+ * have gone away.
+ */
+ if (killid == BAD_KILLID)
+ break;
+
+ /*
+ * The oldest transaction has the lowest
+ * transaction id.
+ */
+ for (i = killid + 1; i < nlockers; i++)
+ if (ISSET_MAP(*deadp, i) &&
+ idmap[i].id < idmap[killid].id)
+ killid = i;
+ break;
+ case DB_LOCK_DEFAULT:
+ case DB_LOCK_RANDOM:
+ /*
+			 * We are trying to calculate the index of the
+			 * locker whose entry is indicated by this deadlock.
+ */
+ killid = (*deadp - bitmap) / nentries;
+ break;
+ case DB_LOCK_YOUNGEST:
+ /*
+ * Find the first bit set in the current
+			 * array and then look for a higher tid in
+ * the array.
+ */
+ for (i = 0; i < nlockers; i++)
+ if (ISSET_MAP(*deadp, i)) {
+ killid = i;
+ break;
+ }
+
+ /*
+ * It's conceivable that under XA, the locker could
+ * have gone away.
+ */
+ if (killid == BAD_KILLID)
+ break;
+
+ /*
+ * The youngest transaction has the highest
+ * transaction id.
+ */
+ for (i = killid + 1; i < nlockers; i++)
+ if (ISSET_MAP(*deadp, i) &&
+ idmap[i].id > idmap[killid].id)
+ killid = i;
+ break;
+ default:
+ killid = BAD_KILLID;
+ ret = EINVAL;
+ }
+
+ if (killid == BAD_KILLID)
+ continue;
+
+ /* Kill the locker with lockid idmap[killid]. */
+ if ((ret = __dd_abort(dbenv, &idmap[killid])) != 0) {
+ /*
+ * It's possible that the lock was already aborted;
+ * this isn't necessarily a problem, so do not treat
+ * it as an error.
+ */
+ if (ret == DB_ALREADY_ABORTED)
+ ret = 0;
+ else
+ __db_err(dbenv,
+ "warning: unable to abort locker %lx",
+ (u_long)idmap[killid].id);
+ } else if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
+ __db_err(dbenv,
+ "Aborting locker %lx", (u_long)idmap[killid].id);
+ }
+ __os_free(free_me, 0);
+ __os_free(bitmap, 0);
+ __os_free(idmap, 0);
+
+ return (ret);
+}
+
+/*
+ * ========================================================================
+ * Utilities
+ */
+
+# define DD_INVALID_ID ((u_int32_t) -1)
+
+static int
+__dd_build(dbenv, bmp, nlockers, idmap)
+ DB_ENV *dbenv;
+ u_int32_t **bmp, *nlockers;
+ locker_info **idmap;
+{
+ struct __db_lock *lp;
+ DB_LOCKER *lip, *lockerp, *child;
+ DB_LOCKOBJ *op, *lo;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ locker_info *id_array;
+ u_int32_t *bitmap, count, dd, *entryp, i, id, ndx, nentries, *tmpmap;
+ u_int8_t *pptr;
+ int is_first, ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ /*
+ * We'll check how many lockers there are, add a few more in for
+ * good measure and then allocate all the structures. Then we'll
+ * verify that we have enough room when we go back in and get the
+ * mutex the second time.
+ */
+retry: count = region->nlockers;
+ region->need_dd = 0;
+
+ if (count == 0) {
+ *nlockers = 0;
+ return (0);
+ }
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
+ __db_err(dbenv, "%lu lockers", (u_long)count);
+
+ count += 40;
+ nentries = ALIGN(count, 32) / 32;
+
+ /*
+ * Allocate enough space for a count by count bitmap matrix.
+ *
+ * XXX
+	 * We could probably save the mallocs between iterations by
+	 * reallocing only when count has grown too much.
+ */
+ if ((ret = __os_calloc(dbenv, (size_t)count,
+ sizeof(u_int32_t) * nentries, &bitmap)) != 0)
+ return (ret);
+
+ if ((ret = __os_calloc(dbenv,
+ sizeof(u_int32_t), nentries, &tmpmap)) != 0) {
+ __os_free(bitmap, sizeof(u_int32_t) * nentries);
+ return (ret);
+ }
+
+ if ((ret = __os_calloc(dbenv,
+ (size_t)count, sizeof(locker_info), &id_array)) != 0) {
+ __os_free(bitmap, count * sizeof(u_int32_t) * nentries);
+ __os_free(tmpmap, sizeof(u_int32_t) * nentries);
+ return (ret);
+ }
+
+ /*
+ * Now go back in and actually fill in the matrix.
+ */
+ if (region->nlockers > count) {
+ __os_free(bitmap, count * sizeof(u_int32_t) * nentries);
+ __os_free(tmpmap, sizeof(u_int32_t) * nentries);
+ __os_free(id_array, count * sizeof(locker_info));
+ goto retry;
+ }
+
+ /*
+ * First we go through and assign each locker a deadlock detector id.
+ */
+ for (id = 0, i = 0; i < region->locker_t_size; i++) {
+ for (lip = SH_TAILQ_FIRST(&lt->locker_tab[i], __db_locker);
+ lip != NULL; lip = SH_TAILQ_NEXT(lip, links, __db_locker))
+ if (lip->master_locker == INVALID_ROFF) {
+ lip->dd_id = id++;
+ id_array[lip->dd_id].id = lip->id;
+ } else
+ lip->dd_id = DD_INVALID_ID;
+ }
+
+ /*
+ * We only need consider objects that have waiters, so we use
+ * the list of objects with waiters (dd_objs) instead of traversing
+ * the entire hash table. For each object, we traverse the waiters
+ * list and add an entry in the waitsfor matrix for each waiter/holder
+ * combination.
+ */
+ for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
+ op != NULL; op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj)) {
+ CLEAR_MAP(tmpmap, nentries);
+
+ /*
+ * First we go through and create a bit map that
+ * represents all the holders of this object.
+ */
+ for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ LOCKER_LOCK(lt, region, lp->holder, ndx);
+ if ((ret = __lock_getlocker(lt,
+ lp->holder, ndx, 0, &lockerp)) != 0)
+ continue;
+ if (lockerp->dd_id == DD_INVALID_ID)
+ dd = ((DB_LOCKER *)
+ R_ADDR(&lt->reginfo,
+ lockerp->master_locker))->dd_id;
+ else
+ dd = lockerp->dd_id;
+ id_array[dd].valid = 1;
+
+ /*
+ * If the holder has already been aborted, then
+ * we should ignore it for now.
+ */
+ if (lp->status == DB_LSTAT_HELD)
+ SET_MAP(tmpmap, dd);
+ }
+
+ /*
+ * Next, for each waiter, we set its row in the matrix
+ * equal to the map of holders we set up above.
+ */
+ for (is_first = 1,
+ lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
+ lp != NULL;
+ is_first = 0,
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ LOCKER_LOCK(lt, region, lp->holder, ndx);
+ if ((ret = __lock_getlocker(lt,
+ lp->holder, ndx, 0, &lockerp)) != 0)
+ continue;
+ if (lockerp->dd_id == DD_INVALID_ID)
+ dd = ((DB_LOCKER *)
+ R_ADDR(&lt->reginfo,
+ lockerp->master_locker))->dd_id;
+ else
+ dd = lockerp->dd_id;
+ id_array[dd].valid = 1;
+
+ /*
+ * If the transaction is pending abortion, then
+ * ignore it on this iteration.
+ */
+ if (lp->status != DB_LSTAT_WAITING)
+ continue;
+
+ entryp = bitmap + (nentries * dd);
+ OR_MAP(entryp, tmpmap, nentries);
+ /*
+ * If this is the first waiter on the queue,
+ * then we remove the waitsfor relationship
+			 * with itself.  However, if it's anywhere
+ * else on the queue, then we have to keep
+ * it and we have an automatic deadlock.
+ */
+ if (is_first)
+ CLR_MAP(entryp, dd);
+ }
+ }
+
+ /* Now for each locker; record its last lock. */
+ for (id = 0; id < count; id++) {
+ if (!id_array[id].valid)
+ continue;
+ LOCKER_LOCK(lt, region, id_array[id].id, ndx);
+ if ((ret = __lock_getlocker(lt,
+ id_array[id].id, ndx, 0, &lockerp)) != 0) {
+ __db_err(dbenv,
+ "No locks for locker %lu", (u_long)id_array[id].id);
+ continue;
+ }
+
+ /*
+ * If this is a master transaction, try to
+ * find one of its children's locks first,
+ * as they are probably more recent.
+ */
+ child = SH_LIST_FIRST(&lockerp->child_locker, __db_locker);
+ if (child != NULL) {
+ do {
+ lp = SH_LIST_FIRST(&child->heldby, __db_lock);
+ if (lp != NULL &&
+ lp->status == DB_LSTAT_WAITING) {
+ id_array[id].last_locker_id = child->id;
+ goto get_lock;
+ }
+ child = SH_LIST_NEXT(
+ child, child_link, __db_locker);
+ } while (child != NULL);
+ }
+ lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
+ if (lp != NULL) {
+ id_array[id].last_locker_id = lockerp->id;
+ get_lock: id_array[id].last_lock = R_OFFSET(&lt->reginfo, lp);
+ lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+ pptr = SH_DBT_PTR(&lo->lockobj);
+ if (lo->lockobj.size >= sizeof(db_pgno_t))
+ memcpy(&id_array[id].pgno,
+ pptr, sizeof(db_pgno_t));
+ else
+ id_array[id].pgno = 0;
+ }
+ }
+
+ /* Pass complete, reset the deadlock detector bit. */
+ region->need_dd = 0;
+
+ /*
+ * Now we can release everything except the bitmap matrix that we
+ * created.
+ */
+ *nlockers = id;
+ *idmap = id_array;
+ *bmp = bitmap;
+ __os_free(tmpmap, sizeof(u_int32_t) * nentries);
+ return (0);
+}
+
+static int
+__dd_find(dbenv, bmp, idmap, nlockers, deadp)
+ DB_ENV *dbenv;
+ u_int32_t *bmp, nlockers;
+ locker_info *idmap;
+ u_int32_t ***deadp;
+{
+ u_int32_t i, j, k, nentries, *mymap, *tmpmap;
+ u_int32_t **retp;
+ int ndead, ndeadalloc, ret;
+
+#undef INITIAL_DEAD_ALLOC
+#define INITIAL_DEAD_ALLOC 8
+
+ ndeadalloc = INITIAL_DEAD_ALLOC;
+ ndead = 0;
+ if ((ret = __os_malloc(dbenv,
+ ndeadalloc * sizeof(u_int32_t *), NULL, &retp)) != 0)
+ return (ret);
+
+ /*
+ * For each locker, OR in the bits from the lockers on which that
+ * locker is waiting.
+ */
+ nentries = ALIGN(nlockers, 32) / 32;
+ for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nentries) {
+ if (!idmap[i].valid)
+ continue;
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(mymap, j))
+ continue;
+
+ /* Find the map for this bit. */
+ tmpmap = bmp + (nentries * j);
+ OR_MAP(mymap, tmpmap, nentries);
+ if (!ISSET_MAP(mymap, i))
+ continue;
+
+ /* Make sure we leave room for NULL. */
+ if (ndead + 2 >= ndeadalloc) {
+ ndeadalloc <<= 1;
+ /*
+ * If the alloc fails, then simply return the
+ * deadlocks that we already have.
+ */
+ if (__os_realloc(dbenv,
+ ndeadalloc * sizeof(u_int32_t),
+ NULL, &retp) != 0) {
+ retp[ndead] = NULL;
+ *deadp = retp;
+ return (0);
+ }
+ }
+ retp[ndead++] = mymap;
+
+ /* Mark all participants in this deadlock invalid. */
+ for (k = 0; k < nlockers; k++)
+ if (ISSET_MAP(mymap, k))
+ idmap[k].valid = 0;
+ break;
+ }
+ }
+ retp[ndead] = NULL;
+ *deadp = retp;
+ return (0);
+}
+
+static int
+__dd_abort(dbenv, info)
+ DB_ENV *dbenv;
+ locker_info *info;
+{
+ struct __db_lock *lockp;
+ DB_LOCKER *lockerp;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+ /* Find the locker's last lock. */
+ LOCKER_LOCK(lt, region, info->last_locker_id, ndx);
+ if ((ret = __lock_getlocker(lt,
+ info->last_locker_id, ndx, 0, &lockerp)) != 0 || lockerp == NULL) {
+ if (ret == 0)
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+
+ lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
+
+ /*
+ * It's possible that this locker was already aborted. If that's
+	 * the case, make sure that we remove it from the hash table.
+ */
+ if (lockp == NULL) {
+ if (LOCKER_FREEABLE(lockerp)) {
+ __lock_freelocker(lt, region, lockerp, ndx);
+ goto out;
+ }
+ } else if (R_OFFSET(&lt->reginfo, lockp) != info->last_lock ||
+ lockp->status != DB_LSTAT_WAITING) {
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+
+ sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+ SH_LIST_REMOVE(lockp, locker_links, __db_lock);
+
+ /* Abort lock, take it off list, and wake up this lock. */
+ SHOBJECT_LOCK(lt, region, sh_obj, ndx);
+ lockp->status = DB_LSTAT_ABORTED;
+ SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
+
+ /*
+ * Either the waiters list is now empty, in which case we remove
+ * it from dd_objs, or it is not empty, in which case we need to
+ * do promotion.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+ else
+ ret = __lock_promote(lt, sh_obj, 0);
+ MUTEX_UNLOCK(dbenv, &lockp->mutex);
+
+ region->ndeadlocks++;
+ UNLOCKREGION(dbenv, lt);
+
+ return (0);
+
+out: UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+#ifdef DIAGNOSTIC
+static void
+__dd_debug(dbenv, idmap, bitmap, nlockers)
+ DB_ENV *dbenv;
+ locker_info *idmap;
+ u_int32_t *bitmap, nlockers;
+{
+ u_int32_t i, j, *mymap, nentries;
+ int ret;
+ char *msgbuf;
+
+ __db_err(dbenv, "Waitsfor array\nWaiter:\tWaiting on:");
+
+ /* Allocate space to print 10 bytes per item waited on. */
+#undef MSGBUF_LEN
+#define MSGBUF_LEN ((nlockers + 1) * 10 + 64)
+ if ((ret = __os_malloc(dbenv, MSGBUF_LEN, NULL, &msgbuf)) != 0)
+ return;
+
+ nentries = ALIGN(nlockers, 32) / 32;
+ for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nentries) {
+ if (!idmap[i].valid)
+ continue;
+ sprintf(msgbuf, /* Waiter. */
+ "%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno);
+ for (j = 0; j < nlockers; j++)
+ if (ISSET_MAP(mymap, j))
+ sprintf(msgbuf, "%s %lx", msgbuf,
+ (u_long)idmap[j].id);
+ (void)sprintf(msgbuf,
+ "%s %lu", msgbuf, (u_long)idmap[i].last_lock);
+ __db_err(dbenv, msgbuf);
+ }
+
+ __os_free(msgbuf, MSGBUF_LEN);
+}
+#endif
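
The cycle test in __dd_find() above amounts to computing a transitive closure
over the waits-for bitmap: each locker's row is OR'd with the rows of the
lockers it waits on, and a deadlock is declared when a locker ends up with its
own bit set.  The stand-alone sketch below reproduces that loop for three
lockers forming the cycle 0 -> 1 -> 2 -> 0; it uses plain unsigned ints instead
of the region's u_int32_t arrays and omits the idmap[].valid bookkeeping that
keeps each cycle from being reported more than once.

	#include <stdio.h>

	#define	NLOCKERS	3
	#define	NENTRIES	1		/* ALIGN(NLOCKERS, 32) / 32 */

	int
	main()
	{
		/* Bit j of row i means "locker i waits for locker j". */
		unsigned int bitmap[NLOCKERS][NENTRIES] = {
			{ 1 << 1 }, { 1 << 2 }, { 1 << 0 }
		};
		unsigned int i, j, k;

		for (i = 0; i < NLOCKERS; i++)
			for (j = 0; j < NLOCKERS; j++) {
				if (!(bitmap[i][j / 32] & (1 << (j % 32))))
					continue;
				/* OR in everything locker j waits for. */
				for (k = 0; k < NENTRIES; k++)
					bitmap[i][k] |= bitmap[j][k];
				/* Transitively waiting on itself: deadlock. */
				if (bitmap[i][i / 32] & (1 << (i % 32))) {
					printf("locker %u deadlocked\n", i);
					break;
				}
			}
		return (0);
	}
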
diff --git a/bdb/lock/lock_method.c b/bdb/lock/lock_method.c
new file mode 100644
index 00000000000..46ed9e5166f
--- /dev/null
+++ b/bdb/lock/lock_method.c
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_method.c,v 11.5 2000/12/21 19:16:42 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "lock.h"
+
+/*
+ * __lock_set_lk_conflicts
+ * Set the conflicts matrix.
+ *
+ * PUBLIC: int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
+ */
+int
+__lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
+ DB_ENV *dbenv;
+ u_int8_t *lk_conflicts;
+ int lk_modes;
+{
+ int ret;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_conflicts");
+
+ if (dbenv->lk_conflicts != NULL) {
+ __os_free(dbenv->lk_conflicts,
+ dbenv->lk_modes * dbenv->lk_modes);
+ dbenv->lk_conflicts = NULL;
+ }
+ if ((ret = __os_malloc(dbenv,
+ lk_modes * lk_modes, NULL, &dbenv->lk_conflicts)) != 0)
+ return (ret);
+ memcpy(dbenv->lk_conflicts, lk_conflicts, lk_modes * lk_modes);
+ dbenv->lk_modes = lk_modes;
+
+ return (0);
+}
+
+/*
+ * __lock_set_lk_detect
+ * Set the automatic deadlock detection.
+ *
+ * PUBLIC: int __lock_set_lk_detect __P((DB_ENV *, u_int32_t));
+ */
+int
+__lock_set_lk_detect(dbenv, lk_detect)
+ DB_ENV *dbenv;
+ u_int32_t lk_detect;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_detect");
+
+ switch (lk_detect) {
+ case DB_LOCK_DEFAULT:
+ case DB_LOCK_OLDEST:
+ case DB_LOCK_RANDOM:
+ case DB_LOCK_YOUNGEST:
+ break;
+ default:
+ return (EINVAL);
+ }
+ dbenv->lk_detect = lk_detect;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max
+ *	Set the lock table size (locks, lockers, and lock objects).
+ *
+ * PUBLIC: int __lock_set_lk_max __P((DB_ENV *, u_int32_t));
+ */
+int
+__lock_set_lk_max(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max");
+
+ dbenv->lk_max = lk_max;
+ dbenv->lk_max_objects = lk_max;
+ dbenv->lk_max_lockers = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_locks
+ *	Set the maximum number of locks.
+ *
+ * PUBLIC: int __lock_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+ */
+int
+__lock_set_lk_max_locks(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_locks");
+
+ dbenv->lk_max = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_lockers
+ *	Set the maximum number of lockers.
+ *
+ * PUBLIC: int __lock_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+ */
+int
+__lock_set_lk_max_lockers(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_lockers");
+
+ dbenv->lk_max_lockers = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_objects
+ *	Set the maximum number of lock objects.
+ *
+ * PUBLIC: int __lock_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+ */
+int
+__lock_set_lk_max_objects(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_objects");
+
+ dbenv->lk_max_objects = lk_max;
+ return (0);
+}
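
A hypothetical configuration sketch using the setters above.  db_env_create(),
DB_ENV->close() and DB_ENV->open() are assumed from the general DB_ENV
interface and are not part of this diff; the numbers are arbitrary.  The
ENV_ILLEGAL_AFTER_OPEN() checks are the reason all of these calls must be made
before the environment is opened (the method slots themselves are filled in by
__lock_dbenv_create() in lock_region.c below).

	#include "db.h"

	int
	configure_locking(home)
		const char *home;
	{
		DB_ENV *dbenv;
		int ret;

		if ((ret = db_env_create(&dbenv, 0)) != 0)
			return (ret);

		/* Each call dispatches to a __lock_set_lk_* function above. */
		if ((ret = dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0 ||
		    (ret = dbenv->set_lk_max_locks(dbenv, 10000)) != 0 ||
		    (ret = dbenv->set_lk_max_lockers(dbenv, 2000)) != 0 ||
		    (ret = dbenv->set_lk_max_objects(dbenv, 10000)) != 0) {
			(void)dbenv->close(dbenv, 0);
			return (ret);
		}

		return (dbenv->open(dbenv,
		    home, DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0));
	}
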
diff --git a/bdb/lock/lock_region.c b/bdb/lock/lock_region.c
new file mode 100644
index 00000000000..4bd4ee4b765
--- /dev/null
+++ b/bdb/lock/lock_region.c
@@ -0,0 +1,430 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_region.c,v 11.41 2000/12/20 21:53:04 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "lock.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __lock_init __P((DB_ENV *, DB_LOCKTAB *));
+static size_t
+ __lock_region_size __P((DB_ENV *));
+
+#ifdef MUTEX_SYSTEM_RESOURCES
+static size_t __lock_region_maint __P((DB_ENV *));
+#endif
+
+/*
+ * This conflict array is used for concurrent db access (CDB). It
+ * uses the same locks as the db_rw_conflict array, but adds an IW
+ * mode to be used for write cursors.
+ */
+#define DB_LOCK_CDB_N 5
+static u_int8_t const db_cdb_conflicts[] = {
+ /* N R W WT IW*/
+ /* N */ 0, 0, 0, 0, 0,
+ /* R */ 0, 0, 1, 0, 0,
+ /* W */ 0, 1, 1, 1, 1,
+ /* WT */ 0, 0, 0, 0, 0,
+ /* IW */ 0, 0, 1, 0, 1,
+};
+
+/*
+ * __lock_dbenv_create --
+ * Lock specific creation of the DB_ENV structure.
+ *
+ * PUBLIC: void __lock_dbenv_create __P((DB_ENV *));
+ */
+void
+__lock_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->lk_max = DB_LOCK_DEFAULT_N;
+ dbenv->lk_max_lockers = DB_LOCK_DEFAULT_N;
+ dbenv->lk_max_objects = DB_LOCK_DEFAULT_N;
+
+ dbenv->set_lk_conflicts = __lock_set_lk_conflicts;
+ dbenv->set_lk_detect = __lock_set_lk_detect;
+ dbenv->set_lk_max = __lock_set_lk_max;
+ dbenv->set_lk_max_locks = __lock_set_lk_max_locks;
+ dbenv->set_lk_max_lockers = __lock_set_lk_max_lockers;
+ dbenv->set_lk_max_objects = __lock_set_lk_max_objects;
+
+#ifdef HAVE_RPC
+ /*
+ * If we have a client, overwrite what we just set up to point
+ * to the client functions.
+ */
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_lk_conflicts = __dbcl_set_lk_conflict;
+ dbenv->set_lk_detect = __dbcl_set_lk_detect;
+ dbenv->set_lk_max = __dbcl_set_lk_max;
+ dbenv->set_lk_max_locks = __dbcl_set_lk_max_locks;
+ dbenv->set_lk_max_lockers = __dbcl_set_lk_max_lockers;
+ dbenv->set_lk_max_objects = __dbcl_set_lk_max_objects;
+ }
+#endif
+}
+
+/*
+ * __lock_dbenv_close --
+ * Lock specific destruction of the DB_ENV structure.
+ *
+ * PUBLIC: void __lock_dbenv_close __P((DB_ENV *));
+ */
+void
+__lock_dbenv_close(dbenv)
+ DB_ENV *dbenv;
+{
+ if (!F_ISSET(dbenv, DB_ENV_USER_ALLOC) && dbenv->lk_conflicts != NULL) {
+ __os_free(dbenv->lk_conflicts,
+ dbenv->lk_modes * dbenv->lk_modes);
+ dbenv->lk_conflicts = NULL;
+ }
+}
+
+/*
+ * __lock_open --
+ * Internal version of lock_open: only called from DB_ENV->open.
+ *
+ * PUBLIC: int __lock_open __P((DB_ENV *));
+ */
+int
+__lock_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ size_t size;
+ int ret;
+
+ /* Create the lock table structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOCKTAB), &lt)) != 0)
+ return (ret);
+ lt->dbenv = dbenv;
+
+ /* Join/create the lock region. */
+ lt->reginfo.type = REGION_TYPE_LOCK;
+ lt->reginfo.id = INVALID_REGION_ID;
+ lt->reginfo.mode = dbenv->db_mode;
+ lt->reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&lt->reginfo, REGION_CREATE_OK);
+ size = __lock_region_size(dbenv);
+ if ((ret = __db_r_attach(dbenv, &lt->reginfo, size)) != 0)
+ goto err;
+
+ /* If we created the region, initialize it. */
+ if (F_ISSET(&lt->reginfo, REGION_CREATE))
+ if ((ret = __lock_init(dbenv, lt)) != 0)
+ goto err;
+
+ /* Set the local addresses. */
+ region = lt->reginfo.primary =
+ R_ADDR(&lt->reginfo, lt->reginfo.rp->primary);
+
+ /* Check for incompatible automatic deadlock detection requests. */
+ if (dbenv->lk_detect != DB_LOCK_NORUN) {
+ if (region->detect != DB_LOCK_NORUN &&
+ dbenv->lk_detect != DB_LOCK_DEFAULT &&
+ region->detect != dbenv->lk_detect) {
+ __db_err(dbenv,
+ "lock_open: incompatible deadlock detector mode");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Upgrade if our caller wants automatic detection, and it
+		 * is not currently being done, whether or not we created
+ * the region.
+ */
+ if (region->detect == DB_LOCK_NORUN)
+ region->detect = dbenv->lk_detect;
+ }
+
+ /* Set remaining pointers into region. */
+ lt->conflicts = (u_int8_t *)R_ADDR(&lt->reginfo, region->conf_off);
+ lt->obj_tab = (DB_HASHTAB *)R_ADDR(&lt->reginfo, region->obj_off);
+ lt->locker_tab = (DB_HASHTAB *)R_ADDR(&lt->reginfo, region->locker_off);
+
+ R_UNLOCK(dbenv, &lt->reginfo);
+
+ dbenv->lk_handle = lt;
+ return (0);
+
+err: if (lt->reginfo.addr != NULL) {
+ if (F_ISSET(&lt->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+ R_UNLOCK(dbenv, &lt->reginfo);
+ (void)__db_r_detach(dbenv, &lt->reginfo, 0);
+ }
+ __os_free(lt, sizeof(*lt));
+ return (ret);
+}
+
+/*
+ * __lock_init --
+ * Initialize the lock region.
+ */
+static int
+__lock_init(dbenv, lt)
+ DB_ENV *dbenv;
+ DB_LOCKTAB *lt;
+{
+ const u_int8_t *lk_conflicts;
+ struct __db_lock *lp;
+ DB_LOCKER *lidp;
+ DB_LOCKOBJ *op;
+ DB_LOCKREGION *region;
+#ifdef MUTEX_SYSTEM_RESOURCES
+ size_t maint_size;
+#endif
+ u_int32_t i, lk_modes;
+ u_int8_t *addr;
+ int ret;
+
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKREGION), 0, &lt->reginfo.primary)) != 0)
+ goto mem_err;
+ lt->reginfo.rp->primary = R_OFFSET(&lt->reginfo, lt->reginfo.primary);
+ region = lt->reginfo.primary;
+ memset(region, 0, sizeof(*region));
+
+ /* Select a conflict matrix if none specified. */
+ if (dbenv->lk_modes == 0)
+ if (CDB_LOCKING(dbenv)) {
+ lk_modes = DB_LOCK_CDB_N;
+ lk_conflicts = db_cdb_conflicts;
+ } else {
+ lk_modes = DB_LOCK_RIW_N;
+ lk_conflicts = db_riw_conflicts;
+ }
+ else {
+ lk_modes = dbenv->lk_modes;
+ lk_conflicts = dbenv->lk_conflicts;
+ }
+
+ region->id = 0;
+ region->need_dd = 0;
+ region->detect = DB_LOCK_NORUN;
+ region->maxlocks = dbenv->lk_max;
+ region->maxlockers = dbenv->lk_max_lockers;
+ region->maxobjects = dbenv->lk_max_objects;
+ region->locker_t_size = __db_tablesize(dbenv->lk_max_lockers);
+ region->object_t_size = __db_tablesize(dbenv->lk_max_objects);
+ region->nmodes = lk_modes;
+ region->nlocks = 0;
+ region->maxnlocks = 0;
+ region->nlockers = 0;
+ region->maxnlockers = 0;
+ region->nobjects = 0;
+ region->maxnobjects = 0;
+ region->nconflicts = 0;
+ region->nrequests = 0;
+ region->nreleases = 0;
+ region->ndeadlocks = 0;
+
+ /* Allocate room for the conflict matrix and initialize it. */
+ if ((ret =
+ __db_shalloc(lt->reginfo.addr, lk_modes * lk_modes, 0, &addr)) != 0)
+ goto mem_err;
+ memcpy(addr, lk_conflicts, lk_modes * lk_modes);
+ region->conf_off = R_OFFSET(&lt->reginfo, addr);
+
+ /* Allocate room for the object hash table and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ region->object_t_size * sizeof(DB_HASHTAB), 0, &addr)) != 0)
+ goto mem_err;
+ __db_hashinit(addr, region->object_t_size);
+ region->obj_off = R_OFFSET(&lt->reginfo, addr);
+
+ /* Allocate room for the locker hash table and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ region->locker_t_size * sizeof(DB_HASHTAB), 0, &addr)) != 0)
+ goto mem_err;
+ __db_hashinit(addr, region->locker_t_size);
+ region->locker_off = R_OFFSET(&lt->reginfo, addr);
+
+#ifdef MUTEX_SYSTEM_RESOURCES
+ maint_size = __lock_region_maint(dbenv);
+ /* Allocate room for the locker maintenance info and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(REGMAINT) + maint_size, 0, &addr)) != 0)
+ goto mem_err;
+ __db_maintinit(&lt->reginfo, addr, maint_size);
+ region->maint_off = R_OFFSET(&lt->reginfo, addr);
+#endif
+
+ /*
+ * Initialize locks onto a free list. Initialize and lock the mutex
+ * so that when we need to block, all we need do is try to acquire
+ * the mutex.
+ */
+ SH_TAILQ_INIT(&region->free_locks);
+ for (i = 0; i < region->maxlocks; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(struct __db_lock), MUTEX_ALIGN, &lp)) != 0)
+ goto mem_err;
+ lp->status = DB_LSTAT_FREE;
+ if ((ret = __db_shmutex_init(dbenv, &lp->mutex,
+ R_OFFSET(&lt->reginfo, &lp->mutex) + DB_FCNTL_OFF_LOCK,
+ MUTEX_SELF_BLOCK, &lt->reginfo,
+ (REGMAINT *)R_ADDR(&lt->reginfo, region->maint_off))) != 0)
+ return (ret);
+ MUTEX_LOCK(dbenv, &lp->mutex, lt->dbenv->lockfhp);
+ SH_TAILQ_INSERT_HEAD(&region->free_locks, lp, links, __db_lock);
+ }
+
+ /* Initialize objects onto a free list. */
+ SH_TAILQ_INIT(&region->dd_objs);
+ SH_TAILQ_INIT(&region->free_objs);
+ for (i = 0; i < region->maxobjects; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKOBJ), 0, &op)) != 0)
+ goto mem_err;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_objs, op, links, __db_lockobj);
+ }
+
+ /* Initialize lockers onto a free list. */
+ SH_TAILQ_INIT(&region->free_lockers);
+ for (i = 0; i < region->maxlockers; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKER), 0, &lidp)) != 0) {
+mem_err: __db_err(dbenv, "Unable to allocate memory for the lock table");
+ return (ret);
+ }
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_lockers, lidp, links, __db_locker);
+ }
+
+ return (0);
+}
+
+/*
+ * __lock_close --
+ * Internal version of lock_close: only called from db_appinit.
+ *
+ * PUBLIC: int __lock_close __P((DB_ENV *));
+ */
+int
+__lock_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOCKTAB *lt;
+ int ret;
+
+ lt = dbenv->lk_handle;
+
+ /* Detach from the region. */
+ ret = __db_r_detach(dbenv, &lt->reginfo, 0);
+
+ __os_free(lt, sizeof(*lt));
+
+ dbenv->lk_handle = NULL;
+ return (ret);
+}
+
+/*
+ * __lock_region_size --
+ * Return the region size.
+ */
+static size_t
+__lock_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t retval;
+
+ /*
+ * Figure out how much space we're going to need. This list should
+ * map one-to-one with the __db_shalloc calls in __lock_init.
+ */
+ retval = 0;
+ retval += __db_shalloc_size(sizeof(DB_LOCKREGION), 1);
+ retval += __db_shalloc_size(dbenv->lk_modes * dbenv->lk_modes, 1);
+ retval += __db_shalloc_size(
+ __db_tablesize(dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 1);
+ retval += __db_shalloc_size(
+ __db_tablesize(dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 1);
+#ifdef MUTEX_SYSTEM_RESOURCES
+ retval +=
+ __db_shalloc_size(sizeof(REGMAINT) + __lock_region_maint(dbenv), 1);
+#endif
+ retval += __db_shalloc_size(
+ sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max;
+ retval += __db_shalloc_size(sizeof(DB_LOCKOBJ), 1) * dbenv->lk_max_objects;
+ retval += __db_shalloc_size(sizeof(DB_LOCKER), 1) * dbenv->lk_max_lockers;
+
+ /*
+ * Include 16 bytes of string space per lock. DB doesn't use it
+ * because we pre-allocate lock space for DBTs in the structure.
+ */
+ retval += __db_shalloc_size(dbenv->lk_max * 16, sizeof(size_t));
+
+ /* And we keep getting this wrong, let's be generous. */
+ retval += retval / 4;
+
+ return (retval);
+}
+
+#ifdef MUTEX_SYSTEM_RESOURCES
+/*
+ * __lock_region_maint --
+ * Return the amount of space needed for region maintenance info.
+ */
+static size_t
+__lock_region_maint(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ s = sizeof(MUTEX *) * dbenv->lk_max;
+ return (s);
+}
+#endif
+
+/*
+ * __lock_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __lock_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__lock_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ DB_LOCKREGION *region;
+
+ COMPQUIET(dbenv, NULL);
+ region = R_ADDR(infop, infop->rp->primary);
+
+ __db_shlocks_destroy(infop,
+ (REGMAINT *)R_ADDR(infop, region->maint_off));
+ return;
+}
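
The set_lk_* function pointers installed by __lock_dbenv_create above are the application-visible configuration hooks, and __lock_open later checks the requested deadlock-detector mode against the region. The sketch below shows one way an application might use them before DB_ENV->open; the particular limits, flag set, and helper name are illustrative assumptions, not taken from this diff.

	#include "db.h"

	int
	open_env_with_lock_limits(const char *home)
	{
		DB_ENV *dbenv;
		int ret;

		if ((ret = db_env_create(&dbenv, 0)) != 0)
			return (ret);

		/* Override the DB_LOCK_DEFAULT_N defaults set in __lock_dbenv_create. */
		/* Error checks omitted for brevity in this sketch. */
		(void)dbenv->set_lk_max_locks(dbenv, 5000);
		(void)dbenv->set_lk_max_lockers(dbenv, 2000);
		(void)dbenv->set_lk_max_objects(dbenv, 5000);

		/* Request automatic deadlock detection; __lock_open validates the mode. */
		(void)dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT);

		return (dbenv->open(dbenv, home,
		    DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0));
	}
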
diff --git a/bdb/lock/lock_stat.c b/bdb/lock/lock_stat.c
new file mode 100644
index 00000000000..ed5b60d0d7a
--- /dev/null
+++ b/bdb/lock/lock_stat.c
@@ -0,0 +1,308 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_stat.c,v 11.4 2000/12/08 20:15:31 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "lock.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKER *, FILE *));
+static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
+static const char *
+ __lock_dump_status __P((db_status_t));
+
+/*
+ * lock_stat --
+ * Return LOCK statistics.
+ */
+int
+lock_stat(dbenv, statp, db_malloc)
+ DB_ENV *dbenv;
+ DB_LOCK_STAT **statp;
+ void *(*db_malloc) __P((size_t));
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ DB_LOCK_STAT *stats;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_lock_stat(dbenv, statp, db_malloc));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+
+ *statp = NULL;
+
+ lt = dbenv->lk_handle;
+
+ if ((ret = __os_malloc(dbenv, sizeof(*stats), db_malloc, &stats)) != 0)
+ return (ret);
+
+ /* Copy out the global statistics. */
+ R_LOCK(dbenv, &lt->reginfo);
+
+ region = lt->reginfo.primary;
+ stats->st_lastid = region->id;
+ stats->st_maxlocks = region->maxlocks;
+ stats->st_maxlockers = region->maxlockers;
+ stats->st_maxobjects = region->maxobjects;
+ stats->st_nmodes = region->nmodes;
+ stats->st_nlockers = region->nlockers;
+ stats->st_maxnlockers = region->maxnlockers;
+ stats->st_nobjects = region->nobjects;
+ stats->st_maxnobjects = region->maxnobjects;
+ stats->st_nlocks = region->nlocks;
+ stats->st_maxnlocks = region->maxnlocks;
+ stats->st_nconflicts = region->nconflicts;
+ stats->st_nrequests = region->nrequests;
+ stats->st_nreleases = region->nreleases;
+ stats->st_nnowaits = region->nnowaits;
+ stats->st_ndeadlocks = region->ndeadlocks;
+
+ stats->st_region_wait = lt->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = lt->reginfo.rp->mutex.mutex_set_nowait;
+ stats->st_regsize = lt->reginfo.rp->size;
+
+ R_UNLOCK(dbenv, &lt->reginfo);
+
+ *statp = stats;
+ return (0);
+}
+
+#define LOCK_DUMP_CONF 0x001 /* Conflict matrix. */
+#define LOCK_DUMP_FREE 0x002 /* Display lock free list. */
+#define LOCK_DUMP_LOCKERS 0x004 /* Display lockers. */
+#define LOCK_DUMP_MEM 0x008 /* Display region memory. */
+#define LOCK_DUMP_OBJECTS 0x010 /* Display objects. */
+#define LOCK_DUMP_ALL 0x01f /* Display all. */
+
+/*
+ * __lock_dump_region --
+ *
+ * PUBLIC: void __lock_dump_region __P((DB_ENV *, char *, FILE *));
+ */
+void
+__lock_dump_region(dbenv, area, fp)
+ DB_ENV *dbenv;
+ char *area;
+ FILE *fp;
+{
+ struct __db_lock *lp;
+ DB_LOCKER *lip;
+ DB_LOCKOBJ *op;
+ DB_LOCKREGION *lrp;
+ DB_LOCKTAB *lt;
+ u_int32_t flags, i, j;
+ int label;
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ for (flags = 0; *area != '\0'; ++area)
+ switch (*area) {
+ case 'A':
+ LF_SET(LOCK_DUMP_ALL);
+ break;
+ case 'c':
+ LF_SET(LOCK_DUMP_CONF);
+ break;
+ case 'f':
+ LF_SET(LOCK_DUMP_FREE);
+ break;
+ case 'l':
+ LF_SET(LOCK_DUMP_LOCKERS);
+ break;
+ case 'm':
+ LF_SET(LOCK_DUMP_MEM);
+ break;
+ case 'o':
+ LF_SET(LOCK_DUMP_OBJECTS);
+ break;
+ }
+
+ lt = dbenv->lk_handle;
+ lrp = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+ fprintf(fp, "%s\nLock region parameters\n", DB_LINE);
+ fprintf(fp, "%s: %lu, %s: %lu, %s: %lu, %s: %lu, %s: %lu, %s: %lu, %s: %lu\n",
+ "locker table size", (u_long)lrp->locker_t_size,
+ "object table size", (u_long)lrp->object_t_size,
+ "obj_off", (u_long)lrp->obj_off,
+ "osynch_off", (u_long)lrp->osynch_off,
+ "locker_off", (u_long)lrp->locker_off,
+ "lsynch_off", (u_long)lrp->lsynch_off,
+ "need_dd", (u_long)lrp->need_dd);
+
+ if (LF_ISSET(LOCK_DUMP_CONF)) {
+ fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE);
+ for (i = 0; i < lrp->nmodes; i++) {
+ for (j = 0; j < lrp->nmodes; j++)
+ fprintf(fp, "%lu\t",
+ (u_long)lt->conflicts[i * lrp->nmodes + j]);
+ fprintf(fp, "\n");
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_LOCKERS)) {
+ fprintf(fp, "%s\nLocker hash buckets\n", DB_LINE);
+ for (i = 0; i < lrp->locker_t_size; i++) {
+ label = 1;
+ for (lip =
+ SH_TAILQ_FIRST(&lt->locker_tab[i], __db_locker);
+ lip != NULL;
+ lip = SH_TAILQ_NEXT(lip, links, __db_locker)) {
+ if (label) {
+ fprintf(fp, "Bucket %lu:\n", (u_long)i);
+ label = 0;
+ }
+ __lock_dump_locker(lt, lip, fp);
+ }
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_OBJECTS)) {
+ fprintf(fp, "%s\nObject hash buckets\n", DB_LINE);
+ for (i = 0; i < lrp->object_t_size; i++) {
+ label = 1;
+ for (op = SH_TAILQ_FIRST(&lt->obj_tab[i], __db_lockobj);
+ op != NULL;
+ op = SH_TAILQ_NEXT(op, links, __db_lockobj)) {
+ if (label) {
+ fprintf(fp, "Bucket %lu:\n", (u_long)i);
+ label = 0;
+ }
+ __lock_dump_object(lt, op, fp);
+ }
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_FREE)) {
+ fprintf(fp, "%s\nLock free list\n", DB_LINE);
+ for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ fprintf(fp, "0x%lx: %lu\t%lu\t%s\t0x%lx\n", (u_long)lp,
+ (u_long)lp->holder, (u_long)lp->mode,
+ __lock_dump_status(lp->status), (u_long)lp->obj);
+
+ fprintf(fp, "%s\nObject free list\n", DB_LINE);
+ for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
+ op != NULL;
+ op = SH_TAILQ_NEXT(op, links, __db_lockobj))
+ fprintf(fp, "0x%lx\n", (u_long)op);
+
+ fprintf(fp, "%s\nLocker free list\n", DB_LINE);
+ for (lip = SH_TAILQ_FIRST(&lrp->free_lockers, __db_locker);
+ lip != NULL;
+ lip = SH_TAILQ_NEXT(lip, links, __db_locker))
+ fprintf(fp, "0x%lx\n", (u_long)lip);
+ }
+
+ if (LF_ISSET(LOCK_DUMP_MEM))
+ __db_shalloc_dump(lt->reginfo.addr, fp);
+
+ UNLOCKREGION(dbenv, lt);
+}
+
+static void
+__lock_dump_locker(lt, lip, fp)
+ DB_LOCKTAB *lt;
+ DB_LOCKER *lip;
+ FILE *fp;
+{
+ struct __db_lock *lp;
+
+ fprintf(fp, "L %lx [%ld]", (u_long)lip->id, (long)lip->dd_id);
+ fprintf(fp, " %s ", F_ISSET(lip, DB_LOCKER_DELETED) ? "(D)" : " ");
+
+ if ((lp = SH_LIST_FIRST(&lip->heldby, __db_lock)) == NULL)
+ fprintf(fp, "\n");
+ else
+ for (; lp != NULL;
+ lp = SH_LIST_NEXT(lp, locker_links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+}
+
+static void
+__lock_dump_object(lt, op, fp)
+ DB_LOCKTAB *lt;
+ DB_LOCKOBJ *op;
+ FILE *fp;
+{
+ struct __db_lock *lp;
+ u_int32_t j;
+ u_int8_t *ptr;
+ u_int ch;
+
+ ptr = SH_DBT_PTR(&op->lockobj);
+ for (j = 0; j < op->lockobj.size; ptr++, j++) {
+ ch = *ptr;
+ fprintf(fp, isprint(ch) ? "%c" : "\\%o", ch);
+ }
+ fprintf(fp, "\n");
+
+ fprintf(fp, "H:");
+ for (lp =
+ SH_TAILQ_FIRST(&op->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+ lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
+ if (lp != NULL) {
+ fprintf(fp, "\nW:");
+ for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+ }
+}
+
+static const char *
+__lock_dump_status(status)
+ db_status_t status;
+{
+ switch (status) {
+ case DB_LSTAT_ABORTED:
+ return ("aborted");
+ case DB_LSTAT_ERR:
+ return ("err");
+ case DB_LSTAT_FREE:
+ return ("free");
+ case DB_LSTAT_HELD:
+ return ("held");
+ case DB_LSTAT_NOGRANT:
+ return ("nogrant");
+ case DB_LSTAT_PENDING:
+ return ("pending");
+ case DB_LSTAT_WAITING:
+ return ("waiting");
+ }
+ return ("unknown status");
+}
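
A short sketch of consuming lock_stat() above; the fields printed are among those copied out under the region lock, and releasing the result with free() assumes the caller passed no db_malloc function.

	#include <stdio.h>
	#include <stdlib.h>
	#include "db.h"

	int
	print_lock_stats(DB_ENV *dbenv)
	{
		DB_LOCK_STAT *sp;
		int ret;

		if ((ret = lock_stat(dbenv, &sp, NULL)) != 0)
			return (ret);
		printf("locks %lu/%lu, lockers %lu/%lu, deadlocks %lu\n",
		    (unsigned long)sp->st_nlocks, (unsigned long)sp->st_maxlocks,
		    (unsigned long)sp->st_nlockers, (unsigned long)sp->st_maxlockers,
		    (unsigned long)sp->st_ndeadlocks);
		free(sp);		/* single allocation returned by lock_stat */
		return (0);
	}
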
diff --git a/bdb/lock/lock_util.c b/bdb/lock/lock_util.c
new file mode 100644
index 00000000000..fd5c6ad90cb
--- /dev/null
+++ b/bdb/lock/lock_util.c
@@ -0,0 +1,138 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_util.c,v 11.5 2000/07/04 18:28:24 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "hash.h"
+#include "lock.h"
+
+/*
+ * __lock_cmp --
+ * This function is used to compare a DBT that is about to be entered
+ * into a hash table with an object already in the hash table. Note
+ *	that it just returns 1 (true) on equal and 0 on not-equal.  Therefore
+ * this function cannot be used as a sort function; its purpose is to
+ * be used as a hash comparison function.
+ *
+ * PUBLIC: int __lock_cmp __P((const DBT *, DB_LOCKOBJ *));
+ */
+int
+__lock_cmp(dbt, lock_obj)
+ const DBT *dbt;
+ DB_LOCKOBJ *lock_obj;
+{
+ void *obj_data;
+
+ obj_data = SH_DBT_PTR(&lock_obj->lockobj);
+ return (dbt->size == lock_obj->lockobj.size &&
+ memcmp(dbt->data, obj_data, dbt->size) == 0);
+}
+
+/*
+ * PUBLIC: int __lock_locker_cmp __P((u_int32_t, DB_LOCKER *));
+ */
+int
+__lock_locker_cmp(locker, sh_locker)
+ u_int32_t locker;
+ DB_LOCKER *sh_locker;
+{
+ return (locker == sh_locker->id);
+}
+
+/*
+ * The next two functions are the hash functions used to store objects in the
+ * lock hash tables. They are hashing the same items, but one (__lock_ohash)
+ * takes a DBT (used for hashing a parameter passed from the user) and the
+ * other (__lock_lhash) takes a DB_LOCKOBJ (used for hashing something that is
+ * already in the lock manager). In both cases, we have a special check to
+ * fast path the case where we think we are doing a hash on a DB page/fileid
+ * pair. If the size is right, then we do the fast hash.
+ *
+ * We know that DB uses DB_LOCK_ILOCK types for its lock objects. The first
+ * four bytes are the 4-byte page number and the next DB_FILE_ID_LEN bytes
+ * are a unique file id, where the first 4 bytes on UNIX systems are the file
+ * inode number, and the first 4 bytes on Windows systems are the FileIndexLow
+ * bytes. So, we use the XOR of the page number and the first four bytes of
+ * the file id to produce a 32-bit hash value.
+ *
+ * We have no particular reason to believe that this algorithm will produce
+ * a good hash, but we want a fast hash more than we want a good one, when
+ * we're coming through this code path.
+ */
+#define FAST_HASH(P) { \
+ u_int32_t __h; \
+ u_int8_t *__cp, *__hp; \
+ __hp = (u_int8_t *)&__h; \
+ __cp = (u_int8_t *)(P); \
+ __hp[0] = __cp[0] ^ __cp[4]; \
+ __hp[1] = __cp[1] ^ __cp[5]; \
+ __hp[2] = __cp[2] ^ __cp[6]; \
+ __hp[3] = __cp[3] ^ __cp[7]; \
+ return (__h); \
+}
+
+/*
+ * __lock_ohash --
+ *
+ * PUBLIC: u_int32_t __lock_ohash __P((const DBT *));
+ */
+u_int32_t
+__lock_ohash(dbt)
+ const DBT *dbt;
+{
+ if (dbt->size == sizeof(DB_LOCK_ILOCK))
+ FAST_HASH(dbt->data);
+
+ return (__ham_func5(NULL, dbt->data, dbt->size));
+}
+
+/*
+ * __lock_lhash --
+ *
+ * PUBLIC: u_int32_t __lock_lhash __P((DB_LOCKOBJ *));
+ */
+u_int32_t
+__lock_lhash(lock_obj)
+ DB_LOCKOBJ *lock_obj;
+{
+ void *obj_data;
+
+ obj_data = SH_DBT_PTR(&lock_obj->lockobj);
+
+ if (lock_obj->lockobj.size == sizeof(DB_LOCK_ILOCK))
+ FAST_HASH(obj_data);
+
+ return (__ham_func5(NULL, obj_data, lock_obj->lockobj.size));
+}
+
+/*
+ * __lock_locker_hash --
+ * Hash function for entering lockers into the locker hash table.
+ * Since these are simply 32-bit unsigned integers, just return
+ * the locker value.
+ *
+ * PUBLIC: u_int32_t __lock_locker_hash __P((u_int32_t));
+ */
+u_int32_t
+__lock_locker_hash(locker)
+ u_int32_t locker;
+{
+ return (locker);
+}
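
To make the FAST_HASH comment above concrete, here is the same byte-wise XOR written out as a plain function over the first eight bytes of a DB_LOCK_ILOCK-shaped key (the 4-byte page number followed by the file id); this is a restatement for illustration, not code from the diff.

	#include <sys/types.h>

	static u_int32_t
	ilock_fast_hash(const u_int8_t *p)	/* p points at >= 8 bytes */
	{
		u_int32_t h;
		u_int8_t *hp;

		hp = (u_int8_t *)&h;
		hp[0] = p[0] ^ p[4];	/* page-number bytes ^ first fileid bytes */
		hp[1] = p[1] ^ p[5];
		hp[2] = p[2] ^ p[6];
		hp[3] = p[3] ^ p[7];
		return (h);
	}
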
diff --git a/bdb/log/log.c b/bdb/log/log.c
new file mode 100644
index 00000000000..69af1624824
--- /dev/null
+++ b/bdb/log/log.c
@@ -0,0 +1,653 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log.c,v 11.42 2001/01/15 16:42:37 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "log.h"
+#include "db_dispatch.h"
+#include "txn.h"
+#include "txn_auto.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __log_init __P((DB_ENV *, DB_LOG *));
+static int __log_recover __P((DB_LOG *));
+
+/*
+ * __log_open --
+ * Internal version of log_open: only called from DB_ENV->open.
+ *
+ * PUBLIC: int __log_open __P((DB_ENV *));
+ */
+int
+__log_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int ret;
+ u_int8_t *readbufp;
+
+ readbufp = NULL;
+
+ /* Create/initialize the DB_LOG structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOG), &dblp)) != 0)
+ return (ret);
+ if ((ret = __os_calloc(dbenv, 1, dbenv->lg_bsize, &readbufp)) != 0)
+ goto err;
+ ZERO_LSN(dblp->c_lsn);
+ dblp->dbenv = dbenv;
+
+ /* Join/create the log region. */
+ dblp->reginfo.type = REGION_TYPE_LOG;
+ dblp->reginfo.id = INVALID_REGION_ID;
+ dblp->reginfo.mode = dbenv->db_mode;
+ dblp->reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&dblp->reginfo, REGION_CREATE_OK);
+ if ((ret = __db_r_attach(
+ dbenv, &dblp->reginfo, LG_BASE_REGION_SIZE + dbenv->lg_bsize)) != 0)
+ goto err;
+
+ dblp->readbufp = readbufp;
+
+ /* If we created the region, initialize it. */
+ if (F_ISSET(&dblp->reginfo, REGION_CREATE) &&
+ (ret = __log_init(dbenv, dblp)) != 0)
+ goto err;
+
+ /* Set the local addresses. */
+ lp = dblp->reginfo.primary =
+ R_ADDR(&dblp->reginfo, dblp->reginfo.rp->primary);
+ dblp->bufp = R_ADDR(&dblp->reginfo, lp->buffer_off);
+
+ /*
+ * If the region is threaded, then we have to lock both the handles
+ * and the region, and we need to allocate a mutex for that purpose.
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ if ((ret = __db_mutex_alloc(
+ dbenv, &dblp->reginfo, &dblp->mutexp)) != 0)
+ goto err;
+ if ((ret = __db_mutex_init(
+ dbenv, dblp->mutexp, 0, MUTEX_THREAD)) != 0)
+ goto err;
+ }
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ dblp->r_file = 0;
+ dblp->r_off = 0;
+ dblp->r_size = 0;
+ dbenv->lg_handle = dblp;
+ return (0);
+
+err: if (dblp->reginfo.addr != NULL) {
+ if (F_ISSET(&dblp->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ (void)__db_r_detach(dbenv, &dblp->reginfo, 0);
+ }
+
+ if (readbufp != NULL)
+ __os_free(readbufp, dbenv->lg_bsize);
+ if (dblp->mutexp != NULL)
+ __db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);
+ __os_free(dblp, sizeof(*dblp));
+ return (ret);
+}
+
+/*
+ * __log_init --
+ * Initialize a log region in shared memory.
+ */
+static int
+__log_init(dbenv, dblp)
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+{
+ LOG *region;
+ int ret;
+ void *p;
+
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ sizeof(*region), 0, &dblp->reginfo.primary)) != 0)
+ goto mem_err;
+ dblp->reginfo.rp->primary =
+ R_OFFSET(&dblp->reginfo, dblp->reginfo.primary);
+ region = dblp->reginfo.primary;
+ memset(region, 0, sizeof(*region));
+
+ region->persist.lg_max = dbenv->lg_max;
+ region->persist.magic = DB_LOGMAGIC;
+ region->persist.version = DB_LOGVERSION;
+ region->persist.mode = dbenv->db_mode;
+ SH_TAILQ_INIT(&region->fq);
+
+ /* Initialize LOG LSNs. */
+ region->lsn.file = 1;
+ region->lsn.offset = 0;
+
+ /* Initialize the buffer. */
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr, dbenv->lg_bsize, 0, &p)) != 0) {
+mem_err: __db_err(dbenv, "Unable to allocate memory for the log buffer");
+ return (ret);
+ }
+ region->buffer_size = dbenv->lg_bsize;
+ region->buffer_off = R_OFFSET(&dblp->reginfo, p);
+
+ /* Try and recover any previous log files before releasing the lock. */
+ return (__log_recover(dblp));
+}
+
+/*
+ * __log_recover --
+ * Recover a log.
+ */
+static int
+__log_recover(dblp)
+ DB_LOG *dblp;
+{
+ DBT dbt;
+ DB_LSN lsn;
+ LOG *lp;
+ int cnt, found_checkpoint, ret;
+ u_int32_t chk;
+ logfile_validity status;
+
+ lp = dblp->reginfo.primary;
+
+ /*
+ * Find a log file. If none exist, we simply return, leaving
+ * everything initialized to a new log.
+ */
+ if ((ret = __log_find(dblp, 0, &cnt, &status)) != 0)
+ return (ret);
+ if (cnt == 0)
+ return (0);
+
+ /*
+ * If the last file is an old version, readable or no, start a new
+ * file. Don't bother finding checkpoints; if we didn't take a
+ * checkpoint right before upgrading, the user screwed up anyway.
+ */
+ if (status == DB_LV_OLD_READABLE || status == DB_LV_OLD_UNREADABLE) {
+ lp->lsn.file = lp->s_lsn.file = cnt + 1;
+ lp->lsn.offset = lp->s_lsn.offset = 0;
+ goto skipsearch;
+ }
+ DB_ASSERT(status == DB_LV_NORMAL);
+
+ /*
+ * We have the last useful log file and we've loaded any persistent
+ * information. Set the end point of the log past the end of the last
+ * file. Read the last file, looking for the last checkpoint and
+ * the log's end.
+ */
+ lp->lsn.file = cnt + 1;
+ lp->lsn.offset = 0;
+ lsn.file = cnt;
+ lsn.offset = 0;
+
+ /* Set the cursor. Shouldn't fail; leave error messages on. */
+ memset(&dbt, 0, sizeof(dbt));
+ if ((ret = __log_get(dblp, &lsn, &dbt, DB_SET, 0)) != 0)
+ return (ret);
+
+ /*
+ * Read to the end of the file, saving checkpoints. This will fail
+ * at some point, so turn off error messages.
+ */
+ found_checkpoint = 0;
+ while (__log_get(dblp, &lsn, &dbt, DB_NEXT, 1) == 0) {
+ if (dbt.size < sizeof(u_int32_t))
+ continue;
+ memcpy(&chk, dbt.data, sizeof(u_int32_t));
+ if (chk == DB_txn_ckp) {
+ lp->chkpt_lsn = lsn;
+ found_checkpoint = 1;
+ }
+ }
+
+ /*
+ * We now know where the end of the log is. Set the first LSN that
+ * we want to return to an application and the LSN of the last known
+ * record on disk.
+ */
+ lp->lsn = lsn;
+ lp->s_lsn = lsn;
+ lp->lsn.offset += dblp->c_len;
+ lp->s_lsn.offset += dblp->c_len;
+
+ /* Set up the current buffer information, too. */
+ lp->len = dblp->c_len;
+ lp->b_off = 0;
+ lp->w_off = lp->lsn.offset;
+
+ /*
+ * It's possible that we didn't find a checkpoint because there wasn't
+ * one in the last log file. Start searching.
+ */
+ if (!found_checkpoint && cnt > 1) {
+ lsn.file = cnt;
+ lsn.offset = 0;
+
+ /* Set the cursor. Shouldn't fail, leave error messages on. */
+ if ((ret = __log_get(dblp, &lsn, &dbt, DB_SET, 0)) != 0)
+ return (ret);
+
+ /*
+ * Read to the end of the file, saving checkpoints. Again,
+ * this can fail if there are no checkpoints in any log file,
+ * so turn error messages off.
+ */
+ while (__log_get(dblp, &lsn, &dbt, DB_PREV, 1) == 0) {
+ if (dbt.size < sizeof(u_int32_t))
+ continue;
+ memcpy(&chk, dbt.data, sizeof(u_int32_t));
+ if (chk == DB_txn_ckp) {
+ lp->chkpt_lsn = lsn;
+ found_checkpoint = 1;
+ break;
+ }
+ }
+ }
+
+ /* If we never find a checkpoint, that's okay, just 0 it out. */
+ if (!found_checkpoint)
+skipsearch: ZERO_LSN(lp->chkpt_lsn);
+
+ /*
+ * Reset the cursor lsn to the beginning of the log, so that an
+ * initial call to DB_NEXT does the right thing.
+ */
+ ZERO_LSN(dblp->c_lsn);
+
+ if (FLD_ISSET(dblp->dbenv->verbose, DB_VERB_RECOVERY))
+ __db_err(dblp->dbenv,
+ "Finding last valid log LSN: file: %lu offset %lu",
+ (u_long)lp->lsn.file, (u_long)lp->lsn.offset);
+
+ return (0);
+}
+
+/*
+ * __log_find --
+ * Try to find a log file. If find_first is set, valp will contain
+ * the number of the first readable log file, else it will contain the number
+ * of the last log file (which may be too old to read).
+ *
+ * PUBLIC: int __log_find __P((DB_LOG *, int, int *, logfile_validity *));
+ */
+int
+__log_find(dblp, find_first, valp, statusp)
+ DB_LOG *dblp;
+ int find_first, *valp;
+ logfile_validity *statusp;
+{
+ logfile_validity clv_status, status;
+ u_int32_t clv, logval;
+ int cnt, fcnt, ret;
+ const char *dir;
+ char **names, *p, *q, savech;
+
+ clv_status = status = DB_LV_NORMAL;
+
+ /* Return a value of 0 as the log file number on failure. */
+ *valp = 0;
+
+ /* Find the directory name. */
+ if ((ret = __log_name(dblp, 1, &p, NULL, 0)) != 0)
+ return (ret);
+ if ((q = __db_rpath(p)) == NULL) {
+ COMPQUIET(savech, 0);
+ dir = PATH_DOT;
+ } else {
+ savech = *q;
+ *q = '\0';
+ dir = p;
+ }
+
+ /* Get the list of file names. */
+ ret = __os_dirlist(dblp->dbenv, dir, &names, &fcnt);
+
+ /*
+ * !!!
+ * We overwrote a byte in the string with a nul. Restore the string
+ * so that the diagnostic checks in the memory allocation code work
+ * and any error messages display the right file name.
+ */
+ if (q != NULL)
+ *q = savech;
+
+ if (ret != 0) {
+ __db_err(dblp->dbenv, "%s: %s", dir, db_strerror(ret));
+ __os_freestr(p);
+ return (ret);
+ }
+
+ /* Search for a valid log file name. */
+ for (cnt = fcnt, clv = logval = 0; --cnt >= 0;) {
+ if (strncmp(names[cnt], LFPREFIX, sizeof(LFPREFIX) - 1) != 0)
+ continue;
+
+ /*
+ * Use atol, not atoi; if an "int" is 16-bits, the largest
+ * log file name won't fit.
+ */
+ clv = atol(names[cnt] + (sizeof(LFPREFIX) - 1));
+ if (find_first) {
+ if (logval != 0 && clv > logval)
+ continue;
+ } else
+ if (logval != 0 && clv < logval)
+ continue;
+
+ /*
+ * Take note of whether the log file logval is
+ * an old version or incompletely initialized.
+ */
+ if ((ret = __log_valid(dblp, clv, 1, &status)) != 0)
+ goto err;
+ switch (status) {
+ case DB_LV_INCOMPLETE:
+ /*
+ * It's acceptable for the last log file to
+ * have been incompletely initialized--it's possible
+ * to create a log file but not write anything to it,
+ * and recovery needs to gracefully handle this.
+ *
+ * Just ignore it; we don't want to return this
+ * as a valid log file.
+ */
+ break;
+ case DB_LV_NORMAL:
+ case DB_LV_OLD_READABLE:
+ logval = clv;
+ clv_status = status;
+ break;
+ case DB_LV_OLD_UNREADABLE:
+ /*
+ * Continue; we want the oldest valid log,
+ * and clv is too old to be useful. We don't
+ * want it to supplant logval if we're looking for
+ * the oldest valid log, but we do want to return
+ * it if it's the last log file--we want the very
+ * last file number, so that our caller can
+ * start a new file after it.
+ *
+ * The code here assumes that there will never
+ * be a too-old log that's preceded by a log
+ * of the current version, but in order to
+ * attain that state of affairs the user
+ * would have had to really seriously screw
+ * up; I think we can safely assume this won't
+ * happen.
+ */
+ if (!find_first) {
+ logval = clv;
+ clv_status = status;
+ }
+ break;
+ }
+ }
+
+ *valp = logval;
+
+err: __os_dirfree(names, fcnt);
+ __os_freestr(p);
+ *statusp = clv_status;
+
+ return (ret);
+}
+
+/*
+ * __log_valid --
+ *	Validate a log file.  Returns an error code in the event of
+ *	a fatal flaw in the specified log file; returns success with
+ * a code indicating the currentness and completeness of the specified
+ * log file if it is not unexpectedly flawed (that is, if it's perfectly
+ * normal, if it's zero-length, or if it's an old version).
+ *
+ * PUBLIC: int __log_valid __P((DB_LOG *, u_int32_t, int, logfile_validity *));
+ */
+int
+__log_valid(dblp, number, set_persist, statusp)
+ DB_LOG *dblp;
+ u_int32_t number;
+ int set_persist;
+ logfile_validity *statusp;
+{
+ DB_FH fh;
+ LOG *region;
+ LOGP persist;
+ char *fname;
+ int ret;
+ logfile_validity status;
+ size_t nw;
+
+ status = DB_LV_NORMAL;
+
+ /* Try to open the log file. */
+ if ((ret = __log_name(dblp,
+ number, &fname, &fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
+ __os_freestr(fname);
+ return (ret);
+ }
+
+ /* Try to read the header. */
+ if ((ret =
+ __os_seek(dblp->dbenv,
+ &fh, 0, 0, sizeof(HDR), 0, DB_OS_SEEK_SET)) != 0 ||
+ (ret =
+ __os_read(dblp->dbenv, &fh, &persist, sizeof(LOGP), &nw)) != 0 ||
+ nw != sizeof(LOGP)) {
+ if (ret == 0)
+ status = DB_LV_INCOMPLETE;
+ else
+ /*
+ * The error was a fatal read error, not just an
+ * incompletely initialized log file.
+ */
+ __db_err(dblp->dbenv, "Ignoring log file: %s: %s",
+ fname, db_strerror(ret));
+
+ (void)__os_closehandle(&fh);
+ goto err;
+ }
+ (void)__os_closehandle(&fh);
+
+ /* Validate the header. */
+ if (persist.magic != DB_LOGMAGIC) {
+ __db_err(dblp->dbenv,
+ "Ignoring log file: %s: magic number %lx, not %lx",
+ fname, (u_long)persist.magic, (u_long)DB_LOGMAGIC);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Set our status code to indicate whether the log file
+ * belongs to an unreadable or readable old version; leave it
+ * alone if and only if the log file version is the current one.
+ */
+ if (persist.version > DB_LOGVERSION) {
+ /* This is a fatal error--the log file is newer than DB. */
+ __db_err(dblp->dbenv,
+ "Ignoring log file: %s: unsupported log version %lu",
+ fname, (u_long)persist.version);
+ ret = EINVAL;
+ goto err;
+ } else if (persist.version < DB_LOGOLDVER) {
+ status = DB_LV_OLD_UNREADABLE;
+ /*
+ * We don't want to set persistent info based on an
+ * unreadable region, so jump to "err".
+ */
+ goto err;
+ } else if (persist.version < DB_LOGVERSION)
+ status = DB_LV_OLD_READABLE;
+
+ /*
+ * If the log is thus far readable and we're doing system
+ * initialization, set the region's persistent information
+ * based on the headers.
+ */
+ if (set_persist) {
+ region = dblp->reginfo.primary;
+ region->persist.lg_max = persist.lg_max;
+ region->persist.mode = persist.mode;
+ }
+
+err: __os_freestr(fname);
+ *statusp = status;
+ return (ret);
+}
+
+/*
+ * __log_close --
+ * Internal version of log_close: only called from dbenv_refresh.
+ *
+ * PUBLIC: int __log_close __P((DB_ENV *));
+ */
+int
+__log_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ int ret, t_ret;
+
+ ret = 0;
+ dblp = dbenv->lg_handle;
+
+ /* We may have opened files as part of XA; if so, close them. */
+ F_SET(dblp, DBLOG_RECOVER);
+ __log_close_files(dbenv);
+
+ /* Discard the per-thread lock. */
+ if (dblp->mutexp != NULL)
+ __db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);
+
+ /* Detach from the region. */
+ ret = __db_r_detach(dbenv, &dblp->reginfo, 0);
+
+ /* Close open files, release allocated memory. */
+ if (F_ISSET(&dblp->lfh, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(&dblp->lfh)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dblp->c_dbt.data != NULL)
+ __os_free(dblp->c_dbt.data, dblp->c_dbt.ulen);
+ if (F_ISSET(&dblp->c_fh, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(&dblp->c_fh)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dblp->dbentry != NULL)
+ __os_free(dblp->dbentry,
+ (dblp->dbentry_cnt * sizeof(DB_ENTRY)));
+ if (dblp->readbufp != NULL)
+ __os_free(dblp->readbufp, dbenv->lg_bsize);
+
+ __os_free(dblp, sizeof(*dblp));
+
+ dbenv->lg_handle = NULL;
+ return (ret);
+}
+
+/*
+ * log_stat --
+ * Return LOG statistics.
+ */
+int
+log_stat(dbenv, statp, db_malloc)
+ DB_ENV *dbenv;
+ DB_LOG_STAT **statp;
+ void *(*db_malloc) __P((size_t));
+{
+ DB_LOG *dblp;
+ DB_LOG_STAT *stats;
+ LOG *region;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_log_stat(dbenv, statp, db_malloc));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+
+ *statp = NULL;
+
+ dblp = dbenv->lg_handle;
+ region = dblp->reginfo.primary;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(DB_LOG_STAT), db_malloc, &stats)) != 0)
+ return (ret);
+
+ /* Copy out the global statistics. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ *stats = region->stat;
+
+ stats->st_magic = region->persist.magic;
+ stats->st_version = region->persist.version;
+ stats->st_mode = region->persist.mode;
+ stats->st_lg_bsize = region->buffer_size;
+ stats->st_lg_max = region->persist.lg_max;
+
+ stats->st_region_wait = dblp->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = dblp->reginfo.rp->mutex.mutex_set_nowait;
+ stats->st_regsize = dblp->reginfo.rp->size;
+
+ stats->st_cur_file = region->lsn.file;
+ stats->st_cur_offset = region->lsn.offset;
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ *statp = stats;
+ return (0);
+}
+
+/*
+ * __log_lastckp --
+ * Return the current chkpt_lsn, so that we can store it in
+ * the transaction region and keep the chain of checkpoints
+ * unbroken across environment recreates.
+ *
+ * PUBLIC: int __log_lastckp __P((DB_ENV *, DB_LSN *));
+ */
+int
+__log_lastckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ LOG *lp;
+
+ lp = (LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary);
+
+ *lsnp = lp->chkpt_lsn;
+ return (0);
+}
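
The checkpoint scan in __log_recover above is one instance of the general log-cursor pattern; the following is a hedged sketch of the application-level equivalent using log_get() with DB_FIRST/DB_NEXT (the same flags __build_data uses later in this diff). The helper name and the choice to treat DB_NOTFOUND as end-of-log are assumptions.

	#include <string.h>
	#include "db.h"

	int
	count_log_records(DB_ENV *dbenv, unsigned long *countp)
	{
		DBT rec;
		DB_LSN lsn;
		int ret;

		memset(&rec, 0, sizeof(rec));
		*countp = 0;
		for (ret = log_get(dbenv, &lsn, &rec, DB_FIRST);
		    ret == 0; ret = log_get(dbenv, &lsn, &rec, DB_NEXT))
			++*countp;
		return (ret == DB_NOTFOUND ? 0 : ret);	/* DB_NOTFOUND ends the scan */
	}
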
diff --git a/bdb/log/log.src b/bdb/log/log.src
new file mode 100644
index 00000000000..a92fae8de26
--- /dev/null
+++ b/bdb/log/log.src
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: log.src,v 10.12 2000/02/17 20:24:10 bostic Exp $
+ */
+
+PREFIX log
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "db_page.h"
+INCLUDE #include "db_dispatch.h"
+INCLUDE #include "db_am.h"
+INCLUDE #include "log.h"
+INCLUDE #include "txn.h"
+INCLUDE
+
+/* Used for registering name/id translations at open or close. */
+DEPRECATED register1 1
+ARG opcode u_int32_t lu
+DBT name DBT s
+DBT uid DBT s
+ARG fileid int32_t ld
+ARG ftype DBTYPE lx
+END
+
+BEGIN register 2
+ARG opcode u_int32_t lu
+DBT name DBT s
+DBT uid DBT s
+ARG fileid int32_t ld
+ARG ftype DBTYPE lx
+ARG meta_pgno db_pgno_t lu
+END
diff --git a/bdb/log/log_archive.c b/bdb/log/log_archive.c
new file mode 100644
index 00000000000..83728c79e55
--- /dev/null
+++ b/bdb/log/log_archive.c
@@ -0,0 +1,447 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_archive.c,v 11.13 2000/11/30 00:58:40 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_dispatch.h"
+#include "log.h"
+#include "clib_ext.h" /* XXX: needed for getcwd. */
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __absname __P((DB_ENV *, char *, char *, char **));
+static int __build_data __P((DB_ENV *, char *, char ***, void *(*)(size_t)));
+static int __cmpfunc __P((const void *, const void *));
+static int __usermem __P((DB_ENV *, char ***, void *(*)(size_t)));
+
+/*
+ * log_archive --
+ * Supporting function for db_archive(1).
+ */
+int
+log_archive(dbenv, listp, flags, db_malloc)
+ DB_ENV *dbenv;
+ char ***listp;
+ u_int32_t flags;
+ void *(*db_malloc) __P((size_t));
+{
+ DBT rec;
+ DB_LOG *dblp;
+ DB_LSN stable_lsn;
+ u_int32_t fnum;
+ int array_size, n, ret;
+ char **array, **arrayp, *name, *p, *pref, buf[MAXPATHLEN];
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_log_archive(dbenv, listp, flags, db_malloc));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+
+ name = NULL;
+ dblp = dbenv->lg_handle;
+ COMPQUIET(fnum, 0);
+
+#define OKFLAGS (DB_ARCH_ABS | DB_ARCH_DATA | DB_ARCH_LOG)
+ if (flags != 0) {
+ if ((ret =
+ __db_fchk(dbenv, "log_archive", flags, OKFLAGS)) != 0)
+ return (ret);
+ if ((ret =
+ __db_fcchk(dbenv,
+ "log_archive", flags, DB_ARCH_DATA, DB_ARCH_LOG)) != 0)
+ return (ret);
+ }
+
+ /*
+ * Get the absolute pathname of the current directory. It would
+ * be nice to get the shortest pathname of the database directory,
+ * but that's just not possible.
+ *
+ * XXX
+ * Can't trust getcwd(3) to set a valid errno. If it doesn't, just
+ * guess that we ran out of memory.
+ */
+ if (LF_ISSET(DB_ARCH_ABS)) {
+ __os_set_errno(0);
+ if ((pref = getcwd(buf, sizeof(buf))) == NULL) {
+ if (__os_get_errno() == 0)
+ __os_set_errno(ENOMEM);
+ return (__os_get_errno());
+ }
+ } else
+ pref = NULL;
+
+ switch (LF_ISSET(~DB_ARCH_ABS)) {
+ case DB_ARCH_DATA:
+ return (__build_data(dbenv, pref, listp, db_malloc));
+ case DB_ARCH_LOG:
+ memset(&rec, 0, sizeof(rec));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&rec, DB_DBT_MALLOC);
+ if ((ret = log_get(dbenv, &stable_lsn, &rec, DB_LAST)) != 0)
+ return (ret);
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ __os_free(rec.data, rec.size);
+ fnum = stable_lsn.file;
+ break;
+ case 0:
+ if ((ret = __log_findckp(dbenv, &stable_lsn)) != 0) {
+ /*
+ * A return of DB_NOTFOUND means that we didn't find
+ * any records in the log (so we are not going to be
+ * deleting any log files).
+ */
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ *listp = NULL;
+ return (0);
+ }
+ /* Remove any log files before the last stable LSN. */
+ fnum = stable_lsn.file - 1;
+ break;
+ }
+
+#define LIST_INCREMENT 64
+ /* Get some initial space. */
+ array_size = 10;
+ if ((ret = __os_malloc(dbenv,
+ sizeof(char *) * array_size, NULL, &array)) != 0)
+ return (ret);
+ array[0] = NULL;
+
+ /* Build an array of the file names. */
+ for (n = 0; fnum > 0; --fnum) {
+ if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0)
+ goto err;
+ if (__os_exists(name, NULL) != 0) {
+ if (LF_ISSET(DB_ARCH_LOG) && fnum == stable_lsn.file)
+ continue;
+ __os_freestr(name);
+ name = NULL;
+ break;
+ }
+
+ if (n >= array_size - 1) {
+ array_size += LIST_INCREMENT;
+ if ((ret = __os_realloc(dbenv,
+ sizeof(char *) * array_size, NULL, &array)) != 0)
+ goto err;
+ }
+
+ if (LF_ISSET(DB_ARCH_ABS)) {
+ if ((ret = __absname(dbenv,
+ pref, name, &array[n])) != 0)
+ goto err;
+ __os_freestr(name);
+ } else if ((p = __db_rpath(name)) != NULL) {
+ if ((ret = __os_strdup(dbenv, p + 1, &array[n])) != 0)
+ goto err;
+ __os_freestr(name);
+ } else
+ array[n] = name;
+
+ name = NULL;
+ array[++n] = NULL;
+ }
+
+ /* If there's nothing to return, we're done. */
+ if (n == 0) {
+ *listp = NULL;
+ ret = 0;
+ goto err;
+ }
+
+ /* Sort the list. */
+ qsort(array, (size_t)n, sizeof(char *), __cmpfunc);
+
+ /* Rework the memory. */
+ if ((ret = __usermem(dbenv, &array, db_malloc)) != 0)
+ goto err;
+
+ *listp = array;
+ return (0);
+
+err: if (array != NULL) {
+ for (arrayp = array; *arrayp != NULL; ++arrayp)
+ __os_freestr(*arrayp);
+ __os_free(array, sizeof(char *) * array_size);
+ }
+ if (name != NULL)
+ __os_freestr(name);
+ return (ret);
+}
+
+/*
+ * __build_data --
+ * Build a list of datafiles for return.
+ */
+static int
+__build_data(dbenv, pref, listp, db_malloc)
+ DB_ENV *dbenv;
+ char *pref, ***listp;
+ void *(*db_malloc) __P((size_t));
+{
+ DBT rec;
+ DB_LSN lsn;
+ __log_register_args *argp;
+ u_int32_t rectype;
+ int array_size, last, n, nxt, ret;
+ char **array, **arrayp, *p, *real_name;
+
+ /* Get some initial space. */
+ array_size = 10;
+ if ((ret = __os_malloc(dbenv,
+ sizeof(char *) * array_size, NULL, &array)) != 0)
+ return (ret);
+ array[0] = NULL;
+
+ memset(&rec, 0, sizeof(rec));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&rec, DB_DBT_MALLOC);
+ for (n = 0, ret = log_get(dbenv, &lsn, &rec, DB_FIRST);
+ ret == 0; ret = log_get(dbenv, &lsn, &rec, DB_NEXT)) {
+ if (rec.size < sizeof(rectype)) {
+ ret = EINVAL;
+ __db_err(dbenv, "log_archive: bad log record");
+ goto lg_free;
+ }
+
+ memcpy(&rectype, rec.data, sizeof(rectype));
+ if (rectype != DB_log_register) {
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ __os_free(rec.data, rec.size);
+ rec.data = NULL;
+ }
+ continue;
+ }
+ if ((ret = __log_register_read(dbenv, rec.data, &argp)) != 0) {
+ ret = EINVAL;
+ __db_err(dbenv,
+ "log_archive: unable to read log record");
+ goto lg_free;
+ }
+
+ if (n >= array_size - 1) {
+ array_size += LIST_INCREMENT;
+ if ((ret = __os_realloc(dbenv,
+ sizeof(char *) * array_size, NULL, &array)) != 0)
+ goto lg_free;
+ }
+
+ if ((ret = __os_strdup(dbenv,
+ argp->name.data, &array[n])) != 0) {
+lg_free: if (F_ISSET(&rec, DB_DBT_MALLOC) && rec.data != NULL)
+ __os_free(rec.data, rec.size);
+ goto err1;
+ }
+
+ array[++n] = NULL;
+ __os_free(argp, 0);
+
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ __os_free(rec.data, rec.size);
+ rec.data = NULL;
+ }
+ }
+
+ /* If there's nothing to return, we're done. */
+ if (n == 0) {
+ ret = 0;
+ *listp = NULL;
+ goto err1;
+ }
+
+ /* Sort the list. */
+ qsort(array, (size_t)n, sizeof(char *), __cmpfunc);
+
+ /*
+ * Build the real pathnames, discarding nonexistent files and
+ * duplicates.
+ */
+ for (last = nxt = 0; nxt < n;) {
+ /*
+ * Discard duplicates. Last is the next slot we're going
+ * to return to the user, nxt is the next slot that we're
+ * going to consider.
+ */
+ if (last != nxt) {
+ array[last] = array[nxt];
+ array[nxt] = NULL;
+ }
+ for (++nxt; nxt < n &&
+ strcmp(array[last], array[nxt]) == 0; ++nxt) {
+ __os_freestr(array[nxt]);
+ array[nxt] = NULL;
+ }
+
+ /* Get the real name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, array[last], 0, NULL, &real_name)) != 0)
+ goto err2;
+
+ /* If the file doesn't exist, ignore it. */
+ if (__os_exists(real_name, NULL) != 0) {
+ __os_freestr(real_name);
+ __os_freestr(array[last]);
+ array[last] = NULL;
+ continue;
+ }
+
+ /* Rework the name as requested by the user. */
+ __os_freestr(array[last]);
+ array[last] = NULL;
+ if (pref != NULL) {
+ ret = __absname(dbenv, pref, real_name, &array[last]);
+ __os_freestr(real_name);
+ if (ret != 0)
+ goto err2;
+ } else if ((p = __db_rpath(real_name)) != NULL) {
+ ret = __os_strdup(dbenv, p + 1, &array[last]);
+ __os_freestr(real_name);
+ if (ret != 0)
+ goto err2;
+ } else
+ array[last] = real_name;
+ ++last;
+ }
+
+ /* NULL-terminate the list. */
+ array[last] = NULL;
+
+ /* Rework the memory. */
+ if ((ret = __usermem(dbenv, &array, db_malloc)) != 0)
+ goto err1;
+
+ *listp = array;
+ return (0);
+
+err2: /*
+ * XXX
+ * We've possibly inserted NULLs into the array list, so clean up a
+ * bit so that the other error processing works.
+ */
+ if (array != NULL)
+ for (; nxt < n; ++nxt)
+ __os_freestr(array[nxt]);
+ /* FALLTHROUGH */
+
+err1: if (array != NULL) {
+ for (arrayp = array; *arrayp != NULL; ++arrayp)
+ __os_freestr(*arrayp);
+ __os_free(array, array_size * sizeof(char *));
+ }
+ return (ret);
+}
+
+/*
+ * __absname --
+ * Return an absolute path name for the file.
+ */
+static int
+__absname(dbenv, pref, name, newnamep)
+ DB_ENV *dbenv;
+ char *pref, *name, **newnamep;
+{
+ size_t l_pref, l_name;
+ int isabspath, ret;
+ char *newname;
+
+ l_name = strlen(name);
+ isabspath = __os_abspath(name);
+ l_pref = isabspath ? 0 : strlen(pref);
+
+ /* Malloc space for concatenating the two. */
+ if ((ret = __os_malloc(dbenv,
+ l_pref + l_name + 2, NULL, &newname)) != 0)
+ return (ret);
+ *newnamep = newname;
+
+ /* Build the name. If `name' is an absolute path, ignore any prefix. */
+ if (!isabspath) {
+ memcpy(newname, pref, l_pref);
+ if (strchr(PATH_SEPARATOR, newname[l_pref - 1]) == NULL)
+ newname[l_pref++] = PATH_SEPARATOR[0];
+ }
+ memcpy(newname + l_pref, name, l_name + 1);
+
+ return (0);
+}
+
+/*
+ * __usermem --
+ * Create a single chunk of memory that holds the returned information.
+ * If the user has their own malloc routine, use it.
+ */
+static int
+__usermem(dbenv, listp, db_malloc)
+ DB_ENV *dbenv;
+ char ***listp;
+ void *(*db_malloc) __P((size_t));
+{
+ size_t len;
+ int ret;
+ char **array, **arrayp, **orig, *strp;
+
+ /* Find out how much space we need. */
+ for (len = 0, orig = *listp; *orig != NULL; ++orig)
+ len += sizeof(char *) + strlen(*orig) + 1;
+ len += sizeof(char *);
+
+ /* Allocate it and set up the pointers. */
+ if ((ret = __os_malloc(dbenv, len, db_malloc, &array)) != 0)
+ return (ret);
+
+ strp = (char *)(array + (orig - *listp) + 1);
+
+ /* Copy the original information into the new memory. */
+ for (orig = *listp, arrayp = array; *orig != NULL; ++orig, ++arrayp) {
+ len = strlen(*orig);
+ memcpy(strp, *orig, len + 1);
+ *arrayp = strp;
+ strp += len + 1;
+
+ __os_freestr(*orig);
+ }
+
+ /* NULL-terminate the list. */
+ *arrayp = NULL;
+
+ __os_free(*listp, 0);
+ *listp = array;
+
+ return (0);
+}
+
+static int
+__cmpfunc(p1, p2)
+ const void *p1, *p2;
+{
+ return (strcmp(*((char * const *)p1), *((char * const *)p2)));
+}
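
A sketch of how the list built by log_archive()/__usermem above is consumed: because __usermem packs the pointer array and the strings into one allocation, a single free() releases the whole list (assuming the caller passed no db_malloc). The helper name is illustrative.

	#include <stdio.h>
	#include <stdlib.h>
	#include "db.h"

	int
	print_removable_logs(DB_ENV *dbenv)
	{
		char **list, **p;
		int ret;

		/* flags == 0: list log files no longer needed for recovery. */
		if ((ret = log_archive(dbenv, &list, 0, NULL)) != 0)
			return (ret);
		if (list != NULL) {
			for (p = list; *p != NULL; ++p)
				printf("removable: %s\n", *p);
			free(list);	/* one chunk: pointer array + strings */
		}
		return (0);
	}
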
diff --git a/bdb/log/log_auto.c b/bdb/log/log_auto.c
new file mode 100644
index 00000000000..281296cc238
--- /dev/null
+++ b/bdb/log/log_auto.c
@@ -0,0 +1,326 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "log.h"
+#include "txn.h"
+
+int
+__log_register1_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __log_register1_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __log_register1_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]log_register1: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tuid: ");
+ for (i = 0; i < argp->uid.size; i++) {
+ ch = ((u_int8_t *)argp->uid.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tftype: 0x%lx\n", (u_long)argp->ftype);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__log_register1_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __log_register1_args **argpp;
+{
+ __log_register1_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__log_register1_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ memset(&argp->uid, 0, sizeof(argp->uid));
+ memcpy(&argp->uid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->uid.data = bp;
+ bp += argp->uid.size;
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->ftype, bp, sizeof(argp->ftype));
+ bp += sizeof(argp->ftype);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__log_register_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, name, uid, fileid, ftype, meta_pgno)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ const DBT *name;
+ const DBT *uid;
+ int32_t fileid;
+ DBTYPE ftype;
+ db_pgno_t meta_pgno;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_log_register;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t) + (uid == NULL ? 0 : uid->size)
+ + sizeof(fileid)
+ + sizeof(ftype)
+ + sizeof(meta_pgno);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+ if (uid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &uid->size, sizeof(uid->size));
+ bp += sizeof(uid->size);
+ memcpy(bp, uid->data, uid->size);
+ bp += uid->size;
+ }
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &ftype, sizeof(ftype));
+ bp += sizeof(ftype);
+ memcpy(bp, &meta_pgno, sizeof(meta_pgno));
+ bp += sizeof(meta_pgno);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = __log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__log_register_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __log_register_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __log_register_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]log_register: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tuid: ");
+ for (i = 0; i < argp->uid.size; i++) {
+ ch = ((u_int8_t *)argp->uid.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tftype: 0x%lx\n", (u_long)argp->ftype);
+ printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__log_register_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __log_register_args **argpp;
+{
+ __log_register_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__log_register_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ memset(&argp->uid, 0, sizeof(argp->uid));
+ memcpy(&argp->uid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->uid.data = bp;
+ bp += argp->uid.size;
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->ftype, bp, sizeof(argp->ftype));
+ bp += sizeof(argp->ftype);
+ memcpy(&argp->meta_pgno, bp, sizeof(argp->meta_pgno));
+ bp += sizeof(argp->meta_pgno);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__log_init_print(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __log_register1_print, DB_log_register1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __log_register_print, DB_log_register)) != 0)
+ return (ret);
+ return (0);
+}
+
+int
+__log_init_recover(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_log_register1)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __log_register_recover, DB_log_register)) != 0)
+ return (ret);
+ return (0);
+}
+
diff --git a/bdb/log/log_compare.c b/bdb/log/log_compare.c
new file mode 100644
index 00000000000..9bc3c028a5f
--- /dev/null
+++ b/bdb/log/log_compare.c
@@ -0,0 +1,34 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_compare.c,v 11.3 2000/02/14 02:59:59 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * log_compare --
+ * Compare two LSN's; return 1, 0, -1 if first is >, == or < second.
+ */
+int
+log_compare(lsn0, lsn1)
+ const DB_LSN *lsn0, *lsn1;
+{
+ if (lsn0->file != lsn1->file)
+ return (lsn0->file < lsn1->file ? -1 : 1);
+
+ if (lsn0->offset != lsn1->offset)
+ return (lsn0->offset < lsn1->offset ? -1 : 1);
+
+ return (0);
+}
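
A two-assertion illustration of the ordering log_compare() defines (file number first, then offset); the wrapper function exists only for the example.

	#include <assert.h>
	#include "db.h"

	void
	lsn_order_example(void)
	{
		DB_LSN a, b;

		a.file = 1; a.offset = 512;
		b.file = 2; b.offset = 0;
		assert(log_compare(&a, &b) < 0);	/* smaller file number sorts first */
		assert(log_compare(&a, &a) == 0);	/* identical LSNs compare equal */
	}
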
diff --git a/bdb/log/log_findckp.c b/bdb/log/log_findckp.c
new file mode 100644
index 00000000000..b1e8fddbdb7
--- /dev/null
+++ b/bdb/log/log_findckp.c
@@ -0,0 +1,135 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_findckp.c,v 11.5 2000/11/30 00:58:40 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "log.h"
+#include "txn.h"
+
+/*
+ * __log_findckp --
+ *
+ * Looks for the most recent checkpoint that occurs before the most recent
+ * checkpoint LSN, subject to the constraint that there must be at least two
+ * checkpoints. The reason you need two checkpoints is that you might have
+ * crashed during the most recent one and may not have a copy of all the
+ * open files. This is the point from which recovery can start and the
+ * point up to which archival/truncation can take place. Checkpoints in
+ * the log look like:
+ *
+ * -------------------------------------------------------------------
+ * | ckp A, ckplsn 100 | .... record .... | ckp B, ckplsn 600 | ...
+ * -------------------------------------------------------------------
+ * LSN 500 LSN 1000
+ *
+ * If we read what log_get returns when given the DB_CHECKPOINT flag,
+ * we'll get the record at LSN 1000. The checkpoint LSN there is 600.
+ * Now we have to scan backwards looking for a checkpoint before LSN 600.
+ * We find one at 500. This means that we can truncate the log before
+ * 500 or run recovery beginning at 500.
+ *
+ * Returns 0 if we find a suitable checkpoint or we retrieved the first
+ * record in the log from which to start. Returns DB_NOTFOUND if there
+ * are no log records, errno on error.
+ *
+ * PUBLIC: int __log_findckp __P((DB_ENV *, DB_LSN *));
+ */
+int
+__log_findckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DBT data;
+ DB_LSN ckp_lsn, final_ckp, last_ckp, next_lsn;
+ __txn_ckp_args *ckp_args;
+ int ret;
+
+ /*
+ * Need to find the appropriate point from which to begin
+ * recovery.
+ */
+ memset(&data, 0, sizeof(data));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&data, DB_DBT_MALLOC);
+ ZERO_LSN(ckp_lsn);
+ if ((ret = log_get(dbenv, &last_ckp, &data, DB_CHECKPOINT)) != 0) {
+ if (ret == ENOENT)
+ goto get_first;
+ else
+ return (ret);
+ }
+ final_ckp = last_ckp;
+
+ next_lsn = last_ckp;
+ do {
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ __os_free(data.data, data.size);
+
+ if ((ret = log_get(dbenv, &next_lsn, &data, DB_SET)) != 0)
+ return (ret);
+ if ((ret = __txn_ckp_read(dbenv, data.data, &ckp_args)) != 0) {
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ __os_free(data.data, data.size);
+ return (ret);
+ }
+ if (IS_ZERO_LSN(ckp_lsn))
+ ckp_lsn = ckp_args->ckp_lsn;
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_CHKPOINT)) {
+ __db_err(dbenv, "Checkpoint at: [%lu][%lu]",
+ (u_long)last_ckp.file, (u_long)last_ckp.offset);
+ __db_err(dbenv, "Checkpoint LSN: [%lu][%lu]",
+ (u_long)ckp_args->ckp_lsn.file,
+ (u_long)ckp_args->ckp_lsn.offset);
+ __db_err(dbenv, "Previous checkpoint: [%lu][%lu]",
+ (u_long)ckp_args->last_ckp.file,
+ (u_long)ckp_args->last_ckp.offset);
+ }
+ last_ckp = next_lsn;
+ next_lsn = ckp_args->last_ckp;
+ __os_free(ckp_args, sizeof(*ckp_args));
+
+ /*
+		 * Keep looping until we either 1) run out of checkpoints,
+		 * or 2) have found a checkpoint before the most recent
+		 * checkpoint's LSN and have at least two checkpoints.
+ */
+ } while (!IS_ZERO_LSN(next_lsn) &&
+ (log_compare(&last_ckp, &ckp_lsn) > 0 ||
+ log_compare(&final_ckp, &last_ckp) == 0));
+
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ __os_free(data.data, data.size);
+
+ /*
+	 * At this point, either next_lsn is ZERO or ckp_lsn is the
+ * checkpoint lsn and last_ckp is the LSN of the last checkpoint
+ * before ckp_lsn. If the compare in the loop is still true, then
+ * next_lsn must be 0 and we need to roll forward from the
+ * beginning of the log.
+ */
+ if (log_compare(&last_ckp, &ckp_lsn) >= 0 ||
+ log_compare(&final_ckp, &last_ckp) == 0) {
+get_first: if ((ret = log_get(dbenv, &last_ckp, &data, DB_FIRST)) != 0)
+ return (ret);
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ __os_free(data.data, data.size);
+ }
+ *lsnp = last_ckp;
+
+ return (IS_ZERO_LSN(last_ckp) ? DB_NOTFOUND : 0);
+}
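+
+/*
+ * A minimal sketch, assuming an environment opened with DB_INIT_LOG, of how
+ * the LSN returned by __log_findckp might be used: read the record at that
+ * LSN, then roll forward through the log.
+ *
+ *	DB_LSN lsn;
+ *	DBT data;
+ *	int ret;
+ *
+ *	memset(&data, 0, sizeof(data));
+ *	if (__log_findckp(dbenv, &lsn) == 0)
+ *		for (ret = log_get(dbenv, &lsn, &data, DB_SET);
+ *		    ret == 0;
+ *		    ret = log_get(dbenv, &lsn, &data, DB_NEXT))
+ *			... dispatch data.data to a recovery function ...
+ */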
diff --git a/bdb/log/log_get.c b/bdb/log/log_get.c
new file mode 100644
index 00000000000..b75d50a62fd
--- /dev/null
+++ b/bdb/log/log_get.c
@@ -0,0 +1,465 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_get.c,v 11.32 2001/01/11 18:19:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "log.h"
+#include "hash.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+/*
+ * log_get --
+ * Get a log record.
+ */
+int
+log_get(dbenv, alsn, dbt, flags)
+ DB_ENV *dbenv;
+ DB_LSN *alsn;
+ DBT *dbt;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_LSN saved_lsn;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_log_get(dbenv, alsn, dbt, flags));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+
+ /* Validate arguments. */
+ if (flags != DB_CHECKPOINT && flags != DB_CURRENT &&
+ flags != DB_FIRST && flags != DB_LAST &&
+ flags != DB_NEXT && flags != DB_PREV && flags != DB_SET)
+ return (__db_ferr(dbenv, "log_get", 1));
+
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ if (flags == DB_NEXT || flags == DB_PREV || flags == DB_CURRENT)
+ return (__db_ferr(dbenv, "log_get", 1));
+ if (!F_ISSET(dbt,
+ DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM))
+ return (__db_ferr(dbenv, "threaded data", 1));
+ }
+
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * The alsn field is only initialized if DB_SET is the flag, so this
+ * assignment causes uninitialized memory complaints for other flag
+ * values.
+ */
+#ifdef UMRW
+ if (flags == DB_SET)
+ saved_lsn = *alsn;
+ else
+ ZERO_LSN(saved_lsn);
+#else
+ saved_lsn = *alsn;
+#endif
+
+ /*
+ * If we get one of the log's header records, repeat the operation.
+ * This assumes that applications don't ever request the log header
+ * records by LSN, but that seems reasonable to me.
+ */
+ if ((ret = __log_get(dblp,
+ alsn, dbt, flags, 0)) == 0 && alsn->offset == 0) {
+ switch (flags) {
+ case DB_FIRST:
+ flags = DB_NEXT;
+ break;
+ case DB_LAST:
+ flags = DB_PREV;
+ break;
+ }
+ if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ __os_free(dbt->data, dbt->size);
+ dbt->data = NULL;
+ }
+ ret = __log_get(dblp, alsn, dbt, flags, 0);
+ }
+ if (ret != 0)
+ *alsn = saved_lsn;
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ return (ret);
+}
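+
+/*
+ * A minimal sketch, assuming a single-threaded environment opened with
+ * DB_INIT_LOG, of a forward scan over the log.  (In a DB_ENV_THREAD
+ * environment DB_NEXT, DB_PREV and DB_CURRENT are rejected above, and the
+ * DBT must set DB_DBT_MALLOC, DB_DBT_REALLOC or DB_DBT_USERMEM.)
+ *
+ *	DB_LSN lsn;
+ *	DBT rec;
+ *	int ret;
+ *
+ *	memset(&rec, 0, sizeof(rec));
+ *	for (ret = log_get(dbenv, &lsn, &rec, DB_FIRST);
+ *	    ret == 0; ret = log_get(dbenv, &lsn, &rec, DB_NEXT))
+ *		... examine rec.data / rec.size ...
+ */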
+
+/*
+ * __log_get --
+ * Get a log record; internal version.
+ *
+ * PUBLIC: int __log_get __P((DB_LOG *, DB_LSN *, DBT *, u_int32_t, int));
+ */
+int
+__log_get(dblp, alsn, dbt, flags, silent)
+ DB_LOG *dblp;
+ DB_LSN *alsn;
+ DBT *dbt;
+ u_int32_t flags;
+ int silent;
+{
+ DB_ENV *dbenv;
+ DB_LSN nlsn;
+ HDR hdr;
+ LOG *lp;
+ const char *fail;
+ char *np, *tbuf;
+ int cnt, ret;
+ logfile_validity status;
+ size_t len, nr;
+ u_int32_t offset;
+ u_int8_t *p;
+ void *shortp, *readp;
+
+ lp = dblp->reginfo.primary;
+ fail = np = tbuf = NULL;
+ dbenv = dblp->dbenv;
+
+ nlsn = dblp->c_lsn;
+ switch (flags) {
+ case DB_CHECKPOINT:
+ nlsn = lp->chkpt_lsn;
+ if (IS_ZERO_LSN(nlsn)) {
+ /* No db_err. The caller may expect this. */
+ ret = ENOENT;
+ goto err2;
+ }
+ break;
+ case DB_NEXT: /* Next log record. */
+ if (!IS_ZERO_LSN(nlsn)) {
+ /* Increment the cursor by the cursor record size. */
+ nlsn.offset += dblp->c_len;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST: /* Find the first log record. */
+ /* Find the first log file. */
+ if ((ret = __log_find(dblp, 1, &cnt, &status)) != 0)
+ goto err2;
+
+ /*
+ * We want any readable version, so either DB_LV_NORMAL
+ * or DB_LV_OLD_READABLE is acceptable here. If it's
+ * not one of those two, there is no first log record that
+ * we can read.
+ */
+ if (status != DB_LV_NORMAL && status != DB_LV_OLD_READABLE) {
+ ret = DB_NOTFOUND;
+ goto err2;
+ }
+
+ /*
+ * We may have only entered records in the buffer, and not
+ * yet written a log file. If no log files were found and
+ * there's anything in the buffer, it belongs to file 1.
+ */
+ if (cnt == 0)
+ cnt = 1;
+
+ nlsn.file = cnt;
+ nlsn.offset = 0;
+ break;
+ case DB_CURRENT: /* Current log record. */
+ break;
+ case DB_PREV: /* Previous log record. */
+ if (!IS_ZERO_LSN(nlsn)) {
+ /* If at start-of-file, move to the previous file. */
+ if (nlsn.offset == 0) {
+ if (nlsn.file == 1 ||
+ __log_valid(dblp,
+ nlsn.file - 1, 0, &status) != 0)
+ return (DB_NOTFOUND);
+
+ if (status != DB_LV_NORMAL &&
+ status != DB_LV_OLD_READABLE)
+ return (DB_NOTFOUND);
+
+ --nlsn.file;
+ nlsn.offset = dblp->c_off;
+ } else
+ nlsn.offset = dblp->c_off;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST: /* Last log record. */
+ nlsn.file = lp->lsn.file;
+ nlsn.offset = lp->lsn.offset - lp->len;
+ break;
+ case DB_SET: /* Set log record. */
+ nlsn = *alsn;
+ break;
+ }
+
+ if (0) { /* Move to the next file. */
+next_file: ++nlsn.file;
+ nlsn.offset = 0;
+ }
+
+	/* Return DB_NOTFOUND if the request is past the end of the log. */
+ if (nlsn.file > lp->lsn.file ||
+ (nlsn.file == lp->lsn.file && nlsn.offset >= lp->lsn.offset))
+ return (DB_NOTFOUND);
+
+ /* If we've switched files, discard the current file handle. */
+ if (dblp->c_lsn.file != nlsn.file &&
+ F_ISSET(&dblp->c_fh, DB_FH_VALID)) {
+ (void)__os_closehandle(&dblp->c_fh);
+ }
+
+ /* If the entire record is in the in-memory buffer, copy it out. */
+ if (nlsn.file == lp->lsn.file && nlsn.offset >= lp->w_off) {
+ /* Copy the header. */
+ p = dblp->bufp + (nlsn.offset - lp->w_off);
+ memcpy(&hdr, p, sizeof(HDR));
+
+ /* Copy the record. */
+ len = hdr.len - sizeof(HDR);
+ if ((ret = __db_retcopy(NULL, dbt, p + sizeof(HDR),
+ len, &dblp->c_dbt.data, &dblp->c_dbt.ulen)) != 0)
+ goto err2;
+ goto cksum;
+ }
+
+ shortp = NULL;
+
+ /* Acquire a file descriptor. */
+ if (!F_ISSET(&dblp->c_fh, DB_FH_VALID)) {
+ if ((ret = __log_name(dblp, nlsn.file,
+ &np, &dblp->c_fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
+ fail = np;
+ goto err1;
+ }
+ __os_freestr(np);
+ np = NULL;
+ }
+
+	/* See if we've already read this header into the read buffer. */
+ if (nlsn.file == dblp->r_file && nlsn.offset > dblp->r_off
+ && nlsn.offset + sizeof(HDR) < dblp->r_off + dblp->r_size)
+ goto got_header;
+
+ /*
+ * Seek to the header offset and read the header. Because the file
+ * may be pre-allocated, we have to make sure that we're not reading
+ * past the information in the start of the in-memory buffer.
+ */
+
+ readp = &hdr;
+ offset = nlsn.offset;
+ if (nlsn.file == lp->lsn.file && offset + sizeof(HDR) > lp->w_off)
+ nr = lp->w_off - offset;
+ else if (dblp->readbufp == NULL)
+ nr = sizeof(HDR);
+ else {
+ nr = lp->buffer_size;
+ readp = dblp->readbufp;
+ dblp->r_file = nlsn.file;
+		/* Going backwards: center the current offset in the buffer. */
+ if (flags == DB_PREV || flags == DB_LAST) {
+ if (offset <= lp->buffer_size/2)
+ offset = 0;
+ else
+ offset = offset - lp->buffer_size/2;
+ }
+ if (nlsn.file == lp->lsn.file && offset + nr > lp->lsn.offset)
+ nr = lp->lsn.offset - offset;
+ dblp->r_off = offset;
+ }
+
+ if ((ret = __os_seek(dblp->dbenv,
+ &dblp->c_fh, 0, 0, offset, 0, DB_OS_SEEK_SET)) != 0) {
+ fail = "seek";
+ goto err1;
+ }
+ if ((ret = __os_read(dblp->dbenv, &dblp->c_fh, readp, nr, &nr)) != 0) {
+ fail = "read";
+ goto err1;
+ }
+ if (nr < sizeof(HDR)) {
+ /* If read returns EOF, try the next file. */
+ if (nr == 0) {
+ if (flags != DB_NEXT || nlsn.file == lp->lsn.file)
+ goto corrupt;
+ goto next_file;
+ }
+
+ if (dblp->readbufp != NULL)
+ memcpy((u_int8_t *) &hdr, readp, nr);
+
+ /*
+		 * If read returns a short count, the rest of the record has
+ * to be in the in-memory buffer.
+ */
+ if (lp->b_off < sizeof(HDR) - nr)
+ goto corrupt;
+
+ /* Get the rest of the header from the in-memory buffer. */
+ memcpy((u_int8_t *)&hdr + nr, dblp->bufp, sizeof(HDR) - nr);
+
+ if (hdr.len == 0)
+ goto next_file;
+
+ shortp = dblp->bufp + (sizeof(HDR) - nr);
+ }
+
+ else if (dblp->readbufp != NULL) {
+ dblp->r_size = nr;
+got_header: memcpy((u_int8_t *)&hdr,
+ dblp->readbufp + (nlsn.offset - dblp->r_off), sizeof(HDR));
+ }
+
+ /*
+	 * Check for buffers of 0's; that's what we usually see during recovery,
+ * although it's certainly not something on which we can depend. Check
+ * for impossibly large records. The malloc should fail later, but we
+ * have customers that run mallocs that handle allocation failure as a
+ * fatal error.
+ */
+ if (hdr.len == 0)
+ goto next_file;
+ if (hdr.len <= sizeof(HDR) || hdr.len > lp->persist.lg_max)
+ goto corrupt;
+ len = hdr.len - sizeof(HDR);
+
+ /* If we've already moved to the in-memory buffer, fill from there. */
+ if (shortp != NULL) {
+ if (lp->b_off < ((u_int8_t *)shortp - dblp->bufp) + len)
+ goto corrupt;
+ if ((ret = __db_retcopy(NULL, dbt, shortp, len,
+ &dblp->c_dbt.data, &dblp->c_dbt.ulen)) != 0)
+ goto err2;
+ goto cksum;
+ }
+
+ if (dblp->readbufp != NULL) {
+ if (nlsn.offset + hdr.len < dblp->r_off + dblp->r_size) {
+ if ((ret = __db_retcopy(NULL, dbt, dblp->readbufp +
+ (nlsn.offset - dblp->r_off) + sizeof(HDR),
+ len, &dblp->c_dbt.data, &dblp->c_dbt.ulen)) != 0)
+ goto err2;
+ goto cksum;
+ } else if ((ret = __os_seek(dblp->dbenv, &dblp->c_fh, 0,
+ 0, nlsn.offset + sizeof(HDR), 0, DB_OS_SEEK_SET)) != 0) {
+ fail = "seek";
+ goto err1;
+ }
+ }
+
+ /*
+ * Allocate temporary memory to hold the record.
+ *
+ * XXX
+ * We're calling malloc(3) with a region locked. This isn't
+ * a good idea.
+ */
+ if ((ret = __os_malloc(dbenv, len, NULL, &tbuf)) != 0)
+ goto err1;
+
+ /*
+ * Read the record into the buffer. If read returns a short count,
+ * there was an error or the rest of the record is in the in-memory
+ * buffer. Note, the information may be garbage if we're in recovery,
+ * so don't read past the end of the buffer's memory.
+ *
+ * Because the file may be pre-allocated, we have to make sure that
+ * we're not reading past the information in the start of the in-memory
+ * buffer.
+ */
+ if (nlsn.file == lp->lsn.file &&
+ nlsn.offset + sizeof(HDR) + len > lp->w_off)
+ nr = lp->w_off - (nlsn.offset + sizeof(HDR));
+ else
+ nr = len;
+ if ((ret = __os_read(dblp->dbenv, &dblp->c_fh, tbuf, nr, &nr)) != 0) {
+ fail = "read";
+ goto err1;
+ }
+ if (len - nr > lp->buffer_size)
+ goto corrupt;
+ if (nr != len) {
+ if (lp->b_off < len - nr)
+ goto corrupt;
+
+ /* Get the rest of the record from the in-memory buffer. */
+ memcpy((u_int8_t *)tbuf + nr, dblp->bufp, len - nr);
+ }
+
+ /* Copy the record into the user's DBT. */
+ if ((ret = __db_retcopy(NULL, dbt, tbuf, len,
+ &dblp->c_dbt.data, &dblp->c_dbt.ulen)) != 0)
+ goto err2;
+ __os_free(tbuf, 0);
+ tbuf = NULL;
+
+cksum: /*
+ * If the user specified a partial record read, the checksum can't
+ * match. It's not an obvious thing to do, but a user testing for
+ * the length of a record might do it.
+ */
+ if (!F_ISSET(dbt, DB_DBT_PARTIAL) &&
+ hdr.cksum != __ham_func4(NULL, dbt->data, dbt->size)) {
+ if (!silent)
+ __db_err(dbenv, "log_get: checksum mismatch");
+ goto corrupt;
+ }
+
+ /* Update the cursor and the return lsn. */
+ dblp->c_off = hdr.prev;
+ dblp->c_len = hdr.len;
+ dblp->c_lsn = nlsn;
+ *alsn = nlsn;
+
+ return (0);
+
+corrupt:/*
+ * This is the catchall -- for some reason we didn't find enough
+ * information or it wasn't reasonable information, and it wasn't
+ * because a system call failed.
+ */
+ ret = EIO;
+ fail = "read";
+
+err1: if (!silent) {
+ if (fail == NULL)
+ __db_err(dbenv, "log_get: %s", db_strerror(ret));
+ else
+ __db_err(dbenv,
+ "log_get: %s: %s", fail, db_strerror(ret));
+ }
+
+err2: if (np != NULL)
+ __os_freestr(np);
+ if (tbuf != NULL)
+ __os_free(tbuf, 0);
+ return (ret);
+}
diff --git a/bdb/log/log_method.c b/bdb/log/log_method.c
new file mode 100644
index 00000000000..883f485d891
--- /dev/null
+++ b/bdb/log/log_method.c
@@ -0,0 +1,121 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_method.c,v 11.14 2000/11/30 00:58:40 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "log.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __log_set_lg_max __P((DB_ENV *, u_int32_t));
+static int __log_set_lg_bsize __P((DB_ENV *, u_int32_t));
+static int __log_set_lg_dir __P((DB_ENV *, const char *));
+
+/*
+ * __log_dbenv_create --
+ * Log specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: void __log_dbenv_create __P((DB_ENV *));
+ */
+void
+__log_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->lg_bsize = LG_BSIZE_DEFAULT;
+ dbenv->set_lg_bsize = __log_set_lg_bsize;
+
+ dbenv->lg_max = LG_MAX_DEFAULT;
+ dbenv->set_lg_max = __log_set_lg_max;
+
+ dbenv->set_lg_dir = __log_set_lg_dir;
+#ifdef HAVE_RPC
+ /*
+ * If we have a client, overwrite what we just setup to
+ * point to client functions.
+ */
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_lg_bsize = __dbcl_set_lg_bsize;
+ dbenv->set_lg_max = __dbcl_set_lg_max;
+ dbenv->set_lg_dir = __dbcl_set_lg_dir;
+ }
+#endif
+}
+
+/*
+ * __log_set_lg_bsize --
+ * Set the log buffer size.
+ */
+static int
+__log_set_lg_bsize(dbenv, lg_bsize)
+ DB_ENV *dbenv;
+ u_int32_t lg_bsize;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_bsize");
+
+ /* Let's not be silly. */
+ if (lg_bsize > dbenv->lg_max / 4) {
+ __db_err(dbenv, "log buffer size must be <= log file size / 4");
+ return (EINVAL);
+ }
+
+ dbenv->lg_bsize = lg_bsize;
+ return (0);
+}
+
+/*
+ * __log_set_lg_max --
+ * Set the maximum log file size.
+ */
+static int
+__log_set_lg_max(dbenv, lg_max)
+ DB_ENV *dbenv;
+ u_int32_t lg_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_max");
+
+ /* Let's not be silly. */
+ if (lg_max < dbenv->lg_bsize * 4) {
+ __db_err(dbenv, "log file size must be >= log buffer size * 4");
+ return (EINVAL);
+ }
+
+ dbenv->lg_max = lg_max;
+ return (0);
+}
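+
+/*
+ * The two setters above enforce a mutual constraint: the buffer size must be
+ * no more than a quarter of the file size.  A minimal configuration sketch
+ * (the values are assumptions, not recommendations), run before DB_ENV->open:
+ *
+ *	dbenv->set_lg_bsize(dbenv, 64 * 1024);		... <= lg_max / 4 ...
+ *	dbenv->set_lg_max(dbenv, 10 * 1024 * 1024);	... >= lg_bsize * 4 ...
+ */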
+
+/*
+ * __log_set_lg_dir --
+ * Set the log file directory.
+ */
+static int
+__log_set_lg_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ if (dbenv->db_log_dir != NULL)
+ __os_freestr(dbenv->db_log_dir);
+ return (__os_strdup(dbenv, dir, &dbenv->db_log_dir));
+}
diff --git a/bdb/log/log_put.c b/bdb/log/log_put.c
new file mode 100644
index 00000000000..e5cdedb5493
--- /dev/null
+++ b/bdb/log/log_put.c
@@ -0,0 +1,701 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_put.c,v 11.26 2000/11/30 00:58:40 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "log.h"
+#include "hash.h"
+#include "clib_ext.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __log_fill __P((DB_LOG *, DB_LSN *, void *, u_int32_t));
+static int __log_flush __P((DB_LOG *, const DB_LSN *));
+static int __log_newfh __P((DB_LOG *));
+static int __log_putr __P((DB_LOG *, DB_LSN *, const DBT *, u_int32_t));
+static int __log_open_files __P((DB_ENV *));
+static int __log_write __P((DB_LOG *, void *, u_int32_t));
+
+/*
+ * log_put --
+ * Write a log record.
+ */
+int
+log_put(dbenv, lsn, dbt, flags)
+ DB_ENV *dbenv;
+ DB_LSN *lsn;
+ const DBT *dbt;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_log_put(dbenv, lsn, dbt, flags));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+
+ /* Validate arguments. */
+ if (flags != 0 && flags != DB_CHECKPOINT &&
+ flags != DB_CURLSN && flags != DB_FLUSH)
+ return (__db_ferr(dbenv, "log_put", 0));
+
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __log_put(dbenv, lsn, dbt, flags);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+}
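+
+/*
+ * A minimal caller-side sketch, assuming an environment opened with
+ * DB_INIT_LOG:
+ *
+ *	DB_LSN lsn;
+ *	DBT rec;
+ *	int ret;
+ *
+ *	memset(&rec, 0, sizeof(rec));
+ *	rec.data = "application record";
+ *	rec.size = sizeof("application record");
+ *	ret = log_put(dbenv, &lsn, &rec, DB_FLUSH);
+ *
+ * DB_CURLSN only reports the next LSN, DB_CHECKPOINT additionally records
+ * the open-file list and syncs, and DB_FLUSH forces the record to disk.
+ */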
+
+/*
+ * __log_put --
+ * Write a log record; internal version.
+ *
+ * PUBLIC: int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ */
+int
+__log_put(dbenv, lsn, dbt, flags)
+ DB_ENV *dbenv;
+ DB_LSN *lsn;
+ const DBT *dbt;
+ u_int32_t flags;
+{
+ DBT t;
+ DB_LOG *dblp;
+ LOG *lp;
+ u_int32_t lastoff;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * If the application just wants to know where we are, fill in
+ * the information. Currently used by the transaction manager
+ * to avoid writing TXN_begin records.
+ */
+ if (flags == DB_CURLSN) {
+ lsn->file = lp->lsn.file;
+ lsn->offset = lp->lsn.offset;
+ return (0);
+ }
+
+ /* If this information won't fit in the file, swap files. */
+ if (lp->lsn.offset + sizeof(HDR) + dbt->size > lp->persist.lg_max) {
+ if (sizeof(HDR) +
+ sizeof(LOGP) + dbt->size > lp->persist.lg_max) {
+ __db_err(dbenv,
+ "log_put: record larger than maximum file size");
+ return (EINVAL);
+ }
+
+ /* Flush the log. */
+ if ((ret = __log_flush(dblp, NULL)) != 0)
+ return (ret);
+
+ /*
+		 * Save the last known offset from the previous file; we'll
+ * need it to initialize the persistent header information.
+ */
+ lastoff = lp->lsn.offset;
+
+ /* Point the current LSN to the new file. */
+ ++lp->lsn.file;
+ lp->lsn.offset = 0;
+
+ /* Reset the file write offset. */
+ lp->w_off = 0;
+ } else
+ lastoff = 0;
+
+ /* Initialize the LSN information returned to the user. */
+ lsn->file = lp->lsn.file;
+ lsn->offset = lp->lsn.offset;
+
+ /*
+ * Insert persistent information as the first record in every file.
+ * Note that the previous length is wrong for the very first record
+	 * of the log, but that's okay; we check for it during retrieval.
+ */
+ if (lp->lsn.offset == 0) {
+ t.data = &lp->persist;
+ t.size = sizeof(LOGP);
+ if ((ret = __log_putr(dblp, lsn,
+ &t, lastoff == 0 ? 0 : lastoff - lp->len)) != 0)
+ return (ret);
+
+ /*
+		 * Record the files open in this log.
+		 * If we are recovering, then we are already in the
+		 * process of outputting the files; don't do
+		 * it again.
+ */
+ if (!F_ISSET(dblp, DBLOG_RECOVER) &&
+ (ret = __log_open_files(dbenv)) != 0)
+ return (ret);
+
+ /* Update the LSN information returned to the user. */
+ lsn->file = lp->lsn.file;
+ lsn->offset = lp->lsn.offset;
+ }
+
+ /* Write the application's log record. */
+ if ((ret = __log_putr(dblp, lsn, dbt, lp->lsn.offset - lp->len)) != 0)
+ return (ret);
+
+ /*
+ * On a checkpoint, we:
+ * Put out the checkpoint record (above).
+ * Save the LSN of the checkpoint in the shared region.
+ * Append the set of file name information into the log.
+ */
+ if (flags == DB_CHECKPOINT) {
+ lp->chkpt_lsn = *lsn;
+ if ((ret = __log_open_files(dbenv)) != 0)
+ return (ret);
+ }
+
+ /*
+ * On a checkpoint or when flush is requested, we:
+ * Flush the current buffer contents to disk.
+ * Sync the log to disk.
+ */
+ if (flags == DB_FLUSH || flags == DB_CHECKPOINT)
+ if ((ret = __log_flush(dblp, NULL)) != 0)
+ return (ret);
+
+ /*
+ * On a checkpoint, we:
+ * Save the time the checkpoint was written.
+ * Reset the bytes written since the last checkpoint.
+ */
+ if (flags == DB_CHECKPOINT) {
+ (void)time(&lp->chkpt);
+ lp->stat.st_wc_bytes = lp->stat.st_wc_mbytes = 0;
+ }
+ return (0);
+}
+
+/*
+ * __log_putr --
+ * Actually put a record into the log.
+ */
+static int
+__log_putr(dblp, lsn, dbt, prev)
+ DB_LOG *dblp;
+ DB_LSN *lsn;
+ const DBT *dbt;
+ u_int32_t prev;
+{
+ HDR hdr;
+ LOG *lp;
+ int ret;
+
+ lp = dblp->reginfo.primary;
+
+ /*
+ * Initialize the header. If we just switched files, lsn.offset will
+ * be 0, and what we really want is the offset of the previous record
+ * in the previous file. Fortunately, prev holds the value we want.
+ */
+ hdr.prev = prev;
+ hdr.len = sizeof(HDR) + dbt->size;
+ hdr.cksum = __ham_func4(NULL, dbt->data, dbt->size);
+
+ if ((ret = __log_fill(dblp, lsn, &hdr, sizeof(HDR))) != 0)
+ return (ret);
+ lp->len = sizeof(HDR);
+ lp->lsn.offset += sizeof(HDR);
+
+ if ((ret = __log_fill(dblp, lsn, dbt->data, dbt->size)) != 0)
+ return (ret);
+ lp->len += dbt->size;
+ lp->lsn.offset += dbt->size;
+ return (0);
+}
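+
+/*
+ * For reference, the on-disk shape each __log_putr() call produces (derived
+ * from the code above):
+ *
+ *	+----------------------------+------------------------+
+ *	| HDR { prev, len, cksum }   |  dbt->size data bytes  |
+ *	+----------------------------+------------------------+
+ *
+ * hdr.prev is the offset of the previous record, hdr.len counts the header
+ * plus the data, and lp->lsn.offset advances by sizeof(HDR) + dbt->size.
+ */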
+
+/*
+ * log_flush --
+ * Write all records less than or equal to the specified LSN.
+ */
+int
+log_flush(dbenv, lsn)
+ DB_ENV *dbenv;
+ const DB_LSN *lsn;
+{
+ DB_LOG *dblp;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_log_flush(dbenv, lsn));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __log_flush(dblp, lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+}
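+
+/*
+ * A minimal sketch of the two common ways to call log_flush(): flush
+ * everything written so far, or flush through an LSN previously returned
+ * by log_put().
+ *
+ *	(void)log_flush(dbenv, NULL);		... flush the entire log ...
+ *	(void)log_flush(dbenv, &lsn);		... flush through lsn ...
+ */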
+
+/*
+ * __log_flush --
+ * Write all records less than or equal to the specified LSN; internal
+ * version.
+ */
+static int
+__log_flush(dblp, lsn)
+ DB_LOG *dblp;
+ const DB_LSN *lsn;
+{
+ DB_LSN t_lsn;
+ LOG *lp;
+ int current, ret;
+
+ ret = 0;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * If no LSN specified, flush the entire log by setting the flush LSN
+ * to the last LSN written in the log. Otherwise, check that the LSN
+ * isn't a non-existent record for the log.
+ */
+ if (lsn == NULL) {
+ t_lsn.file = lp->lsn.file;
+ t_lsn.offset = lp->lsn.offset - lp->len;
+ lsn = &t_lsn;
+ } else
+ if (lsn->file > lp->lsn.file ||
+ (lsn->file == lp->lsn.file &&
+ lsn->offset > lp->lsn.offset - lp->len)) {
+ __db_err(dblp->dbenv,
+ "log_flush: LSN past current end-of-log");
+ return (EINVAL);
+ }
+
+ /*
+ * If the LSN is less than or equal to the last-sync'd LSN, we're done.
+ * Note, the last-sync LSN saved in s_lsn is the LSN of the first byte
+ * after the byte we absolutely know was written to disk, so the test
+ * is <, not <=.
+ */
+ if (lsn->file < lp->s_lsn.file ||
+ (lsn->file == lp->s_lsn.file && lsn->offset < lp->s_lsn.offset))
+ return (0);
+
+ /*
+ * We may need to write the current buffer. We have to write the
+ * current buffer if the flush LSN is greater than or equal to the
+ * buffer's starting LSN.
+ */
+ current = 0;
+ if (lp->b_off != 0 && log_compare(lsn, &lp->f_lsn) >= 0) {
+ if ((ret = __log_write(dblp, dblp->bufp, lp->b_off)) != 0)
+ return (ret);
+
+ lp->b_off = 0;
+ current = 1;
+ }
+
+ /*
+ * It's possible that this thread may never have written to this log
+ * file. Acquire a file descriptor if we don't already have one.
+ * One last check -- if we're not writing anything from the current
+ * buffer, don't bother. We have nothing to write and nothing to
+ * sync.
+ */
+ if (dblp->lfname != lp->lsn.file) {
+ if (!current)
+ return (0);
+ if ((ret = __log_newfh(dblp)) != 0)
+ return (ret);
+ }
+
+ /* Sync all writes to disk. */
+ if ((ret = __os_fsync(dblp->dbenv, &dblp->lfh)) != 0)
+ return (__db_panic(dblp->dbenv, ret));
+ ++lp->stat.st_scount;
+
+ /* Set the last-synced LSN, using the on-disk write offset. */
+ lp->s_lsn.file = lp->f_lsn.file;
+ lp->s_lsn.offset = lp->w_off;
+
+ return (0);
+}
+
+/*
+ * __log_fill --
+ * Write information into the log.
+ */
+static int
+__log_fill(dblp, lsn, addr, len)
+ DB_LOG *dblp;
+ DB_LSN *lsn;
+ void *addr;
+ u_int32_t len;
+{
+ LOG *lp;
+ u_int32_t bsize, nrec;
+ size_t nw, remain;
+ int ret;
+
+ lp = dblp->reginfo.primary;
+ bsize = lp->buffer_size;
+
+ while (len > 0) { /* Copy out the data. */
+ /*
+ * If we're beginning a new buffer, note the user LSN to which
+ * the first byte of the buffer belongs. We have to know this
+ * when flushing the buffer so that we know if the in-memory
+ * buffer needs to be flushed.
+ */
+ if (lp->b_off == 0)
+ lp->f_lsn = *lsn;
+
+ /*
+ * If we're on a buffer boundary and the data is big enough,
+ * copy as many records as we can directly from the data.
+ */
+ if (lp->b_off == 0 && len >= bsize) {
+ nrec = len / bsize;
+ if ((ret = __log_write(dblp, addr, nrec * bsize)) != 0)
+ return (ret);
+ addr = (u_int8_t *)addr + nrec * bsize;
+ len -= nrec * bsize;
+ ++lp->stat.st_wcount_fill;
+ continue;
+ }
+
+ /* Figure out how many bytes we can copy this time. */
+ remain = bsize - lp->b_off;
+ nw = remain > len ? len : remain;
+ memcpy(dblp->bufp + lp->b_off, addr, nw);
+ addr = (u_int8_t *)addr + nw;
+ len -= nw;
+ lp->b_off += nw;
+
+ /* If we fill the buffer, flush it. */
+ if (lp->b_off == bsize) {
+ if ((ret = __log_write(dblp, dblp->bufp, bsize)) != 0)
+ return (ret);
+ lp->b_off = 0;
+ ++lp->stat.st_wcount_fill;
+ }
+ }
+ return (0);
+}
+
+/*
+ * __log_write --
+ * Write the log buffer to disk.
+ */
+static int
+__log_write(dblp, addr, len)
+ DB_LOG *dblp;
+ void *addr;
+ u_int32_t len;
+{
+ LOG *lp;
+ size_t nw;
+ int ret;
+
+ /*
+ * If we haven't opened the log file yet or the current one
+ * has changed, acquire a new log file.
+ */
+ lp = dblp->reginfo.primary;
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) || dblp->lfname != lp->lsn.file)
+ if ((ret = __log_newfh(dblp)) != 0)
+ return (ret);
+
+ /*
+ * Seek to the offset in the file (someone may have written it
+ * since we last did).
+ */
+ if ((ret =
+ __os_seek(dblp->dbenv,
+ &dblp->lfh, 0, 0, lp->w_off, 0, DB_OS_SEEK_SET)) != 0 ||
+ (ret = __os_write(dblp->dbenv, &dblp->lfh, addr, len, &nw)) != 0)
+ return (__db_panic(dblp->dbenv, ret));
+ if (nw != len) {
+ __db_err(dblp->dbenv, "Short write while writing log");
+ return (EIO);
+ }
+
+ /* Reset the buffer offset and update the seek offset. */
+ lp->w_off += len;
+
+ /* Update written statistics. */
+ if ((lp->stat.st_w_bytes += len) >= MEGABYTE) {
+ lp->stat.st_w_bytes -= MEGABYTE;
+ ++lp->stat.st_w_mbytes;
+ }
+ if ((lp->stat.st_wc_bytes += len) >= MEGABYTE) {
+ lp->stat.st_wc_bytes -= MEGABYTE;
+ ++lp->stat.st_wc_mbytes;
+ }
+ ++lp->stat.st_wcount;
+
+ return (0);
+}
+
+/*
+ * log_file --
+ * Map a DB_LSN to a file name.
+ */
+int
+log_file(dbenv, lsn, namep, len)
+ DB_ENV *dbenv;
+ const DB_LSN *lsn;
+ char *namep;
+ size_t len;
+{
+ DB_LOG *dblp;
+ int ret;
+ char *name;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_log_file(dbenv, lsn, namep, len));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __log_name(dblp, lsn->file, &name, NULL, 0);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (ret != 0)
+ return (ret);
+
+ /* Check to make sure there's enough room and copy the name. */
+ if (len < strlen(name) + 1) {
+ *namep = '\0';
+ __db_err(dbenv, "log_file: name buffer is too short");
+ return (EINVAL);
+ }
+ (void)strcpy(namep, name);
+ __os_freestr(name);
+
+ return (0);
+}
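+
+/*
+ * A minimal sketch of mapping an LSN to its log file name, e.g. when
+ * deciding which files are safe to archive; the buffer size here is an
+ * assumption, any sufficiently large buffer will do:
+ *
+ *	char name[1024];
+ *	int ret;
+ *
+ *	if ((ret = log_file(dbenv, &lsn, name, sizeof(name))) == 0)
+ *		... name holds the log file containing lsn ...
+ */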
+
+/*
+ * __log_newfh --
+ * Acquire a file handle for the current log file.
+ */
+static int
+__log_newfh(dblp)
+ DB_LOG *dblp;
+{
+ LOG *lp;
+ int ret;
+ char *name;
+
+ /* Close any previous file descriptor. */
+ if (F_ISSET(&dblp->lfh, DB_FH_VALID))
+ (void)__os_closehandle(&dblp->lfh);
+
+ /* Get the path of the new file and open it. */
+ lp = dblp->reginfo.primary;
+ dblp->lfname = lp->lsn.file;
+
+ /*
+ * Adding DB_OSO_LOG to the flags may add additional platform-specific
+	 * optimizations.  On WinNT, the log file is preallocated, which may
+	 * carry a time penalty at startup but gives better overall throughput.
+ * We are not certain that this works reliably, so enable at your own
+ * risk.
+ *
+ * XXX:
+ * Initialize the log file size. This is a hack to push the log's
+ * maximum size down into the Windows __os_open routine, because it
+ * wants to pre-allocate it.
+ */
+ dblp->lfh.log_size = dblp->dbenv->lg_max;
+ if ((ret = __log_name(dblp, dblp->lfname,
+ &name, &dblp->lfh,
+ DB_OSO_CREATE |/* DB_OSO_LOG |*/ DB_OSO_SEQ)) != 0)
+ __db_err(dblp->dbenv,
+ "log_put: %s: %s", name, db_strerror(ret));
+
+ __os_freestr(name);
+ return (ret);
+}
+
+/*
+ * __log_name --
+ * Return the log name for a particular file, and optionally open it.
+ *
+ * PUBLIC: int __log_name __P((DB_LOG *,
+ * PUBLIC: u_int32_t, char **, DB_FH *, u_int32_t));
+ */
+int
+__log_name(dblp, filenumber, namep, fhp, flags)
+ DB_LOG *dblp;
+ u_int32_t filenumber, flags;
+ char **namep;
+ DB_FH *fhp;
+{
+ LOG *lp;
+ int ret;
+ char *oname;
+ char old[sizeof(LFPREFIX) + 5 + 20], new[sizeof(LFPREFIX) + 10 + 20];
+
+ lp = dblp->reginfo.primary;
+
+ /*
+ * !!!
+ * The semantics of this routine are bizarre.
+ *
+ * The reason for all of this is that we need a place where we can
+ * intercept requests for log files, and, if appropriate, check for
+ * both the old-style and new-style log file names. The trick is
+ * that all callers of this routine that are opening the log file
+ * read-only want to use an old-style file name if they can't find
+ * a match using a new-style name. The only down-side is that some
+ * callers may check for the old-style when they really don't need
+ * to, but that shouldn't mess up anything, and we only check for
+ * the old-style name when we've already failed to find a new-style
+ * one.
+ *
+ * Create a new-style file name, and if we're not going to open the
+ * file, return regardless.
+ */
+ (void)snprintf(new, sizeof(new), LFNAME, filenumber);
+ if ((ret = __db_appname(dblp->dbenv,
+ DB_APP_LOG, NULL, new, 0, NULL, namep)) != 0 || fhp == NULL)
+ return (ret);
+
+ /* Open the new-style file -- if we succeed, we're done. */
+ if ((ret = __os_open(dblp->dbenv,
+ *namep, flags, lp->persist.mode, fhp)) == 0)
+ return (0);
+
+ /*
+	 * The open failed.  If the DB_OSO_RDONLY flag isn't set, we're done;
+	 * the caller isn't interested in old-style files.
+ */
+ if (!LF_ISSET(DB_OSO_RDONLY)) {
+ __db_err(dblp->dbenv,
+ "%s: log file open failed: %s", *namep, db_strerror(ret));
+ return (__db_panic(dblp->dbenv, ret));
+ }
+
+ /* Create an old-style file name. */
+ (void)snprintf(old, sizeof(old), LFNAME_V1, filenumber);
+ if ((ret = __db_appname(dblp->dbenv,
+ DB_APP_LOG, NULL, old, 0, NULL, &oname)) != 0)
+ goto err;
+
+ /*
+ * Open the old-style file -- if we succeed, we're done. Free the
+ * space allocated for the new-style name and return the old-style
+ * name to the caller.
+ */
+ if ((ret = __os_open(dblp->dbenv,
+ oname, flags, lp->persist.mode, fhp)) == 0) {
+ __os_freestr(*namep);
+ *namep = oname;
+ return (0);
+ }
+
+ /*
+ * Couldn't find either style of name -- return the new-style name
+ * for the caller's error message. If it's an old-style name that's
+ * actually missing we're going to confuse the user with the error
+ * message, but that implies that not only were we looking for an
+ * old-style name, but we expected it to exist and we weren't just
+ * looking for any log file. That's not a likely error.
+ */
+err: __os_freestr(oname);
+ return (ret);
+}
+
+static int
+__log_open_files(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ DBT fid_dbt, t;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->ref == 0) /* Entry not in use. */
+ continue;
+ if (fnp->name_off != INVALID_ROFF) {
+ memset(&t, 0, sizeof(t));
+ t.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ t.size = strlen(t.data) + 1;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = fnp->ufid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ /*
+		 * Output LOG_CHECKPOINT records, which will be
+		 * processed during the OPENFILES pass of recovery.
+ * At the end of recovery we want to output the
+ * files that were open so that a future recovery
+ * run will have the correct files open during
+ * a backward pass. For this we output LOG_CLOSE
+ * records so that the files will be closed on
+ * the forward pass.
+ */
+ if ((ret = __log_register_log(dbenv,
+ NULL, &r_unused, 0,
+ F_ISSET(dblp, DBLOG_RECOVER) ? LOG_CLOSE : LOG_CHECKPOINT,
+ fnp->name_off == INVALID_ROFF ? NULL : &t,
+ &fid_dbt, fnp->id, fnp->s_type, fnp->meta_pgno)) != 0)
+ return (ret);
+ }
+ return (0);
+}
diff --git a/bdb/log/log_rec.c b/bdb/log/log_rec.c
new file mode 100644
index 00000000000..a871848295e
--- /dev/null
+++ b/bdb/log/log_rec.c
@@ -0,0 +1,621 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_rec.c,v 11.48 2001/01/11 18:19:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "log.h"
+
+static int __log_do_open __P((DB_ENV *, DB_LOG *,
+ u_int8_t *, char *, DBTYPE, int32_t, db_pgno_t));
+static int __log_open_file __P((DB_ENV *, DB_LOG *, __log_register_args *));
+
+/*
+ * PUBLIC: int __log_register_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__log_register_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_ENTRY *dbe;
+ DB_LOG *logp;
+ DB *dbp;
+ __log_register_args *argp;
+ int do_rem, ret, t_ret;
+
+ logp = dbenv->lg_handle;
+ dbp = NULL;
+
+#ifdef DEBUG_RECOVER
+ REC_PRINT(__log_register_print);
+#endif
+ COMPQUIET(lsnp, NULL);
+
+ if ((ret = __log_register_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ if ((argp->opcode == LOG_OPEN &&
+ (DB_REDO(op) || op == DB_TXN_OPENFILES)) ||
+ (argp->opcode == LOG_CLOSE && DB_UNDO(op))) {
+ /*
+ * If we are redoing an open or undoing a close, then we need
+ * to open a file. We must open the file even if
+ * the meta page is not yet written as we may be creating it.
+ */
+ if (op == DB_TXN_OPENFILES)
+ F_SET(logp, DBLOG_FORCE_OPEN);
+ ret = __log_open_file(dbenv, logp, argp);
+ F_CLR(logp, DBLOG_FORCE_OPEN);
+ if (ret == ENOENT || ret == EINVAL) {
+ if (op == DB_TXN_OPENFILES && argp->name.size != 0 &&
+ (ret = __db_txnlist_delete(dbenv, info,
+ argp->name.data, argp->fileid, 0)) != 0)
+ goto out;
+ ret = 0;
+ }
+ } else if (argp->opcode != LOG_CHECKPOINT) {
+ /*
+ * If we are undoing an open, then we need to close the file.
+ *
+ * If the file is deleted, then we can just ignore this close.
+ * Otherwise, we should usually have a valid dbp we should
+ * close or whose reference count should be decremented.
+ * However, if we shut down without closing a file, we may, in
+ * fact, not have the file open, and that's OK.
+ */
+ do_rem = 0;
+ MUTEX_THREAD_LOCK(dbenv, logp->mutexp);
+ if (argp->fileid < logp->dbentry_cnt) {
+ dbe = &logp->dbentry[argp->fileid];
+
+ DB_ASSERT(dbe->refcount == 1);
+
+ ret = __db_txnlist_close(info,
+ argp->fileid, dbe->count);
+ if ((dbp = TAILQ_FIRST(&dbe->dblist)) != NULL)
+ (void)log_unregister(dbenv, dbp);
+ do_rem = 1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
+ if (do_rem) {
+ (void)__log_rem_logid(logp, dbp, argp->fileid);
+ /*
+ * If remove or rename has closed the file, don't
+ * sync.
+ */
+ if (dbp != NULL &&
+ (t_ret = dbp->close(dbp,
+ dbp->mpf == NULL ? DB_NOSYNC : 0)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+ } else if (DB_UNDO(op) || op == DB_TXN_OPENFILES) {
+ /*
+ * It's a checkpoint and we are rolling backward. It
+ * is possible that the system was shut down and thus
+ * ended with a stable checkpoint; this file was never
+ * closed and has therefore not been reopened yet. If
+ * so, we need to try to open it.
+ */
+ ret = __log_open_file(dbenv, logp, argp);
+ if (ret == ENOENT || ret == EINVAL) {
+ if (argp->name.size != 0 && (ret =
+ __db_txnlist_delete(dbenv, info,
+ argp->name.data, argp->fileid, 0)) != 0)
+ goto out;
+ ret = 0;
+ }
+ }
+
+out: if (argp != NULL)
+ __os_free(argp, 0);
+ return (ret);
+}
+
+/*
+ * __log_open_file --
+ * Called during log_register recovery. Make sure that we have an
+ * entry in the dbentry table for this ndx. Returns 0 on success,
+ * non-zero on error.
+ */
+static int
+__log_open_file(dbenv, lp, argp)
+ DB_ENV *dbenv;
+ DB_LOG *lp;
+ __log_register_args *argp;
+{
+ DB_ENTRY *dbe;
+ DB *dbp;
+
+ /*
+ * We never re-open temporary files. Temp files are only
+	 * useful during aborts, in which case the dbp was entered
+ * when the file was registered. During recovery, we treat
+ * temp files as properly deleted files, allowing the open to
+ * fail and not reporting any errors when recovery fails to
+ * get a valid dbp from db_fileid_to_db.
+ */
+ if (argp->name.size == 0) {
+ (void)__log_add_logid(dbenv, lp, NULL, argp->fileid);
+ return (ENOENT);
+ }
+
+ /*
+ * Because of reference counting, we cannot automatically close files
+ * during recovery, so when we're opening, we have to check that the
+ * name we are opening is what we expect. If it's not, then we close
+ * the old file and open the new one.
+ */
+ MUTEX_THREAD_LOCK(dbenv, lp->mutexp);
+ if (argp->fileid < lp->dbentry_cnt)
+ dbe = &lp->dbentry[argp->fileid];
+ else
+ dbe = NULL;
+
+ if (dbe != NULL) {
+ dbe->deleted = 0;
+ if ((dbp = TAILQ_FIRST(&dbe->dblist)) != NULL) {
+ if (dbp->meta_pgno != argp->meta_pgno ||
+ memcmp(dbp->fileid,
+ argp->uid.data, DB_FILE_ID_LEN) != 0) {
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ goto reopen;
+ }
+ if (!F_ISSET(lp, DBLOG_RECOVER))
+ dbe->refcount++;
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ return (0);
+ }
+ }
+
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ if (0) {
+reopen: (void)log_unregister(dbp->dbenv, dbp);
+ (void)__log_rem_logid(lp, dbp, argp->fileid);
+ dbp->close(dbp, 0);
+ }
+
+ return (__log_do_open(dbenv, lp,
+ argp->uid.data, argp->name.data,
+ argp->ftype, argp->fileid, argp->meta_pgno));
+}
+
+/*
+ * log_reopen_file -- close and reopen a db file.
+ * Must be called when a metadata page changes.
+ *
+ * PUBLIC: int __log_reopen_file __P((DB_ENV *,
+ * PUBLIC: char *, int32_t, u_int8_t *, db_pgno_t));
+ *
+ */
+int
+__log_reopen_file(dbenv, name, ndx, fileid, meta_pgno)
+ DB_ENV *dbenv;
+ char *name;
+ int32_t ndx;
+ u_int8_t *fileid;
+ db_pgno_t meta_pgno;
+{
+ DB *dbp;
+ DB_LOG *logp;
+ DBTYPE ftype;
+ FNAME *fnp;
+ LOG *lp;
+ char *tmp_name;
+ int ret;
+
+ logp = dbenv->lg_handle;
+
+ if (name == NULL) {
+ R_LOCK(dbenv, &logp->reginfo);
+
+ lp = logp->reginfo.primary;
+
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->ref == 0) /* Entry not in use. */
+ continue;
+ if (memcmp(fnp->ufid, fileid, DB_FILE_ID_LEN) == 0)
+ break;
+ }
+
+ if (fnp == 0 || fnp->name_off == INVALID_ROFF) {
+ __db_err(dbenv,
+ "metasub recover: non-existent file id");
+ return (EINVAL);
+ }
+
+ name = R_ADDR(&logp->reginfo, fnp->name_off);
+ ret = __os_strdup(dbenv, name, &tmp_name);
+ R_UNLOCK(dbenv, &logp->reginfo);
+ if (ret != 0)
+ goto out;
+ name = tmp_name;
+ } else
+ tmp_name = NULL;
+
+ if ((ret = __db_fileid_to_db(dbenv, &dbp, ndx, 0)) != 0)
+ goto out;
+ ftype = dbp->type;
+ (void)log_unregister(dbenv, dbp);
+ (void)__log_rem_logid(logp, dbp, ndx);
+ (void)dbp->close(dbp, 0);
+
+ ret = __log_do_open(dbenv, logp, fileid, name, ftype, ndx, meta_pgno);
+
+ if (tmp_name != NULL)
+ __os_free(tmp_name, 0);
+
+out: return (ret);
+}
+
+/*
+ * __log_do_open --
+ * Open files referenced in the log. This is the part of the open that
+ * is not protected by the thread mutex.
+ */
+static int
+__log_do_open(dbenv, lp, uid, name, ftype, ndx, meta_pgno)
+ DB_ENV *dbenv;
+ DB_LOG *lp;
+ u_int8_t *uid;
+ char *name;
+ DBTYPE ftype;
+ int32_t ndx;
+ db_pgno_t meta_pgno;
+{
+ DB *dbp;
+ int ret;
+ u_int8_t zeroid[DB_FILE_ID_LEN];
+
+ if ((ret = db_create(&dbp, lp->dbenv, 0)) != 0)
+ return (ret);
+
+ dbp->log_fileid = ndx;
+
+ /*
+ * This is needed to signal to the locking routines called while
+ * opening databases that we are potentially undoing a transaction
+ * from an XA process. Since the XA process does not share
+ * locks with the aborting transaction this prevents us from
+ * deadlocking during the open during rollback.
+ * Because this routine is called either during recovery or during an
+ * XA_ABORT, we can safely set DB_AM_RECOVER in the dbp since it
+ * will not be shared with other threads.
+ */
+ F_SET(dbp, DB_AM_RECOVER);
+ if (meta_pgno != PGNO_BASE_MD)
+ memcpy(dbp->fileid, uid, DB_FILE_ID_LEN);
+ dbp->type = ftype;
+ if ((ret =
+ __db_dbopen(dbp, name, 0, __db_omode("rw----"), meta_pgno)) == 0) {
+ /*
+ * Verify that we are opening the same file that we were
+ * referring to when we wrote this log record.
+ */
+ if (memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0) {
+ memset(zeroid, 0, DB_FILE_ID_LEN);
+ if (memcmp(dbp->fileid, zeroid, DB_FILE_ID_LEN) != 0)
+ goto not_right;
+ memcpy(dbp->fileid, uid, DB_FILE_ID_LEN);
+ }
+ if (IS_RECOVERING(dbenv)) {
+ (void)log_register(dbp->dbenv, dbp, name);
+ (void)__log_add_logid(dbenv, lp, dbp, ndx);
+ }
+ return (0);
+ }
+
+not_right:
+ (void)dbp->close(dbp, 0);
+ (void)__log_add_logid(dbenv, lp, NULL, ndx);
+
+ return (ENOENT);
+}
+
+/*
+ * __log_add_logid --
+ * Adds a DB entry to the log's DB entry table.
+ *
+ * PUBLIC: int __log_add_logid __P((DB_ENV *, DB_LOG *, DB *, int32_t));
+ */
+int
+__log_add_logid(dbenv, logp, dbp, ndx)
+ DB_ENV *dbenv;
+ DB_LOG *logp;
+ DB *dbp;
+ int32_t ndx;
+{
+ DB *dbtmp;
+ int32_t i;
+ int ret;
+
+ ret = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, logp->mutexp);
+
+ /*
+ * Check if we need to grow the table. Note, ndx is 0-based (the
+	 * index into the DB entry table) and dbentry_cnt is 1-based (the
+	 * number of available slots).
+ */
+ if (logp->dbentry_cnt <= ndx) {
+ if ((ret = __os_realloc(dbenv,
+ (ndx + DB_GROW_SIZE) * sizeof(DB_ENTRY),
+ NULL, &logp->dbentry)) != 0)
+ goto err;
+
+ /*
+ * We have moved the head of the queue.
+ * Fix up the queue header of an empty queue or the previous
+ * pointer of the first element.
+ */
+ for (i = 0; i < logp->dbentry_cnt; i++) {
+ if ((dbtmp =
+ TAILQ_FIRST(&logp->dbentry[i].dblist)) == NULL)
+ TAILQ_INIT(&logp->dbentry[i].dblist);
+ else
+ TAILQ_REINSERT_HEAD(
+ &logp->dbentry[i].dblist, dbp, links);
+ }
+
+ /* Initialize the new entries. */
+ for (i = logp->dbentry_cnt; i < ndx + DB_GROW_SIZE; i++) {
+ logp->dbentry[i].count = 0;
+ TAILQ_INIT(&logp->dbentry[i].dblist);
+ logp->dbentry[i].deleted = 0;
+ logp->dbentry[i].refcount = 0;
+ }
+
+ logp->dbentry_cnt = i;
+ }
+
+ if (logp->dbentry[ndx].deleted == 0 &&
+ TAILQ_FIRST(&logp->dbentry[ndx].dblist) == NULL) {
+ logp->dbentry[ndx].count = 0;
+ if (dbp != NULL)
+ TAILQ_INSERT_HEAD(&logp->dbentry[ndx].dblist,
+ dbp, links);
+ logp->dbentry[ndx].deleted = dbp == NULL;
+ logp->dbentry[ndx].refcount = 1;
+ } else if (!F_ISSET(logp, DBLOG_RECOVER)) {
+ if (dbp != NULL)
+ TAILQ_INSERT_HEAD(&logp->dbentry[ndx].dblist,
+ dbp, links);
+ logp->dbentry[ndx].refcount++;
+ }
+
+err: MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
+ return (ret);
+}
+
+/*
+ * __db_fileid_to_db --
+ * Return the DB corresponding to the specified fileid.
+ *
+ * PUBLIC: int __db_fileid_to_db __P((DB_ENV *, DB **, int32_t, int));
+ */
+int
+__db_fileid_to_db(dbenv, dbpp, ndx, inc)
+ DB_ENV *dbenv;
+ DB **dbpp;
+ int32_t ndx;
+ int inc;
+{
+ DB_LOG *logp;
+ DB *dbp;
+ FNAME *fname;
+ int ret;
+ char *name;
+
+ ret = 0;
+ logp = dbenv->lg_handle;
+
+ MUTEX_THREAD_LOCK(dbenv, logp->mutexp);
+
+ /*
+ * Under XA, a process different than the one issuing DB operations
+ * may abort a transaction. In this case, recovery routines are run
+ * by a process that does not necessarily have the file open, so we
+	 * must open the file explicitly.
+ */
+ if (ndx >= logp->dbentry_cnt ||
+ (!logp->dbentry[ndx].deleted &&
+ (dbp = TAILQ_FIRST(&logp->dbentry[ndx].dblist)) == NULL)) {
+ if (F_ISSET(logp, DBLOG_RECOVER)) {
+ ret = ENOENT;
+ goto err;
+ }
+ if (__log_lid_to_fname(logp, ndx, &fname) != 0) {
+ /* Couldn't find entry; this is a fatal error. */
+ __db_err(dbenv, "Missing log fileid entry");
+ ret = EINVAL;
+ goto err;
+ }
+ name = R_ADDR(&logp->reginfo, fname->name_off);
+
+ /*
+ * __log_do_open is called without protection of the
+ * log thread lock.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
+
+ /*
+ * At this point, we are not holding the thread lock, so exit
+ * directly instead of going through the exit code at the
+ * bottom. If the __log_do_open succeeded, then we don't need
+ * to do any of the remaining error checking at the end of this
+ * routine.
+ */
+ if ((ret = __log_do_open(dbenv, logp,
+ fname->ufid, name, fname->s_type,
+ ndx, fname->meta_pgno)) != 0)
+ return (ret);
+
+ *dbpp = TAILQ_FIRST(&logp->dbentry[ndx].dblist);
+ return (0);
+ }
+
+ /*
+ * Return DB_DELETED if the file has been deleted (it's not an error).
+ */
+ if (logp->dbentry[ndx].deleted) {
+ ret = DB_DELETED;
+ if (inc)
+ logp->dbentry[ndx].count++;
+ goto err;
+ }
+
+ /*
+ * Otherwise return 0, but if we don't have a corresponding DB, it's
+ * an error.
+ */
+ if ((*dbpp = TAILQ_FIRST(&logp->dbentry[ndx].dblist)) == NULL)
+ ret = ENOENT;
+
+err: MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
+ return (ret);
+}
+
+/*
+ * __log_close_files --
+ * Close files that were opened by the recovery daemon. We sync the
+ * file, unless its mpf pointer has been NULLed by a db_remove or
+ * db_rename. We may not have flushed the log_register record that
+ * closes the file.
+ *
+ * PUBLIC: void __log_close_files __P((DB_ENV *));
+ */
+void
+__log_close_files(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_ENTRY *dbe;
+ DB_LOG *logp;
+ DB *dbp;
+ int32_t i;
+
+ logp = dbenv->lg_handle;
+ MUTEX_THREAD_LOCK(dbenv, logp->mutexp);
+ for (i = 0; i < logp->dbentry_cnt; i++) {
+ dbe = &logp->dbentry[i];
+ while ((dbp = TAILQ_FIRST(&dbe->dblist)) != NULL) {
+ (void)log_unregister(dbenv, dbp);
+ TAILQ_REMOVE(&dbe->dblist, dbp, links);
+ (void)dbp->close(dbp, dbp->mpf == NULL ? DB_NOSYNC : 0);
+ }
+ dbe->deleted = 0;
+ dbe->refcount = 0;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
+}
+
+/*
+ * __log_rem_logid
+ * Remove an entry from the log table. Find the appropriate DB and
+ * unlink it from the linked list off the table. If the DB is NULL, treat
+ * this as a simple refcount decrement.
+ *
+ * PUBLIC: void __log_rem_logid __P((DB_LOG *, DB *, int32_t));
+ */
+void
+__log_rem_logid(logp, dbp, ndx)
+ DB_LOG *logp;
+ DB *dbp;
+ int32_t ndx;
+{
+ DB *xdbp;
+
+ MUTEX_THREAD_LOCK(logp->dbenv, logp->mutexp);
+ if (--logp->dbentry[ndx].refcount == 0) {
+ TAILQ_INIT(&logp->dbentry[ndx].dblist);
+ logp->dbentry[ndx].deleted = 0;
+ } else if (dbp != NULL)
+ for (xdbp = TAILQ_FIRST(&logp->dbentry[ndx].dblist);
+ xdbp != NULL;
+ xdbp = TAILQ_NEXT(xdbp, links))
+ if (xdbp == dbp) {
+ TAILQ_REMOVE(&logp->dbentry[ndx].dblist,
+ xdbp, links);
+ break;
+ }
+
+ MUTEX_THREAD_UNLOCK(logp->dbenv, logp->mutexp);
+}
+
+/*
+ * __log_lid_to_fname --
+ * Traverse the shared-memory region looking for the entry that
+ * matches the passed log fileid. Returns 0 on success; -1 on error.
+ * PUBLIC: int __log_lid_to_fname __P((DB_LOG *, int32_t, FNAME **));
+ */
+int
+__log_lid_to_fname(dblp, lid, fnamep)
+ DB_LOG *dblp;
+ int32_t lid;
+ FNAME **fnamep;
+{
+ FNAME *fnp;
+ LOG *lp;
+
+ lp = dblp->reginfo.primary;
+
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->ref == 0) /* Entry not in use. */
+ continue;
+ if (fnp->id == lid) {
+ *fnamep = fnp;
+ return (0);
+ }
+ }
+ return (-1);
+}
diff --git a/bdb/log/log_register.c b/bdb/log/log_register.c
new file mode 100644
index 00000000000..1e0e523d8b9
--- /dev/null
+++ b/bdb/log/log_register.c
@@ -0,0 +1,433 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_register.c,v 11.35 2001/01/10 16:04:19 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "log.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+/*
+ * log_register --
+ * Register a file name.
+ */
+int
+log_register(dbenv, dbp, name)
+ DB_ENV *dbenv;
+ DB *dbp;
+ const char *name;
+{
+ DBT fid_dbt, r_name;
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ FNAME *found_fnp, *fnp, *recover_fnp, *reuse_fnp;
+ LOG *lp;
+ size_t len;
+ int32_t maxid;
+ int inserted, ok, ret;
+ void *namep;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_log_register(dbenv, dbp, name));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = reuse_fnp = NULL;
+ inserted = ret = 0;
+ namep = NULL;
+
+ /* Check the arguments. */
+ if (dbp->type != DB_BTREE && dbp->type != DB_QUEUE &&
+ dbp->type != DB_HASH && dbp->type != DB_RECNO) {
+ __db_err(dbenv, "log_register: unknown DB file type");
+ return (EINVAL);
+ }
+
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * See if we've already got this file in the log, finding the
+ * (maximum+1) in-use file id and some available file id (if we
+ * find an available fid, we'll use it, else we'll have to allocate
+ * one after the maximum that we found).
+ */
+ ok = 0;
+ found_fnp = recover_fnp = NULL;
+ for (maxid = 0, fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (F_ISSET(dblp, DBLOG_RECOVER) && fnp->id == dbp->log_fileid)
+ recover_fnp = fnp;
+ if (fnp->ref == 0) { /* Entry is not in use. */
+ if (reuse_fnp == NULL)
+ reuse_fnp = fnp;
+ continue;
+ }
+ if (memcmp(dbp->fileid, fnp->ufid, DB_FILE_ID_LEN) == 0) {
+ if (fnp->meta_pgno == 0) {
+ if (fnp->locked == 1) {
+ __db_err(dbenv, "File is locked");
+ return (EINVAL);
+ }
+ if (found_fnp != NULL) {
+ fnp = found_fnp;
+ goto found;
+ }
+ ok = 1;
+ }
+ if (dbp->meta_pgno == fnp->meta_pgno) {
+ if (F_ISSET(dblp, DBLOG_RECOVER)) {
+ if (fnp->id != dbp->log_fileid) {
+ /*
+ * If we are in recovery, there
+ * is only one dbp on the list.
+ * If the refcount goes to 0,
+ * we will clear the list. If
+ * it doesn't, we want to leave
+ * the dbp where it is, so
+ * passing a NULL to rem_logid
+ * is correct.
+ */
+ __log_rem_logid(dblp,
+ NULL, fnp->id);
+ if (recover_fnp != NULL)
+ break;
+ continue;
+ }
+ fnp->ref = 1;
+ goto found;
+ }
+ ++fnp->ref;
+ if (ok)
+ goto found;
+ found_fnp = fnp;
+ }
+ }
+ if (maxid <= fnp->id)
+ maxid = fnp->id + 1;
+ }
+ if ((fnp = found_fnp) != NULL)
+ goto found;
+
+ /* Fill in fnp structure. */
+ if (recover_fnp != NULL) /* This has the right number */
+ fnp = recover_fnp;
+ else if (reuse_fnp != NULL) /* Reuse existing one. */
+ fnp = reuse_fnp;
+ else { /* Allocate a new one. */
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ sizeof(FNAME), 0, &fnp)) != 0)
+ goto mem_err;
+ fnp->id = maxid;
+ }
+
+ if (F_ISSET(dblp, DBLOG_RECOVER))
+ fnp->id = dbp->log_fileid;
+
+ fnp->ref = 1;
+ fnp->locked = 0;
+ fnp->s_type = dbp->type;
+ memcpy(fnp->ufid, dbp->fileid, DB_FILE_ID_LEN);
+ fnp->meta_pgno = dbp->meta_pgno;
+
+ if (name != NULL) {
+ len = strlen(name) + 1;
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr, len, 0, &namep)) != 0) {
+mem_err: __db_err(dbenv,
+ "Unable to allocate memory to register %s", name);
+ goto err;
+ }
+ fnp->name_off = R_OFFSET(&dblp->reginfo, namep);
+ memcpy(namep, name, len);
+ } else
+ fnp->name_off = INVALID_ROFF;
+
+ /* Only do the insert if we allocated a new fnp. */
+ if (reuse_fnp == NULL && recover_fnp == NULL)
+ SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
+ inserted = 1;
+
+ /* Log the registry. */
+ if (!F_ISSET(dblp, DBLOG_RECOVER)) {
+ /*
+ * We allow logging on in-memory databases, so the name here
+ * could be NULL.
+ */
+ if (name != NULL) {
+ r_name.data = (void *)name;
+ r_name.size = strlen(name) + 1;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = dbp->fileid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ if ((ret = __log_register_log(dbenv, NULL, &r_unused,
+ 0, LOG_OPEN, name == NULL ? NULL : &r_name,
+ &fid_dbt, fnp->id, dbp->type, dbp->meta_pgno)) != 0)
+ goto err;
+ }
+
+found: /*
+ * If we found the entry in the shared area, then the file is
+ * already open, so there is no need to log the open.  We only
+ * log opens and closes for the first open and the last close.
+ */
+ if (!F_ISSET(dblp, DBLOG_RECOVER) &&
+ (ret = __log_add_logid(dbenv, dblp, dbp, fnp->id)) != 0)
+ goto err;
+
+ if (!F_ISSET(dblp, DBLOG_RECOVER))
+ dbp->log_fileid = fnp->id;
+
+ if (0) {
+err: if (inserted)
+ SH_TAILQ_REMOVE(&lp->fq, fnp, q, __fname);
+ if (namep != NULL)
+ __db_shalloc_free(dblp->reginfo.addr, namep);
+ if (fnp != NULL)
+ __db_shalloc_free(dblp->reginfo.addr, fnp);
+ }
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ return (ret);
+}
+
+/*
+ * log_unregister --
+ * Discard a registered file name.
+ */
+int
+log_unregister(dbenv, dbp)
+ DB_ENV *dbenv;
+ DB *dbp;
+{
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_log_unregister(dbenv, dbp));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+
+ ret = __log_filelist_update(dbenv, dbp, dbp->log_fileid, NULL, NULL);
+ dbp->log_fileid = DB_LOGFILEID_INVALID;
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __log_filelist_update
+ * PUBLIC: __P((DB_ENV *, DB *, int32_t, const char *, int *));
+ *
+ * Utility player for updating and logging the file list. Called
+ * for 3 reasons:
+ * 1) mark file closed: newname == NULL.
+ * 2) change filename: newname != NULL.
+ * 3) from recovery, to verify and, if necessary, change the filename: set != NULL.
+ */
+int
+__log_filelist_update(dbenv, dbp, fid, newname, set)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int32_t fid;
+ const char *newname;
+ int *set;
+{
+ DBT fid_dbt, r_name;
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ FNAME *fnp;
+ LOG *lp;
+ u_int32_t len, newlen;
+ int ret;
+ void *namep;
+
+ ret = 0;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ /* Find the entry in the log. */
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname))
+ if (fid == fnp->id)
+ break;
+ if (fnp == NULL) {
+ __db_err(dbenv, "log_unregister: non-existent file id");
+ ret = EINVAL;
+ goto ret1;
+ }
+
+ /*
+ * Log the unregistry only if this is the last reference and we are
+ * really closing the file, or if this is the abort of a created
+ * file and we need to make sure there is a record in the log.
+ */
+ namep = NULL;
+ len = 0;
+ if (fnp->name_off != INVALID_ROFF) {
+ namep = R_ADDR(&dblp->reginfo, fnp->name_off);
+ len = strlen(namep) + 1;
+ }
+ if (!F_ISSET(dblp, DBLOG_RECOVER) && fnp->ref == 1) {
+ if (namep != NULL) {
+ memset(&r_name, 0, sizeof(r_name));
+ r_name.data = namep;
+ r_name.size = len;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = fnp->ufid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ if ((ret = __log_register_log(dbenv, NULL, &r_unused,
+ 0, LOG_CLOSE,
+ fnp->name_off == INVALID_ROFF ? NULL : &r_name,
+ &fid_dbt, fid, fnp->s_type, fnp->meta_pgno))
+ != 0)
+ goto ret1;
+ }
+
+ /*
+ * If we are changing the name we must log this fact.
+ */
+ if (newname != NULL) {
+ DB_ASSERT(fnp->ref == 1);
+ newlen = strlen(newname) + 1;
+ if (!F_ISSET(dblp, DBLOG_RECOVER)) {
+ r_name.data = (void *) newname;
+ r_name.size = newlen;
+ if ((ret = __log_register_log(dbenv,
+ NULL, &r_unused, 0, LOG_OPEN, &r_name, &fid_dbt,
+ fnp->id, fnp->s_type, fnp->meta_pgno)) != 0)
+ goto ret1;
+ }
+
+ /*
+ * Check to see if the name is already correct.
+ */
+ if (set != NULL) {
+ if (len != newlen || memcmp(namep, newname, len) != 0)
+ *set = 1;
+ else {
+ *set = 0;
+ goto ret1;
+ }
+ }
+
+ /*
+ * Change the name, reallocating memory if necessary.
+ */
+ if (len < newlen) {
+ __db_shalloc_free(dblp->reginfo.addr,
+ R_ADDR(&dblp->reginfo, fnp->name_off));
+ if ((ret = __db_shalloc(
+ dblp->reginfo.addr, newlen, 0, &namep)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory to register %s",
+ newname);
+ goto ret1;
+ }
+ fnp->name_off = R_OFFSET(&dblp->reginfo, namep);
+ } else
+ namep = R_ADDR(&dblp->reginfo, fnp->name_off);
+ memcpy(namep, newname, newlen);
+ } else {
+
+ /*
+ * If more than 1 reference, just decrement the reference
+ * and return. Otherwise, free the name if one exists.
+ */
+ DB_ASSERT(fnp->ref >= 1);
+ --fnp->ref;
+ if (fnp->ref == 0) {
+ if (fnp->name_off != INVALID_ROFF)
+ __db_shalloc_free(dblp->reginfo.addr,
+ R_ADDR(&dblp->reginfo, fnp->name_off));
+ fnp->name_off = INVALID_ROFF;
+ }
+
+ /*
+ * Remove from the process local table. If this
+ * operation is taking place during recovery, then
+ * the logid was never added to the table, so do not remove it.
+ */
+ if (!F_ISSET(dblp, DBLOG_RECOVER))
+ __log_rem_logid(dblp, dbp, fid);
+ }
+
+ret1: R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+}
+
+/*
+ * __log_file_lock -- lock a file for single access
+ * This only works if logging is on.
+ *
+ * PUBLIC: int __log_file_lock __P((DB *));
+ */
+int
+__log_file_lock(dbp)
+ DB *dbp;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ ret = 0;
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->ref == 0)
+ continue;
+
+ if (!memcmp(dbp->fileid, fnp->ufid, DB_FILE_ID_LEN)) {
+ if (fnp->meta_pgno == 0) {
+ if (fnp->ref != 1)
+ goto err;
+
+ fnp->locked = 1;
+ } else {
+err: __db_err(dbp->dbenv, "File is open");
+ ret = EINVAL;
+ goto done;
+ }
+
+ }
+ }
+done: R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+}
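For orientation, a minimal sketch of how the two calls above pair up from a
caller's point of view.  In the library itself, DB->open and DB->close make
these calls on the application's behalf, so the helper below
(register_example) is hypothetical: it only illustrates the pairing and the
lifetime of dbp->log_fileid, and it assumes an already-created DB_ENV and DB
handle whose fileid and type have been filled in.

    #include "db.h"

    static int
    register_example(dbenv, dbp, name)
        DB_ENV *dbenv;
        DB *dbp;
        const char *name;
    {
        int ret;

        /* Associate "name" with dbp; on success dbp->log_fileid is set. */
        if ((ret = log_register(dbenv, dbp, name)) != 0)
            return (ret);

        /*
         * Log records written for this file can now carry dbp->log_fileid,
         * letting recovery map them back to the file name.
         */

        /* Drop the registration; the file id is no longer valid. */
        return (log_unregister(dbenv, dbp));
    }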
diff --git a/bdb/mp/Design b/bdb/mp/Design
new file mode 100644
index 00000000000..1b26aae6cba
--- /dev/null
+++ b/bdb/mp/Design
@@ -0,0 +1,52 @@
+$Id: Design,v 11.2 1999/11/21 23:08:27 bostic Exp $
+
+There are three ways we do locking in the mpool code:
+
+Locking a handle mutex to provide concurrency for DB_THREAD operations.
+Locking the region mutex to provide mutual exclusion while reading and
+ writing structures in the shared region.
+Locking buffer header mutexes during I/O.
+
+The first will not be further described here. We use the shared mpool
+region lock to provide mutual exclusion while reading/modifying all of
+the data structures, including the buffer headers. We use a per-buffer
+header lock to wait on buffer I/O. The order of locking is as follows:
+
+Searching for a buffer:
+ Acquire the region lock.
+ Find the buffer header.
+ Increment the reference count (guarantee the buffer stays).
+ While the BH_LOCKED flag is set (I/O is going on) {
+ Release the region lock.
+ Explicitly yield the processor if it's not the first pass
+ through this loop; if we don't, we'll simply spin,
+ switching back and forth between the two locks.
+ Request the buffer lock.
+ The I/O will complete...
+ Acquire the buffer lock.
+ Release the buffer lock.
+ Acquire the region lock.
+ }
+ Return the buffer.
+
+Reading/writing a buffer:
+ Acquire the region lock.
+ Find/create the buffer header.
+ If reading, increment the reference count (guarantee the buffer stays).
+ Set the BH_LOCKED flag.
+ Acquire the buffer lock (guaranteed not to block).
+ Release the region lock.
+ Do the I/O and/or initialize the buffer contents.
+ Release the buffer lock.
+ At this point, the buffer lock is available, but the logical
+ operation (flagged by BH_LOCKED) is not yet completed. For
+ this reason, among others, threads checking the BH_LOCKED flag
+ must loop around their test.
+ Acquire the region lock.
+ Clear the BH_LOCKED flag.
+ Release the region lock.
+ Return/discard the buffer.
+
+Pointers to DB_MPOOL, MPOOL, DB_MPOOLFILE and MPOOLFILE structures are
+not reacquired when a region lock is reacquired because they couldn't
+have been closed/discarded and because they never move in memory.
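A condensed C rendering of the "searching for a buffer" protocol above, using
the macro and flag names that appear in the mpool sources (R_LOCK, MUTEX_LOCK,
F_ISSET, BH_LOCKED, __os_yield); the hash-chain lookup helper is hypothetical
and all error handling is elided, so this is a sketch of the locking order
only, not library code:

    R_LOCK(dbenv, dbmp->reginfo);            /* 1: acquire the region lock */
    bhp = find_buffer_header(c_mp, pgno);    /* hypothetical lookup helper */
    ++bhp->ref;                              /* 2: pin it so it can't move */
    for (first = 1; F_ISSET(bhp, BH_LOCKED); first = 0) {
            R_UNLOCK(dbenv, dbmp->reginfo);  /* 3: don't hold the region while waiting */
            if (!first)                      /* avoid ping-ponging between the locks */
                    __os_yield(dbenv, 1);
            MUTEX_LOCK(dbenv, &bhp->mutex, dbenv->lockfhp);  /* 4: wait for the I/O */
            MUTEX_UNLOCK(dbenv, &bhp->mutex);                /* 5: give the lock back */
            R_LOCK(dbenv, dbmp->reginfo);    /* 6: re-check BH_LOCKED under the region lock */
    }
    /* The buffer is valid and pinned; use it, then release the region lock. */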
diff --git a/bdb/mp/mp_alloc.c b/bdb/mp/mp_alloc.c
new file mode 100644
index 00000000000..731f569f57f
--- /dev/null
+++ b/bdb/mp/mp_alloc.c
@@ -0,0 +1,152 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_alloc.c,v 11.7 2000/04/20 21:14:18 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+/*
+ * __memp_alloc --
+ * Allocate some space from a cache region.
+ *
+ * PUBLIC: int __memp_alloc __P((DB_MPOOL *,
+ * PUBLIC: REGINFO *, MPOOLFILE *, size_t, roff_t *, void *));
+ */
+int
+__memp_alloc(dbmp, memreg, mfp, len, offsetp, retp)
+ DB_MPOOL *dbmp;
+ REGINFO *memreg;
+ MPOOLFILE *mfp;
+ size_t len;
+ roff_t *offsetp;
+ void *retp;
+{
+ BH *bhp, *nbhp;
+ MPOOL *c_mp;
+ MPOOLFILE *bh_mfp;
+ size_t total;
+ int nomore, restart, ret, wrote;
+ void *p;
+
+ c_mp = memreg->primary;
+
+ /*
+ * If we're allocating a buffer, and the one we're discarding is the
+ * same size, we don't want to waste the time to re-integrate it into
+ * the shared memory free list.  If the MPOOLFILE argument isn't
+ * NULL, we'll compare the underlying page sizes of the two buffers
+ * before freeing and re-allocating buffers.
+ */
+ if (mfp != NULL)
+ len = (sizeof(BH) - sizeof(u_int8_t)) + mfp->stat.st_pagesize;
+
+ nomore = 0;
+alloc: if ((ret = __db_shalloc(memreg->addr, len, MUTEX_ALIGN, &p)) == 0) {
+ if (offsetp != NULL)
+ *offsetp = R_OFFSET(memreg, p);
+ *(void **)retp = p;
+ return (0);
+ }
+ if (nomore) {
+ __db_err(dbmp->dbenv,
+ "Unable to allocate %lu bytes from mpool shared region: %s\n",
+ (u_long)len, db_strerror(ret));
+ return (ret);
+ }
+
+retry: /* Find a buffer we can flush; pure LRU. */
+ restart = total = 0;
+ for (bhp =
+ SH_TAILQ_FIRST(&c_mp->bhq, __bh); bhp != NULL; bhp = nbhp) {
+ nbhp = SH_TAILQ_NEXT(bhp, q, __bh);
+
+ /* Ignore pinned or locked (I/O in progress) buffers. */
+ if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED))
+ continue;
+
+ /* Find the associated MPOOLFILE. */
+ bh_mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+
+ /* Write the page if it's dirty. */
+ if (F_ISSET(bhp, BH_DIRTY)) {
+ ++bhp->ref;
+ if ((ret = __memp_bhwrite(dbmp,
+ bh_mfp, bhp, &restart, &wrote)) != 0)
+ return (ret);
+ --bhp->ref;
+
+ /*
+ * Another process may have acquired this buffer and
+ * incremented the ref count after we wrote it.
+ */
+ if (bhp->ref != 0)
+ goto retry;
+
+ /*
+ * If we wrote the page, continue and free the buffer.
+ * We don't have to rewalk the list to acquire the
+ * buffer because it was never available for any other
+ * process to modify it.
+ *
+ * If we didn't write the page, but we discarded and
+ * reacquired the region lock, restart the list walk.
+ *
+ * If we neither wrote the buffer nor discarded the
+ * region lock, continue down the buffer list.
+ */
+ if (wrote)
+ ++c_mp->stat.st_rw_evict;
+ else {
+ if (restart)
+ goto retry;
+ continue;
+ }
+ } else
+ ++c_mp->stat.st_ro_evict;
+
+ /*
+ * Check to see if the buffer is the size we're looking for.
+ * If it is, simply reuse it.
+ */
+ if (mfp != NULL &&
+ mfp->stat.st_pagesize == bh_mfp->stat.st_pagesize) {
+ __memp_bhfree(dbmp, bhp, 0);
+
+ if (offsetp != NULL)
+ *offsetp = R_OFFSET(memreg, bhp);
+ *(void **)retp = bhp;
+ return (0);
+ }
+
+ /* Note how much space we've freed, and free the buffer. */
+ total += __db_shsizeof(bhp);
+ __memp_bhfree(dbmp, bhp, 1);
+
+ /*
+ * Retry as soon as we've freed up sufficient space.  If we
+ * have to coalesce chunks of memory to satisfy the request, don't
+ * retry until it's likely (possible?) that we'll succeed.
+ */
+ if (total >= 3 * len)
+ goto alloc;
+
+ /* Restart the walk if we discarded the region lock. */
+ if (restart)
+ goto retry;
+ }
+ nomore = 1;
+ goto alloc;
+}
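Elsewhere in the pool code __memp_alloc() is invoked in two forms, and both
appear later in this patch (mp_fget.c and mp_fopen.c); the fragments below are
copied from those call sites with error handling reduced to a bare return, as
a usage sketch only:

    /*
     * Allocate a buffer header plus page space sized for mfp's pages
     * (the len argument is recomputed from mfp->stat.st_pagesize).
     */
    if ((ret = __memp_alloc(dbmp,
        &dbmp->reginfo[n_cache], mfp, 0, NULL, &bhp)) != 0)
            return (ret);

    /*
     * Allocate a raw chunk of a known size from the region, returning
     * its region offset as well as its address.
     */
    if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
        NULL, strlen(path) + 1, &mfp->path_off, &p)) != 0)
            return (ret);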
diff --git a/bdb/mp/mp_bh.c b/bdb/mp/mp_bh.c
new file mode 100644
index 00000000000..e802b165b2d
--- /dev/null
+++ b/bdb/mp/mp_bh.c
@@ -0,0 +1,662 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_bh.c,v 11.25 2001/01/10 04:50:53 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+#include "log.h"
+#include "db_page.h"
+
+static int __memp_upgrade __P((DB_MPOOL *, DB_MPOOLFILE *, MPOOLFILE *));
+
+/*
+ * __memp_bhwrite --
+ * Write the page associated with a given bucket header.
+ *
+ * PUBLIC: int __memp_bhwrite
+ * PUBLIC: __P((DB_MPOOL *, MPOOLFILE *, BH *, int *, int *));
+ */
+int
+__memp_bhwrite(dbmp, mfp, bhp, restartp, wrotep)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ BH *bhp;
+ int *restartp, *wrotep;
+{
+ DB_MPOOLFILE *dbmfp;
+ DB_MPREG *mpreg;
+ int incremented, ret;
+
+ if (restartp != NULL)
+ *restartp = 0;
+ if (wrotep != NULL)
+ *wrotep = 0;
+ incremented = 0;
+
+ /*
+ * If the file has been removed or is a closed temporary file, jump
+ * right ahead and pretend that we've found the file we want -- the
+ * page-write function knows how to handle the fact that we don't have
+ * (or need!) any real file descriptor information.
+ */
+ if (F_ISSET(mfp, MP_DEADFILE)) {
+ dbmfp = NULL;
+ goto found;
+ }
+
+ /*
+ * Walk the process' DB_MPOOLFILE list and find a file descriptor for
+ * the file. We also check that the descriptor is open for writing.
+ * If we find a descriptor on the file that's not open for writing, we
+ * try and upgrade it to make it writeable. If that fails, we're done.
+ */
+ MUTEX_THREAD_LOCK(dbmp->dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (dbmfp->mfp == mfp) {
+ if (F_ISSET(dbmfp, MP_READONLY) &&
+ __memp_upgrade(dbmp, dbmfp, mfp)) {
+ MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ return (0);
+ }
+
+ /*
+ * Increment the reference count -- see the comment in
+ * memp_fclose().
+ */
+ ++dbmfp->ref;
+ incremented = 1;
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ if (dbmfp != NULL)
+ goto found;
+
+ /*
+ * !!!
+ * Don't try to attach to temporary files. There are two problems in
+ * trying to do that. First, if we have different privileges than the
+ * process that "owns" the temporary file, we might create the backing
+ * disk file such that the owning process couldn't read/write its own
+ * buffers, e.g., memp_trickle() running as root creating a file owned
+ * as root, mode 600. Second, if the temporary file has already been
+ * created, we don't have any way of finding out what its real name is,
+ * and, even if we did, it was already unlinked (so that it won't be
+ * left if the process dies horribly). This decision causes a problem,
+ * however: if the temporary file consumes the entire buffer cache,
+ * and the owner doesn't flush the buffers to disk, we could end up
+ * with resource starvation, and the memp_trickle() thread couldn't do
+ * anything about it. That's a pretty unlikely scenario, though.
+ *
+ * Note that we should never get here when the temporary file
+ * in question has already been closed in another process, in which
+ * case it should be marked MP_DEADFILE.
+ */
+ if (F_ISSET(mfp, MP_TEMP)) {
+ DB_ASSERT(!F_ISSET(mfp, MP_DEADFILE));
+ return (0);
+ }
+
+ /*
+ * It's not a page from a file we've opened. If the file requires
+ * input/output processing, see if this process has ever registered
+ * information as to how to write this type of file. If not, there's
+ * nothing we can do.
+ */
+ if (mfp->ftype != 0) {
+ MUTEX_THREAD_LOCK(dbmp->dbenv, dbmp->mutexp);
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
+ if (mpreg->ftype == mfp->ftype)
+ break;
+ MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ if (mpreg == NULL)
+ return (0);
+ }
+
+ /*
+ * Try and open the file, attaching to the underlying shared area.
+ * Ignore any error, assume it's a permissions problem.
+ *
+ * XXX
+ * There's no negative cache, so we may repeatedly try and open files
+ * that we have previously tried (and failed) to open.
+ */
+ if (__memp_fopen(dbmp, mfp, R_ADDR(dbmp->reginfo, mfp->path_off),
+ 0, 0, mfp->stat.st_pagesize, 0, NULL, &dbmfp) != 0)
+ return (0);
+
+found: ret = __memp_pgwrite(dbmp, dbmfp, bhp, restartp, wrotep);
+
+ if (incremented) {
+ MUTEX_THREAD_LOCK(dbmp->dbenv, dbmp->mutexp);
+ --dbmfp->ref;
+ MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ }
+
+ return (ret);
+}
+
+/*
+ * __memp_pgread --
+ * Read a page from a file.
+ *
+ * PUBLIC: int __memp_pgread __P((DB_MPOOLFILE *, BH *, int));
+ */
+int
+__memp_pgread(dbmfp, bhp, can_create)
+ DB_MPOOLFILE *dbmfp;
+ BH *bhp;
+ int can_create;
+{
+ DB_IO db_io;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ size_t len, pagesize;
+ size_t nr;
+ int created, ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mfp = dbmfp->mfp;
+ pagesize = mfp->stat.st_pagesize;
+
+ F_SET(bhp, BH_LOCKED | BH_TRASH);
+ MUTEX_LOCK(dbenv, &bhp->mutex, dbenv->lockfhp);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Temporary files may not yet have been created.  We don't create
+ * them now; we create them when the pages have to be flushed.
+ */
+ nr = 0;
+ if (F_ISSET(&dbmfp->fh, DB_FH_VALID)) {
+ /*
+ * Ignore read errors if we have permission to create the page.
+ * Assume that the page doesn't exist, and that we'll create it
+ * when we write it out.
+ *
+ * XXX
+ * Theoretically, we could overwrite a page of data if it were
+ * possible for a file to be successfully opened for reading
+ * and then for the read to fail. Shouldn't ever happen, but
+ * it might be worth checking to see if the offset is past the
+ * known end-of-file.
+ */
+ db_io.fhp = &dbmfp->fh;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = pagesize;
+ db_io.pgno = bhp->pgno;
+ db_io.buf = bhp->buf;
+
+ ret = __os_io(dbenv, &db_io, DB_IO_READ, &nr);
+ } else
+ ret = 0;
+
+ created = 0;
+ if (nr < pagesize) {
+ if (can_create)
+ created = 1;
+ else {
+ /*
+ * If we had a short read, ret may be 0. This may not
+ * be an error -- in particular DB recovery processing
+ * may request pages that have never been written to
+ * disk, in which case we won't find the page. So, the
+ * caller must know how to handle the error.
+ */
+ if (ret == 0)
+ ret = EIO;
+ goto err;
+ }
+ }
+
+ /*
+ * Clear any bytes we didn't read that need to be cleared. If we're
+ * running in diagnostic mode, smash any bytes on the page that are
+ * unknown quantities for the caller.
+ */
+ if (nr != pagesize) {
+ len = mfp->clear_len == 0 ? pagesize : mfp->clear_len;
+ if (nr < len)
+ memset(bhp->buf + nr, 0, len - nr);
+#ifdef DIAGNOSTIC
+ if (nr > len)
+ len = nr;
+ if (len < pagesize)
+ memset(bhp->buf + len, CLEAR_BYTE, pagesize - len);
+#endif
+ }
+
+ /* Call any pgin function. */
+ ret = mfp->ftype == 0 ? 0 : __memp_pg(dbmfp, bhp, 1);
+
+ /* Unlock the buffer and reacquire the region lock. */
+err: MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * If no errors occurred, the data is now valid, clear the BH_TRASH
+ * flag; regardless, clear the lock bit and let other threads proceed.
+ */
+ F_CLR(bhp, BH_LOCKED);
+ if (ret == 0) {
+ F_CLR(bhp, BH_TRASH);
+
+ /* Update the statistics. */
+ if (created)
+ ++mfp->stat.st_page_create;
+ else
+ ++mfp->stat.st_page_in;
+ }
+
+ return (ret);
+}
+
+/*
+ * __memp_pgwrite --
+ * Write a page to a file.
+ *
+ * PUBLIC: int __memp_pgwrite
+ * PUBLIC: __P((DB_MPOOL *, DB_MPOOLFILE *, BH *, int *, int *));
+ */
+int
+__memp_pgwrite(dbmp, dbmfp, bhp, restartp, wrotep)
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ BH *bhp;
+ int *restartp, *wrotep;
+{
+ DB_ENV *dbenv;
+ DB_IO db_io;
+ DB_LSN lsn;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ size_t nw;
+ int callpgin, dosync, ret, syncfail;
+ const char *fail;
+
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+ mfp = dbmfp == NULL ? NULL : dbmfp->mfp;
+
+ if (restartp != NULL)
+ *restartp = 0;
+ if (wrotep != NULL)
+ *wrotep = 0;
+ callpgin = 0;
+
+ /*
+ * Check the dirty bit -- this buffer may have been written since we
+ * decided to write it.
+ */
+ if (!F_ISSET(bhp, BH_DIRTY)) {
+ if (wrotep != NULL)
+ *wrotep = 1;
+ return (0);
+ }
+
+ MUTEX_LOCK(dbenv, &bhp->mutex, dbenv->lockfhp);
+
+ /*
+ * If there were two writers, we may have just been waiting while the
+ * other writer completed I/O on this buffer. Check the dirty bit one
+ * more time.
+ */
+ if (!F_ISSET(bhp, BH_DIRTY)) {
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+
+ if (wrotep != NULL)
+ *wrotep = 1;
+ return (0);
+ }
+
+ F_SET(bhp, BH_LOCKED);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ if (restartp != NULL)
+ *restartp = 1;
+
+ /*
+ * It's possible that the underlying file doesn't exist, either
+ * because of an outright removal or because it was a temporary
+ * file that's been closed.
+ *
+ * !!!
+ * Once we pass this point, we know that dbmfp and mfp aren't NULL,
+ * and that we have a valid file reference.
+ */
+ if (mfp == NULL || F_ISSET(mfp, MP_DEADFILE))
+ goto file_dead;
+
+ /*
+ * Ensure the appropriate log records are on disk. If the page is
+ * being written as part of a sync operation, the flush has already
+ * been done, unless it was written by the application *after* the
+ * sync was scheduled.
+ */
+ if (LOGGING_ON(dbenv) &&
+ (!F_ISSET(bhp, BH_SYNC) || F_ISSET(bhp, BH_SYNC_LOGFLSH))) {
+ memcpy(&lsn, bhp->buf + mfp->lsn_off, sizeof(DB_LSN));
+ if ((ret = log_flush(dbenv, &lsn)) != 0)
+ goto err;
+ }
+ DB_ASSERT(!LOGGING_ON(dbenv) ||
+ log_compare(&((LOG *)((DB_LOG *)
+ dbenv->lg_handle)->reginfo.primary)->s_lsn, &LSN(bhp->buf)) > 0);
+
+ /*
+ * Call any pgout function. We set the callpgin flag so that we flag
+ * that the contents of the buffer will need to be passed through pgin
+ * before they are reused.
+ */
+ if (mfp->ftype == 0)
+ ret = 0;
+ else {
+ callpgin = 1;
+ if ((ret = __memp_pg(dbmfp, bhp, 0)) != 0)
+ goto err;
+ }
+
+ /* Temporary files may not yet have been created. */
+ if (!F_ISSET(&dbmfp->fh, DB_FH_VALID)) {
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ if (!F_ISSET(&dbmfp->fh, DB_FH_VALID) &&
+ ((ret = __db_appname(dbenv, DB_APP_TMP, NULL, NULL,
+ DB_OSO_CREATE | DB_OSO_EXCL | DB_OSO_TEMP,
+ &dbmfp->fh, NULL)) != 0 ||
+ !F_ISSET(&dbmfp->fh, DB_FH_VALID))) {
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ __db_err(dbenv,
+ "unable to create temporary backing file");
+ goto err;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ }
+
+ /* Write the page. */
+ db_io.fhp = &dbmfp->fh;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = mfp->stat.st_pagesize;
+ db_io.pgno = bhp->pgno;
+ db_io.buf = bhp->buf;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ ret = __db_panic(dbenv, ret);
+ fail = "write";
+ goto syserr;
+ }
+ if (nw != mfp->stat.st_pagesize) {
+ ret = EIO;
+ fail = "write";
+ goto syserr;
+ }
+
+file_dead:
+ /*
+ * !!!
+ * Once we pass this point, dbmfp and mfp may be NULL, we may not have
+ * a valid file reference.
+ *
+ * Unlock the buffer and reacquire the region lock.
+ */
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Clean up the flags based on a successful write.
+ *
+ * If we rewrote the page, it will need processing by the pgin
+ * routine before reuse.
+ */
+ if (callpgin)
+ F_SET(bhp, BH_CALLPGIN);
+ F_CLR(bhp, BH_DIRTY | BH_LOCKED);
+
+ /*
+ * If we write a buffer for which a checkpoint is waiting, update
+ * the count of pending buffers (both in the mpool as a whole and
+ * for this file). If the count for this file goes to zero, set a
+ * flag so we flush the writes.
+ */
+ dosync = 0;
+ if (F_ISSET(bhp, BH_SYNC)) {
+ F_CLR(bhp, BH_SYNC | BH_SYNC_LOGFLSH);
+
+ --mp->lsn_cnt;
+ if (mfp != NULL)
+ dosync = --mfp->lsn_cnt == 0 ? 1 : 0;
+ }
+
+ /* Update the page clean/dirty statistics. */
+ c_mp = BH_TO_CACHE(dbmp, bhp);
+ ++c_mp->stat.st_page_clean;
+ --c_mp->stat.st_page_dirty;
+
+ /* Update I/O statistics. */
+ if (mfp != NULL)
+ ++mfp->stat.st_page_out;
+
+ /*
+ * Do the sync after everything else has been updated, so any incoming
+ * checkpoint doesn't see inconsistent information.
+ *
+ * XXX:
+ * Don't lock the region around the sync, fsync(2) has no atomicity
+ * issues.
+ *
+ * XXX:
+ * We ignore errors from the sync -- it makes no sense to return an
+ * error to the calling process, so set a flag causing the checkpoint
+ * to be retried later. There is a possibility, of course, that a
+ * subsequent checkpoint was started and that we're going to force it
+ * to fail. That should be unlikely, and fixing it would be difficult.
+ */
+ if (dosync) {
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ syncfail = __os_fsync(dbenv, &dbmfp->fh) != 0;
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (syncfail)
+ F_SET(mp, MP_LSN_RETRY);
+ }
+
+ if (wrotep != NULL)
+ *wrotep = 1;
+
+ return (0);
+
+syserr: __db_err(dbenv, "%s: %s failed for page %lu",
+ __memp_fn(dbmfp), fail, (u_long)bhp->pgno);
+
+err: /* Unlock the buffer and reacquire the region lock. */
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Clean up the flags based on a failure.
+ *
+ * The page remains dirty but we remove our lock. If we rewrote the
+ * page, it will need processing by the pgin routine before reuse.
+ */
+ if (callpgin)
+ F_SET(bhp, BH_CALLPGIN);
+ F_CLR(bhp, BH_LOCKED);
+
+ return (ret);
+}
+
+/*
+ * __memp_pg --
+ * Call the pgin/pgout routine.
+ *
+ * PUBLIC: int __memp_pg __P((DB_MPOOLFILE *, BH *, int));
+ */
+int
+__memp_pg(dbmfp, bhp, is_pgin)
+ DB_MPOOLFILE *dbmfp;
+ BH *bhp;
+ int is_pgin;
+{
+ DBT dbt, *dbtp;
+ DB_MPOOL *dbmp;
+ DB_MPREG *mpreg;
+ MPOOLFILE *mfp;
+ int ftype, ret;
+
+ dbmp = dbmfp->dbmp;
+ mfp = dbmfp->mfp;
+
+ MUTEX_THREAD_LOCK(dbmp->dbenv, dbmp->mutexp);
+
+ ftype = mfp->ftype;
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q)) {
+ if (ftype != mpreg->ftype)
+ continue;
+ if (mfp->pgcookie_len == 0)
+ dbtp = NULL;
+ else {
+ dbt.size = mfp->pgcookie_len;
+ dbt.data = R_ADDR(dbmp->reginfo, mfp->pgcookie_off);
+ dbtp = &dbt;
+ }
+ MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+
+ if (is_pgin) {
+ if (mpreg->pgin != NULL &&
+ (ret = mpreg->pgin(dbmp->dbenv,
+ bhp->pgno, bhp->buf, dbtp)) != 0)
+ goto err;
+ } else
+ if (mpreg->pgout != NULL &&
+ (ret = mpreg->pgout(dbmp->dbenv,
+ bhp->pgno, bhp->buf, dbtp)) != 0)
+ goto err;
+ break;
+ }
+
+ if (mpreg == NULL)
+ MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+
+ return (0);
+
+err: MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ __db_err(dbmp->dbenv, "%s: %s failed for page %lu",
+ __memp_fn(dbmfp), is_pgin ? "pgin" : "pgout", (u_long)bhp->pgno);
+ return (ret);
+}
+
+/*
+ * __memp_bhfree --
+ * Free a bucket header and its referenced data.
+ *
+ * PUBLIC: void __memp_bhfree __P((DB_MPOOL *, BH *, int));
+ */
+void
+__memp_bhfree(dbmp, bhp, free_mem)
+ DB_MPOOL *dbmp;
+ BH *bhp;
+ int free_mem;
+{
+ DB_HASHTAB *dbht;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ int n_bucket, n_cache;
+
+ mp = dbmp->reginfo[0].primary;
+ c_mp = BH_TO_CACHE(dbmp, bhp);
+ n_cache = NCACHE(mp, bhp->pgno);
+ n_bucket = NBUCKET(c_mp, bhp->mf_offset, bhp->pgno);
+ dbht = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+
+ /* Delete the buffer header from the hash bucket queue. */
+ SH_TAILQ_REMOVE(&dbht[n_bucket], bhp, hq, __bh);
+
+ /* Delete the buffer header from the LRU queue. */
+ SH_TAILQ_REMOVE(&c_mp->bhq, bhp, q, __bh);
+
+ /* Clear the mutex this buffer recorded */
+ __db_shlocks_clear(&bhp->mutex, &dbmp->reginfo[n_cache],
+ (REGMAINT *)R_ADDR(&dbmp->reginfo[n_cache], mp->maint_off));
+ /*
+ * Find the underlying MPOOLFILE and decrement its reference count.
+ * If this is its last reference, remove it.
+ */
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ if (--mfp->block_cnt == 0 && mfp->mpf_cnt == 0)
+ __memp_mf_discard(dbmp, mfp);
+
+ /*
+ * If we're not reusing it immediately, free the buffer header
+ * and data for real.
+ */
+ if (free_mem) {
+ --c_mp->stat.st_page_clean;
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, bhp);
+ }
+}
+
+/*
+ * __memp_upgrade --
+ * Upgrade a file descriptor from readonly to readwrite.
+ */
+static int
+__memp_upgrade(dbmp, dbmfp, mfp)
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ MPOOLFILE *mfp;
+{
+ DB_FH fh;
+ int ret;
+ char *rpath;
+
+ /*
+ * !!!
+ * We expect the handle to already be locked.
+ */
+
+ /* Check to see if we've already upgraded. */
+ if (F_ISSET(dbmfp, MP_UPGRADE))
+ return (0);
+
+ /* Check to see if we've already failed. */
+ if (F_ISSET(dbmfp, MP_UPGRADE_FAIL))
+ return (1);
+
+ /*
+ * Calculate the real name for this file and try to open it read/write.
+ * We know we have a valid pathname for the file because it's the only
+ * way we could have gotten a file descriptor of any kind.
+ */
+ if ((ret = __db_appname(dbmp->dbenv, DB_APP_DATA,
+ NULL, R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) != 0)
+ return (ret);
+ if (__os_open(dbmp->dbenv, rpath, 0, 0, &fh) != 0) {
+ F_SET(dbmfp, MP_UPGRADE_FAIL);
+ ret = 1;
+ } else {
+ /* Swap the descriptors and set the upgrade flag. */
+ (void)__os_closehandle(&dbmfp->fh);
+ dbmfp->fh = fh;
+ F_SET(dbmfp, MP_UPGRADE);
+ ret = 0;
+ }
+ __os_freestr(rpath);
+ return (ret);
+}
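The pgin/pgout hooks dispatched by __memp_pg() above are per-file-type
conversion callbacks: as the calls show, each receives the environment, the
page number, the page address, and an optional cookie DBT.  A minimal pgin
callback is sketched below as an illustration only; the function name, the
convention that the cookie carries the page size, and the whole-word byte swap
are assumptions, not part of this file, and a matching pgout would apply the
inverse conversion before the page is written (registration itself goes
through the pool's memp_register() interface, which is outside this hunk):

    static int
    my_pgin(dbenv, pgno, pgaddr, pgcookie)
        DB_ENV *dbenv;
        db_pgno_t pgno;
        void *pgaddr;
        DBT *pgcookie;
    {
        u_int32_t *p, *end;
        size_t pagesize;

        /* dbenv and pgno are unused in this illustration. */

        /* Hypothetical convention: the registration cookie carries the page size. */
        pagesize = *(size_t *)pgcookie->data;

        /* Illustrative conversion only: swap every 32-bit word to host order. */
        for (p = pgaddr, end = p + pagesize / sizeof(u_int32_t); p < end; ++p)
            *p = (*p << 24) | ((*p & 0xff00) << 8) |
                ((*p >> 8) & 0xff00) | (*p >> 24);
        return (0);
    }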
diff --git a/bdb/mp/mp_fget.c b/bdb/mp/mp_fget.c
new file mode 100644
index 00000000000..1bff5e136ab
--- /dev/null
+++ b/bdb/mp/mp_fget.c
@@ -0,0 +1,417 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_fget.c,v 11.28 2001/01/10 04:50:53 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+/*
+ * memp_fget --
+ * Get a page from the file.
+ */
+int
+memp_fget(dbmfp, pgnoaddr, flags, addrp)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *pgnoaddr;
+ u_int32_t flags;
+ void *addrp;
+{
+ BH *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_HASHTAB *dbht;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ size_t n_bucket, n_cache, mf_offset;
+ u_int32_t st_hsearch;
+ int b_incr, first, ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+ mfp = dbmfp->mfp;
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_fget(dbmfp, pgnoaddr, flags, addrp));
+#endif
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Validate arguments.
+ *
+ * !!!
+ * Don't reject the DB_MPOOL_CREATE and DB_MPOOL_NEW flags for readonly
+ * files here; instead, create non-existent pages in readonly files
+ * later if the flags are set.  The reason is that the hash access method
+ * wants to get empty pages that don't really exist in readonly files.
+ * The only alternative is for hash to write the last "bucket" all the
+ * time, which we don't want to do because one of our big goals in life
+ * is to keep database files small. It's sleazy as hell, but we catch
+ * any attempt to actually write the file in memp_fput().
+ */
+#define OKFLAGS \
+ (DB_MPOOL_CREATE | DB_MPOOL_LAST | \
+ DB_MPOOL_NEW | DB_MPOOL_NEW_GROUP | DB_MPOOL_EXTENT)
+ if (flags != 0) {
+ if ((ret = __db_fchk(dbenv, "memp_fget", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ switch (flags & ~DB_MPOOL_EXTENT) {
+ case DB_MPOOL_CREATE:
+ case DB_MPOOL_LAST:
+ case DB_MPOOL_NEW:
+ case DB_MPOOL_NEW_GROUP:
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbenv, "memp_fget", 1));
+ }
+ }
+
+#ifdef DIAGNOSTIC
+ /*
+ * XXX
+ * We want to switch threads as often as possible. Yield every time
+ * we get a new page to ensure contention.
+ */
+ if (DB_GLOBAL(db_pageyield))
+ __os_yield(dbenv, 1);
+#endif
+
+ /* Initialize remaining local variables. */
+ mf_offset = R_OFFSET(dbmp->reginfo, mfp);
+ bhp = NULL;
+ st_hsearch = 0;
+ b_incr = ret = 0;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Check for the new, last or last + 1 page requests.
+ *
+ * Examine and update the file's last_pgno value. We don't care if
+ * the last_pgno value immediately changes due to another thread --
+ * at this instant in time, the value is correct. We do increment the
+ * current last_pgno value if the thread is asking for a new page,
+ * however, to ensure that two threads creating pages don't get the
+ * same one.
+ *
+ * If we create a page, there is the potential that a page after it
+ * in the file will be written before the created page itself is
+ * written.  Recovery depends on pages that are "created" in the file
+ * by writes of subsequent pages being zeroed out, not filled with
+ * random garbage.  Ensure that the OS agrees.
+ *
+ * !!!
+ * DB_MPOOL_NEW_GROUP is undocumented -- the hash access method needs
+ * to allocate contiguous groups of pages in order to do subdatabases.
+ * We return the first page in the group, but the caller must put an
+ * LSN on the *last* page and write it, otherwise after a crash we may
+ * not create all of the pages we need to create.
+ */
+ if (LF_ISSET(DB_MPOOL_LAST | DB_MPOOL_NEW | DB_MPOOL_NEW_GROUP)) {
+ if (LF_ISSET(DB_MPOOL_NEW)) {
+ if (F_ISSET(&dbmfp->fh, DB_FH_VALID) && (ret =
+ __os_fpinit(dbenv, &dbmfp->fh, mfp->last_pgno + 1,
+ 1, mfp->stat.st_pagesize)) != 0) {
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (ret);
+ }
+ ++mfp->last_pgno;
+ }
+ if (LF_ISSET(DB_MPOOL_NEW_GROUP)) {
+ if (F_ISSET(&dbmfp->fh, DB_FH_VALID) && (ret =
+ __os_fpinit(dbenv, &dbmfp->fh, mfp->last_pgno + 1,
+ (int)*pgnoaddr, mfp->stat.st_pagesize)) != 0) {
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (ret);
+ }
+ mfp->last_pgno += *pgnoaddr;
+ }
+ *pgnoaddr = mfp->last_pgno;
+ }
+
+ /*
+ * Determine the hash bucket where this page will live, and get local
+ * pointers to the cache and its hash table.
+ */
+ n_cache = NCACHE(mp, *pgnoaddr);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ n_bucket = NBUCKET(c_mp, mf_offset, *pgnoaddr);
+ dbht = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+
+ if (LF_ISSET(DB_MPOOL_NEW | DB_MPOOL_NEW_GROUP))
+ goto alloc;
+
+ /*
+ * If mmap'ing the file and the page is not past the end of the file,
+ * just return a pointer.
+ *
+ * The page may be past the end of the file, so check the page number
+ * argument against the original length of the file. If we previously
+ * returned pages past the original end of the file, last_pgno will
+ * have been updated to match the "new" end of the file, and checking
+ * against it would return pointers past the end of the mmap'd region.
+ *
+ * If another process has opened the file for writing since we mmap'd
+ * it, we will start playing the game by their rules, i.e. everything
+ * goes through the cache. All pages previously returned will be safe,
+ * as long as the correct locking protocol was observed.
+ *
+ * XXX
+ * We don't discard the map because we don't know when all of the
+ * pages will have been discarded from the process' address space.
+ * It would be possible to do so by reference counting the open
+ * pages from the mmap, but it's unclear to me that it's worth it.
+ */
+ if (dbmfp->addr != NULL && F_ISSET(mfp, MP_CAN_MMAP)) {
+ if (*pgnoaddr > mfp->orig_last_pgno) {
+ /*
+ * !!!
+ * See the comment above about non-existent pages and
+ * the hash access method.
+ */
+ if (!LF_ISSET(DB_MPOOL_CREATE)) {
+ if (!LF_ISSET(DB_MPOOL_EXTENT))
+ __db_err(dbenv,
+ "%s: page %lu doesn't exist",
+ __memp_fn(dbmfp), (u_long)*pgnoaddr);
+ ret = EINVAL;
+ goto err;
+ }
+ } else {
+ *(void **)addrp =
+ R_ADDR(dbmfp, *pgnoaddr * mfp->stat.st_pagesize);
+ ++mfp->stat.st_map;
+ goto done;
+ }
+ }
+
+ /* Search the hash chain for the page. */
+ for (bhp = SH_TAILQ_FIRST(&dbht[n_bucket], __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
+ ++st_hsearch;
+ if (bhp->pgno != *pgnoaddr || bhp->mf_offset != mf_offset)
+ continue;
+
+ /* Guard against reference count overflow. */
+ if (bhp->ref == UINT16_T_MAX) {
+ __db_err(dbenv,
+ "%s: page %lu: reference count overflow",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Increment the reference count. We may discard the region
+ * lock as we evaluate and/or read the buffer, so we need to
+ * ensure that it doesn't move and that its contents remain
+ * unchanged.
+ */
+ ++bhp->ref;
+ b_incr = 1;
+
+ /*
+ * Any buffer we find might be trouble.
+ *
+ * BH_LOCKED --
+ * I/O is in progress. Because we've incremented the buffer
+ * reference count, we know the buffer can't move. Unlock
+ * the region lock, wait for the I/O to complete, and reacquire
+ * the region.
+ */
+ for (first = 1; F_ISSET(bhp, BH_LOCKED); first = 0) {
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Explicitly yield the processor if it's not the first
+ * pass through this loop -- if we don't, we might end
+ * up running to the end of our CPU quantum as we will
+ * simply be swapping between the two locks.
+ */
+ if (!first)
+ __os_yield(dbenv, 1);
+
+ MUTEX_LOCK(dbenv, &bhp->mutex, dbenv->lockfhp);
+ /* Wait for I/O to finish... */
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ R_LOCK(dbenv, dbmp->reginfo);
+ }
+
+ /*
+ * BH_TRASH --
+ * The contents of the buffer are garbage. Shouldn't happen,
+ * and this read is likely to fail, but might as well try.
+ */
+ if (F_ISSET(bhp, BH_TRASH))
+ goto reread;
+
+ /*
+ * BH_CALLPGIN --
+ * The buffer was converted so it could be written, and the
+ * contents need to be converted again.
+ */
+ if (F_ISSET(bhp, BH_CALLPGIN)) {
+ if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
+ goto err;
+ F_CLR(bhp, BH_CALLPGIN);
+ }
+
+ ++mfp->stat.st_cache_hit;
+ *(void **)addrp = bhp->buf;
+ goto done;
+ }
+
+alloc: /* Allocate new buffer header and data space. */
+ if ((ret = __memp_alloc(dbmp,
+ &dbmp->reginfo[n_cache], mfp, 0, NULL, &bhp)) != 0)
+ goto err;
+
+ ++c_mp->stat.st_page_clean;
+
+ /*
+ * Initialize the BH fields so that we can call the __memp_bhfree
+ * routine if an error occurs.
+ */
+ memset(bhp, 0, sizeof(BH));
+ bhp->ref = 1;
+ bhp->pgno = *pgnoaddr;
+ bhp->mf_offset = mf_offset;
+
+ /* Increment the count of buffers referenced by this MPOOLFILE. */
+ ++mfp->block_cnt;
+
+ /*
+ * Prepend the bucket header to the head of the appropriate MPOOL
+ * bucket hash list. Append the bucket header to the tail of the
+ * MPOOL LRU chain.
+ */
+ SH_TAILQ_INSERT_HEAD(&dbht[n_bucket], bhp, hq, __bh);
+ SH_TAILQ_INSERT_TAIL(&c_mp->bhq, bhp, q);
+
+#ifdef DIAGNOSTIC
+ if ((db_alignp_t)bhp->buf & (sizeof(size_t) - 1)) {
+ __db_err(dbenv, "Internal error: BH data NOT size_t aligned.");
+ ret = EINVAL;
+ __memp_bhfree(dbmp, bhp, 1);
+ goto err;
+ }
+#endif
+
+ if ((ret = __db_shmutex_init(dbenv, &bhp->mutex,
+ R_OFFSET(dbmp->reginfo, &bhp->mutex) + DB_FCNTL_OFF_MPOOL,
+ 0, &dbmp->reginfo[n_cache],
+ (REGMAINT *)R_ADDR(&dbmp->reginfo[n_cache], c_mp->maint_off)))
+ != 0) {
+ __memp_bhfree(dbmp, bhp, 1);
+ goto err;
+ }
+
+ /*
+ * If we created the page, zero it out and continue.
+ *
+ * !!!
+ * Note: DB_MPOOL_NEW specifically doesn't call the pgin function.
+ * If DB_MPOOL_CREATE is used, then the application's pgin function
+ * has to be able to handle pages of 0's -- if it uses DB_MPOOL_NEW,
+ * it can detect all of its page creates, and not bother.
+ *
+ * If we're running in diagnostic mode, smash any bytes on the
+ * page that are unknown quantities for the caller.
+ *
+ * Otherwise, read the page into memory, optionally creating it if
+ * DB_MPOOL_CREATE is set.
+ */
+ if (LF_ISSET(DB_MPOOL_NEW | DB_MPOOL_NEW_GROUP)) {
+ if (mfp->clear_len == 0)
+ memset(bhp->buf, 0, mfp->stat.st_pagesize);
+ else {
+ memset(bhp->buf, 0, mfp->clear_len);
+#ifdef DIAGNOSTIC
+ memset(bhp->buf + mfp->clear_len, CLEAR_BYTE,
+ mfp->stat.st_pagesize - mfp->clear_len);
+#endif
+ }
+
+ ++mfp->stat.st_page_create;
+ } else {
+ /*
+ * It's possible for the read function to fail, which means
+ * that we fail as well. Note, the __memp_pgread() function
+ * discards the region lock, so the buffer must be pinned
+ * down so that it cannot move and its contents are unchanged.
+ */
+reread: if ((ret = __memp_pgread(dbmfp,
+ bhp, LF_ISSET(DB_MPOOL_CREATE|DB_MPOOL_EXTENT))) != 0) {
+ /*
+ * !!!
+ * Discard the buffer unless another thread is waiting
+ * on our I/O to complete. Regardless, the header has
+ * the BH_TRASH flag set.
+ */
+ if (bhp->ref == 1)
+ __memp_bhfree(dbmp, bhp, 1);
+ goto err;
+ }
+
+ ++mfp->stat.st_cache_miss;
+ }
+
+ /*
+ * If we're returning a page after our current notion of the last-page,
+ * update our information. Note, there's no way to un-instantiate this
+ * page, it's going to exist whether it's returned to us dirty or not.
+ */
+ if (bhp->pgno > mfp->last_pgno)
+ mfp->last_pgno = bhp->pgno;
+
+ *(void **)addrp = bhp->buf;
+
+done: /* Update the chain search statistics. */
+ if (st_hsearch) {
+ ++c_mp->stat.st_hash_searches;
+ if (st_hsearch > c_mp->stat.st_hash_longest)
+ c_mp->stat.st_hash_longest = st_hsearch;
+ c_mp->stat.st_hash_examined += st_hsearch;
+ }
+
+ ++dbmfp->pinref;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ return (0);
+
+err: /* Discard our reference. */
+ if (b_incr)
+ --bhp->ref;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ *(void **)addrp = NULL;
+ return (ret);
+}
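From the caller's side, every successful memp_fget() pins a page in the cache
(note the ++dbmfp->pinref above), and the pin must eventually be dropped with
the companion memp_fput() call, which is not part of this hunk.  A minimal
sketch follows, assuming an already-opened DB_MPOOLFILE and its page size; the
helper name touch_page and its arguments are illustrative:

    #include <string.h>
    #include "db.h"

    static int
    touch_page(dbmfp, pagesize)
        DB_MPOOLFILE *dbmfp;
        size_t pagesize;
    {
        db_pgno_t pgno;
        void *page;
        int ret;

        /* Fetch (or create) page 0; on success the page is pinned. */
        pgno = 0;
        if ((ret = memp_fget(dbmfp, &pgno, DB_MPOOL_CREATE, &page)) != 0)
            return (ret);

        /* Modify the page in place... */
        memset(page, 0, pagesize);

        /* ...then release the pin, marking the page dirty. */
        return (memp_fput(dbmfp, page, DB_MPOOL_DIRTY));
    }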
diff --git a/bdb/mp/mp_fopen.c b/bdb/mp/mp_fopen.c
new file mode 100644
index 00000000000..3611ded18f4
--- /dev/null
+++ b/bdb/mp/mp_fopen.c
@@ -0,0 +1,756 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_fopen.c,v 11.41 2001/01/10 04:50:53 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __memp_mf_open __P((DB_MPOOL *, const char *,
+ size_t, db_pgno_t, DB_MPOOL_FINFO *, u_int32_t, MPOOLFILE **));
+
+/*
+ * MEMP_FREMOVE --
+ * Discard an MPOOLFILE and any buffers it references: update the flags
+ * so we never try to write buffers associated with the file, nor can we
+ * find it when looking for files to join.  In addition, clear the ftype
+ * field: there's no reason to post-process pages, since they can be
+ * discarded by any thread.
+ */
+#define MEMP_FREMOVE(mfp) { \
+ mfp->ftype = 0; \
+ F_SET(mfp, MP_DEADFILE); \
+}
+
+/*
+ * memp_fopen --
+ * Open a backing file for the memory pool.
+ */
+int
+memp_fopen(dbenv, path, flags, mode, pagesize, finfop, retp)
+ DB_ENV *dbenv;
+ const char *path;
+ u_int32_t flags;
+ int mode;
+ size_t pagesize;
+ DB_MPOOL_FINFO *finfop;
+ DB_MPOOLFILE **retp;
+{
+ DB_MPOOL *dbmp;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_fopen(dbenv, path, flags,
+ mode, pagesize, finfop, retp));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "memp_fopen", flags,
+ DB_CREATE |
+ DB_NOMMAP | DB_ODDFILESIZE | DB_RDONLY | DB_TRUNCATE)) != 0)
+ return (ret);
+
+ /* Require a non-zero pagesize. */
+ if (pagesize == 0 ||
+ (finfop != NULL && finfop->clear_len > pagesize)) {
+ __db_err(dbenv, "memp_fopen: illegal page size.");
+ return (EINVAL);
+ }
+
+ return (__memp_fopen(dbmp,
+ NULL, path, flags, mode, pagesize, 1, finfop, retp));
+}
+
+/*
+ * __memp_set_unlink -- set unlink on last close flag.
+ *
+ * PUBLIC: void __memp_set_unlink __P((DB_MPOOLFILE *));
+ */
+void
+__memp_set_unlink(dbmpf)
+ DB_MPOOLFILE *dbmpf;
+{
+ DB_MPOOL *dbmp;
+ dbmp = dbmpf->dbmp;
+
+ R_LOCK(dbmp->dbenv, dbmp->reginfo);
+ F_SET(dbmpf->mfp, MP_UNLINK);
+ R_UNLOCK(dbmp->dbenv, dbmp->reginfo);
+}
+
+/*
+ * __memp_clear_unlink -- clear unlink on last close flag.
+ *
+ * PUBLIC: void __memp_clear_unlink __P((DB_MPOOLFILE *));
+ */
+void
+__memp_clear_unlink(dbmpf)
+ DB_MPOOLFILE *dbmpf;
+{
+ DB_MPOOL *dbmp;
+ dbmp = dbmpf->dbmp;
+
+ /*
+ * This bit is protected in the queue code because the metapage
+ * is locked, so we can avoid getting the region lock.
+ * If this gets used from anywhere other than the queue code, we cannot.
+ */
+ if (!F_ISSET(dbmpf->mfp, MP_UNLINK))
+ return;
+ R_LOCK(dbmp->dbenv, dbmp->reginfo);
+ F_CLR(dbmpf->mfp, MP_UNLINK);
+ R_UNLOCK(dbmp->dbenv, dbmp->reginfo);
+}
+
+/*
+ * __memp_fopen --
+ * Open a backing file for the memory pool; internal version.
+ *
+ * PUBLIC: int __memp_fopen __P((DB_MPOOL *, MPOOLFILE *, const char *,
+ * PUBLIC: u_int32_t, int, size_t, int, DB_MPOOL_FINFO *, DB_MPOOLFILE **));
+ */
+int
+__memp_fopen(dbmp, mfp, path, flags, mode, pagesize, needlock, finfop, retp)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ const char *path;
+ u_int32_t flags;
+ int mode, needlock;
+ size_t pagesize;
+ DB_MPOOL_FINFO *finfop;
+ DB_MPOOLFILE **retp;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ DB_MPOOL_FINFO finfo;
+ db_pgno_t last_pgno;
+ size_t maxmap;
+ u_int32_t mbytes, bytes, oflags;
+ int ret;
+ u_int8_t idbuf[DB_FILE_ID_LEN];
+ char *rpath;
+
+ dbenv = dbmp->dbenv;
+ ret = 0;
+ rpath = NULL;
+
+ /*
+ * If mfp is provided, we take the DB_MPOOL_FINFO information from
+ * the mfp.  We don't bother initializing everything, because some
+ * of the fields are expensive to acquire.  If no mfp is provided and
+ * the finfop argument is NULL, we default the values.
+ */
+ if (finfop == NULL) {
+ memset(&finfo, 0, sizeof(finfo));
+ if (mfp != NULL) {
+ finfo.ftype = mfp->ftype;
+ finfo.pgcookie = NULL;
+ finfo.fileid = NULL;
+ finfo.lsn_offset = mfp->lsn_off;
+ finfo.clear_len = mfp->clear_len;
+ } else {
+ finfo.ftype = 0;
+ finfo.pgcookie = NULL;
+ finfo.fileid = NULL;
+ finfo.lsn_offset = -1;
+ finfo.clear_len = 0;
+ }
+ finfop = &finfo;
+ }
+
+ /* Allocate and initialize the per-process structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MPOOLFILE), &dbmfp)) != 0)
+ return (ret);
+ dbmfp->dbmp = dbmp;
+ dbmfp->ref = 1;
+ if (LF_ISSET(DB_RDONLY))
+ F_SET(dbmfp, MP_READONLY);
+
+ if (path == NULL) {
+ if (LF_ISSET(DB_RDONLY)) {
+ __db_err(dbenv,
+ "memp_fopen: temporary files can't be readonly");
+ ret = EINVAL;
+ goto err;
+ }
+ last_pgno = 0;
+ } else {
+ /* Get the real name for this file and open it. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, path, 0, NULL, &rpath)) != 0)
+ goto err;
+ oflags = 0;
+ if (LF_ISSET(DB_CREATE))
+ oflags |= DB_OSO_CREATE;
+ if (LF_ISSET(DB_RDONLY))
+ oflags |= DB_OSO_RDONLY;
+ if ((ret =
+ __os_open(dbenv, rpath, oflags, mode, &dbmfp->fh)) != 0) {
+ if (!LF_ISSET(DB_EXTENT))
+ __db_err(dbenv,
+ "%s: %s", rpath, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * Don't permit files that aren't a multiple of the pagesize,
+ * and find the number of the last page in the file, all the
+ * time being careful not to overflow 32 bits.
+ *
+ * !!!
+ * We can't use off_t's here, or in any code in the mainline
+ * library for that matter. (We have to use them in the os
+ * stubs, of course, as there are system calls that take them
+ * as arguments.) The reason is that some customers build in
+ * environments where an off_t is 32-bits, but still run where
+ * offsets are 64-bits, and they pay us a lot of money.
+ */
+ if ((ret = __os_ioinfo(dbenv, rpath,
+ &dbmfp->fh, &mbytes, &bytes, NULL)) != 0) {
+ __db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * If we're doing a verify, we might have to cope with
+ * a truncated file; if the file size is not a multiple
+ * of the page size, round down to a page--we'll
+ * take care of the partial page outside the memp system.
+ */
+
+ /* Page sizes have to be a power-of-two, ignore mbytes. */
+ if (bytes % pagesize != 0) {
+ if (LF_ISSET(DB_ODDFILESIZE))
+ /*
+ * If we're doing a verify, we might
+ * have to cope with a truncated file;
+ * round down, we'll worry about the partial
+ * page outside the memp system.
+ */
+ bytes -= (bytes % pagesize);
+ else {
+ __db_err(dbenv,
+ "%s: file size not a multiple of the pagesize",
+ rpath);
+ ret = EINVAL;
+ goto err;
+ }
+ }
+
+ last_pgno = mbytes * (MEGABYTE / pagesize);
+ last_pgno += bytes / pagesize;
+
+ /* Correction: page numbers are zero-based, not 1-based. */
+ if (last_pgno != 0)
+ --last_pgno;
+
+ /*
+ * Get the file id if we weren't given one. Generated file id's
+ * don't use timestamps, otherwise there'd be no chance of any
+ * other process joining the party.
+ */
+ if (finfop->fileid == NULL) {
+ if ((ret = __os_fileid(dbenv, rpath, 0, idbuf)) != 0)
+ goto err;
+ finfop->fileid = idbuf;
+ }
+ }
+
+ /*
+ * If we weren't provided an underlying shared object to join with,
+ * find/allocate the shared file objects.  Also allocate space for
+ * the per-process thread lock.
+ */
+ if (needlock)
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (mfp == NULL)
+ ret = __memp_mf_open(
+ dbmp, path, pagesize, last_pgno, finfop, flags, &mfp);
+ else {
+ ++mfp->mpf_cnt;
+ ret = 0;
+ }
+ if (needlock)
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
+
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ if ((ret = __db_mutex_alloc(
+ dbenv, dbmp->reginfo, &dbmfp->mutexp)) != 0)
+ goto err;
+ if ((ret = __db_mutex_init(
+ dbenv, dbmfp->mutexp, 0, MUTEX_THREAD)) != 0)
+ goto err;
+
+ /* XXX: KEITH: CLOSE THE FILE ON FAILURE? */
+ }
+
+ dbmfp->mfp = mfp;
+
+ /*
+ * If a file:
+ * + is read-only
+ * + isn't temporary
+ * + doesn't require any pgin/pgout support
+ * + the DB_NOMMAP flag wasn't set (in either the file open or
+ * the environment in which it was opened)
+ * + and is less than mp_mmapsize bytes in size
+ *
+ * we can mmap it instead of reading/writing buffers. Don't do error
+ * checking based on the mmap call failure. We want to do normal I/O
+ * on the file if the reason we failed was because the file was on an
+ * NFS mounted partition, and we can fail in buffer I/O just as easily
+ * as here.
+ *
+ * XXX
+ * We'd like to test to see if the file is too big to mmap. Since we
+ * don't know what size or type off_t's or size_t's are, or the largest
+ * unsigned integral type is, or what random insanity the local C
+ * compiler will perpetrate, doing the comparison in a portable way is
+ * flatly impossible. Hope that mmap fails if the file is too large.
+ */
+#define DB_MAXMMAPSIZE (10 * 1024 * 1024) /* 10 Mb. */
+ if (F_ISSET(mfp, MP_CAN_MMAP)) {
+ if (!F_ISSET(dbmfp, MP_READONLY))
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (path == NULL)
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (finfop->ftype != 0)
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (LF_ISSET(DB_NOMMAP) || F_ISSET(dbenv, DB_ENV_NOMMAP))
+ F_CLR(mfp, MP_CAN_MMAP);
+ maxmap = dbenv->mp_mmapsize == 0 ?
+ DB_MAXMMAPSIZE : dbenv->mp_mmapsize;
+ if (mbytes > maxmap / MEGABYTE ||
+ (mbytes == maxmap / MEGABYTE && bytes >= maxmap % MEGABYTE))
+ F_CLR(mfp, MP_CAN_MMAP);
+ }
+ dbmfp->addr = NULL;
+ if (F_ISSET(mfp, MP_CAN_MMAP)) {
+ dbmfp->len = (size_t)mbytes * MEGABYTE + bytes;
+ if (__os_mapfile(dbenv, rpath,
+ &dbmfp->fh, dbmfp->len, 1, &dbmfp->addr) != 0) {
+ dbmfp->addr = NULL;
+ F_CLR(mfp, MP_CAN_MMAP);
+ }
+ }
+ if (rpath != NULL)
+ __os_freestr(rpath);
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ TAILQ_INSERT_TAIL(&dbmp->dbmfq, dbmfp, q);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ *retp = dbmfp;
+ return (0);
+
+err: /*
+ * Note that we do not have to free the thread mutex, because we
+ * never get to here after we have successfully allocated it.
+ */
+ if (rpath != NULL)
+ __os_freestr(rpath);
+ if (F_ISSET(&dbmfp->fh, DB_FH_VALID))
+ (void)__os_closehandle(&dbmfp->fh);
+ if (dbmfp != NULL) {
+ if (dbmfp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmfp->mutexp);
+ __os_free(dbmfp, sizeof(DB_MPOOLFILE));
+ }
+ return (ret);
+}
+
+/*
+ * __memp_mf_open --
+ * Open an MPOOLFILE.
+ */
+static int
+__memp_mf_open(dbmp, path, pagesize, last_pgno, finfop, flags, retp)
+ DB_MPOOL *dbmp;
+ const char *path;
+ size_t pagesize;
+ db_pgno_t last_pgno;
+ DB_MPOOL_FINFO *finfop;
+ u_int32_t flags;
+ MPOOLFILE **retp;
+{
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ int ret;
+ void *p;
+
+#define ISTEMPORARY (path == NULL)
+
+ /*
+ * If not creating a temporary file, walk the list of MPOOLFILE's,
+ * looking for a matching file. Files backed by temporary files
+ * or previously removed files can't match.
+ *
+ * DB_TRUNCATE support.
+ *
+ * The fileID is a filesystem unique number (e.g., a UNIX dev/inode
+ * pair) plus a timestamp. If files are removed and created in less
+ * than a second, the fileID can be repeated. The problem with
+ * repetition happens when the file that previously had the fileID
+ * value still has pages in the pool, since we don't want to use them
+ * to satisfy requests for the new file.
+ *
+ * Because the DB_TRUNCATE flag reuses the dev/inode pair, repeated
+ * opens with that flag set guarantees matching fileIDs when the
+ * machine can open a file and then re-open with truncate within a
+ * second. For this reason, we pass that flag down, and, if we find
+ * a matching entry, we ensure that it's never found again, and we
+ * create a new entry for the current request.
+ */
+ if (!ISTEMPORARY) {
+ mp = dbmp->reginfo[0].primary;
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ if (F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+ if (memcmp(finfop->fileid, R_ADDR(dbmp->reginfo,
+ mfp->fileid_off), DB_FILE_ID_LEN) == 0) {
+ if (LF_ISSET(DB_TRUNCATE)) {
+ MEMP_FREMOVE(mfp);
+ continue;
+ }
+ if (finfop->clear_len != mfp->clear_len ||
+ pagesize != mfp->stat.st_pagesize) {
+ __db_err(dbmp->dbenv,
+ "%s: page size or clear length changed",
+ path);
+ return (EINVAL);
+ }
+
+ /*
+ * It's possible that our needs for pre- and
+ * post-processing are changing. For example,
+ * an application created a hash subdatabase
+ * in a database that was previously all btree.
+ */
+ if (finfop->ftype != 0)
+ mfp->ftype = finfop->ftype;
+
+ ++mfp->mpf_cnt;
+
+ *retp = mfp;
+ return (0);
+ }
+ }
+ }
+
+ /* Allocate a new MPOOLFILE. */
+ if ((ret = __memp_alloc(
+ dbmp, dbmp->reginfo, NULL, sizeof(MPOOLFILE), NULL, &mfp)) != 0)
+ goto mem_err;
+ *retp = mfp;
+
+ /* Initialize the structure. */
+ memset(mfp, 0, sizeof(MPOOLFILE));
+ mfp->mpf_cnt = 1;
+ mfp->ftype = finfop->ftype;
+ mfp->lsn_off = finfop->lsn_offset;
+ mfp->clear_len = finfop->clear_len;
+
+ /*
+ * If the user specifies DB_MPOOL_LAST or DB_MPOOL_NEW on a memp_fget,
+ * we have to know the last page in the file. Figure it out and save
+ * it away.
+ */
+ mfp->stat.st_pagesize = pagesize;
+ mfp->orig_last_pgno = mfp->last_pgno = last_pgno;
+
+ if (ISTEMPORARY)
+ F_SET(mfp, MP_TEMP);
+ else {
+ /* Copy the file path into shared memory. */
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, strlen(path) + 1, &mfp->path_off, &p)) != 0)
+ goto err;
+ memcpy(p, path, strlen(path) + 1);
+
+ /* Copy the file identification string into shared memory. */
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, DB_FILE_ID_LEN, &mfp->fileid_off, &p)) != 0)
+ goto err;
+ memcpy(p, finfop->fileid, DB_FILE_ID_LEN);
+
+ F_SET(mfp, MP_CAN_MMAP);
+ }
+
+ /* Copy the page cookie into shared memory. */
+ if (finfop->pgcookie == NULL || finfop->pgcookie->size == 0) {
+ mfp->pgcookie_len = 0;
+ mfp->pgcookie_off = 0;
+ } else {
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, finfop->pgcookie->size, &mfp->pgcookie_off, &p)) != 0)
+ goto err;
+ memcpy(p, finfop->pgcookie->data, finfop->pgcookie->size);
+ mfp->pgcookie_len = finfop->pgcookie->size;
+ }
+
+ /* Prepend the MPOOLFILE to the list of MPOOLFILE's. */
+ mp = dbmp->reginfo[0].primary;
+ SH_TAILQ_INSERT_HEAD(&mp->mpfq, mfp, q, __mpoolfile);
+
+ if (0) {
+err: if (mfp->path_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->path_off));
+ if (mfp->fileid_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->fileid_off));
+ if (mfp != NULL)
+ __db_shalloc_free(dbmp->reginfo[0].addr, mfp);
+mem_err: __db_err(dbmp->dbenv,
+ "Unable to allocate memory for mpool file");
+ }
+ return (ret);
+}
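
The open path above identifies an existing file in the pool by walking the shared MPOOLFILE list and comparing DB_FILE_ID_LEN bytes of file ID, skipping dead and temporary entries. A standalone sketch of that matching idea follows; the types and the FILE_ID_LEN constant are simplified stand-ins, not the real shared-memory layout.

    #include <stdio.h>
    #include <string.h>

    #define FILE_ID_LEN 20                  /* stand-in for DB_FILE_ID_LEN */

    struct mpf {                            /* stand-in for MPOOLFILE */
        unsigned char fileid[FILE_ID_LEN];
        int dead;                           /* stand-in for MP_DEADFILE/MP_TEMP */
        struct mpf *next;
    };

    /* Return the first live entry whose file ID matches, or NULL. */
    static struct mpf *
    mpf_find(struct mpf *head, const unsigned char *fileid)
    {
        struct mpf *p;

        for (p = head; p != NULL; p = p->next) {
            if (p->dead)
                continue;
            if (memcmp(p->fileid, fileid, FILE_ID_LEN) == 0)
                return (p);
        }
        return (NULL);
    }

    int
    main(void)
    {
        unsigned char id[FILE_ID_LEN] = { 1, 2, 3 };
        struct mpf a = { { 1, 2, 3 }, 0, NULL };

        printf("match: %s\n", mpf_find(&a, id) != NULL ? "yes" : "no");
        return (0);
    }
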
+
+/*
+ * memp_fclose --
+ * Close a backing file for the memory pool.
+ */
+int
+memp_fclose(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ char *rpath;
+ int ret, t_ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ ret = 0;
+
+ PANIC_CHECK(dbenv);
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_fclose(dbmfp));
+#endif
+
+ /*
+ * Remove the DB_MPOOLFILE from the queue. This has to happen before
+ * we perform any action that can fail, otherwise __memp_close may
+ * loop infinitely when calling us to discard all of the DB_MPOOLFILEs.
+ */
+ for (;;) {
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+
+ /*
+ * We have to reference count DB_MPOOLFILE structures as other
+ * threads may be using them. The problem only happens if the
+ * application makes a bad design choice. Here's the path:
+ *
+ * Thread A opens a database.
+ * Thread B uses thread A's DB_MPOOLFILE to write a buffer
+ * in order to free up memory in the mpool cache.
+ * Thread A closes the database while thread B is using the
+ * DB_MPOOLFILE structure.
+ *
+ * By opening all databases before creating the threads, and
+ * closing them after the threads have exited, applications
+ * get better performance and avoid the problem path entirely.
+ *
+ * Regardless, holding the DB_MPOOLFILE to flush a dirty buffer
+ * is a short-term lock, even in the worst case, since we had better
+ * be the only thread of control using the DB_MPOOLFILE structure
+ * to read pages *into* the cache. Wait until we're the only
+ * reference holder and remove the DB_MPOOLFILE structure from
+ * the list, so nobody else can even find it.
+ */
+ if (dbmfp->ref == 1) {
+ TAILQ_REMOVE(&dbmp->dbmfq, dbmfp, q);
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ (void)__os_sleep(dbenv, 1, 0);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ /* Complain if pinned blocks never returned. */
+ if (dbmfp->pinref != 0)
+ __db_err(dbenv, "%s: close: %lu blocks left pinned",
+ __memp_fn(dbmfp), (u_long)dbmfp->pinref);
+
+ /* Discard any mmap information. */
+ if (dbmfp->addr != NULL &&
+ (ret = __os_unmapfile(dbenv, dbmfp->addr, dbmfp->len)) != 0)
+ __db_err(dbenv, "%s: %s", __memp_fn(dbmfp), db_strerror(ret));
+
+ /* Close the file; temporary files may not yet have been created. */
+ if (F_ISSET(&dbmfp->fh, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(&dbmfp->fh)) != 0) {
+ __db_err(dbenv, "%s: %s", __memp_fn(dbmfp), db_strerror(t_ret));
+ if (ret == 0)
+ ret = t_ret;
+ }
+
+ /* Discard the thread mutex. */
+ if (dbmfp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmfp->mutexp);
+
+ /*
+ * Discard our reference on the underlying MPOOLFILE, and close
+ * it if it's no longer useful to anyone.
+ *
+ * If we're not discarding it, and it's a temp file, this means
+ * all the outstanding references belong to unflushed buffers.
+ * (A temp file can only be referenced by one DB_MPOOLFILE).
+ * We don't care about preserving any of those buffers, so mark
+ * the MPOOLFILE as dead so that when we try to flush them,
+ * even the dirty ones just get discarded.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ mfp = dbmfp->mfp;
+ if (--mfp->mpf_cnt == 0) {
+ if (F_ISSET(mfp, MP_UNLINK)) {
+ MEMP_FREMOVE(mfp);
+ if ((t_ret = __db_appname(dbmp->dbenv,
+ DB_APP_DATA, NULL, R_ADDR(dbmp->reginfo,
+ mfp->path_off), 0, NULL, &rpath)) != 0 && ret == 0)
+ ret = t_ret;
+ if (t_ret == 0 && (t_ret =
+ __os_unlink(dbmp->dbenv, rpath)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(rpath, 0);
+ }
+ if (mfp->block_cnt == 0)
+ __memp_mf_discard(dbmp, mfp);
+ } else if (F_ISSET(mfp, MP_TEMP))
+ MEMP_FREMOVE(mfp);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /* Discard the DB_MPOOLFILE structure. */
+ __os_free(dbmfp, sizeof(DB_MPOOLFILE));
+
+ return (ret);
+}
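
memp_fclose() above waits until it is the sole reference holder: take the handle mutex, test the count, and if other threads still hold references, drop the lock and sleep a second before retrying. The same pattern in a standalone POSIX-threads sketch (the handle type is a made-up stand-in; compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct handle {                 /* simplified stand-in for DB_MPOOLFILE */
        pthread_mutex_t mtx;
        int ref;
    };

    /* Block until we are the sole reference holder, then tear down. */
    static void
    handle_close(struct handle *h)
    {
        for (;;) {
            pthread_mutex_lock(&h->mtx);
            if (h->ref == 1)
                break;                  /* sole holder: safe to remove */
            pthread_mutex_unlock(&h->mtx);
            sleep(1);                   /* mirrors __os_sleep(dbenv, 1, 0) */
        }
        /* ...remove the handle from any shared list here... */
        h->ref = 0;
        pthread_mutex_unlock(&h->mtx);
    }

    int
    main(void)
    {
        struct handle h = { PTHREAD_MUTEX_INITIALIZER, 1 };

        handle_close(&h);
        printf("closed, ref %d\n", h.ref);
        return (0);
    }
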
+
+/*
+ * __memp_mf_discard --
+ * Discard an MPOOLFILE.
+ *
+ * PUBLIC: void __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *));
+ */
+void
+__memp_mf_discard(dbmp, mfp)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+{
+ MPOOL *mp;
+
+ mp = dbmp->reginfo[0].primary;
+
+ /* Delete from the list of MPOOLFILEs. */
+ SH_TAILQ_REMOVE(&mp->mpfq, mfp, q, __mpoolfile);
+
+ /* Free the space. */
+ if (mfp->path_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->path_off));
+ if (mfp->fileid_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->fileid_off));
+ if (mfp->pgcookie_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->pgcookie_off));
+ __db_shalloc_free(dbmp->reginfo[0].addr, mfp);
+}
+
+/*
+ * __memp_fremove --
+ * Remove an underlying file from the system.
+ *
+ * PUBLIC: int __memp_fremove __P((DB_MPOOLFILE *));
+ */
+int
+__memp_fremove(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mfp = dbmfp->mfp;
+
+ PANIC_CHECK(dbenv);
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ MEMP_FREMOVE(mfp);
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ return (0);
+}
+
+/*
+ * __memp_fn --
+ * On errors we print whatever is available as the file name.
+ *
+ * PUBLIC: char * __memp_fn __P((DB_MPOOLFILE *));
+ */
+char *
+__memp_fn(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ return (__memp_fns(dbmfp->dbmp, dbmfp->mfp));
+}
+
+/*
+ * __memp_fns --
+ * On errors we print whatever is available as the file name.
+ *
+ * PUBLIC: char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *));
+ */
+char *
+__memp_fns(dbmp, mfp)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+{
+ if (mfp->path_off == 0)
+ return ((char *)"temporary");
+
+ return ((char *)R_ADDR(dbmp->reginfo, mfp->path_off));
+}
diff --git a/bdb/mp/mp_fput.c b/bdb/mp/mp_fput.c
new file mode 100644
index 00000000000..be03b721f36
--- /dev/null
+++ b/bdb/mp/mp_fput.c
@@ -0,0 +1,186 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_fput.c,v 11.16 2000/11/30 00:58:41 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+/*
+ * memp_fput --
+ * Mpool file put function.
+ */
+int
+memp_fput(dbmfp, pgaddr, flags)
+ DB_MPOOLFILE *dbmfp;
+ void *pgaddr;
+ u_int32_t flags;
+{
+ BH *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOL *c_mp, *mp;
+ int ret, wrote;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_fput(dbmfp, pgaddr, flags));
+#endif
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if (flags) {
+ if ((ret = __db_fchk(dbenv, "memp_fput", flags,
+ DB_MPOOL_CLEAN | DB_MPOOL_DIRTY | DB_MPOOL_DISCARD)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "memp_fput",
+ flags, DB_MPOOL_CLEAN, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_MPOOL_DIRTY) && F_ISSET(dbmfp, MP_READONLY)) {
+ __db_err(dbenv,
+ "%s: dirty flag set for readonly file page",
+ __memp_fn(dbmfp));
+ return (EACCES);
+ }
+ }
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /* Decrement the pinned reference count. */
+ if (dbmfp->pinref == 0) {
+ __db_err(dbenv,
+ "%s: more pages returned than retrieved", __memp_fn(dbmfp));
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (EINVAL);
+ } else
+ --dbmfp->pinref;
+
+ /*
+ * If we're mapping the file, there's nothing to do. Because we can
+ * stop mapping the file at any time, we have to check on each buffer
+ * to see if the address we gave the application was part of the map
+ * region.
+ */
+ if (dbmfp->addr != NULL && pgaddr >= dbmfp->addr &&
+ (u_int8_t *)pgaddr <= (u_int8_t *)dbmfp->addr + dbmfp->len) {
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (0);
+ }
+
+ /* Convert the page address to a buffer header. */
+ bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf));
+
+ /* Convert the buffer header to a cache. */
+ c_mp = BH_TO_CACHE(dbmp, bhp);
+
+/* UNLOCK THE REGION, LOCK THE CACHE. */
+
+ /* Set/clear the page bits. */
+ if (LF_ISSET(DB_MPOOL_CLEAN) && F_ISSET(bhp, BH_DIRTY)) {
+ ++c_mp->stat.st_page_clean;
+ --c_mp->stat.st_page_dirty;
+ F_CLR(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DIRTY) && !F_ISSET(bhp, BH_DIRTY)) {
+ --c_mp->stat.st_page_clean;
+ ++c_mp->stat.st_page_dirty;
+ F_SET(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DISCARD))
+ F_SET(bhp, BH_DISCARD);
+
+ /*
+ * If the page is dirty and being scheduled to be written as part of
+ * a checkpoint, we no longer know that the log is up-to-date.
+ */
+ if (F_ISSET(bhp, BH_DIRTY) && F_ISSET(bhp, BH_SYNC))
+ F_SET(bhp, BH_SYNC_LOGFLSH);
+
+ /*
+ * Check for a reference count going to zero. This can happen if the
+ * application returns a page twice.
+ */
+ if (bhp->ref == 0) {
+ __db_err(dbenv, "%s: page %lu: unpinned page returned",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (EINVAL);
+ }
+
+ /*
+ * If more than one reference to the page, we're done. Ignore the
+ * discard flags (for now) and leave it at its position in the LRU
+ * chain. The rest gets done at last reference close.
+ */
+ if (--bhp->ref > 0) {
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (0);
+ }
+
+ /*
+ * Move the buffer to the head/tail of the LRU chain. We do this
+ * before writing the buffer for checkpoint purposes, as the write
+ * can discard the region lock and allow another process to acquire
+ * the buffer. We could keep that from happening, but there seems no
+ * reason to do so.
+ */
+ SH_TAILQ_REMOVE(&c_mp->bhq, bhp, q, __bh);
+ if (F_ISSET(bhp, BH_DISCARD))
+ SH_TAILQ_INSERT_HEAD(&c_mp->bhq, bhp, q, __bh);
+ else
+ SH_TAILQ_INSERT_TAIL(&c_mp->bhq, bhp, q);
+
+ /*
+ * If this buffer is scheduled for writing because of a checkpoint, we
+ * need to write it (if it's dirty), or update the checkpoint counters
+ * (if it's not dirty). If we try to write it and can't, that's not
+ * necessarily an error: it's not completely unreasonable for the
+ * application to lack permission to write the underlying file. Set a
+ * flag so that the next time the memp_sync function is called we try
+ * writing it there, as the checkpoint thread of control had better be
+ * able to write all of the files.
+ */
+ if (F_ISSET(bhp, BH_SYNC)) {
+ if (F_ISSET(bhp, BH_DIRTY)) {
+ if (__memp_bhwrite(dbmp,
+ dbmfp->mfp, bhp, NULL, &wrote) != 0 || !wrote)
+ F_SET(mp, MP_LSN_RETRY);
+ } else {
+ F_CLR(bhp, BH_SYNC);
+
+ --mp->lsn_cnt;
+ --dbmfp->mfp->lsn_cnt;
+ }
+ }
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (0);
+}
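
memp_fput() converts the page address handed back by the application into its buffer header by subtracting the offset of the data area, SSZA(BH, buf), from the pointer. The same trick in portable C, with a made-up header type standing in for BH:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf_hdr {                    /* simplified stand-in for BH */
        unsigned long pgno;
        unsigned char buf[1];           /* page data follows the header */
    };

    int
    main(void)
    {
        struct buf_hdr *bhp;
        void *pgaddr;

        /* Allocate a header with room for a small "page" behind it. */
        if ((bhp = malloc(sizeof(*bhp) + 64)) == NULL)
            return (1);
        bhp->pgno = 7;
        pgaddr = bhp->buf;              /* the address handed to the caller */

        /* Convert the page address back into its buffer header. */
        bhp = (struct buf_hdr *)
            ((unsigned char *)pgaddr - offsetof(struct buf_hdr, buf));
        printf("pgno %lu\n", bhp->pgno);
        free(bhp);
        return (0);
    }
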
diff --git a/bdb/mp/mp_fset.c b/bdb/mp/mp_fset.c
new file mode 100644
index 00000000000..08313c9b6f5
--- /dev/null
+++ b/bdb/mp/mp_fset.c
@@ -0,0 +1,98 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_fset.c,v 11.13 2000/11/30 00:58:41 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+/*
+ * memp_fset --
+ * Mpool page set-flag routine.
+ */
+int
+memp_fset(dbmfp, pgaddr, flags)
+ DB_MPOOLFILE *dbmfp;
+ void *pgaddr;
+ u_int32_t flags;
+{
+ BH *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOL *c_mp, *mp;
+ int ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_fset(dbmfp, pgaddr, flags));
+#endif
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if (flags == 0)
+ return (__db_ferr(dbenv, "memp_fset", 1));
+
+ if ((ret = __db_fchk(dbenv, "memp_fset", flags,
+ DB_MPOOL_DIRTY | DB_MPOOL_CLEAN | DB_MPOOL_DISCARD)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "memp_fset",
+ flags, DB_MPOOL_CLEAN, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_MPOOL_DIRTY) && F_ISSET(dbmfp, MP_READONLY)) {
+ __db_err(dbenv, "%s: dirty flag set for readonly file page",
+ __memp_fn(dbmfp));
+ return (EACCES);
+ }
+
+ /* Convert the page address to a buffer header. */
+ bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf));
+
+ /* Convert the buffer header to a cache. */
+ c_mp = BH_TO_CACHE(dbmp, bhp);
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ if (LF_ISSET(DB_MPOOL_CLEAN) && F_ISSET(bhp, BH_DIRTY)) {
+ ++c_mp->stat.st_page_clean;
+ --c_mp->stat.st_page_dirty;
+ F_CLR(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DIRTY) && !F_ISSET(bhp, BH_DIRTY)) {
+ --c_mp->stat.st_page_clean;
+ ++c_mp->stat.st_page_dirty;
+ F_SET(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DISCARD))
+ F_SET(bhp, BH_DISCARD);
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (0);
+}
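
A usage sketch for the two entry points above, assuming dbmfp is an open DB_MPOOLFILE and pgaddr a page obtained from, and still pinned in, this pool: mark the page dirty in place with memp_fset(), then return it with memp_fput(). Passing DB_MPOOL_DIRTY directly to memp_fput() would have the same effect here; memp_fset() matters when the page stays pinned.

    #include <db.h>

    /* Mark a pinned page dirty, then hand it back to the pool. */
    int
    dirty_and_release(DB_MPOOLFILE *dbmfp, void *pgaddr)
    {
        int ret;

        if ((ret = memp_fset(dbmfp, pgaddr, DB_MPOOL_DIRTY)) != 0)
            return (ret);
        return (memp_fput(dbmfp, pgaddr, 0));
    }
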
diff --git a/bdb/mp/mp_method.c b/bdb/mp/mp_method.c
new file mode 100644
index 00000000000..85a6239b032
--- /dev/null
+++ b/bdb/mp/mp_method.c
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_method.c,v 11.10 2000/04/04 20:12:04 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __memp_set_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int));
+static int __memp_set_mp_mmapsize __P((DB_ENV *, size_t));
+
+/*
+ * __memp_dbenv_create --
+ * Mpool specific creation of the DB_ENV structure.
+ *
+ * PUBLIC: void __memp_dbenv_create __P((DB_ENV *));
+ */
+void
+__memp_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * We default to 32 8K pages. We don't default to a flat 256K, because
+ * some systems require significantly more memory to hold 32 pages than
+ * others. For example, HP-UX with POSIX pthreads needs 88 bytes for
+ * a POSIX pthread mutex and almost 200 bytes per buffer header, while
+ * Solaris needs 24 and 52 bytes for the same structures.
+ */
+ dbenv->mp_bytes = 32 * ((8 * 1024) + sizeof(BH));
+ dbenv->mp_ncache = 1;
+
+ dbenv->set_mp_mmapsize = __memp_set_mp_mmapsize;
+ dbenv->set_cachesize = __memp_set_cachesize;
+
+#ifdef HAVE_RPC
+ /*
+ * If we have a client, overwrite what we just set up to
+ * point to client functions.
+ */
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_cachesize = __dbcl_env_cachesize;
+ dbenv->set_mp_mmapsize = __dbcl_set_mp_mmapsize;
+ }
+#endif
+
+}
+
+/*
+ * __memp_set_cachesize --
+ * Initialize the cache size.
+ */
+static int
+__memp_set_cachesize(dbenv, gbytes, bytes, ncache)
+ DB_ENV *dbenv;
+ u_int32_t gbytes, bytes;
+ int ncache;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_cachesize");
+
+ dbenv->mp_gbytes = gbytes + bytes / GIGABYTE;
+ dbenv->mp_bytes = bytes % GIGABYTE;
+ dbenv->mp_ncache = ncache == 0 ? 1 : ncache;
+
+ /*
+ * If the application requested less than 500MB, increase the
+ * cachesize by 25% to account for our overhead. (I'm guessing
+ * that caches over 500MB are specifically sized, i.e., it's
+ * a large server and the application actually knows how much
+ * memory is available.)
+ *
+ * There is a minimum cache size, regardless.
+ */
+ if (dbenv->mp_gbytes == 0) {
+ if (dbenv->mp_bytes < 500 * MEGABYTE)
+ dbenv->mp_bytes += dbenv->mp_bytes / 4;
+ if (dbenv->mp_bytes < DB_CACHESIZE_MIN)
+ dbenv->mp_bytes = DB_CACHESIZE_MIN;
+ }
+
+ return (0);
+}
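
A standalone sketch of the arithmetic in __memp_set_cachesize() above. GIGABYTE, MEGABYTE and DB_CACHESIZE_MIN are defined elsewhere in the tree; the constants below are stand-ins for illustration. For a 4MB request this prints 0 GB + 5242880 bytes, i.e. the 25% overhead bump takes effect.

    #include <stdio.h>

    #define GB        (1UL << 30)       /* stand-in for GIGABYTE */
    #define MB        (1UL << 20)       /* stand-in for MEGABYTE */
    #define CACHE_MIN (20UL * 1024)     /* stand-in for DB_CACHESIZE_MIN */

    int
    main(void)
    {
        unsigned long gbytes = 0, bytes = 4 * MB;   /* caller asks for 4MB */
        unsigned long mp_gbytes, mp_bytes;

        /* Normalize into gigabytes plus a sub-gigabyte remainder. */
        mp_gbytes = gbytes + bytes / GB;
        mp_bytes = bytes % GB;

        /* Small caches get 25% overhead added, then a floor applied. */
        if (mp_gbytes == 0) {
            if (mp_bytes < 500 * MB)
                mp_bytes += mp_bytes / 4;
            if (mp_bytes < CACHE_MIN)
                mp_bytes = CACHE_MIN;
        }
        printf("%lu GB + %lu bytes\n", mp_gbytes, mp_bytes);
        return (0);
    }
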
+
+/*
+ * __memp_set_mp_mmapsize --
+ * Set the maximum mapped file size.
+ */
+static int
+__memp_set_mp_mmapsize(dbenv, mp_mmapsize)
+ DB_ENV *dbenv;
+ size_t mp_mmapsize;
+{
+ dbenv->mp_mmapsize = mp_mmapsize;
+ return (0);
+}
diff --git a/bdb/mp/mp_region.c b/bdb/mp/mp_region.c
new file mode 100644
index 00000000000..4b85466ce63
--- /dev/null
+++ b/bdb/mp/mp_region.c
@@ -0,0 +1,357 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_region.c,v 11.26 2000/11/30 00:58:41 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+static int __mpool_init __P((DB_ENV *, DB_MPOOL *, int, int));
+#ifdef MUTEX_SYSTEM_RESOURCES
+static size_t __mpool_region_maint __P((REGINFO *));
+#endif
+
+/*
+ * __memp_open --
+ * Internal version of memp_open: only called from DB_ENV->open.
+ *
+ * PUBLIC: int __memp_open __P((DB_ENV *));
+ */
+int
+__memp_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ REGINFO reginfo;
+ roff_t reg_size, *regids;
+ u_int32_t i;
+ int htab_buckets, ret;
+
+ /* Figure out how big each cache region is. */
+ reg_size = (dbenv->mp_gbytes / dbenv->mp_ncache) * GIGABYTE;
+ reg_size += ((dbenv->mp_gbytes %
+ dbenv->mp_ncache) * GIGABYTE) / dbenv->mp_ncache;
+ reg_size += dbenv->mp_bytes / dbenv->mp_ncache;
+
+ /*
+ * Figure out how many hash buckets each region will have. Assume we
+ * want to keep each hash chain under 10 pages. We don't know the
+ * pagesize in advance, and it may differ for different files. Use a
+ * pagesize of 1K for the calculation -- we walk these chains a lot,
+ * so they must be kept short.
+ */
+ htab_buckets = __db_tablesize((reg_size / (1 * 1024)) / 10);
+
+ /* Create and initialize the DB_MPOOL structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(*dbmp), &dbmp)) != 0)
+ return (ret);
+ LIST_INIT(&dbmp->dbregq);
+ TAILQ_INIT(&dbmp->dbmfq);
+ dbmp->dbenv = dbenv;
+
+ /* Join/create the first mpool region. */
+ memset(&reginfo, 0, sizeof(REGINFO));
+ reginfo.type = REGION_TYPE_MPOOL;
+ reginfo.id = INVALID_REGION_ID;
+ reginfo.mode = dbenv->db_mode;
+ reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&reginfo, REGION_CREATE_OK);
+ if ((ret = __db_r_attach(dbenv, &reginfo, reg_size)) != 0)
+ goto err;
+
+ /*
+ * If we created the region, initialize it. Create or join any
+ * additional regions.
+ */
+ if (F_ISSET(&reginfo, REGION_CREATE)) {
+ /*
+ * We define how many regions there are going to be, allocate
+ * the REGINFO structures and create them. Make sure we don't
+ * clear the wrong entries on error.
+ */
+ dbmp->nreg = dbenv->mp_ncache;
+ if ((ret = __os_calloc(dbenv,
+ dbmp->nreg, sizeof(REGINFO), &dbmp->reginfo)) != 0)
+ goto err;
+ /* Make sure we don't clear the wrong entries on error. */
+ for (i = 0; i < dbmp->nreg; ++i)
+ dbmp->reginfo[i].id = INVALID_REGION_ID;
+ dbmp->reginfo[0] = reginfo;
+
+ /* Initialize the first region. */
+ if ((ret = __mpool_init(dbenv, dbmp, 0, htab_buckets)) != 0)
+ goto err;
+
+ /*
+ * Create/initialize remaining regions and copy their IDs into
+ * the first region.
+ */
+ mp = R_ADDR(dbmp->reginfo, dbmp->reginfo[0].rp->primary);
+ regids = R_ADDR(dbmp->reginfo, mp->regids);
+ for (i = 1; i < dbmp->nreg; ++i) {
+ dbmp->reginfo[i].type = REGION_TYPE_MPOOL;
+ dbmp->reginfo[i].id = INVALID_REGION_ID;
+ dbmp->reginfo[i].mode = dbenv->db_mode;
+ dbmp->reginfo[i].flags = REGION_CREATE_OK;
+ if ((ret = __db_r_attach(
+ dbenv, &dbmp->reginfo[i], reg_size)) != 0)
+ goto err;
+ if ((ret =
+ __mpool_init(dbenv, dbmp, i, htab_buckets)) != 0)
+ goto err;
+ R_UNLOCK(dbenv, &dbmp->reginfo[i]);
+
+ regids[i] = dbmp->reginfo[i].id;
+ }
+ } else {
+ /*
+ * Determine how many regions there are going to be, allocate
+ * the REGINFO structures and fill in local copies of that
+ * information.
+ */
+ mp = R_ADDR(&reginfo, reginfo.rp->primary);
+ dbmp->nreg = mp->nreg;
+ if ((ret = __os_calloc(dbenv,
+ dbmp->nreg, sizeof(REGINFO), &dbmp->reginfo)) != 0)
+ goto err;
+ /* Make sure we don't clear the wrong entries on error. */
+ for (i = 0; i < dbmp->nreg; ++i)
+ dbmp->reginfo[i].id = INVALID_REGION_ID;
+ dbmp->reginfo[0] = reginfo;
+
+ /* Join remaining regions. */
+ regids = R_ADDR(dbmp->reginfo, mp->regids);
+ for (i = 1; i < dbmp->nreg; ++i) {
+ dbmp->reginfo[i].type = REGION_TYPE_MPOOL;
+ dbmp->reginfo[i].id = regids[i];
+ dbmp->reginfo[i].mode = 0;
+ dbmp->reginfo[i].flags = REGION_JOIN_OK;
+ if ((ret = __db_r_attach(
+ dbenv, &dbmp->reginfo[i], 0)) != 0)
+ goto err;
+ R_UNLOCK(dbenv, &dbmp->reginfo[i]);
+ }
+ }
+
+ /* Set the local addresses for the regions. */
+ for (i = 0; i < dbmp->nreg; ++i)
+ dbmp->reginfo[i].primary =
+ R_ADDR(&dbmp->reginfo[i], dbmp->reginfo[i].rp->primary);
+
+ /* If the region is threaded, allocate a mutex to lock the handles. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ if ((ret = __db_mutex_alloc(
+ dbenv, dbmp->reginfo, &dbmp->mutexp)) != 0) {
+ goto err;
+ }
+ if ((ret =
+ __db_mutex_init(dbenv, dbmp->mutexp, 0, MUTEX_THREAD)) != 0)
+ goto err;
+ }
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ dbenv->mp_handle = dbmp;
+ return (0);
+
+err: if (dbmp->reginfo != NULL && dbmp->reginfo[0].addr != NULL) {
+ if (F_ISSET(dbmp->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ for (i = 0; i < dbmp->nreg; ++i)
+ if (dbmp->reginfo[i].id != INVALID_REGION_ID)
+ (void)__db_r_detach(
+ dbenv, &dbmp->reginfo[i], 0);
+ __os_free(dbmp->reginfo,
+ dbmp->nreg * sizeof(*dbmp->reginfo));
+ }
+ if (dbmp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
+ __os_free(dbmp, sizeof(*dbmp));
+ return (ret);
+}
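
A worked example of the per-region sizing at the top of __memp_open(): the gigabyte and byte portions of the requested cache are split across ncache regions, and the hash-bucket target assumes 1KB pages and chains of fewer than ten pages. __db_tablesize() then rounds that target to a usable table size; its rounding is not reproduced in this sketch.

    #include <stdio.h>

    #define GB (1UL << 30)              /* stand-in for GIGABYTE */

    int
    main(void)
    {
        /* Example request: a 2GB cache split across 3 regions. */
        unsigned long gbytes = 2, bytes = 0, ncache = 3;
        unsigned long reg_size, target_buckets;

        reg_size = (gbytes / ncache) * GB;
        reg_size += ((gbytes % ncache) * GB) / ncache;
        reg_size += bytes / ncache;

        /* Aim for chains of under 10 pages, assuming 1KB pages. */
        target_buckets = (reg_size / 1024) / 10;

        printf("per-region size %lu bytes, bucket target %lu\n",
            reg_size, target_buckets);
        return (0);
    }
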
+
+/*
+ * __mpool_init --
+ * Initialize a MPOOL structure in shared memory.
+ */
+static int
+__mpool_init(dbenv, dbmp, reginfo_off, htab_buckets)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ int reginfo_off, htab_buckets;
+{
+ DB_HASHTAB *htab;
+ MPOOL *mp;
+ REGINFO *reginfo;
+#ifdef MUTEX_SYSTEM_RESOURCES
+ size_t maint_size;
+#endif
+ int ret;
+ void *p;
+
+ mp = NULL;
+
+ reginfo = &dbmp->reginfo[reginfo_off];
+ if ((ret = __db_shalloc(reginfo->addr,
+ sizeof(MPOOL), MUTEX_ALIGN, &reginfo->primary)) != 0)
+ goto mem_err;
+ reginfo->rp->primary = R_OFFSET(reginfo, reginfo->primary);
+ mp = reginfo->primary;
+ memset(mp, 0, sizeof(*mp));
+
+#ifdef MUTEX_SYSTEM_RESOURCES
+ maint_size = __mpool_region_maint(reginfo);
+ /* Allocate room for the maintenance info and initialize it. */
+ if ((ret = __db_shalloc(reginfo->addr,
+ sizeof(REGMAINT) + maint_size, 0, &p)) != 0)
+ goto mem_err;
+ __db_maintinit(reginfo, p, maint_size);
+ mp->maint_off = R_OFFSET(reginfo, p);
+#endif
+
+ if (reginfo_off == 0) {
+ SH_TAILQ_INIT(&mp->mpfq);
+
+ if ((ret = __db_shmutex_init(dbenv, &mp->sync_mutex,
+ R_OFFSET(dbmp->reginfo, &mp->sync_mutex) +
+ DB_FCNTL_OFF_MPOOL, 0, dbmp->reginfo,
+ (REGMAINT *)R_ADDR(dbmp->reginfo, mp->maint_off))) != 0)
+ goto err;
+
+ ZERO_LSN(mp->lsn);
+ mp->lsn_cnt = 0;
+
+ mp->nreg = dbmp->nreg;
+ if ((ret = __db_shalloc(dbmp->reginfo[0].addr,
+ dbmp->nreg * sizeof(int), 0, &p)) != 0)
+ goto mem_err;
+ mp->regids = R_OFFSET(dbmp->reginfo, p);
+ }
+
+ SH_TAILQ_INIT(&mp->bhq);
+
+ /* Allocate hash table space and initialize it. */
+ if ((ret = __db_shalloc(reginfo->addr,
+ htab_buckets * sizeof(DB_HASHTAB), 0, &htab)) != 0)
+ goto mem_err;
+ __db_hashinit(htab, htab_buckets);
+ mp->htab = R_OFFSET(reginfo, htab);
+ mp->htab_buckets = htab_buckets;
+
+ return (0);
+
+mem_err:__db_err(dbenv, "Unable to allocate memory for mpool region");
+err: if (reginfo->primary != NULL)
+ __db_shalloc_free(reginfo->addr, reginfo->primary);
+ return (ret);
+}
+
+/*
+ * __memp_close --
+ * Internal version of memp_close: only called from DB_ENV->close.
+ *
+ * PUBLIC: int __memp_close __P((DB_ENV *));
+ */
+int
+__memp_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ DB_MPREG *mpreg;
+ u_int32_t i;
+ int ret, t_ret;
+
+ ret = 0;
+ dbmp = dbenv->mp_handle;
+
+ /* Discard DB_MPREGs. */
+ while ((mpreg = LIST_FIRST(&dbmp->dbregq)) != NULL) {
+ LIST_REMOVE(mpreg, q);
+ __os_free(mpreg, sizeof(DB_MPREG));
+ }
+
+ /* Discard DB_MPOOLFILEs. */
+ while ((dbmfp = TAILQ_FIRST(&dbmp->dbmfq)) != NULL)
+ if ((t_ret = memp_fclose(dbmfp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the thread mutex. */
+ if (dbmp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
+
+ /* Detach from the region(s). */
+ for (i = 0; i < dbmp->nreg; ++i)
+ if ((t_ret = __db_r_detach(
+ dbenv, &dbmp->reginfo[i], 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(dbmp->reginfo, dbmp->nreg * sizeof(*dbmp->reginfo));
+ __os_free(dbmp, sizeof(*dbmp));
+
+ dbenv->mp_handle = NULL;
+ return (ret);
+}
+
+#ifdef MUTEX_SYSTEM_RESOURCES
+/*
+ * __mpool_region_maint --
+ * Return the amount of space needed for region maintenance info.
+ */
+static size_t
+__mpool_region_maint(infop)
+ REGINFO *infop;
+{
+ size_t s;
+ int numlocks;
+
+ /*
+ * For mutex maintenance we need one mutex per possible page.
+ * Compute the maximum number of pages this cache can have.
+ * Also add in an mpool mutex.
+ */
+ numlocks = ((infop->rp->size / DB_MIN_PGSIZE) + 1);
+ s = sizeof(roff_t) * numlocks;
+ return (s);
+}
+#endif
+
+/*
+ * __mpool_region_destroy --
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __mpool_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__mpool_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ MPOOL *mp;
+
+ COMPQUIET(dbenv, NULL);
+ mp = R_ADDR(infop, infop->rp->primary);
+
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop, mp->maint_off));
+ return;
+}
diff --git a/bdb/mp/mp_register.c b/bdb/mp/mp_register.c
new file mode 100644
index 00000000000..27859f69d7b
--- /dev/null
+++ b/bdb/mp/mp_register.c
@@ -0,0 +1,85 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_register.c,v 11.12 2000/11/15 19:25:39 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+/*
+ * memp_register --
+ * Register a file type's pgin, pgout routines.
+ */
+int
+memp_register(dbenv, ftype, pgin, pgout)
+ DB_ENV *dbenv;
+ int ftype;
+ int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+{
+ DB_MPOOL *dbmp;
+ DB_MPREG *mpreg;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_register(dbenv, ftype, pgin, pgout));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+
+ /*
+ * Chances are good that the item has already been registered, as the
+ * DB access methods are the folks that call this routine. If already
+ * registered, just update the entry, although it's probably unchanged.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
+ if (mpreg->ftype == ftype) {
+ mpreg->pgin = pgin;
+ mpreg->pgout = pgout;
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (mpreg != NULL)
+ return (0);
+
+ /* New entry. */
+ if ((ret = __os_malloc(dbenv, sizeof(DB_MPREG), NULL, &mpreg)) != 0)
+ return (ret);
+
+ mpreg->ftype = ftype;
+ mpreg->pgin = pgin;
+ mpreg->pgout = pgout;
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ LIST_INSERT_HEAD(&dbmp->dbregq, mpreg, q);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (0);
+}
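
A usage sketch for memp_register(). The callback signatures follow the declaration above; the file-type number and function names are made up for illustration, and the callbacks are no-ops where a real pgin/pgout pair would, for example, byte-swap the page.

    #include <db.h>

    /* Hypothetical converters; real ones would transform the page. */
    static int
    my_pgin(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
    {
        return (0);             /* called after a page is read from disk */
    }

    static int
    my_pgout(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
    {
        return (0);             /* called before a page is written to disk */
    }

    /* Register the pair for an application-chosen file type. */
    int
    register_my_ftype(DB_ENV *dbenv)
    {
        return (memp_register(dbenv, 42, my_pgin, my_pgout));
    }
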
diff --git a/bdb/mp/mp_stat.c b/bdb/mp/mp_stat.c
new file mode 100644
index 00000000000..7982513448d
--- /dev/null
+++ b/bdb/mp/mp_stat.c
@@ -0,0 +1,388 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_stat.c,v 11.21 2001/01/09 16:59:30 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "db_am.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static void __memp_dumpcache
+ __P((DB_MPOOL *, REGINFO *, size_t *, FILE *, u_int32_t));
+static void __memp_pbh __P((DB_MPOOL *, BH *, size_t *, FILE *));
+
+/*
+ * memp_stat --
+ * Display MPOOL statistics.
+ */
+int
+memp_stat(dbenv, gspp, fspp, db_malloc)
+ DB_ENV *dbenv;
+ DB_MPOOL_STAT **gspp;
+ DB_MPOOL_FSTAT ***fspp;
+ void *(*db_malloc) __P((size_t));
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOL_FSTAT **tfsp, *tstruct;
+ DB_MPOOL_STAT *sp;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ char *tname;
+ size_t len, nlen;
+ u_int32_t i;
+ int ret;
+ char *name;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_stat(dbenv, gspp, fspp, db_malloc));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+ sp = NULL;
+
+ /* Global statistics. */
+ mp = dbmp->reginfo[0].primary;
+ if (gspp != NULL) {
+ *gspp = NULL;
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(**gspp), gspp)) != 0)
+ return (ret);
+ sp = *gspp;
+
+ /*
+ * Initialization and information that is not maintained on
+ * a per-cache basis.
+ */
+ sp->st_hash_longest = 0;
+ sp->st_region_wait = dbmp->reginfo[0].rp->mutex.mutex_set_wait;
+ sp->st_region_nowait =
+ dbmp->reginfo[0].rp->mutex.mutex_set_nowait;
+ sp->st_gbytes = dbenv->mp_gbytes;
+ sp->st_bytes = dbenv->mp_bytes;
+ sp->st_ncache = dbmp->nreg;
+ sp->st_regsize = dbmp->reginfo[0].rp->size;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /* Walk the cache list and accumulate the global information. */
+ for (i = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+ sp->st_cache_hit += c_mp->stat.st_cache_hit;
+ sp->st_cache_miss += c_mp->stat.st_cache_miss;
+ sp->st_map += c_mp->stat.st_map;
+ sp->st_page_create += c_mp->stat.st_page_create;
+ sp->st_page_in += c_mp->stat.st_page_in;
+ sp->st_page_out += c_mp->stat.st_page_out;
+ sp->st_ro_evict += c_mp->stat.st_ro_evict;
+ sp->st_rw_evict += c_mp->stat.st_rw_evict;
+ sp->st_hash_buckets += c_mp->stat.st_hash_buckets;
+ sp->st_hash_searches += c_mp->stat.st_hash_searches;
+ if (c_mp->stat.st_hash_longest > sp->st_hash_longest)
+ sp->st_hash_longest =
+ c_mp->stat.st_hash_longest;
+ sp->st_hash_examined += c_mp->stat.st_hash_examined;
+ sp->st_page_clean += c_mp->stat.st_page_clean;
+ sp->st_page_dirty += c_mp->stat.st_page_dirty;
+ sp->st_page_trickle += c_mp->stat.st_page_trickle;
+ sp->st_region_wait += c_mp->stat.st_region_wait;
+ sp->st_region_nowait += c_mp->stat.st_region_nowait;
+ }
+
+ /*
+ * We have duplicate statistics fields in the cache and
+ * per-file structures. The counters are only incremented
+ * in the per-file structures, though. The intent is that
+ * if we ever flush files from the pool we can save their
+ * last known totals in the cache structure.
+ */
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ sp->st_cache_hit += mfp->stat.st_cache_hit;
+ sp->st_cache_miss += mfp->stat.st_cache_miss;
+ sp->st_map += mfp->stat.st_map;
+ sp->st_page_create += mfp->stat.st_page_create;
+ sp->st_page_in += mfp->stat.st_page_in;
+ sp->st_page_out += mfp->stat.st_page_out;
+ }
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ /* Per-file statistics. */
+ if (fspp != NULL) {
+ *fspp = NULL;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /* Count the MPOOLFILE structures. */
+ for (i = 0, len = 0,
+ mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL;
+ ++i, mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
+ len += sizeof(DB_MPOOL_FSTAT *) +
+ sizeof(DB_MPOOL_FSTAT) +
+ strlen(__memp_fns(dbmp, mfp)) + 1;
+ len += sizeof(DB_MPOOL_FSTAT *); /* Trailing NULL */
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ if (len == 0)
+ return (0);
+
+ /* Allocate space */
+ if ((ret = __os_malloc(dbenv, len, db_malloc, fspp)) != 0)
+ return (ret);
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Build each individual entry. We assume that an array of
+ * pointers is aligned correctly to be followed by an array
+ * of structures, which should be safe (in this particular
+ * case, the first element of the structure is a pointer, so
+ * we're doubly safe). The array is followed by space for
+ * the text file names.
+ *
+ * Add 1 to i because we need to skip over the NULL.
+ */
+ tfsp = *fspp;
+ tstruct = (DB_MPOOL_FSTAT *)(tfsp + i + 1);
+ tname = (char *)(tstruct + i);
+
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL;
+ ++tfsp, ++tstruct, tname += nlen,
+ mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ name = __memp_fns(dbmp, mfp);
+ nlen = strlen(name) + 1;
+ *tfsp = tstruct;
+ *tstruct = mfp->stat;
+ tstruct->file_name = tname;
+ memcpy(tname, name, nlen);
+ }
+ *tfsp = NULL;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+ return (0);
+}
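
The per-file branch above packs its result into a single allocation laid out as a NULL-terminated pointer array, then the statistics structures, then the copied file names. A standalone sketch of that layout, with a simplified structure standing in for DB_MPOOL_FSTAT:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct fstat_ex {                   /* stand-in for DB_MPOOL_FSTAT */
        char *file_name;
        unsigned long pages_in;
    };

    int
    main(void)
    {
        const char *names[] = { "a.db", "bb.db" };
        struct fstat_ex **result, **tfsp, *tstruct;
        char *tname;
        size_t i, n = 2, len;

        /* Pointer array (plus trailing NULL), structs, then names. */
        len = (n + 1) * sizeof(struct fstat_ex *) +
            n * sizeof(struct fstat_ex);
        for (i = 0; i < n; ++i)
            len += strlen(names[i]) + 1;
        if ((result = malloc(len)) == NULL)
            return (1);

        tfsp = result;
        tstruct = (struct fstat_ex *)(tfsp + n + 1);
        tname = (char *)(tstruct + n);

        for (i = 0; i < n; ++i, ++tfsp, ++tstruct) {
            size_t nlen = strlen(names[i]) + 1;

            memcpy(tname, names[i], nlen);
            tstruct->file_name = tname;
            tstruct->pages_in = i;
            *tfsp = tstruct;
            tname += nlen;
        }
        *tfsp = NULL;                   /* trailing NULL terminator */

        for (tfsp = result; *tfsp != NULL; ++tfsp)
            printf("%s: %lu\n", (*tfsp)->file_name, (*tfsp)->pages_in);
        free(result);
        return (0);
    }
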
+
+#define FMAP_ENTRIES 200 /* Files we map. */
+
+#define MPOOL_DUMP_HASH 0x01 /* Debug hash chains. */
+#define MPOOL_DUMP_LRU 0x02 /* Debug LRU chains. */
+#define MPOOL_DUMP_MEM 0x04 /* Debug region memory. */
+#define MPOOL_DUMP_ALL 0x07 /* Debug all. */
+
+/*
+ * __memp_dump_region --
+ * Display MPOOL structures.
+ *
+ * PUBLIC: void __memp_dump_region __P((DB_ENV *, char *, FILE *));
+ */
+void
+__memp_dump_region(dbenv, area, fp)
+ DB_ENV *dbenv;
+ char *area;
+ FILE *fp;
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ size_t fmap[FMAP_ENTRIES + 1];
+ u_int32_t i, flags;
+ int cnt;
+ u_int8_t *p;
+
+ dbmp = dbenv->mp_handle;
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ for (flags = 0; *area != '\0'; ++area)
+ switch (*area) {
+ case 'A':
+ LF_SET(MPOOL_DUMP_ALL);
+ break;
+ case 'h':
+ LF_SET(MPOOL_DUMP_HASH);
+ break;
+ case 'l':
+ LF_SET(MPOOL_DUMP_LRU);
+ break;
+ case 'm':
+ LF_SET(MPOOL_DUMP_MEM);
+ break;
+ }
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ mp = dbmp->reginfo[0].primary;
+
+ /* Display MPOOL structures. */
+ (void)fprintf(fp, "%s\nPool (region addr 0x%lx)\n",
+ DB_LINE, (u_long)dbmp->reginfo[0].addr);
+
+ /* Display the MPOOLFILE structures. */
+ cnt = 0;
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile), ++cnt) {
+ (void)fprintf(fp, "File #%d: %s: type %ld, %s\n\t [UID: ",
+ cnt + 1, __memp_fns(dbmp, mfp), (long)mfp->ftype,
+ F_ISSET(mfp, MP_CAN_MMAP) ? "mmap" : "read/write");
+ p = R_ADDR(dbmp->reginfo, mfp->fileid_off);
+ for (i = 0; i < DB_FILE_ID_LEN; ++i) {
+ (void)fprintf(fp, "%x", *p++);
+ if (i < DB_FILE_ID_LEN - 1)
+ (void)fprintf(fp, " ");
+ }
+ (void)fprintf(fp, "]\n");
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
+ }
+
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q), ++cnt) {
+ (void)fprintf(fp, "File #%d: %s: per-process, %s\n",
+ cnt + 1, __memp_fn(dbmfp),
+ F_ISSET(dbmfp, MP_READONLY) ? "readonly" : "read/write");
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
+ }
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = INVALID_ROFF;
+ else
+ fmap[FMAP_ENTRIES] = INVALID_ROFF;
+
+ /* Dump the memory pools. */
+ for (i = 0; i < mp->nreg; ++i) {
+ (void)fprintf(fp, "%s\nCache #%d:\n", DB_LINE, i + 1);
+ __memp_dumpcache(dbmp, &dbmp->reginfo[i], fmap, fp, flags);
+ }
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /* Flush in case we're debugging. */
+ (void)fflush(fp);
+}
+
+/*
+ * __memp_dumpcache --
+ * Display statistics for a cache.
+ */
+static void
+__memp_dumpcache(dbmp, reginfo, fmap, fp, flags)
+ DB_MPOOL *dbmp;
+ REGINFO *reginfo;
+ size_t *fmap;
+ FILE *fp;
+ u_int32_t flags;
+{
+ BH *bhp;
+ DB_HASHTAB *dbht;
+ MPOOL *c_mp;
+ int bucket;
+
+ c_mp = reginfo->primary;
+
+ /* Display the hash table list of BH's. */
+ if (LF_ISSET(MPOOL_DUMP_HASH)) {
+ (void)fprintf(fp,
+ "%s\nBH hash table (%lu hash slots)\npageno, file, ref, address\n",
+ DB_LINE, (u_long)c_mp->htab_buckets);
+ for (dbht = R_ADDR(reginfo, c_mp->htab),
+ bucket = 0; bucket < c_mp->htab_buckets; ++dbht, ++bucket) {
+ if (SH_TAILQ_FIRST(dbht, __bh) != NULL)
+ (void)fprintf(fp, "%lu:\n", (u_long)bucket);
+ for (bhp = SH_TAILQ_FIRST(dbht, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ __memp_pbh(dbmp, bhp, fmap, fp);
+ }
+ }
+
+ /* Display the LRU list of BH's. */
+ if (LF_ISSET(MPOOL_DUMP_LRU)) {
+ (void)fprintf(fp, "%s\nBH LRU list\n", DB_LINE);
+ (void)fprintf(fp, "pageno, file, ref, address\n");
+ for (bhp = SH_TAILQ_FIRST(&c_mp->bhq, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
+ __memp_pbh(dbmp, bhp, fmap, fp);
+ }
+
+ /* Dump the memory pool. */
+ if (LF_ISSET(MPOOL_DUMP_MEM))
+ __db_shalloc_dump(reginfo->addr, fp);
+}
+
+/*
+ * __memp_pbh --
+ * Display a BH structure.
+ */
+static void
+__memp_pbh(dbmp, bhp, fmap, fp)
+ DB_MPOOL *dbmp;
+ BH *bhp;
+ size_t *fmap;
+ FILE *fp;
+{
+ static const FN fn[] = {
+ { BH_CALLPGIN, "callpgin" },
+ { BH_DIRTY, "dirty" },
+ { BH_DISCARD, "discard" },
+ { BH_LOCKED, "locked" },
+ { BH_SYNC, "sync" },
+ { BH_SYNC_LOGFLSH, "sync:logflush" },
+ { BH_TRASH, "trash" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; i < FMAP_ENTRIES; ++i)
+ if (fmap[i] == INVALID_ROFF || fmap[i] == bhp->mf_offset)
+ break;
+
+ if (fmap[i] == INVALID_ROFF)
+ (void)fprintf(fp, " %4lu, %lu, %2lu, %lu",
+ (u_long)bhp->pgno, (u_long)bhp->mf_offset,
+ (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp));
+ else
+ (void)fprintf(fp, " %4lu, #%d, %2lu, %lu",
+ (u_long)bhp->pgno, i + 1,
+ (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp));
+
+ __db_prflags(bhp->flags, fn, fp);
+
+ (void)fprintf(fp, "\n");
+}
diff --git a/bdb/mp/mp_sync.c b/bdb/mp/mp_sync.c
new file mode 100644
index 00000000000..1b0751db709
--- /dev/null
+++ b/bdb/mp/mp_sync.c
@@ -0,0 +1,658 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_sync.c,v 11.29 2001/01/11 18:19:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __bhcmp __P((const void *, const void *));
+static int __memp_fsync __P((DB_MPOOLFILE *));
+static int __memp_sballoc __P((DB_ENV *, BH ***, u_int32_t *));
+
+/*
+ * memp_sync --
+ * Mpool sync function.
+ */
+int
+memp_sync(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ BH *bhp, **bharray;
+ DB_MPOOL *dbmp;
+ DB_LSN tlsn;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ u_int32_t ar_cnt, i, ndirty;
+ int ret, retry_done, retry_need, wrote;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_sync(dbenv, lsnp));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ /*
+ * If no LSN is provided, flush the entire cache.
+ *
+ * !!!
+ * Our current behavior is to flush the entire cache, so there's
+ * nothing special we have to do here other than deal with NULL
+ * pointers.
+ */
+ if (lsnp == NULL) {
+ ZERO_LSN(tlsn);
+ lsnp = &tlsn;
+ F_SET(mp, MP_LSN_RETRY);
+ } else if (!LOGGING_ON(dbenv)) {
+ __db_err(dbenv, "memp_sync: requires logging");
+ return (EINVAL);
+ }
+
+ /*
+ * Sync calls are single-threaded so that we don't have multiple
+ * threads, with different checkpoint LSNs, walking the caches
+ * and updating the checkpoint LSNs and how many buffers remain
+ * to be written for the checkpoint. This shouldn't be a problem;
+ * any application that has multiple checkpoint threads isn't what
+ * I'd call trustworthy.
+ */
+ MUTEX_LOCK(dbenv, &mp->sync_mutex, dbenv->lockfhp);
+
+ /*
+ * If the application is asking about a previous call to memp_sync(),
+ * and we haven't found any buffers that the application holding the
+ * pin couldn't write, return yes or no based on the current count.
+ * Note, if the application is asking about a LSN *smaller* than one
+ * we've already handled or are currently handling, then we return a
+ * result based on the count for the larger LSN.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (!IS_ZERO_LSN(*lsnp) &&
+ !F_ISSET(mp, MP_LSN_RETRY) && log_compare(lsnp, &mp->lsn) <= 0) {
+ if (mp->lsn_cnt == 0) {
+ *lsnp = mp->lsn;
+ ret = 0;
+ } else
+ ret = DB_INCOMPLETE;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ MUTEX_UNLOCK(dbenv, &mp->sync_mutex);
+ return (ret);
+ }
+
+ /*
+ * Allocate room for a list of buffers, and decide how many buffers
+ * we can pin down.
+ *
+ * !!!
+ * Note: __memp_sballoc has released the region lock if we're not
+ * continuing forward.
+ */
+ if ((ret =
+ __memp_sballoc(dbenv, &bharray, &ndirty)) != 0 || ndirty == 0) {
+ MUTEX_UNLOCK(dbenv, &mp->sync_mutex);
+ return (ret);
+ }
+
+ retry_done = 0;
+retry: retry_need = 0;
+ /*
+ * Start a new checkpoint.
+ *
+ * Save the LSN. We know that it's a new LSN, a retry, or larger than
+ * the one for which we were already doing a checkpoint. (BTW, I don't
+ * expect to see multiple LSN's from the same or multiple processes,
+ * but You Just Never Know. Responding as if they all called with the
+ * largest of the LSNs specified makes everything work.)
+ *
+ * We don't currently use the LSN we save. We could potentially save
+ * the last-written LSN in each buffer header and use it to determine
+ * what buffers need to be written. The problem with this is that it's
+ * sizeof(LSN) more bytes of buffer header. We currently write all the
+ * dirty buffers instead, but with a sufficiently large cache that's
+ * going to be a problem.
+ */
+ mp->lsn = *lsnp;
+
+ /*
+ * Clear the global count of buffers waiting to be written, walk the
+ * list of files clearing the count of buffers waiting to be written.
+ *
+ * Clear the retry flag.
+ */
+ mp->lsn_cnt = 0;
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
+ mfp->lsn_cnt = 0;
+ F_CLR(mp, MP_LSN_RETRY);
+
+ /*
+ * Walk each cache's list of buffers and mark all dirty buffers to be
+ * written and all pinned buffers to be potentially written (we can't
+ * know if they'll need to be written until the holder returns them to
+ * the cache). We do this in one pass while holding the region locked
+ * so that processes can't make new buffers dirty, causing us to never
+ * finish. Since the application may have restarted the sync using a
+ * different LSN value, clear any BH_SYNC | BH_SYNC_LOGFLSH flags that
+ * appear leftover from previous calls.
+ *
+ * Keep a count of the total number of buffers we need to write in
+ * MPOOL->lsn_cnt, and for each file, in MPOOLFILE->lsn_cnt.
+ */
+ for (ar_cnt = 0, i = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+ for (bhp = SH_TAILQ_FIRST(&c_mp->bhq, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
+ if (F_ISSET(bhp, BH_DIRTY) || bhp->ref != 0) {
+ F_SET(bhp, BH_SYNC);
+
+ ++mp->lsn_cnt;
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ ++mfp->lsn_cnt;
+
+ /*
+ * If the buffer isn't being used, we can write
+ * it immediately, so increment its reference
+ * count to lock it down, and save a reference
+ * to it.
+ *
+ * If we've run out of space to store buffer refs,
+ * we're screwed. We don't want to realloc the
+ * array while holding a region lock, so we set
+ * a flag and deal with it later.
+ */
+ if (bhp->ref == 0) {
+ ++bhp->ref;
+ bharray[ar_cnt] = bhp;
+
+ if (++ar_cnt >= ndirty) {
+ retry_need = 1;
+ break;
+ }
+ }
+ } else
+ if (F_ISSET(bhp, BH_SYNC))
+ F_CLR(bhp, BH_SYNC | BH_SYNC_LOGFLSH);
+ }
+ if (ar_cnt >= ndirty)
+ break;
+ }
+
+ /* If there are no buffers we can write immediately, we're done. */
+ if (ar_cnt == 0) {
+ ret = mp->lsn_cnt ? DB_INCOMPLETE : 0;
+ goto done;
+ }
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Sort the buffers we're going to write immediately.
+ *
+ * We try and write the buffers in file/page order: it should reduce
+ * seeks by the underlying filesystem and possibly reduce the actual
+ * number of writes.
+ */
+ if (ar_cnt > 1)
+ qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);
+
+ /*
+ * Flush the log. We have to ensure the log records reflecting the
+ * changes on the database pages we're writing have already made it
+ * to disk. We usually do that as we write each page, but if we
+ * are going to write a large number of pages, repeatedly acquiring
+ * the log region lock is going to be expensive. Flush the entire
+ * log now, so that sync doesn't require any more log flushes.
+ */
+ if (LOGGING_ON(dbenv) && (ret = log_flush(dbenv, NULL)) != 0)
+ goto done;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /* Walk the array, writing buffers. */
+ for (i = 0; i < ar_cnt; ++i) {
+ /*
+ * It's possible for a thread to have gotten the buffer since
+ * we listed it for writing. If the reference count is still
+ * 1, we're the only ones using the buffer, go ahead and write.
+ * If it's >1, then skip the buffer and assume that it will be
+ * written when it's returned to the cache.
+ */
+ if (bharray[i]->ref > 1) {
+ --bharray[i]->ref;
+ continue;
+ }
+
+ /* Write the buffer. */
+ mfp = R_ADDR(dbmp->reginfo, bharray[i]->mf_offset);
+ ret = __memp_bhwrite(dbmp, mfp, bharray[i], NULL, &wrote);
+
+ /* Release the buffer. */
+ --bharray[i]->ref;
+
+ if (ret == 0 && wrote)
+ continue;
+
+ /*
+ * Any process syncing the shared memory buffer pool had best
+ * be able to write to any underlying file. Be understanding,
+ * but firm, on this point.
+ */
+ if (ret == 0) {
+ __db_err(dbenv, "%s: unable to flush page: %lu",
+ __memp_fns(dbmp, mfp), (u_long)bharray[i]->pgno);
+ ret = EPERM;
+ }
+
+ /*
+ * On error, clear MPOOL->lsn and set MP_LSN_RETRY so that no
+ * future checkpoint return can depend on this failure. Clear
+ * the buffer's BH_SYNC flag, because it's used to determine
+ * if lsn_cnt values are incremented/decremented. Don't bother
+ * to reset/clear:
+ *
+ * MPOOL->lsn_cnt
+ * MPOOLFILE->lsn_cnt
+ *
+ * they don't make any difference.
+ */
+ ZERO_LSN(mp->lsn);
+ F_SET(mp, MP_LSN_RETRY);
+
+ /* Release any buffers we're still pinning down. */
+ while (++i < ar_cnt) {
+ bhp = bharray[i];
+ --bhp->ref;
+ F_CLR(bhp, BH_SYNC | BH_SYNC_LOGFLSH);
+ }
+
+ goto done;
+ }
+
+ ret = mp->lsn_cnt != 0 ? DB_INCOMPLETE : 0;
+
+ /*
+ * If there were too many buffers and we're not returning an error, we
+ * re-try the checkpoint once -- since we allocated 80% of the total
+ * buffer count, once should be enough. If it still doesn't work, some
+ * other thread of control is dirtying buffers as fast as we're writing
+ * them, and we might as well give up for now. In the latter case, set
+ * the global retry flag, we'll have to start from scratch on the next
+ * checkpoint.
+ */
+ if (retry_need) {
+ if (retry_done) {
+ ret = DB_INCOMPLETE;
+ F_SET(mp, MP_LSN_RETRY);
+ } else {
+ retry_done = 1;
+ goto retry;
+ }
+ }
+
+done: R_UNLOCK(dbenv, dbmp->reginfo);
+ MUTEX_UNLOCK(dbenv, &mp->sync_mutex);
+
+ __os_free(bharray, ndirty * sizeof(BH *));
+
+ return (ret);
+}
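
A usage sketch of memp_sync() from an application's checkpoint path. Calling it with a NULL LSN flushes the entire cache, and DB_INCOMPLETE means dirty or pinned buffers remain, so the caller simply waits and retries. dbenv is assumed to be an environment opened with DB_INIT_MPOOL.

    #include <db.h>
    #include <unistd.h>

    /* Keep flushing until the cache no longer reports DB_INCOMPLETE. */
    int
    flush_cache(DB_ENV *dbenv)
    {
        int ret;

        while ((ret = memp_sync(dbenv, NULL)) == DB_INCOMPLETE)
            sleep(1);       /* let other threads return their pages */
        return (ret);
    }
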
+
+/*
+ * memp_fsync --
+ * Mpool file sync function.
+ */
+int
+memp_fsync(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ int is_tmp;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_fsync(dbmfp));
+#endif
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * If this handle doesn't have a file descriptor that's open for
+ * writing, or if the file is a temporary, there's no reason to
+ * proceed further.
+ */
+ if (F_ISSET(dbmfp, MP_READONLY))
+ return (0);
+
+ R_LOCK(dbenv, dbmp->reginfo);
+ is_tmp = F_ISSET(dbmfp->mfp, MP_TEMP);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (is_tmp)
+ return (0);
+
+ return (__memp_fsync(dbmfp));
+}
+
+/*
+ * __mp_xxx_fh --
+ * Return a file descriptor for DB 1.85 compatibility locking.
+ *
+ * PUBLIC: int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
+ */
+int
+__mp_xxx_fh(dbmfp, fhp)
+ DB_MPOOLFILE *dbmfp;
+ DB_FH **fhp;
+{
+ /*
+ * This is a truly spectacular layering violation, intended ONLY to
+ * support compatibility for the DB 1.85 DB->fd call.
+ *
+ * Sync the database file to disk, creating the file as necessary.
+ *
+ * We skip the MP_READONLY and MP_TEMP tests done by memp_fsync(3).
+ * The MP_READONLY test isn't interesting because we will either
+ * already have a file descriptor (we opened the database file for
+ * reading) or we aren't readonly (we created the database which
+ * requires write privileges). The MP_TEMP test isn't interesting
+ * because we want to write to the backing file regardless so that
+ * we get a file descriptor to return.
+ */
+ *fhp = &dbmfp->fh;
+ return (F_ISSET(&dbmfp->fh, DB_FH_VALID) ? 0 : __memp_fsync(dbmfp));
+}
+
+/*
+ * __memp_fsync --
+ * Mpool file internal sync function.
+ */
+static int
+__memp_fsync(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ BH *bhp, **bharray;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOL *c_mp, *mp;
+ size_t mf_offset;
+ u_int32_t ar_cnt, i, ndirty;
+ int incomplete, ret, retry_done, retry_need, wrote;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Allocate room for a list of buffers, and decide how many buffers
+ * we can pin down.
+ *
+ * !!!
+ * Note: __memp_sballoc has released our region lock if we're not
+ * continuing forward.
+ */
+ if ((ret =
+ __memp_sballoc(dbenv, &bharray, &ndirty)) != 0 || ndirty == 0)
+ return (ret);
+
+ retry_done = 0;
+retry: retry_need = 0;
+ /*
+ * Walk each cache's list of buffers and mark all dirty buffers to be
+ * written and all pinned buffers to be potentially written (we can't
+ * know if they'll need to be written until the holder returns them to
+ * the cache). We do this in one pass while holding the region locked
+ * so that processes can't make new buffers dirty, causing us to never
+ * finish.
+ */
+ mf_offset = R_OFFSET(dbmp->reginfo, dbmfp->mfp);
+ for (ar_cnt = 0, incomplete = 0, i = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+ for (bhp = SH_TAILQ_FIRST(&c_mp->bhq, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
+ if (!F_ISSET(bhp, BH_DIRTY) ||
+ bhp->mf_offset != mf_offset)
+ continue;
+ if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED)) {
+ incomplete = 1;
+ continue;
+ }
+
+ /*
+ * If the buffer isn't being used, we can write
+ * it immediately, so increment its reference
+ * count to lock it down, and save a reference
+ * to it.
+ *
+ * If we've run out of space to store buffer refs,
+ * we're screwed. We don't want to realloc the
+ * array while holding a region lock, so we set
+ * a flag and deal with it later.
+ */
+ ++bhp->ref;
+ bharray[ar_cnt] = bhp;
+ if (++ar_cnt >= ndirty) {
+ retry_need = 1;
+ break;
+ }
+ }
+ if (ar_cnt >= ndirty)
+ break;
+ }
+
+ /* If there are no buffers we can write immediately, we're done. */
+ if (ar_cnt == 0) {
+ ret = 0;
+ goto done;
+ }
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /* Sort the buffers we're going to write. */
+ if (ar_cnt > 1)
+ qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /* Walk the array, writing buffers. */
+ for (i = 0; i < ar_cnt;) {
+ /*
+ * It's possible for a thread to have gotten the buffer since
+ * we listed it for writing. If the reference count is still
+ * 1, we're the only ones using the buffer, go ahead and write.
+ * If it's >1, then skip the buffer and assume that it will be
+ * written when it's returned to the cache.
+ */
+ if (bharray[i]->ref > 1) {
+ incomplete = 1;
+ --bharray[i++]->ref;
+ continue;
+ }
+
+ /* Write the buffer. */
+ ret = __memp_pgwrite(dbmp, dbmfp, bharray[i], NULL, &wrote);
+
+ /* Release the buffer. */
+ --bharray[i++]->ref;
+
+ if (ret == 0) {
+ if (!wrote)
+ incomplete = 1;
+ continue;
+ }
+
+ /*
+ * On error:
+ *
+ * Release any buffers we're still pinning down.
+ */
+ while (i < ar_cnt)
+ --bharray[i++]->ref;
+ break;
+ }
+
+ /*
+ * If there were too many buffers and we're not returning an error, we
+ * re-try the flush once -- since we allocated 80% of the total
+ * buffer count, once should be enough. If it still doesn't work, some
+ * other thread of control is dirtying buffers as fast as we're writing
+ * them, and we might as well give up.
+ */
+ if (retry_need) {
+ if (retry_done)
+ incomplete = 1;
+ else {
+ retry_done = 1;
+ goto retry;
+ }
+ }
+
+done: R_UNLOCK(dbenv, dbmp->reginfo);
+
+ __os_free(bharray, ndirty * sizeof(BH *));
+
+ /*
+ * Sync the underlying file as the last thing we do, so that the OS
+ * has a maximal opportunity to flush buffers before we request it.
+ *
+ * !!!:
+ * Don't lock the region around the sync; fsync(2) has no atomicity
+ * issues.
+ */
+ if (ret == 0)
+ ret = incomplete ?
+ DB_INCOMPLETE : __os_fsync(dbenv, &dbmfp->fh);
+
+ return (ret);
+}
+
+/*
+ * __memp_sballoc --
+ * Allocate room for a list of buffers.
+ */
+static int
+__memp_sballoc(dbenv, bharrayp, ndirtyp)
+ DB_ENV *dbenv;
+ BH ***bharrayp;
+ u_int32_t *ndirtyp;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *c_mp, *mp;
+ u_int32_t i, nclean, ndirty, maxpin;
+ int ret;
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ /*
+ * We don't want to hold the region lock while we write the buffers,
+ * so only lock it while we create a list.
+ *
+ * Walk through the list of caches, figuring out how many buffers
+ * we're going to need.
+ *
+ * Make a point of not holding the region lock across the library
+ * allocation call.
+ */
+ for (nclean = ndirty = 0, i = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+ ndirty += c_mp->stat.st_page_dirty;
+ nclean += c_mp->stat.st_page_clean;
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ndirty == 0) {
+ *ndirtyp = 0;
+ return (0);
+ }
+
+ /*
+ * We don't want to pin down the entire buffer cache, otherwise we'll
+ * starve threads needing new pages. Don't pin down more than 80% of
+ * the cache, making sure that we don't screw up just because only a
+ * few pages have been created.
+ */
+ maxpin = ((ndirty + nclean) * 8) / 10;
+ if (maxpin < 10)
+ maxpin = 10;
+
+ /*
+ * Get a good-sized block of memory to hold buffer pointers; we don't
+ * want to run out, but cap the allocation if it would exceed the
+ * number of buffers we're allowed to pin, regardless.
+ */
+ ndirty += ndirty / 2 + 10;
+ if (ndirty > maxpin)
+ ndirty = maxpin;
+ if ((ret =
+ __os_malloc(dbenv, ndirty * sizeof(BH *), NULL, bharrayp)) != 0)
+ return (ret);
+
+ *ndirtyp = ndirty;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ return (0);
+}
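
The sizing policy above in numbers: with 400 dirty and 100 clean buffers, the 80% pin cap is 400 slots, and the padded request (dirty, plus half again, plus ten) is clamped back down to that cap. A tiny sketch of the arithmetic:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long nclean = 100, ndirty = 400, maxpin;

        maxpin = ((ndirty + nclean) * 8) / 10;  /* 80% of all buffers */
        if (maxpin < 10)
            maxpin = 10;

        ndirty += ndirty / 2 + 10;              /* headroom for new dirt */
        if (ndirty > maxpin)
            ndirty = maxpin;                    /* never above the cap */

        printf("array slots %lu, cap %lu\n", ndirty, maxpin);
        return (0);
    }
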
+
+static int
+__bhcmp(p1, p2)
+ const void *p1, *p2;
+{
+ BH *bhp1, *bhp2;
+
+ bhp1 = *(BH * const *)p1;
+ bhp2 = *(BH * const *)p2;
+
+ /* Sort by file (shared memory pool offset). */
+ if (bhp1->mf_offset < bhp2->mf_offset)
+ return (-1);
+ if (bhp1->mf_offset > bhp2->mf_offset)
+ return (1);
+
+ /*
+ * !!!
+ * Defend against badly written quicksort code calling the comparison
+ * function with two identical pointers (e.g., WATCOM C++ (Power++)).
+ */
+ if (bhp1->pgno < bhp2->pgno)
+ return (-1);
+ if (bhp1->pgno > bhp2->pgno)
+ return (1);
+ return (0);
+}
diff --git a/bdb/mp/mp_trickle.c b/bdb/mp/mp_trickle.c
new file mode 100644
index 00000000000..f937805cf40
--- /dev/null
+++ b/bdb/mp/mp_trickle.c
@@ -0,0 +1,149 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_trickle.c,v 11.12 2000/11/30 00:58:41 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "mp.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __memp_trick __P((DB_ENV *, int, int, int *));
+
+/*
+ * memp_trickle --
+ * Keep a specified percentage of the buffers clean.
+ */
+int
+memp_trickle(dbenv, pct, nwrotep)
+ DB_ENV *dbenv;
+ int pct, *nwrotep;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ u_int32_t i;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_memp_trickle(dbenv, pct, nwrotep));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ if (nwrotep != NULL)
+ *nwrotep = 0;
+
+ if (pct < 1 || pct > 100)
+ return (EINVAL);
+
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /* Loop through the caches... */
+ for (ret = 0, i = 0; i < mp->nreg; ++i)
+ if ((ret = __memp_trick(dbenv, i, pct, nwrotep)) != 0)
+ break;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (ret);
+}
+
+/*
+ * __memp_trick --
+ * Trickle a single cache.
+ */
+static int
+__memp_trick(dbenv, ncache, pct, nwrotep)
+ DB_ENV *dbenv;
+ int ncache, pct, *nwrotep;
+{
+ BH *bhp;
+ DB_MPOOL *dbmp;
+ MPOOL *c_mp;
+ MPOOLFILE *mfp;
+ db_pgno_t pgno;
+ u_long total;
+ int ret, wrote;
+
+ dbmp = dbenv->mp_handle;
+ c_mp = dbmp->reginfo[ncache].primary;
+
+ /*
+	 * If there are sufficient clean buffers, or no buffers, or no dirty
+ * buffers, we're done.
+ *
+ * XXX
+ * Using st_page_clean and st_page_dirty is our only choice at the
+ * moment, but it's not as correct as we might like in the presence
+ * of pools with more than one buffer size, as a free 512-byte buffer
+ * isn't the same as a free 8K buffer.
+ */
+loop: total = c_mp->stat.st_page_clean + c_mp->stat.st_page_dirty;
+ if (total == 0 || c_mp->stat.st_page_dirty == 0 ||
+ (c_mp->stat.st_page_clean * 100) / total >= (u_long)pct)
+ return (0);
+
+ /* Loop until we write a buffer. */
+ for (bhp = SH_TAILQ_FIRST(&c_mp->bhq, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
+ if (bhp->ref != 0 ||
+ !F_ISSET(bhp, BH_DIRTY) || F_ISSET(bhp, BH_LOCKED))
+ continue;
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+
+ /*
+ * We can't write to temporary files -- see the comment in
+ * mp_bh.c:__memp_bhwrite().
+ */
+ if (F_ISSET(mfp, MP_TEMP))
+ continue;
+
+ pgno = bhp->pgno;
+ if ((ret = __memp_bhwrite(dbmp, mfp, bhp, NULL, &wrote)) != 0)
+ return (ret);
+
+ /*
+ * Any process syncing the shared memory buffer pool had better
+ * be able to write to any underlying file. Be understanding,
+ * but firm, on this point.
+ */
+ if (!wrote) {
+ __db_err(dbenv, "%s: unable to flush page: %lu",
+ __memp_fns(dbmp, mfp), (u_long)pgno);
+ return (EPERM);
+ }
+
+ ++c_mp->stat.st_page_trickle;
+ if (nwrotep != NULL)
+ ++*nwrotep;
+ goto loop;
+ }
+
+ return (0);
+}
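The gating test at the top of __memp_trick() is the heart of the trickle operation: nothing is written unless the clean pages fall below the requested percentage of the cache. A rough, stand-alone restatement of that test (the names here are illustrative, not part of the DB API):

    /*
     * Return nonzero if a cache with the given clean/dirty page counts
     * still needs a trickle write to reach "pct" percent clean pages.
     */
    static int
    needs_trickle(unsigned long n_clean, unsigned long n_dirty, int pct)
    {
            unsigned long total = n_clean + n_dirty;

            if (total == 0 || n_dirty == 0)
                    return (0);             /* nothing to write */
            return ((n_clean * 100) / total < (unsigned long)pct);
    }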
diff --git a/bdb/mutex/README b/bdb/mutex/README
new file mode 100644
index 00000000000..323c34f1e74
--- /dev/null
+++ b/bdb/mutex/README
@@ -0,0 +1,108 @@
+# $Id: README,v 11.2 1999/11/21 18:12:48 bostic Exp $
+
+Note: this only applies to locking using test-and-set and fcntl calls;
+pthreads were added after this was written.
+
+Resource locking routines: lock based on a db_mutex_t. All this gunk
+(including trying to make assembly code portable) is necessary because
+System V semaphores require system calls for uncontested locks and we
+don't want to make two system calls per resource lock.
+
+First, this is how it works. The db_mutex_t structure contains a resource
+test-and-set lock (tsl), a file offset, a pid for debugging, and statistics
+information.
+
+If HAVE_MUTEX_THREADS is defined (i.e. we know how to do test-and-sets
+for this compiler/architecture combination), we try and lock the resource
+tsl __os_spin() times. If we can't acquire the lock that way, we use a
+system call to sleep for 1ms, 2ms, 4ms, etc. (The time is bounded at 1
+second, just in case.) Using the timer backoff means that there are two
+assumptions: that locks are held for brief periods (never over system
+calls or I/O) and that locks are not hotly contested.
+
+If HAVE_MUTEX_THREADS is not defined, i.e. we can't do test-and-sets, we
+use a file descriptor to do byte locking on a file at a specified offset.
+In this case, ALL of the locking is done in the kernel. Because file
+descriptors are allocated per process, we have to provide the file
+descriptor as part of the lock call. We still have to do timer backoff
+because we need to be able to block ourselves, i.e. the lock manager
+causes processes to wait by having the process acquire a mutex and then
+attempting to re-acquire the mutex. There's no way to use kernel locking
+to block yourself, i.e. if you hold a lock and attempt to re-acquire it,
+the attempt will succeed.
+
+Next, let's talk about why it doesn't work the way a reasonable person
+would think it should work.
+
+Ideally, we'd have the ability to try to lock the resource tsl, and if
+that fails, increment a counter of waiting processes, then block in the
+kernel until the tsl is released. The process holding the resource tsl
+would see the wait counter when it went to release the resource tsl, and
+would wake any waiting processes up after releasing the lock. This would
+actually require both another tsl (call it the mutex tsl) and
+synchronization between the call that blocks in the kernel and the actual
+resource tsl. The mutex tsl would be used to protect accesses to the
+db_mutex_t itself. Locking the mutex tsl would be done by a busy loop,
+which is safe because processes would never block holding that tsl (all
+they would do is try to obtain the resource tsl and set/check the wait
+count). The problem in this model is that the blocking call into the
+kernel requires a blocking semaphore, i.e. one whose normal state is
+locked.
+
+The only portable forms of locking under UNIX are fcntl(2) on a file
+descriptor/offset, and System V semaphores. Neither of these locking
+methods are sufficient to solve the problem.
+
+The problem with fcntl locking is that only the process that obtained the
+lock can release it. Remember, we want the normal state of the kernel
+semaphore to be locked.  So, suppose the creator of the db_mutex_t
+initializes the lock to "locked", a second process then locks the resource
+tsl, and a third process needs to block waiting for the resource tsl.
+When the second process wants to wake up the third process, it can't,
+because it's not the holder of the lock!  For the second process to be
+the holder of the lock, we would have to make a system call per
+uncontested lock, which is what we were trying to get away from in the
+first place.
+
+There are some hybrid schemes, such as signaling the holder of the lock,
+or using a different blocking offset depending on which process is
+holding the lock, but it gets complicated fairly quickly. I'm open to
+suggestions, but I'm not holding my breath.
+
+Regardless, we use this form of locking when HAVE_SPINLOCKS is not
+defined (i.e., we're locking in the kernel), because it doesn't have the
+limitations found in System V semaphores, and because the normal state of
+the kernel object in that case is unlocked, so the process releasing the
+lock is also the holder of the lock.
+
+The System V semaphore design has a number of other limitations that make
+it inappropriate for this task. Namely:
+
+First, the semaphore key name space is separate from the file system name
+space (although there exist methods for using file names to create
+semaphore keys). If we use a well-known key, there's no reason to believe
+that any particular key will not already be in use, either by another
+instance of the DB application or some other application, in which case
+the DB application will fail. If we create a key, then we have to use a
+file system name to rendezvous and pass around the key.
+
+Second, System V semaphores traditionally have compile-time, system-wide
+limits on the number of semaphore keys that you can have. Typically, that
+number is far too low for any practical purpose. Since the semaphores
+permit more than a single slot per semaphore key, we could try and get
+around that limit by using multiple slots, but that means that the file
+that we're using for rendezvous is going to have to contain slot
+information as well as semaphore key information, and we're going to be
+reading/writing it on every db_mutex_t init or destroy operation. Anyhow,
+similar compile-time, system-wide limits on the numbers of slots per
+semaphore key kick in, and you're right back where you started.
+
+My fantasy is that once POSIX.1 standard mutexes are in wide-spread use,
+we can switch to them. My guess is that it won't happen, because the
+POSIX semaphores are only required to work for threads within a process,
+and not independent processes.
+
+Note: there are races in the statistics code, but since it's just that,
+I didn't bother fixing them.  (The fix requires a mutex tsl, so when/if
+this code is fixed to do rational locking (see above), the statistics
+update code should also be changed to acquire/release the mutex tsl.)
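The test-and-set path described above -- spin on the resource tsl __os_spin() times, then back off with timed sleeps of 1ms, 2ms, 4ms and so on, bounded at one second -- can be sketched roughly as follows. This is not the DB code: __atomic_test_and_set and nanosleep() stand in for the per-architecture MUTEX_SET/MUTEX_UNSET macros and __os_yield(), and the spin count is illustrative.

    #include <time.h>

    static void
    tsl_lock(volatile char *tsl, int spins)
    {
            struct timespec ts;
            unsigned long ms = 1;
            int n;

            for (;;) {
                    /* Try the test-and-set lock "spins" times. */
                    for (n = 0; n < spins; ++n)
                            if (!__atomic_test_and_set(tsl, __ATOMIC_ACQUIRE))
                                    return;         /* acquired */

                    /* Back off: sleep 1ms, 2ms, 4ms, ..., bounded at 1 second. */
                    ts.tv_sec = ms / 1000;
                    ts.tv_nsec = (ms % 1000) * 1000000L;
                    (void)nanosleep(&ts, NULL);
                    if ((ms <<= 1) > 1000)
                            ms = 1000;
            }
    }

    static void
    tsl_unlock(volatile char *tsl)
    {
            __atomic_clear(tsl, __ATOMIC_RELEASE);
    }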
diff --git a/bdb/mutex/mut_fcntl.c b/bdb/mutex/mut_fcntl.c
new file mode 100644
index 00000000000..02f4d4044f8
--- /dev/null
+++ b/bdb/mutex/mut_fcntl.c
@@ -0,0 +1,174 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mut_fcntl.c,v 11.11 2001/01/11 18:19:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_fcntl_mutex_init --
+ * Initialize a DB mutex structure.
+ *
+ * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+ */
+int
+__db_fcntl_mutex_init(dbenv, mutexp, offset)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+ u_int32_t offset;
+{
+ memset(mutexp, 0, sizeof(*mutexp));
+
+ /*
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application is private, we don't need any locks.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+
+ mutexp->off = offset;
+#ifdef MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_lock
+ * Lock on a mutex, blocking if necessary.
+ *
+ * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, MUTEX *, DB_FH *));
+ */
+int
+__db_fcntl_mutex_lock(dbenv, mutexp, fhp)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+ DB_FH *fhp;
+{
+ struct flock k_lock;
+ int locked, ms, waited;
+
+ if (!dbenv->db_mutexlocks)
+ return (0);
+
+ /* Initialize the lock. */
+ k_lock.l_whence = SEEK_SET;
+ k_lock.l_start = mutexp->off;
+ k_lock.l_len = 1;
+
+ for (locked = waited = 0;;) {
+ /*
+ * Wait for the lock to become available; wait 1ms initially,
+ * up to 1 second.
+ */
+ for (ms = 1; mutexp->pid != 0;) {
+ waited = 1;
+ __os_yield(NULL, ms * USEC_PER_MS);
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+ }
+
+ /* Acquire an exclusive kernel lock. */
+ k_lock.l_type = F_WRLCK;
+ if (fcntl(fhp->fd, F_SETLKW, &k_lock))
+ return (__os_get_errno());
+
+ /* If the resource is still available, it's ours. */
+ if (mutexp->pid == 0) {
+ locked = 1;
+ mutexp->pid = (u_int32_t)getpid();
+ }
+
+ /* Release the kernel lock. */
+ k_lock.l_type = F_UNLCK;
+ if (fcntl(fhp->fd, F_SETLK, &k_lock))
+ return (__os_get_errno());
+
+ /*
+ * If we got the resource lock we're done.
+ *
+ * !!!
+ * We can't check to see if the lock is ours, because we may
+ * be trying to block ourselves in the lock manager, and so
+ * the holder of the lock that's preventing us from getting
+ * the lock may be us! (Seriously.)
+ */
+ if (locked)
+ break;
+ }
+
+ if (waited)
+ ++mutexp->mutex_set_wait;
+ else
+ ++mutexp->mutex_set_nowait;
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, MUTEX *));
+ */
+int
+__db_fcntl_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+{
+ if (!dbenv->db_mutexlocks)
+ return (0);
+
+#ifdef DIAGNOSTIC
+#define MSG "mutex_unlock: ERROR: released lock that was unlocked\n"
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+ if (mutexp->pid == 0)
+ write(STDERR_FILENO, MSG, sizeof(MSG) - 1);
+#endif
+
+ /*
+ * Release the resource. We don't have to acquire any locks because
+ * processes trying to acquire the lock are checking for a pid set to
+ * 0/non-0, not to any specific value.
+ */
+ mutexp->pid = 0;
+
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_destroy --
+ * Destroy a MUTEX.
+ *
+ * PUBLIC: int __db_fcntl_mutex_destroy __P((MUTEX *));
+ */
+int
+__db_fcntl_mutex_destroy(mutexp)
+ MUTEX *mutexp;
+{
+ COMPQUIET(mutexp, NULL);
+
+ return (0);
+}
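The kernel lock used in __db_fcntl_mutex_lock() is a one-byte exclusive fcntl(2) lock at the mutex's file offset; the pid field in the MUTEX is the actual resource lock, and the byte lock only serializes the check-and-set of that field. A bare sketch of the byte-lock half on its own, outside of DB (names and error conventions are illustrative):

    #include <sys/types.h>

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * Acquire (lock_it != 0) or release an exclusive lock on the single
     * byte at "off" in the file open on "fd"; returns fcntl(2)'s result,
     * -1 on error.
     */
    static int
    byte_lock(int fd, off_t off, int lock_it)
    {
            struct flock fl;

            memset(&fl, 0, sizeof(fl));
            fl.l_whence = SEEK_SET;
            fl.l_start = off;
            fl.l_len = 1;
            fl.l_type = lock_it ? F_WRLCK : F_UNLCK;

            /* F_SETLKW blocks until the lock is granted; F_SETLK does not. */
            return (fcntl(fd, lock_it ? F_SETLKW : F_SETLK, &fl));
    }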
diff --git a/bdb/mutex/mut_pthread.c b/bdb/mutex/mut_pthread.c
new file mode 100644
index 00000000000..3de4abcefc5
--- /dev/null
+++ b/bdb/mutex/mut_pthread.c
@@ -0,0 +1,328 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mut_pthread.c,v 11.33 2001/01/09 00:56:16 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef DIAGNOSTIC
+#undef MSG1
+#define MSG1 "mutex_lock: ERROR: lock currently in use: pid: %lu.\n"
+#undef MSG2
+#define MSG2 "mutex_unlock: ERROR: lock already unlocked\n"
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+#endif
+
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+#define pthread_cond_signal _lwp_cond_signal
+#define pthread_cond_wait _lwp_cond_wait
+#define pthread_mutex_lock _lwp_mutex_lock
+#define pthread_mutex_trylock _lwp_mutex_trylock
+#define pthread_mutex_unlock _lwp_mutex_unlock
+#define pthread_self _lwp_self
+#define pthread_mutex_destroy(x) 0
+#endif
+#ifdef HAVE_MUTEX_UI_THREADS
+#define pthread_cond_signal cond_signal
+#define pthread_cond_wait cond_wait
+#define pthread_mutex_lock mutex_lock
+#define pthread_mutex_trylock mutex_trylock
+#define pthread_mutex_unlock mutex_unlock
+#define pthread_self thr_self
+#define pthread_mutex_destroy mutex_destroy
+#endif
+
+#define PTHREAD_UNLOCK_ATTEMPTS 5
+
+/*
+ * __db_pthread_mutex_init --
+ * Initialize a MUTEX.
+ *
+ * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+ */
+int
+__db_pthread_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+ u_int32_t flags;
+{
+ int ret;
+
+ ret = 0;
+ memset(mutexp, 0, sizeof(*mutexp));
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, use thread-only locks; they
+ * are faster in some cases.
+ *
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application isn't threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ F_SET(mutexp, MUTEX_THREAD);
+ }
+
+#ifdef HAVE_MUTEX_PTHREADS
+ {
+ pthread_condattr_t condattr, *condattrp = NULL;
+ pthread_mutexattr_t mutexattr, *mutexattrp = NULL;
+
+ if (!F_ISSET(mutexp, MUTEX_THREAD)) {
+ ret = pthread_condattr_init(&condattr);
+ if (ret == 0)
+ ret = pthread_condattr_setpshared(
+ &condattr, PTHREAD_PROCESS_SHARED);
+ condattrp = &condattr;
+
+ if (ret == 0)
+ ret = pthread_mutexattr_init(&mutexattr);
+ if (ret == 0)
+ ret = pthread_mutexattr_setpshared(
+ &mutexattr, PTHREAD_PROCESS_SHARED);
+ mutexattrp = &mutexattr;
+ }
+
+ if (ret == 0)
+ ret = pthread_mutex_init(&mutexp->mutex, mutexattrp);
+ if (mutexattrp != NULL)
+ pthread_mutexattr_destroy(mutexattrp);
+ if (LF_ISSET(MUTEX_SELF_BLOCK)) {
+ if (ret == 0)
+ ret = pthread_cond_init(&mutexp->cond, condattrp);
+
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ if (condattrp != NULL)
+ pthread_condattr_destroy(condattrp);
+ }}
+#endif
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+ /*
+ * XXX
+ * Gcc complains about missing braces in the static initializations of
+ * lwp_cond_t and lwp_mutex_t structures because the structures contain
+ * sub-structures/unions and the Solaris include file that defines the
+ * initialization values doesn't have surrounding braces. There's not
+ * much we can do.
+ */
+ if (F_ISSET(mutexp, MUTEX_THREAD)) {
+ static lwp_mutex_t mi = DEFAULTMUTEX;
+
+ mutexp->mutex = mi;
+ } else {
+ static lwp_mutex_t mi = SHAREDMUTEX;
+
+ mutexp->mutex = mi;
+ }
+ if (LF_ISSET(MUTEX_SELF_BLOCK)) {
+ if (F_ISSET(mutexp, MUTEX_THREAD)) {
+ static lwp_cond_t ci = DEFAULTCV;
+
+ mutexp->cond = ci;
+ } else {
+ static lwp_cond_t ci = SHAREDCV;
+
+ mutexp->cond = ci;
+ }
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ }
+#endif
+#ifdef HAVE_MUTEX_UI_THREADS
+ {
+ int type;
+
+ type = F_ISSET(mutexp, MUTEX_THREAD) ? USYNC_THREAD : USYNC_PROCESS;
+
+ ret = mutex_init(&mutexp->mutex, type, NULL);
+ if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
+ ret = cond_init(&mutexp->cond, type, NULL);
+
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ }}
+#endif
+
+ mutexp->spins = __os_spin();
+#ifdef MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ if (ret == 0)
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (ret);
+}
+
+/*
+ * __db_pthread_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, MUTEX *));
+ */
+int
+__db_pthread_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+{
+ u_int32_t nspins;
+ int i, ret, waited;
+
+ if (!dbenv->db_mutexlocks || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins)
+ if (pthread_mutex_trylock(&mutexp->mutex) == 0)
+ break;
+
+ if (nspins == 0 && (ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
+ return (ret);
+
+ if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
+ for (waited = 0; mutexp->locked != 0; waited = 1) {
+ ret = pthread_cond_wait(&mutexp->cond, &mutexp->mutex);
+ /*
+ * !!!
+ * Solaris bug workaround:
+ * pthread_cond_wait() sometimes returns ETIME -- out
+ * of sheer paranoia, check both ETIME and ETIMEDOUT.
+ * We believe this happens when the application uses
+ * SIGALRM for some purpose, e.g., the C library sleep
+ * call, and Solaris delivers the signal to the wrong
+ * LWP.
+ */
+ if (ret != 0 && ret != ETIME && ret != ETIMEDOUT)
+ return (ret);
+ }
+
+ if (waited)
+ ++mutexp->mutex_set_wait;
+ else
+ ++mutexp->mutex_set_nowait;
+
+#ifdef DIAGNOSTIC
+ mutexp->locked = (u_int32_t)pthread_self();
+#else
+ mutexp->locked = 1;
+#endif
+ /*
+ * According to HP-UX engineers contacted by Netscape,
+ * pthread_mutex_unlock() will occasionally return EFAULT
+ * for no good reason on mutexes in shared memory regions,
+ * and the correct caller behavior is to try again. Do
+ * so, up to PTHREAD_UNLOCK_ATTEMPTS consecutive times.
+ * Note that we don't bother to restrict this to HP-UX;
+ * it should be harmless elsewhere. [#2471]
+ */
+ i = PTHREAD_UNLOCK_ATTEMPTS;
+ do {
+ ret = pthread_mutex_unlock(&mutexp->mutex);
+ } while (ret == EFAULT && --i > 0);
+ if (ret != 0)
+ return (ret);
+ } else {
+ if (nspins == mutexp->spins)
+ ++mutexp->mutex_set_nowait;
+ else
+ ++mutexp->mutex_set_wait;
+#ifdef DIAGNOSTIC
+ if (mutexp->locked) {
+ char msgbuf[128];
+ (void)snprintf(msgbuf,
+ sizeof(msgbuf), MSG1, (u_long)mutexp->locked);
+ (void)write(STDERR_FILENO, msgbuf, strlen(msgbuf));
+ }
+ mutexp->locked = (u_int32_t)pthread_self();
+#else
+ mutexp->locked = 1;
+#endif
+ }
+ return (0);
+}
+
+/*
+ * __db_pthread_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, MUTEX *));
+ */
+int
+__db_pthread_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+{
+ int i, ret;
+
+ if (!dbenv->db_mutexlocks || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->locked)
+ (void)write(STDERR_FILENO, MSG2, sizeof(MSG2) - 1);
+#endif
+
+ if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
+ if ((ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
+ return (ret);
+
+ mutexp->locked = 0;
+
+ if ((ret = pthread_cond_signal(&mutexp->cond)) != 0)
+ return (ret);
+
+ /* See comment above; workaround for [#2471]. */
+ i = PTHREAD_UNLOCK_ATTEMPTS;
+ do {
+ ret = pthread_mutex_unlock(&mutexp->mutex);
+ } while (ret == EFAULT && --i > 0);
+ if (ret != 0)
+ return (ret);
+ } else {
+ mutexp->locked = 0;
+
+ /* See comment above; workaround for [#2471]. */
+ i = PTHREAD_UNLOCK_ATTEMPTS;
+ do {
+ ret = pthread_mutex_unlock(&mutexp->mutex);
+ } while (ret == EFAULT && --i > 0);
+ if (ret != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __db_pthread_mutex_destroy --
+ * Destroy a MUTEX.
+ *
+ * PUBLIC: int __db_pthread_mutex_destroy __P((MUTEX *));
+ */
+int
+__db_pthread_mutex_destroy(mutexp)
+ MUTEX *mutexp;
+{
+ if (F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ return (pthread_mutex_destroy(&mutexp->mutex));
+}
diff --git a/bdb/mutex/mut_tas.c b/bdb/mutex/mut_tas.c
new file mode 100644
index 00000000000..4b0db4bdf05
--- /dev/null
+++ b/bdb/mutex/mut_tas.c
@@ -0,0 +1,200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mut_tas.c,v 11.18 2000/11/30 00:58:41 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+/*
+ * This is where we load in the actual test-and-set mutex code.
+ */
+#define LOAD_ACTUAL_MUTEX_CODE
+#include "db_int.h"
+
+#ifdef DIAGNOSTIC
+#undef MSG1
+#define MSG1 "mutex_lock: ERROR: lock currently in use: pid: %lu.\n"
+#undef MSG2
+#define MSG2 "mutex_unlock: ERROR: lock already unlocked\n"
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+#endif
+
+/*
+ * __db_tas_mutex_init --
+ * Initialize a MUTEX.
+ *
+ * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+ */
+int
+__db_tas_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+ u_int32_t flags;
+{
+ /* Check alignment. */
+ DB_ASSERT(((db_alignp_t)mutexp & (MUTEX_ALIGN - 1)) == 0);
+
+ memset(mutexp, 0, sizeof(*mutexp));
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, use thread-only locks; they
+ * are faster in some cases.
+ *
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application isn't threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ F_SET(mutexp, MUTEX_THREAD);
+ }
+
+ /* Initialize the lock. */
+ if (MUTEX_INIT(&mutexp->tas))
+ return (__os_get_errno());
+
+ mutexp->spins = __os_spin();
+#ifdef MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_tas_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, MUTEX *));
+ */
+int
+__db_tas_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+{
+ u_long ms;
+ int nspins;
+
+ if (!dbenv->db_mutexlocks || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ ms = 1;
+
+loop: /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins) {
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+relock:
+#endif
+ if (!MUTEX_SET(&mutexp->tas))
+ continue;
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+ /*
+ * HP semaphores are unlocked automatically when a holding
+ * process exits. If the mutex appears to be locked
+ * (mutexp->locked != 0) but we got here, assume this has
+ * happened. Stick our own pid into mutexp->locked and
+ * lock again. (The default state of the mutexes used to
+ * block in __lock_get_internal is locked, so exiting with
+ * a locked mutex is reasonable behavior for a process that
+ * happened to initialize or use one of them.)
+ */
+ if (mutexp->locked != 0) {
+ mutexp->locked = (u_int32_t)getpid();
+ goto relock;
+ }
+ /*
+ * If we make it here, locked == 0, the diagnostic won't fire,
+ * and we were really unlocked by someone calling the
+ * DB mutex unlock function.
+ */
+#endif
+#ifdef DIAGNOSTIC
+ if (mutexp->locked != 0) {
+ char msgbuf[128];
+ (void)snprintf(msgbuf,
+ sizeof(msgbuf), MSG1, (u_long)mutexp->locked);
+ (void)write(STDERR_FILENO, msgbuf, strlen(msgbuf));
+ }
+#endif
+#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+ mutexp->locked = (u_int32_t)getpid();
+#endif
+ if (ms == 1)
+ ++mutexp->mutex_set_nowait;
+ else
+ ++mutexp->mutex_set_wait;
+ return (0);
+ }
+
+ /* Yield the processor; wait 1ms initially, up to 1 second. */
+ __os_yield(NULL, ms * USEC_PER_MS);
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+
+ goto loop;
+}
+
+/*
+ * __db_tas_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, MUTEX *));
+ */
+int
+__db_tas_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+{
+ if (!dbenv->db_mutexlocks || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->locked)
+ (void)write(STDERR_FILENO, MSG2, sizeof(MSG2) - 1);
+#endif
+#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+ mutexp->locked = 0;
+#endif
+
+ MUTEX_UNSET(&mutexp->tas);
+
+ return (0);
+}
+
+/*
+ * __db_tas_mutex_destroy --
+ * Destroy a MUTEX.
+ *
+ * PUBLIC: int __db_tas_mutex_destroy __P((MUTEX *));
+ */
+int
+__db_tas_mutex_destroy(mutexp)
+ MUTEX *mutexp;
+{
+ if (F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ MUTEX_DESTROY(&mutexp->tas);
+
+ return (0);
+}
diff --git a/bdb/mutex/mutex.c b/bdb/mutex/mutex.c
new file mode 100644
index 00000000000..acc4af9bfcc
--- /dev/null
+++ b/bdb/mutex/mutex.c
@@ -0,0 +1,253 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mutex.c,v 11.14 2000/11/30 00:58:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_mutex_alloc --
+ * Allocate and initialize a mutex.
+ *
+ * PUBLIC: int __db_mutex_alloc __P((DB_ENV *, REGINFO *, MUTEX **));
+ */
+int
+__db_mutex_alloc(dbenv, infop, storep)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ MUTEX **storep;
+{
+ int ret;
+
+ /*
+ * If the architecture supports mutexes in heap memory, use that
+ * memory. If it doesn't, we have to allocate space in a region.
+ *
+ * XXX
+ * There's a nasty starvation issue here for applications running
+ * on systems that don't support mutexes in heap memory. If the
+ * normal state of the entire region is dirty (e.g., mpool), then
+ * we can run out of memory to allocate for mutexes when new files
+ * are opened in the pool. We're not trying to fix this for now,
+ * because the only known system where we can see this failure at
+ * the moment is HP-UX 10.XX.
+ */
+#ifdef MUTEX_NO_MALLOC_LOCKS
+ R_LOCK(dbenv, infop);
+ ret = __db_shalloc(infop->addr, sizeof(MUTEX), MUTEX_ALIGN, storep);
+ R_UNLOCK(dbenv, infop);
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+ ret = __os_calloc(dbenv, 1, sizeof(MUTEX), storep);
+#endif
+ if (ret != 0)
+ __db_err(dbenv, "Unable to allocate memory for mutex");
+ return (ret);
+}
+
+/*
+ * __db_mutex_free --
+ * Free a mutex.
+ *
+ * PUBLIC: void __db_mutex_free __P((DB_ENV *, REGINFO *, MUTEX *));
+ */
+void
+__db_mutex_free(dbenv, infop, mutexp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ MUTEX *mutexp;
+{
+ if (F_ISSET(mutexp, MUTEX_INITED))
+ __db_mutex_destroy(mutexp);
+
+#ifdef MUTEX_NO_MALLOC_LOCKS
+ R_LOCK(dbenv, infop);
+ __db_shalloc_free(infop->addr, mutexp);
+ R_UNLOCK(dbenv, infop);
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+ __os_free(mutexp, sizeof(*mutexp));
+#endif
+}
+
+#ifdef MUTEX_SYSTEM_RESOURCES
+/*
+ * __db_shreg_locks_record --
+ * Record an entry in the shared locks area.
+ * Region lock must be held in caller.
+ *
+ * PUBLIC: int __db_shreg_locks_record __P((DB_ENV *, MUTEX *, REGINFO *,
+ * PUBLIC: REGMAINT *));
+ */
+int
+__db_shreg_locks_record(dbenv, mutexp, infop, rp)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ u_int i;
+
+ if (!F_ISSET(mutexp, MUTEX_INITED))
+ return (0);
+ DB_ASSERT(mutexp->reg_off == INVALID_ROFF);
+ rp->stat.st_records++;
+ i = (roff_t *)R_ADDR(infop, rp->regmutex_hint) - &rp->regmutexes[0];
+ if (rp->regmutexes[i] != INVALID_ROFF) {
+ /*
+	 * Our hint failed; search for an open slot.
+ */
+ rp->stat.st_hint_miss++;
+ for (i = 0; i < rp->reglocks; i++)
+ if (rp->regmutexes[i] == INVALID_ROFF)
+ break;
+ if (i == rp->reglocks) {
+ rp->stat.st_max_locks++;
+ __db_err(dbenv,
+ "Region mutexes: Exceeded maximum lock slots %lu",
+ (u_long)rp->reglocks);
+ return (ENOMEM);
+ }
+ } else
+ rp->stat.st_hint_hit++;
+ /*
+ * When we get here, i is an empty slot. Record this
+ * mutex, set hint to point to the next slot and we are done.
+ */
+ rp->regmutexes[i] = R_OFFSET(infop, mutexp);
+ mutexp->reg_off = R_OFFSET(infop, &rp->regmutexes[i]);
+ rp->regmutex_hint = (i < rp->reglocks - 1) ?
+ R_OFFSET(infop, &rp->regmutexes[i+1]) :
+ R_OFFSET(infop, &rp->regmutexes[0]);
+ return (0);
+}
+
+/*
+ * __db_shreg_locks_clear --
+ * Erase an entry in the shared locks area.
+ * Region lock must be held in caller.
+ *
+ * PUBLIC: void __db_shreg_locks_clear __P((MUTEX *, REGINFO *, REGMAINT *));
+ */
+void
+__db_shreg_locks_clear(mutexp, infop, rp)
+ MUTEX *mutexp;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ if (!F_ISSET(mutexp, MUTEX_INITED))
+ return;
+ /*
+ * This function is generally only called on a forcible
+ * remove of an environment. We recorded our index in
+ * the mutex. Find it and clear it.
+ */
+ DB_ASSERT(mutexp->reg_off != INVALID_ROFF);
+ DB_ASSERT(*(roff_t *)R_ADDR(infop, mutexp->reg_off) == \
+ R_OFFSET(infop, mutexp));
+ *(roff_t *)R_ADDR(infop, mutexp->reg_off) = 0;
+ rp->regmutex_hint = mutexp->reg_off;
+ rp->stat.st_clears++;
+ mutexp->reg_off = INVALID_ROFF;
+ __db_mutex_destroy(mutexp);
+}
+
+/*
+ * __db_shreg_locks_destroy --
+ * Destroy all mutexes in a region's range.
+ *
+ * PUBLIC: void __db_shreg_locks_destroy __P((REGINFO *, REGMAINT *));
+ */
+void
+__db_shreg_locks_destroy(infop, rp)
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ u_int32_t i;
+
+ /*
+ * Go through the list of all mutexes and destroy them.
+ */
+ for (i = 0; i < rp->reglocks; i++)
+ if (rp->regmutexes[i] != 0) {
+ rp->stat.st_destroys++;
+ __db_mutex_destroy((MUTEX *)R_ADDR(infop,
+ rp->regmutexes[i]));
+ }
+}
+
+/*
+ * __db_shreg_mutex_init --
+ * Initialize a shared memory mutex.
+ *
+ * PUBLIC: int __db_shreg_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t,
+ * PUBLIC: u_int32_t, REGINFO *, REGMAINT *));
+ */
+int
+__db_shreg_mutex_init(dbenv, mutexp, offset, flags, infop, rp)
+ DB_ENV *dbenv;
+ MUTEX *mutexp;
+ u_int32_t offset;
+ u_int32_t flags;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ int ret;
+
+ if ((ret = __db_mutex_init(dbenv, mutexp, offset, flags)) != 0)
+ return (ret);
+ /*
+ * !!!
+ * Since __db_mutex_init is a macro, we may not be
+ * using the 'offset' as it is only used for one type
+ * of mutex. We COMPQUIET it here, after the call above.
+ */
+ COMPQUIET(offset, 0);
+
+ if (!F_ISSET(mutexp, MUTEX_THREAD))
+ ret = __db_shreg_locks_record(dbenv, mutexp, infop, rp);
+ /*
+ * If we couldn't record it and we are returning an error,
+ * we need to destroy the mutex we just created.
+ */
+ if (ret)
+ __db_mutex_destroy(mutexp);
+
+ return (ret);
+}
+
+/*
+ * __db_shreg_maintinit --
+ * Initialize a region's maintenance information.
+ *
+ * PUBLIC: void __db_shreg_maintinit __P((REGINFO *, void *addr, size_t));
+ */
+void
+__db_shreg_maintinit(infop, addr, size)
+ REGINFO *infop;
+ void *addr;
+ size_t size;
+{
+ REGMAINT *rp;
+
+ rp = (REGMAINT *)addr;
+ memset(addr, 0, sizeof(REGMAINT));
+ rp->reglocks = size / sizeof(roff_t);
+ rp->regmutex_hint = R_OFFSET(infop, &rp->regmutexes[0]);
+}
+#endif /* MUTEX_SYSTEM_RESOURCES */
diff --git a/bdb/mutex/uts4_cc.s b/bdb/mutex/uts4_cc.s
new file mode 100644
index 00000000000..ee5f4143bde
--- /dev/null
+++ b/bdb/mutex/uts4_cc.s
@@ -0,0 +1,21 @@
+ /
+ / int uts_lock ( int *p, int i );
+ / Update the lock word pointed to by p with the
+ / value i, using compare-and-swap.
+ / Returns 0 if update was successful.
+ / Returns 1 if update failed.
+ /
+ entry uts_lock
+ uts_lock:
+ using .,r15
+ st r2,8(sp) / Save R2
+ l r2,64+0(sp) / R2 -> word to update
+ slr r0, r0 / R0 = current lock value must be 0
+ l r1,64+4(sp) / R1 = new lock value
+ cs r0,r1,0(r2) / Try the update ...
+ be x / ... Success. Return 0
+ la r0,1 / ... Failure. Return 1
+ x: /
+ l r2,8(sp) / Restore R2
+ b 2(,r14) / Return to caller
+ drop r15
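For readers who don't speak 370-family assembler: uts_lock() is a compare-and-swap on the lock word -- store the new value i into *p only if *p is currently 0, returning 0 on success and 1 on failure. A C rendering of the same semantics, using a GCC builtin purely as an illustration (it is not what the UTS build uses):

    static int
    uts_lock_sketch(int *p, int i)
    {
            int expected = 0;

            /* Atomically: if (*p == 0) { *p = i; return 0; } else return 1. */
            return (__atomic_compare_exchange_n(p, &expected, i,
                0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) ? 0 : 1);
    }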
diff --git a/bdb/os/os_abs.c b/bdb/os/os_abs.c
new file mode 100644
index 00000000000..04be9873360
--- /dev/null
+++ b/bdb/os/os_abs.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_abs.c,v 11.3 2000/02/14 03:00:04 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ *
+ * PUBLIC: int __os_abspath __P((const char *));
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ return (path[0] == '/');
+}
diff --git a/bdb/os/os_alloc.c b/bdb/os/os_alloc.c
new file mode 100644
index 00000000000..ee4a0f3c91f
--- /dev/null
+++ b/bdb/os/os_alloc.c
@@ -0,0 +1,342 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_alloc.c,v 11.18 2000/11/30 00:58:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+#ifdef DIAGNOSTIC
+static void __os_guard __P((void));
+#endif
+
+/*
+ * !!!
+ * Correct for systems that return NULL when you allocate 0 bytes of memory.
+ * There are several places in DB where we allocate the number of bytes held
+ * by the key/data item, and it can be 0. Correct here so that malloc never
+ * returns a NULL for that reason (which behavior is permitted by ANSI). We
+ * could make these calls macros on non-Alpha architectures (that's where we
+ * saw the problem), but it's probably not worth the autoconf complexity.
+ *
+ * !!!
+ * Correct for systems that don't set errno when malloc and friends fail.
+ *
+ * Out of memory.
+ * We wish to hold the whole sky,
+ * But we never will.
+ */
+
+/*
+ * __os_strdup --
+ * The strdup(3) function for DB.
+ *
+ * PUBLIC: int __os_strdup __P((DB_ENV *, const char *, void *));
+ */
+int
+__os_strdup(dbenv, str, storep)
+ DB_ENV *dbenv;
+ const char *str;
+ void *storep;
+{
+ size_t size;
+ int ret;
+ void *p;
+
+ *(void **)storep = NULL;
+
+ size = strlen(str) + 1;
+ if ((ret = __os_malloc(dbenv, size, NULL, &p)) != 0)
+ return (ret);
+
+ memcpy(p, str, size);
+
+ *(void **)storep = p;
+ return (0);
+}
+
+/*
+ * __os_calloc --
+ * The calloc(3) function for DB.
+ *
+ * PUBLIC: int __os_calloc __P((DB_ENV *, size_t, size_t, void *));
+ */
+int
+__os_calloc(dbenv, num, size, storep)
+ DB_ENV *dbenv;
+ size_t num, size;
+ void *storep;
+{
+ void *p;
+ int ret;
+
+ size *= num;
+ if ((ret = __os_malloc(dbenv, size, NULL, &p)) != 0)
+ return (ret);
+
+ memset(p, 0, size);
+
+ *(void **)storep = p;
+ return (0);
+}
+
+/*
+ * __os_malloc --
+ * The malloc(3) function for DB.
+ *
+ * PUBLIC: int __os_malloc __P((DB_ENV *, size_t, void *(*)(size_t), void *));
+ */
+int
+__os_malloc(dbenv, size, db_malloc, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *(*db_malloc) __P((size_t)), *storep;
+{
+ int ret;
+ void *p;
+
+ *(void **)storep = NULL;
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+#ifdef DIAGNOSTIC
+ else
+ ++size; /* Add room for a guard byte. */
+#endif
+
+ /* Some C libraries don't correctly set errno when malloc(3) fails. */
+ __os_set_errno(0);
+ if (db_malloc != NULL)
+ p = db_malloc(size);
+ else if (__db_jump.j_malloc != NULL)
+ p = __db_jump.j_malloc(size);
+ else
+ p = malloc(size);
+ if (p == NULL) {
+ ret = __os_get_errno();
+ if (ret == 0) {
+ __os_set_errno(ENOMEM);
+ ret = ENOMEM;
+ }
+ __db_err(dbenv,
+ "malloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+
+#ifdef DIAGNOSTIC
+ /*
+ * Guard bytes: if #DIAGNOSTIC is defined, we allocate an additional
+ * byte after the memory and set it to a special value that we check
+ * for when the memory is free'd. This is fine for structures, but
+ * not quite so fine for strings. There are places in DB where memory
+ * is allocated sufficient to hold the largest possible string that
+ * we'll see, and then only some subset of the memory is used. To
+ * support this usage, the __os_freestr() function checks the byte
+ * after the string's nul, which may or may not be the last byte in
+ * the originally allocated memory.
+ */
+ memset(p, CLEAR_BYTE, size); /* Initialize guard byte. */
+#endif
+ *(void **)storep = p;
+
+ return (0);
+}
+
+/*
+ * __os_realloc --
+ * The realloc(3) function for DB.
+ *
+ * PUBLIC: int __os_realloc __P((DB_ENV *,
+ * PUBLIC: size_t, void *(*)(void *, size_t), void *));
+ */
+int
+__os_realloc(dbenv, size, db_realloc, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *(*db_realloc) __P((void *, size_t)), *storep;
+{
+ int ret;
+ void *p, *ptr;
+
+ ptr = *(void **)storep;
+
+	/* If we haven't allocated anything yet, simply call malloc. */
+ if (ptr == NULL && db_realloc == NULL)
+ return (__os_malloc(dbenv, size, NULL, storep));
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+#ifdef DIAGNOSTIC
+ else
+ ++size; /* Add room for a guard byte. */
+#endif
+
+ /*
+ * Some C libraries don't correctly set errno when realloc(3) fails.
+ *
+ * Don't overwrite the original pointer, there are places in DB we
+ * try to continue after realloc fails.
+ */
+ __os_set_errno(0);
+ if (db_realloc != NULL)
+ p = db_realloc(ptr, size);
+ else if (__db_jump.j_realloc != NULL)
+ p = __db_jump.j_realloc(ptr, size);
+ else
+ p = realloc(ptr, size);
+ if (p == NULL) {
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "realloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+#ifdef DIAGNOSTIC
+ ((u_int8_t *)p)[size - 1] = CLEAR_BYTE; /* Initialize guard byte. */
+#endif
+
+ *(void **)storep = p;
+
+ return (0);
+}
+
+/*
+ * __os_free --
+ * The free(3) function for DB.
+ *
+ * PUBLIC: void __os_free __P((void *, size_t));
+ */
+void
+__os_free(ptr, size)
+ void *ptr;
+ size_t size;
+{
+#ifdef DIAGNOSTIC
+ if (size != 0) {
+ /*
+ * Check that the guard byte (one past the end of the memory) is
+ * still CLEAR_BYTE.
+ */
+ if (((u_int8_t *)ptr)[size] != CLEAR_BYTE)
+ __os_guard();
+
+ /* Clear memory. */
+ if (size != 0)
+ memset(ptr, CLEAR_BYTE, size);
+ }
+#else
+ COMPQUIET(size, 0);
+#endif
+
+ if (__db_jump.j_free != NULL)
+ __db_jump.j_free(ptr);
+ else
+ free(ptr);
+}
+
+/*
+ * __os_freestr --
+ * The free(3) function for DB, freeing a string.
+ *
+ * PUBLIC: void __os_freestr __P((void *));
+ */
+void
+__os_freestr(ptr)
+ void *ptr;
+{
+#ifdef DIAGNOSTIC
+ size_t size;
+
+ size = strlen(ptr) + 1;
+
+ /*
+ * Check that the guard byte (one past the end of the memory) is
+ * still CLEAR_BYTE.
+ */
+ if (((u_int8_t *)ptr)[size] != CLEAR_BYTE)
+ __os_guard();
+
+ /* Clear memory. */
+ memset(ptr, CLEAR_BYTE, size);
+#endif
+
+ if (__db_jump.j_free != NULL)
+ __db_jump.j_free(ptr);
+ else
+ free(ptr);
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __os_guard --
+ * Complain and abort.
+ */
+static void
+__os_guard()
+{
+ /*
+ * Eventually, once we push a DB_ENV handle down to these
+ * routines, we should use the standard output channels.
+ */
+ fprintf(stderr, "Guard byte incorrect during free.\n");
+ abort();
+ /* NOTREACHED */
+}
+#endif
+
+/*
+ * __ua_memcpy --
+ * Copy memory to memory without relying on any kind of alignment.
+ *
+ * There are places in DB that we have unaligned data, for example,
+ * when we've stored a structure in a log record as a DBT, and now
+ * we want to look at it. Unfortunately, if you have code like:
+ *
+ * struct a {
+ * int x;
+ * } *p;
+ *
+ * void *func_argument;
+ * int local;
+ *
+ * p = (struct a *)func_argument;
+ * memcpy(&local, p->x, sizeof(local));
+ *
+ * compilers optimize to use inline instructions requiring alignment,
+ * and records in the log don't have any particular alignment. (This
+ * isn't a compiler bug, because it's a structure they're allowed to
+ * assume alignment.)
+ *
+ * Casting the memcpy arguments to (u_int8_t *) appears to work most
+ * of the time, but we've seen examples where it wasn't sufficient
+ * and there's nothing in ANSI C that requires that work.
+ *
+ * PUBLIC: void *__ua_memcpy __P((void *, const void *, size_t));
+ */
+void *
+__ua_memcpy(dst, src, len)
+ void *dst;
+ const void *src;
+ size_t len;
+{
+ return ((void *)memcpy(dst, src, len));
+}
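The DIAGNOSTIC guard-byte scheme described in __os_malloc() boils down to: allocate one extra byte, store a known value in it, and verify that value when the memory is freed. Stripped of the DB plumbing, the idea looks roughly like this (GUARD_BYTE stands in for DB's CLEAR_BYTE, and the real code also clears freed memory):

    #include <stdio.h>
    #include <stdlib.h>

    #define GUARD_BYTE 0xdb

    static void *
    guarded_malloc(size_t size)
    {
            unsigned char *p;

            if ((p = malloc(size + 1)) != NULL)
                    p[size] = GUARD_BYTE;   /* byte just past the caller's space */
            return (p);
    }

    static void
    guarded_free(void *ptr, size_t size)
    {
            if (((unsigned char *)ptr)[size] != GUARD_BYTE) {
                    fprintf(stderr, "Guard byte incorrect during free.\n");
                    abort();
            }
            free(ptr);
    }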
diff --git a/bdb/os/os_dir.c b/bdb/os/os_dir.c
new file mode 100644
index 00000000000..50d00a5562f
--- /dev/null
+++ b/bdb/os/os_dir.c
@@ -0,0 +1,108 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_dir.c,v 11.8 2000/06/27 17:29:52 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if HAVE_DIRENT_H
+# include <dirent.h>
+# define NAMLEN(dirent) strlen((dirent)->d_name)
+#else
+# define dirent direct
+# define NAMLEN(dirent) (dirent)->d_namlen
+# if HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif
+# if HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif
+# if HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif
+
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_dirlist --
+ * Return a list of the files in a directory.
+ *
+ * PUBLIC: int __os_dirlist __P((DB_ENV *, const char *, char ***, int *));
+ */
+int
+__os_dirlist(dbenv, dir, namesp, cntp)
+ DB_ENV *dbenv;
+ const char *dir;
+ char ***namesp;
+ int *cntp;
+{
+ struct dirent *dp;
+ DIR *dirp;
+ int arraysz, cnt, ret;
+ char **names;
+
+ if (__db_jump.j_dirlist != NULL)
+ return (__db_jump.j_dirlist(dir, namesp, cntp));
+
+#ifdef HAVE_VXWORKS
+ if ((dirp = opendir((char *)dir)) == NULL)
+#else
+ if ((dirp = opendir(dir)) == NULL)
+#endif
+ return (__os_get_errno());
+ names = NULL;
+ for (arraysz = cnt = 0; (dp = readdir(dirp)) != NULL; ++cnt) {
+ if (cnt >= arraysz) {
+ arraysz += 100;
+ if ((ret = __os_realloc(dbenv,
+ arraysz * sizeof(names[0]), NULL, &names)) != 0)
+ goto nomem;
+ }
+ if ((ret = __os_strdup(dbenv, dp->d_name, &names[cnt])) != 0)
+ goto nomem;
+ }
+ (void)closedir(dirp);
+
+ *namesp = names;
+ *cntp = cnt;
+ return (0);
+
+nomem: if (names != NULL)
+ __os_dirfree(names, cnt);
+ if (dirp != NULL)
+ (void)closedir(dirp);
+ return (ret);
+}
+
+/*
+ * __os_dirfree --
+ * Free the list of files.
+ *
+ * PUBLIC: void __os_dirfree __P((char **, int));
+ */
+void
+__os_dirfree(names, cnt)
+ char **names;
+ int cnt;
+{
+ if (__db_jump.j_dirfree != NULL)
+ __db_jump.j_dirfree(names, cnt);
+ else {
+ while (cnt > 0)
+ __os_free(names[--cnt], 0);
+ __os_free(names, 0);
+ }
+}
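A hypothetical caller of the two routines above, listing a directory and releasing the result; it assumes the DB-internal headers and an open DB_ENV handle, and abbreviates error handling:

    #include <stdio.h>

    #include "db_int.h"

    static int
    print_dir(DB_ENV *dbenv, const char *dir)
    {
            char **names;
            int cnt, i, ret;

            if ((ret = __os_dirlist(dbenv, dir, &names, &cnt)) != 0)
                    return (ret);
            for (i = 0; i < cnt; ++i)
                    printf("%s\n", names[i]);
            __os_dirfree(names, cnt);       /* frees each name and the array */
            return (0);
    }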
diff --git a/bdb/os/os_errno.c b/bdb/os/os_errno.c
new file mode 100644
index 00000000000..f9b60f6354e
--- /dev/null
+++ b/bdb/os/os_errno.c
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_errno.c,v 11.3 2000/02/14 03:00:05 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <errno.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_get_errno --
+ * Return the value of errno.
+ *
+ * PUBLIC: int __os_get_errno __P((void));
+ */
+int
+__os_get_errno()
+{
+ /* This routine must be able to return the same value repeatedly. */
+ return (errno);
+}
+
+/*
+ * __os_set_errno --
+ * Set the value of errno.
+ *
+ * PUBLIC: void __os_set_errno __P((int));
+ */
+void
+__os_set_errno(evalue)
+ int evalue;
+{
+ errno = evalue;
+}
diff --git a/bdb/os/os_fid.c b/bdb/os/os_fid.c
new file mode 100644
index 00000000000..f853f6a8dba
--- /dev/null
+++ b/bdb/os/os_fid.c
@@ -0,0 +1,140 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_fid.c,v 11.7 2000/10/26 14:17:05 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#define SERIAL_INIT 0
+static u_int32_t fid_serial = SERIAL_INIT;
+
+/*
+ * __os_fileid --
+ * Return a unique identifier for a file.
+ *
+ * PUBLIC: int __os_fileid __P((DB_ENV *, const char *, int, u_int8_t *));
+ */
+int
+__os_fileid(dbenv, fname, unique_okay, fidp)
+ DB_ENV *dbenv;
+ const char *fname;
+ int unique_okay;
+ u_int8_t *fidp;
+{
+ struct stat sb;
+ size_t i;
+ int ret;
+ u_int32_t tmp;
+ u_int8_t *p;
+
+ /* Clear the buffer. */
+ memset(fidp, 0, DB_FILE_ID_LEN);
+
+ /* On POSIX/UNIX, use a dev/inode pair. */
+#ifdef HAVE_VXWORKS
+ if (stat((char *)fname, &sb)) {
+#else
+ if (stat(fname, &sb)) {
+#endif
+ ret = __os_get_errno();
+ __db_err(dbenv, "%s: %s", fname, strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Initialize/increment the serial number we use to help avoid
+ * fileid collisions. Note that we don't bother with locking;
+ * it's unpleasant to do from down in here, and if we race on
+ * this no real harm will be done, since the finished fileid
+ * has so many other components.
+ *
+ * We increment by 100000 on each call as a simple way of
+ * randomizing; simply incrementing seems potentially less useful
+ * if pids are also simply incremented, since this is process-local
+ * and we may be one of a set of processes starting up. 100000
+ * pushes us out of pid space on most platforms, and has few
+ * interesting properties in base 2.
+ */
+ if (fid_serial == SERIAL_INIT)
+ fid_serial = (u_int32_t)getpid();
+ else
+ fid_serial += 100000;
+
+ /*
+ * !!!
+ * Nothing is ever big enough -- on Sparc V9, st_ino, st_dev and the
+ * time_t types are all 8 bytes. As DB_FILE_ID_LEN is only 20 bytes,
+ * we convert to a (potentially) smaller fixed-size type and use it.
+ *
+ * We don't worry about byte sexing or the actual variable sizes.
+ *
+ * When this routine is called from the DB access methods, it's only
+ * called once -- whatever ID is generated when a database is created
+ * is stored in the database file's metadata, and that is what is
+ * saved in the mpool region's information to uniquely identify the
+ * file.
+ *
+ * When called from the mpool layer this routine will be called each
+ * time a new thread of control wants to share the file, which makes
+ * things tougher. As far as byte sexing goes, since the mpool region
+ * lives on a single host, there's no issue of that -- the entire
+ * region is byte sex dependent. As far as variable sizes go, we make
+ * the simplifying assumption that 32-bit and 64-bit processes will
+ * get the same 32-bit values if we truncate any returned 64-bit value
+ * to a 32-bit value. When we're called from the mpool layer, though,
+ * we need to be careful not to include anything that isn't
+ * reproducible for a given file, such as the timestamp or serial
+ * number.
+ */
+ tmp = (u_int32_t)sb.st_ino;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ tmp = (u_int32_t)sb.st_dev;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ if (unique_okay) {
+ /*
+ * We want the number of seconds, not the high-order 0 bits,
+ * so convert the returned time_t to a (potentially) smaller
+ * fixed-size type.
+ */
+ tmp = (u_int32_t)time(NULL);
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ for (p = (u_int8_t *)&fid_serial, i = sizeof(u_int32_t);
+ i > 0; --i)
+ *fidp++ = *p++;
+ }
+
+ return (0);
+}
diff --git a/bdb/os/os_finit.c b/bdb/os/os_finit.c
new file mode 100644
index 00000000000..23b606ecb2c
--- /dev/null
+++ b/bdb/os/os_finit.c
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_finit.c,v 11.8 2000/11/30 00:58:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_finit --
+ * Initialize a regular file, optionally zero-filling it as well.
+ *
+ * PUBLIC: int __os_finit __P((DB_ENV *, DB_FH *, size_t, int));
+ */
+int
+__os_finit(dbenv, fhp, size, zerofill)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t size;
+ int zerofill;
+{
+ db_pgno_t pages;
+ size_t i;
+ size_t nw;
+ u_int32_t relative;
+ int ret;
+ char buf[OS_VMPAGESIZE];
+
+ /* Write nuls to the new bytes. */
+ memset(buf, 0, sizeof(buf));
+
+ /*
+ * Extend the region by writing the last page. If the region is >4Gb,
+ * increment may be larger than the maximum possible seek "relative"
+ * argument, as it's an unsigned 32-bit value. Break the offset into
+	 * pages of 1MB each so that we don't overflow (2^20 * 2^32 is bigger
+	 * than any memory I expect to see for a while).
+ */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_END)) != 0)
+ return (ret);
+ pages = (size - OS_VMPAGESIZE) / MEGABYTE;
+ relative = (size - OS_VMPAGESIZE) % MEGABYTE;
+ if ((ret = __os_seek(dbenv,
+ fhp, MEGABYTE, pages, relative, 0, DB_OS_SEEK_CUR)) != 0)
+ return (ret);
+ if ((ret = __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
+ return (ret);
+ if (nw != sizeof(buf))
+ return (EIO);
+
+ /*
+ * We may want to guarantee that there is enough disk space for the
+ * file, so we also write a byte to each page. We write the byte
+ * because reading it is insufficient on systems smart enough not to
+ * instantiate disk pages to satisfy a read (e.g., Solaris).
+ */
+ if (zerofill) {
+ pages = size / MEGABYTE;
+ relative = size % MEGABYTE;
+ if ((ret = __os_seek(dbenv, fhp,
+ MEGABYTE, pages, relative, 1, DB_OS_SEEK_END)) != 0)
+ return (ret);
+
+ /* Write a byte to each page. */
+ for (i = 0; i < size; i += OS_VMPAGESIZE) {
+ if ((ret = __os_write(dbenv, fhp, buf, 1, &nw)) != 0)
+ return (ret);
+ if (nw != 1)
+ return (EIO);
+ if ((ret = __os_seek(dbenv, fhp,
+ 0, 0, OS_VMPAGESIZE - 1, 0, DB_OS_SEEK_CUR)) != 0)
+ return (ret);
+ }
+ }
+ return (0);
+}
+
+/*
+ * __os_fpinit --
+ * Initialize a page in a regular file.
+ *
+ * PUBLIC: int __os_fpinit __P((DB_ENV *, DB_FH *, db_pgno_t, int, int));
+ */
+int
+__os_fpinit(dbenv, fhp, pgno, pagecount, pagesize)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ db_pgno_t pgno;
+ int pagecount, pagesize;
+{
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(fhp, NULL);
+ COMPQUIET(pgno, 0);
+ COMPQUIET(pagecount, 0);
+ COMPQUIET(pagesize, 0);
+
+ return (0);
+}
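The core trick in __os_finit() -- growing a file to its final size by seeking to the start of the last page and writing a page of zeros -- can be shown without the DB_FH wrappers or the >4GB offset splitting. A bare-bones sketch using lseek(2) and write(2); the names and error conventions here are illustrative only:

    #include <sys/types.h>

    #include <stdlib.h>
    #include <unistd.h>

    /* Grow the file open on "fd" to "size" bytes, a multiple of "pagesize". */
    static int
    extend_file(int fd, off_t size, size_t pagesize)
    {
            char *buf;
            int ret;

            if ((buf = calloc(1, pagesize)) == NULL)
                    return (-1);
            ret = 0;
            if (lseek(fd, size - (off_t)pagesize, SEEK_SET) == (off_t)-1 ||
                write(fd, buf, pagesize) != (ssize_t)pagesize)
                    ret = -1;
            free(buf);
            return (ret);
    }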
diff --git a/bdb/os/os_fsync.c b/bdb/os/os_fsync.c
new file mode 100644
index 00000000000..f5fd5f56abd
--- /dev/null
+++ b/bdb/os/os_fsync.c
@@ -0,0 +1,90 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_fsync.c,v 11.9 2000/04/04 23:29:20 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h> /* XXX: Required by __hp3000s900 */
+#include <unistd.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+#ifdef HAVE_VXWORKS
+#include "ioLib.h"
+
+#define fsync(fd) __vx_fsync(fd);
+
+int
+__vx_fsync(fd)
+ int fd;
+{
+ int ret;
+
+ /*
+ * The results of ioctl are driver dependent. Some will return the
+ * number of bytes sync'ed. Only if it returns 'ERROR' should we
+ * flag it.
+ */
+ if ((ret = ioctl(fd, FIOSYNC, 0)) != ERROR)
+ return (0);
+ return (ret);
+}
+#endif
+
+#ifdef __hp3000s900
+#define fsync(fd) __mpe_fsync(fd);
+
+int
+__mpe_fsync(fd)
+ int fd;
+{
+ extern FCONTROL(short, short, void *);
+
+ FCONTROL(_MPE_FILENO(fd), 2, NULL); /* Flush the buffers */
+ FCONTROL(_MPE_FILENO(fd), 6, NULL); /* Write the EOF */
+ return (0);
+}
+#endif
+
+/*
+ * __os_fsync --
+ * Flush a file descriptor.
+ *
+ * PUBLIC: int __os_fsync __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_fsync(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ int ret;
+
+ /*
+ * Do nothing if the file descriptor has been marked as not requiring
+ * any sync to disk.
+ */
+ if (F_ISSET(fhp, DB_FH_NOSYNC))
+ return (0);
+
+ ret = __db_jump.j_fsync != NULL ?
+ __db_jump.j_fsync(fhp->fd) : fsync(fhp->fd);
+
+ if (ret != 0) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "fsync %s", strerror(ret));
+ }
+ return (ret);
+}
diff --git a/bdb/os/os_handle.c b/bdb/os/os_handle.c
new file mode 100644
index 00000000000..b127c5ff506
--- /dev/null
+++ b/bdb/os/os_handle.c
@@ -0,0 +1,165 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_handle.c,v 11.19 2000/11/30 00:58:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_openhandle --
+ * Open a file, using POSIX 1003.1 open flags.
+ *
+ * PUBLIC: int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+ */
+int
+__os_openhandle(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int flags, mode;
+ DB_FH *fhp;
+{
+ int ret, nrepeat;
+#ifdef HAVE_VXWORKS
+ int newflags;
+#endif
+
+ memset(fhp, 0, sizeof(*fhp));
+
+ /* If the application specified an interface, use it. */
+ if (__db_jump.j_open != NULL) {
+ if ((fhp->fd = __db_jump.j_open(name, flags, mode)) == -1)
+ return (__os_get_errno());
+ F_SET(fhp, DB_FH_VALID);
+ return (0);
+ }
+
+ for (ret = 0, nrepeat = 1; nrepeat < 4; ++nrepeat) {
+#ifdef HAVE_VXWORKS
+ /*
+ * VxWorks does not support O_CREAT on open, you have to use
+ * creat() instead. (It does not support O_EXCL or O_TRUNC
+ * either, even though they are defined "for future support".)
+ * If O_EXCL is specified, single thread and try to open the
+ * file. If successful, return EEXIST. Otherwise, call creat
+ * and then end single threading.
+ */
+ if (LF_ISSET(O_CREAT)) {
+ DB_BEGIN_SINGLE_THREAD;
+ newflags = flags & ~(O_CREAT | O_EXCL);
+ if (LF_ISSET(O_EXCL)) {
+ if ((fhp->fd =
+ open(name, newflags, mode)) != -1) {
+ /*
+ * If we get here, we want O_EXCL
+ * create, and it exists. Close and
+				 * return EEXIST.
+ */
+ (void)close(fhp->fd);
+ DB_END_SINGLE_THREAD;
+ return (EEXIST);
+ }
+ /*
+ * XXX
+ * Assume any error means non-existence.
+ * Unfortunately return values (even for
+ * non-existence) are driver specific so
+ * there is no single error we can use to
+ * verify we truly got the equivalent of
+ * ENOENT.
+ */
+ }
+ fhp->fd = creat(name, newflags);
+ DB_END_SINGLE_THREAD;
+ } else
+
+ /* FALLTHROUGH */
+#endif
+#ifdef __VMS
+ /*
+ * !!!
+ * Open with full sharing on VMS.
+ *
+ * We use these flags because they are the ones set by the VMS
+ * CRTL mmap() call when it opens a file, and we have to be
+ * able to open files that mmap() has previously opened, e.g.,
+ * when we're joining already existing DB regions.
+ */
+ fhp->fd = open(name, flags, mode, "shr=get,put,upd,del,upi");
+#else
+ fhp->fd = open(name, flags, mode);
+#endif
+
+ if (fhp->fd == -1) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_get_errno();
+ if (ret == ENFILE || ret == EMFILE || ret == ENOSPC) {
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ continue;
+ }
+ } else {
+#if defined(HAVE_FCNTL_F_SETFD)
+ /* Deny file descriptor access to any child process. */
+ if (fcntl(fhp->fd, F_SETFD, 1) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "fcntl(F_SETFD): %s",
+ strerror(ret));
+ (void)__os_closehandle(fhp);
+ } else
+#endif
+ F_SET(fhp, DB_FH_VALID);
+ }
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * __os_closehandle --
+ * Close a file.
+ *
+ * PUBLIC: int __os_closehandle __P((DB_FH *));
+ */
+int
+__os_closehandle(fhp)
+ DB_FH *fhp;
+{
+ int ret;
+
+ /* Don't close file descriptors that were never opened. */
+ DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) && fhp->fd != -1);
+
+ ret = __db_jump.j_close != NULL ?
+ __db_jump.j_close(fhp->fd) : close(fhp->fd);
+
+ /*
+ * Smash the POSIX file descriptor -- it's never tested, but we want
+ * to catch any mistakes.
+ */
+ fhp->fd = -1;
+ F_CLR(fhp, DB_FH_VALID);
+
+ return (ret == 0 ? 0 : __os_get_errno());
+}
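For illustration only (not part of the imported file): a minimal standalone sketch of the pattern __os_openhandle follows above -- retry the open on transient resource errors such as EMFILE, then mark the descriptor close-on-exec with fcntl(F_SETFD). The helper name open_retry() and the example path are made up for the sketch.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper mirroring the retry + FD_CLOEXEC logic above. */
static int
open_retry(const char *name, int flags, int mode)
{
	int attempt, fd;

	for (attempt = 1; attempt <= 3; ++attempt) {
		if ((fd = open(name, flags, mode)) != -1) {
			/* Deny the descriptor to any child process. */
			if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) {
				(void)close(fd);
				return (-1);
			}
			return (fd);
		}
		/* Retry only "temporary" resource failures. */
		if (errno != ENFILE && errno != EMFILE && errno != ENOSPC)
			break;
		sleep((unsigned)(attempt * 2));	/* 2 + 4 + 6 seconds total. */
	}
	return (-1);
}

int
main(void)
{
	int fd;

	if ((fd = open_retry("/tmp/example.db", O_RDWR | O_CREAT, 0600)) == -1) {
		perror("open_retry");
		return (1);
	}
	(void)close(fd);
	return (0);
}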
diff --git a/bdb/os/os_map.c b/bdb/os/os_map.c
new file mode 100644
index 00000000000..bb96a917d87
--- /dev/null
+++ b/bdb/os/os_map.c
@@ -0,0 +1,436 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_map.c,v 11.32 2000/11/30 00:58:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#ifdef HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#ifdef HAVE_SHMGET
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_ext.h"
+#include "os_jump.h"
+
+#ifdef HAVE_MMAP
+static int __os_map __P((DB_ENV *, char *, DB_FH *, size_t, int, int, void **));
+#endif
+#ifndef HAVE_SHMGET
+static int __db_nosystemmem __P((DB_ENV *));
+#endif
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ *
+ * PUBLIC: int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+ /*
+ * If the region is in system memory on UNIX, we use shmget(2).
+ *
+ * !!!
+ * There exist spinlocks that don't work in shmget memory, e.g.,
+ * the HP/UX msemaphore interface. If we don't have locks that
+ * will work in shmget memory, we better be private and not be
+ * threaded. If we reach this point, we know we're public, so
+ * it's an error.
+ */
+#if defined(MUTEX_NO_SHMGET_LOCKS)
+ __db_err(dbenv,
+ "architecture does not support locks inside system shared memory");
+ return (EINVAL);
+#endif
+#if defined(HAVE_SHMGET)
+ {
+ key_t segid;
+ int id, ret;
+
+ /*
+ * We could potentially create based on REGION_CREATE_OK, but
+ * that's dangerous -- we might get crammed in sideways if
+ * some of the expected regions exist but others do not. Also,
+ * if the requested size differs from an existing region's
+ * actual size, then all sorts of nasty things can happen.
+ * Basing create solely on REGION_CREATE is much safer -- a
+ * recovery will get us straightened out.
+ */
+ if (F_ISSET(infop, REGION_CREATE)) {
+ /*
+ * The application must give us a base System V IPC key
+ * value. Adjust that value based on the region's ID,
+ * and correct so the user's original value appears in
+ * the ipcs output.
+ */
+ if (dbenv->shm_key == INVALID_REGION_SEGID) {
+ __db_err(dbenv,
+ "no base system shared memory ID specified");
+ return (EINVAL);
+ }
+ segid = (key_t)(dbenv->shm_key + (infop->id - 1));
+
+ /*
+ * If map to an existing region, assume the application
+ * crashed and we're restarting. Delete the old region
+ * and re-try. If that fails, return an error, the
+ * application will have to select a different segment
+ * ID or clean up some other way.
+ */
+ if ((id = shmget(segid, 0, 0)) != -1) {
+ (void)shmctl(id, IPC_RMID, NULL);
+ if ((id = shmget(segid, 0, 0)) != -1) {
+ __db_err(dbenv,
+ "shmget: key: %ld: shared system memory region already exists",
+ (long)segid);
+ return (EAGAIN);
+ }
+ }
+ if ((id =
+ shmget(segid, rp->size, IPC_CREAT | 0600)) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv,
+ "shmget: key: %ld: unable to create shared system memory region: %s",
+ (long)segid, strerror(ret));
+ return (ret);
+ }
+ rp->segid = id;
+ } else
+ id = rp->segid;
+
+ if ((infop->addr = shmat(id, NULL, 0)) == (void *)-1) {
+ infop->addr = NULL;
+ ret = __os_get_errno();
+ __db_err(dbenv,
+ "shmat: id %d: unable to attach to shared system memory region: %s",
+ id, strerror(ret));
+ return (ret);
+ }
+
+ return (0);
+ }
+#else
+ return (__db_nosystemmem(dbenv));
+#endif
+ }
+
+#ifdef HAVE_MMAP
+ {
+ DB_FH fh;
+ int ret;
+
+ /*
+ * Try to open/create the shared region file. We DO NOT need to
+ * ensure that multiple threads/processes attempting to
+ * simultaneously create the region are properly ordered,
+ * our caller has already taken care of that.
+ */
+ if ((ret = __os_open(dbenv, infop->name, DB_OSO_REGION |
+ (F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE : 0),
+ infop->mode, &fh)) != 0)
+ __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+
+ /*
+ * If we created the file, grow it to its full size before mapping
+ * it in. We really want to avoid touching the buffer cache after
+ * mmap(2) is called; doing anything else confuses the hell out of
+ * systems without merged VM/buffer cache systems, or, more to the
+ * point, *badly* merged VM/buffer cache systems.
+ */
+ if (ret == 0 && F_ISSET(infop, REGION_CREATE))
+ ret = __os_finit(dbenv,
+ &fh, rp->size, DB_GLOBAL(db_region_init));
+
+ /* Map the file in. */
+ if (ret == 0)
+ ret = __os_map(dbenv,
+ infop->name, &fh, rp->size, 1, 0, &infop->addr);
+
+ (void)__os_closehandle(&fh);
+
+ return (ret);
+ }
+#else
+ COMPQUIET(infop, NULL);
+ COMPQUIET(rp, NULL);
+ __db_err(dbenv,
+ "architecture lacks mmap(2), shared environments not possible");
+ return (__db_eopnotsup(dbenv));
+#endif
+}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared memory region.
+ *
+ * PUBLIC: int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGION *rp;
+
+ rp = infop->rp;
+
+ if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+#ifdef HAVE_SHMGET
+ int ret, segid;
+
+ /*
+ * We may be about to remove the memory referenced by rp,
+ * save the segment ID, and (optionally) wipe the original.
+ */
+ segid = rp->segid;
+ if (destroy)
+ rp->segid = INVALID_REGION_SEGID;
+
+ if (shmdt(infop->addr) != 0) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "shmdt: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (destroy && shmctl(segid, IPC_RMID,
+ NULL) != 0 && (ret = __os_get_errno()) != EINVAL) {
+ __db_err(dbenv,
+ "shmctl: id %ld: unable to delete system shared memory region: %s",
+ segid, strerror(ret));
+ return (ret);
+ }
+
+ return (0);
+#else
+ return (__db_nosystemmem(dbenv));
+#endif
+ }
+
+#ifdef HAVE_MMAP
+#ifdef HAVE_MUNLOCK
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN))
+ (void)munlock(infop->addr, rp->size);
+#endif
+ if (munmap(infop->addr, rp->size) != 0) {
+ int ret;
+
+ ret = __os_get_errno();
+ __db_err(dbenv, "munmap: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (destroy && __os_region_unlink(dbenv, infop->name) != 0)
+ return (__os_get_errno());
+
+ return (0);
+#else
+ COMPQUIET(destroy, 0);
+ return (EINVAL);
+#endif
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ *
+ * PUBLIC: int __os_mapfile __P((DB_ENV *,
+ * PUBLIC: char *, DB_FH *, size_t, int, void **));
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addrp;
+{
+#if defined(HAVE_MMAP) && !defined(HAVE_QNX)
+ return (__os_map(dbenv, path, fhp, len, 0, is_rdonly, addrp));
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(path, NULL);
+ COMPQUIET(fhp, NULL);
+ COMPQUIET(is_rdonly, 0);
+ COMPQUIET(len, 0);
+ COMPQUIET(addrp, NULL);
+ return (EINVAL);
+#endif
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ *
+ * PUBLIC: int __os_unmapfile __P((DB_ENV *, void *, size_t));
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (__db_jump.j_unmap != NULL)
+ return (__db_jump.j_unmap(addr, len));
+
+#ifdef HAVE_MMAP
+#ifdef HAVE_MUNLOCK
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN))
+ (void)munlock(addr, len);
+#else
+ COMPQUIET(dbenv, NULL);
+#endif
+ return (munmap(addr, len) ? __os_get_errno() : 0);
+#else
+ COMPQUIET(dbenv, NULL);
+
+ return (EINVAL);
+#endif
+}
+
+#ifdef HAVE_MMAP
+/*
+ * __os_map --
+ * Call the mmap(2) function.
+ */
+static int
+__os_map(dbenv, path, fhp, len, is_region, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_region, is_rdonly;
+ size_t len;
+ void **addrp;
+{
+ void *p;
+ int flags, prot, ret;
+
+ /* If the user replaced the map call, call through their interface. */
+ if (__db_jump.j_map != NULL)
+ return (__db_jump.j_map
+ (path, len, is_region, is_rdonly, addrp));
+
+ /*
+ * If it's read-only, it's private, and if it's not, it's shared.
+ * Don't bother with an additional parameter.
+ */
+ flags = is_rdonly ? MAP_PRIVATE : MAP_SHARED;
+
+#ifdef MAP_FILE
+ /*
+ * Historically, MAP_FILE was required for mapping regular files,
+ * even though it was the default. Some systems have it, some
+ * don't, some that have it set it to 0.
+ */
+ flags |= MAP_FILE;
+#endif
+
+ /*
+ * I know of no systems that implement the flag to tell the system
+ * that the region contains semaphores, but it's not an unreasonable
+ * thing to do, and has been part of the design since forever. I
+ * don't think anyone will object, but don't set it for read-only
+ * files, it doesn't make sense.
+ */
+#ifdef MAP_HASSEMAPHORE
+ if (is_region && !is_rdonly)
+ flags |= MAP_HASSEMAPHORE;
+#else
+ COMPQUIET(is_region, 0);
+#endif
+
+ prot = PROT_READ | (is_rdonly ? 0 : PROT_WRITE);
+
+ /*
+ * XXX
+ * Work around a bug in the VMS V7.1 mmap() implementation. To map
+ * a file into memory on VMS it needs to be opened in a certain way,
+ * originally. To get the file opened in that certain way, the VMS
+ * mmap() closes the file and re-opens it. When it does this, it
+ * doesn't flush any caches out to disk before closing. The problem
+ * this causes us is that when the memory cache doesn't get written
+ * out, the file isn't big enough to match the memory chunk and the
+ * mmap() call fails. This call to fsync() fixes the problem. DEC
+ * thinks this isn't a bug because of language in XPG5 discussing user
+ * responsibility for on-disk and in-memory synchronization.
+ */
+#ifdef VMS
+ if (__os_fsync(dbenv, fhp) == -1)
+ return (__os_get_errno());
+#endif
+
+ /* MAP_FAILED was not defined in early mmap implementations. */
+#ifndef MAP_FAILED
+#define MAP_FAILED -1
+#endif
+ if ((p = mmap(NULL,
+ len, prot, flags, fhp->fd, (off_t)0)) == (void *)MAP_FAILED) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "mmap: %s", strerror(ret));
+ return (ret);
+ }
+
+#ifdef HAVE_MLOCK
+ /*
+ * If it's a region, we want to make sure that the memory isn't paged.
+ * For example, Solaris will page large mpools because it thinks that
+ * I/O buffer memory is more important than we are. The mlock system
+ * call may or may not succeed (mlock is restricted to the super-user
+ * on some systems). Currently, the only other use of mmap in DB is
+ * to map read-only databases -- we don't want them paged, either, so
+ * the call isn't conditional.
+ */
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN) && mlock(p, len) != 0) {
+ ret = __os_get_errno();
+ (void)munmap(p, len);
+ __db_err(dbenv, "mlock: %s", strerror(ret));
+ return (ret);
+ }
+#else
+ COMPQUIET(dbenv, NULL);
+#endif
+
+ *addrp = p;
+ return (0);
+}
+#endif
+
+#ifndef HAVE_SHMGET
+/*
+ * __db_nosystemmem --
+ * No system memory environments error message.
+ */
+static int
+__db_nosystemmem(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv,
+ "architecture doesn't support environments in system memory");
+ return (__db_eopnotsup(dbenv));
+}
+#endif
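As an aside, a self-contained sketch of the shmget create-or-recover sequence __os_r_sysattach performs for DB_ENV_SYSTEM_MEM regions: remove any stale segment at the key, create a fresh one, then attach with shmat. The key and size below are arbitrary example values, not anything DB uses.

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int
main(void)
{
	const key_t key = 0x00db0001;	/* Arbitrary example key. */
	const size_t size = 64 * 1024;
	int id;
	void *addr;

	/* If a stale segment exists at the key, remove it first. */
	if ((id = shmget(key, 0, 0)) != -1)
		(void)shmctl(id, IPC_RMID, NULL);

	/* Create the region, then attach it. */
	if ((id = shmget(key, size, IPC_CREAT | 0600)) == -1) {
		perror("shmget");
		return (1);
	}
	if ((addr = shmat(id, NULL, 0)) == (void *)-1) {
		perror("shmat");
		return (1);
	}

	/* ... use the region ... */

	(void)shmdt(addr);
	(void)shmctl(id, IPC_RMID, NULL);
	return (0);
}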
diff --git a/bdb/os/os_method.c b/bdb/os/os_method.c
new file mode 100644
index 00000000000..0e2bd394792
--- /dev/null
+++ b/bdb/os/os_method.c
@@ -0,0 +1,206 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_method.c,v 11.6 2000/11/15 19:25:39 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+struct __db_jumptab __db_jump;
+
+int
+db_env_set_func_close(func_close)
+ int (*func_close) __P((int));
+{
+ __db_jump.j_close = func_close;
+ return (0);
+}
+
+int
+db_env_set_func_dirfree(func_dirfree)
+ void (*func_dirfree) __P((char **, int));
+{
+ __db_jump.j_dirfree = func_dirfree;
+ return (0);
+}
+
+int
+db_env_set_func_dirlist(func_dirlist)
+ int (*func_dirlist) __P((const char *, char ***, int *));
+{
+ __db_jump.j_dirlist = func_dirlist;
+ return (0);
+}
+
+int
+db_env_set_func_exists(func_exists)
+ int (*func_exists) __P((const char *, int *));
+{
+ __db_jump.j_exists = func_exists;
+ return (0);
+}
+
+int
+db_env_set_func_free(func_free)
+ void (*func_free) __P((void *));
+{
+ __db_jump.j_free = func_free;
+ return (0);
+}
+
+int
+db_env_set_func_fsync(func_fsync)
+ int (*func_fsync) __P((int));
+{
+ __db_jump.j_fsync = func_fsync;
+ return (0);
+}
+
+int
+db_env_set_func_ioinfo(func_ioinfo)
+ int (*func_ioinfo)
+ __P((const char *, int, u_int32_t *, u_int32_t *, u_int32_t *));
+{
+ __db_jump.j_ioinfo = func_ioinfo;
+ return (0);
+}
+
+int
+db_env_set_func_malloc(func_malloc)
+ void *(*func_malloc) __P((size_t));
+{
+ __db_jump.j_malloc = func_malloc;
+ return (0);
+}
+
+int
+db_env_set_func_map(func_map)
+ int (*func_map) __P((char *, size_t, int, int, void **));
+{
+ __db_jump.j_map = func_map;
+ return (0);
+}
+
+int
+db_env_set_func_open(func_open)
+ int (*func_open) __P((const char *, int, ...));
+{
+ __db_jump.j_open = func_open;
+ return (0);
+}
+
+int
+db_env_set_func_read(func_read)
+ ssize_t (*func_read) __P((int, void *, size_t));
+{
+ __db_jump.j_read = func_read;
+ return (0);
+}
+
+int
+db_env_set_func_realloc(func_realloc)
+ void *(*func_realloc) __P((void *, size_t));
+{
+ __db_jump.j_realloc = func_realloc;
+ return (0);
+}
+
+int
+db_env_set_func_rename(func_rename)
+ int (*func_rename) __P((const char *, const char *));
+{
+ __db_jump.j_rename = func_rename;
+ return (0);
+}
+
+int
+db_env_set_func_seek(func_seek)
+ int (*func_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
+{
+ __db_jump.j_seek = func_seek;
+ return (0);
+}
+
+int
+db_env_set_func_sleep(func_sleep)
+ int (*func_sleep) __P((u_long, u_long));
+{
+ __db_jump.j_sleep = func_sleep;
+ return (0);
+}
+
+int
+db_env_set_func_unlink(func_unlink)
+ int (*func_unlink) __P((const char *));
+{
+ __db_jump.j_unlink = func_unlink;
+ return (0);
+}
+
+int
+db_env_set_func_unmap(func_unmap)
+ int (*func_unmap) __P((void *, size_t));
+{
+ __db_jump.j_unmap = func_unmap;
+ return (0);
+}
+
+int
+db_env_set_func_write(func_write)
+ ssize_t (*func_write) __P((int, const void *, size_t));
+{
+ __db_jump.j_write = func_write;
+ return (0);
+}
+
+int
+db_env_set_func_yield(func_yield)
+ int (*func_yield) __P((void));
+{
+ __db_jump.j_yield = func_yield;
+ return (0);
+}
+
+int
+db_env_set_pageyield(onoff)
+ int onoff;
+{
+ DB_GLOBAL(db_pageyield) = onoff;
+ return (0);
+}
+
+int
+db_env_set_panicstate(onoff)
+ int onoff;
+{
+ DB_GLOBAL(db_panic) = onoff;
+ return (0);
+}
+
+int
+db_env_set_region_init(onoff)
+ int onoff;
+{
+ DB_GLOBAL(db_region_init) = onoff;
+ return (0);
+}
+
+int
+db_env_set_tas_spins(tas_spins)
+ u_int32_t tas_spins;
+{
+ DB_GLOBAL(db_tas_spins) = tas_spins;
+ return (0);
+}
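The file above only fills in the __db_jump table; a standalone sketch of the same replaceable-primitive pattern (not the real DB API) may make the dispatch clearer: a table of function pointers, a setter, and a call site that prefers the override and falls back to the system call.

#include <stdio.h>
#include <unistd.h>

/* A tiny jump table in the style of __db_jumptab; names are hypothetical. */
struct os_jumptab {
	int (*j_fsync)(int);
};

static struct os_jumptab os_jump;	/* All-NULL until overridden. */

static int
set_func_fsync(int (*func_fsync)(int))
{
	os_jump.j_fsync = func_fsync;
	return (0);
}

static int
my_fsync(int fd)
{
	/* Application-supplied replacement: log, then sync. */
	fprintf(stderr, "syncing fd %d\n", fd);
	return (fsync(fd));
}

static int
do_fsync(int fd)
{
	/* Prefer the override, fall back to the system call. */
	return (os_jump.j_fsync != NULL ? os_jump.j_fsync(fd) : fsync(fd));
}

int
main(void)
{
	(void)set_func_fsync(my_fsync);
	(void)do_fsync(STDOUT_FILENO);
	return (0);
}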
diff --git a/bdb/os/os_oflags.c b/bdb/os/os_oflags.c
new file mode 100644
index 00000000000..fd413bdacbe
--- /dev/null
+++ b/bdb/os/os_oflags.c
@@ -0,0 +1,106 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_oflags.c,v 11.6 2000/10/27 20:32:02 dda Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <fcntl.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_oflags --
+ * Convert open(2) flags to DB flags.
+ *
+ * PUBLIC: u_int32_t __db_oflags __P((int));
+ */
+u_int32_t
+__db_oflags(oflags)
+ int oflags;
+{
+ u_int32_t dbflags;
+
+ dbflags = 0;
+
+ if (oflags & O_CREAT)
+ dbflags |= DB_CREATE;
+
+ if (oflags & O_TRUNC)
+ dbflags |= DB_TRUNCATE;
+
+ /*
+ * !!!
+ * Convert POSIX 1003.1 open(2) mode flags to DB flags. This isn't
+ * an exact science as few POSIX implementations have a flag value
+ * for O_RDONLY, it's simply the lack of a write flag.
+ */
+#ifndef O_ACCMODE
+#define O_ACCMODE (O_RDONLY | O_RDWR | O_WRONLY)
+#endif
+ switch (oflags & O_ACCMODE) {
+ case O_RDWR:
+ case O_WRONLY:
+ break;
+ default:
+ dbflags |= DB_RDONLY;
+ break;
+ }
+ return (dbflags);
+}
+
+/*
+ * __db_omode --
+ * Convert a permission string to the correct open(2) flags.
+ *
+ * PUBLIC: int __db_omode __P((const char *));
+ */
+int
+__db_omode(perm)
+ const char *perm;
+{
+ int mode;
+
+#ifndef S_IRUSR
+#ifdef DB_WIN32
+#define S_IRUSR S_IREAD /* R for owner */
+#define S_IWUSR S_IWRITE /* W for owner */
+#define S_IRGRP 0 /* R for group */
+#define S_IWGRP 0 /* W for group */
+#define S_IROTH 0 /* R for other */
+#define S_IWOTH 0 /* W for other */
+#else
+#define S_IRUSR 0000400 /* R for owner */
+#define S_IWUSR 0000200 /* W for owner */
+#define S_IRGRP 0000040 /* R for group */
+#define S_IWGRP 0000020 /* W for group */
+#define S_IROTH 0000004 /* R for other */
+#define S_IWOTH 0000002 /* W for other */
+#endif /* DB_WIN32 */
+#endif
+ mode = 0;
+ if (perm[0] == 'r')
+ mode |= S_IRUSR;
+ if (perm[1] == 'w')
+ mode |= S_IWUSR;
+ if (perm[2] == 'r')
+ mode |= S_IRGRP;
+ if (perm[3] == 'w')
+ mode |= S_IWGRP;
+ if (perm[4] == 'r')
+ mode |= S_IROTH;
+ if (perm[5] == 'w')
+ mode |= S_IWOTH;
+ return (mode);
+}
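A standalone sketch of the six-character permission-string convention __db_omode parses above, with the resulting mode printed in octal; perm_to_mode() is a hypothetical stand-in, and the example strings are only presumed typical inputs.

#include <stdio.h>
#include <sys/stat.h>

/*
 * Parse a six-character owner/group/other permission string of the form
 * "rwrwrw", using '-' (or any other character) for a denied bit -- the
 * same convention __db_omode implements above.
 */
static int
perm_to_mode(const char *perm)
{
	int mode;

	mode = 0;
	if (perm[0] == 'r') mode |= S_IRUSR;
	if (perm[1] == 'w') mode |= S_IWUSR;
	if (perm[2] == 'r') mode |= S_IRGRP;
	if (perm[3] == 'w') mode |= S_IWGRP;
	if (perm[4] == 'r') mode |= S_IROTH;
	if (perm[5] == 'w') mode |= S_IWOTH;
	return (mode);
}

int
main(void)
{
	/* "rwrw--": read/write for owner and group, nothing for other. */
	printf("rwrw-- -> %04o\n", (unsigned)perm_to_mode("rwrw--"));	/* 0660 */
	/* "rw----": read/write for the owner only. */
	printf("rw---- -> %04o\n", (unsigned)perm_to_mode("rw----"));	/* 0600 */
	return (0);
}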
diff --git a/bdb/os/os_open.c b/bdb/os/os_open.c
new file mode 100644
index 00000000000..cdc75cd737b
--- /dev/null
+++ b/bdb/os/os_open.c
@@ -0,0 +1,226 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_open.c,v 11.21 2001/01/11 18:19:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_QNX
+static int __os_region_open __P((DB_ENV *, const char *, int, int, DB_FH *));
+#endif
+
+/*
+ * __os_open --
+ * Open a file.
+ *
+ * PUBLIC: int __os_open __P((DB_ENV *, const char *, u_int32_t, int, DB_FH *));
+ */
+int
+__os_open(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB_FH *fhp;
+{
+ int oflags, ret;
+
+ oflags = 0;
+
+#if defined(O_BINARY)
+ /*
+ * If there's a binary-mode open flag, set it; we never want any
+ * kind of translation. Some systems do translations by default,
+ * e.g., with Cygwin, the default mode for an open() is set by the
+ * mode of the mount that underlies the file.
+ */
+ oflags |= O_BINARY;
+#endif
+
+ /*
+ * DB requires the POSIX 1003.1 semantic that two files opened at the
+ * same time with DB_OSO_CREATE/O_CREAT and DB_OSO_EXCL/O_EXCL flags
+ * set return an EEXIST failure in at least one.
+ */
+ if (LF_ISSET(DB_OSO_CREATE))
+ oflags |= O_CREAT;
+
+ if (LF_ISSET(DB_OSO_EXCL))
+ oflags |= O_EXCL;
+
+#if defined(O_DSYNC) && defined(XXX_NEVER_SET)
+ /*
+ * !!!
+ * We should get better performance if we push the log files to disk
+ * immediately instead of waiting for the sync. However, Solaris
+ * (and likely any other system based on the 4BSD filesystem releases),
+ * doesn't implement O_DSYNC correctly, only flushing data blocks and
+ * not inode or indirect blocks.
+ */
+ if (LF_ISSET(DB_OSO_LOG))
+ oflags |= O_DSYNC;
+#endif
+
+ if (LF_ISSET(DB_OSO_RDONLY))
+ oflags |= O_RDONLY;
+ else
+ oflags |= O_RDWR;
+
+ if (LF_ISSET(DB_OSO_TRUNC))
+ oflags |= O_TRUNC;
+
+#ifdef HAVE_QNX
+ if (LF_ISSET(DB_OSO_REGION))
+ return (__os_region_open(dbenv, name, oflags, mode, fhp));
+#endif
+ /* Open the file. */
+ if ((ret = __os_openhandle(dbenv, name, oflags, mode, fhp)) != 0)
+ return (ret);
+
+ /*
+ * Delete any temporary file.
+ *
+ * !!!
+ * There's a race here, where we've created a file and we crash before
+ * we can unlink it. Temporary files aren't common in DB, regardless,
+ * it's not a security problem because the file is empty. There's no
+ * reasonable way to avoid the race (playing signal games isn't worth
+ * the portability nightmare), so we just live with it.
+ */
+ if (LF_ISSET(DB_OSO_TEMP))
+ (void)__os_unlink(dbenv, name);
+
+ return (0);
+}
+
+#ifdef HAVE_QNX
+/*
+ * __os_region_open --
+ * Open a shared memory region file using POSIX shm_open.
+ */
+static int
+__os_region_open(dbenv, name, oflags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int oflags;
+ int mode;
+ DB_FH *fhp;
+{
+ int ret;
+ char *newname;
+
+ if ((ret = __os_shmname(dbenv, name, &newname)) != 0)
+ goto err;
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->fd = shm_open(newname, oflags, mode);
+ if (fhp->fd == -1)
+ ret = __os_get_errno();
+ else {
+#ifdef HAVE_FCNTL_F_SETFD
+ /* Deny file descriptor access to any child process. */
+ if (fcntl(fhp->fd, F_SETFD, 1) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "fcntl(F_SETFD): %s", strerror(ret));
+ __os_closehandle(fhp);
+ } else
+#endif
+ F_SET(fhp, DB_FH_VALID);
+ }
+ /*
+ * Once we have created the object, we don't need the name
+ * anymore. Other callers of this will convert themselves.
+ */
+err:
+ if (newname != NULL)
+ __os_free(newname, 0);
+ return (ret);
+}
+
+/*
+ * __os_shmname --
+ * Translate a pathname into a shm_open memory object name.
+ *
+ * PUBLIC: int __os_shmname __P((DB_ENV *, const char *, char **));
+ */
+int
+__os_shmname(dbenv, name, newnamep)
+ DB_ENV *dbenv;
+ const char *name;
+ char **newnamep;
+{
+ int ret;
+ size_t size;
+ char *p, *q, *tmpname;
+
+ *newnamep = NULL;
+
+ /*
+ * POSIX states that the name for a shared memory object
+ * may begin with a slash '/' and support for subsequent
+ * slashes is implementation-dependent. The one implementation
+ * we know of right now, QNX, forbids subsequent slashes.
+ * We don't want to be parsing pathnames for '.' and '..' in
+ * the middle. In order to allow easy conversion, just take
+ * the last component as the shared memory name. This limits
+ * the namespace a bit, but makes our job a lot easier.
+ *
+ * We should not be modifying user memory, so we use our own.
+ * Caller is responsible for freeing the memory we give them.
+ */
+ if ((ret = __os_strdup(dbenv, name, &tmpname)) != 0)
+ return (ret);
+ /*
+ * Skip over filename component.
+ * We set that separator to '\0' so that we can do another
+ * __db_rpath. However, we then immediately set it to ':'
+ * so that we end up with the trailing directory:filename.
+ * We require a home directory component. Return an error
+ * if there isn't one.
+ */
+ p = __db_rpath(tmpname);
+ if (p == NULL) {
+ __os_free(tmpname, 0);
+ return (EINVAL);
+ }
+ if (p != tmpname) {
+ *p = '\0';
+ q = p;
+ p = __db_rpath(tmpname);
+ *q = ':';
+ }
+ if (p != NULL) {
+ /*
+ * If we have a path component, copy and return it.
+ */
+ ret = __os_strdup(dbenv, p, newnamep);
+ __os_free(tmpname, 0);
+ return (ret);
+ }
+
+ /*
+ * We were given just a directory name with no path components.
+ * Add a leading slash, and copy the remainder.
+ */
+ size = strlen(tmpname) + 2;
+ if ((ret = __os_malloc(dbenv, size, NULL, &p)) != 0)
+ return (ret);
+ p[0] = '/';
+ memcpy(&p[1], tmpname, size-1);
+ __os_free(tmpname, 0);
+ *newnamep = p;
+ return (0);
+}
+#endif
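A simplified, standalone sketch of the name translation __os_shmname performs: reduce a full pathname to a single-component POSIX shared-memory object name with a leading slash. It skips the QNX-specific directory:file form and just keeps the last component; path_to_shmname() is a hypothetical helper.

#include <stdio.h>
#include <string.h>

/*
 * Reduce a pathname to a POSIX shared-memory object name: a single
 * leading '/' followed by the final path component, no other slashes.
 */
static void
path_to_shmname(const char *path, char *buf, size_t buflen)
{
	const char *base;

	base = strrchr(path, '/');
	base = base != NULL ? base + 1 : path;
	(void)snprintf(buf, buflen, "/%s", base);
}

int
main(void)
{
	char name[64];

	path_to_shmname("/home/db/TESTDIR/__db.001", name, sizeof(name));
	printf("%s\n", name);	/* Prints "/__db.001". */
	return (0);
}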
diff --git a/bdb/os/os_region.c b/bdb/os/os_region.c
new file mode 100644
index 00000000000..1e36fc2cbe0
--- /dev/null
+++ b/bdb/os/os_region.c
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_region.c,v 11.9 2000/11/30 00:58:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_r_attach --
+ * Attach to a shared memory region.
+ *
+ * PUBLIC: int __os_r_attach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_attach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ int ret;
+ /* Round off the requested size for the underlying VM. */
+ OS_VMROUNDOFF(rp->size);
+
+#ifdef DB_REGIONSIZE_MAX
+ /* Some architectures have hard limits on the maximum region size. */
+ if (rp->size > DB_REGIONSIZE_MAX) {
+ __db_err(dbenv, "region size %lu is too large; maximum is %lu",
+ (u_long)rp->size, (u_long)DB_REGIONSIZE_MAX);
+ return (EINVAL);
+ }
+#endif
+
+ /*
+ * If a region is private, malloc the memory.
+ *
+ * !!!
+ * If this fails because the region is too large to malloc, mmap(2)
+ * using the MAP_ANON or MAP_ANONYMOUS flags would be an alternative.
+ * I don't know of any architectures (yet!) where malloc is a problem.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+#if defined(MUTEX_NO_MALLOC_LOCKS)
+ /*
+ * !!!
+ * There exist spinlocks that don't work in malloc memory, e.g.,
+ * the HP/UX msemaphore interface. If we don't have locks that
+ * will work in malloc memory, we better not be private or not
+ * be threaded.
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ __db_err(dbenv, "%s",
+ "architecture does not support locks inside process-local (malloc) memory");
+ __db_err(dbenv, "%s",
+ "application may not specify both DB_PRIVATE and DB_THREAD");
+ return (EINVAL);
+ }
+#endif
+ if ((ret =
+ __os_malloc(dbenv, rp->size, NULL, &infop->addr)) != 0)
+ return (ret);
+#if defined(UMRW) && !defined(DIAGNOSTIC)
+ memset(infop->addr, CLEAR_BYTE, rp->size);
+#endif
+ return (0);
+ }
+
+ /* If the user replaced the map call, call through their interface. */
+ if (__db_jump.j_map != NULL)
+ return (__db_jump.j_map(infop->name,
+ rp->size, 1, 0, &infop->addr));
+
+ return (__os_r_sysattach(dbenv, infop, rp));
+}
+
+/*
+ * __os_r_detach --
+ * Detach from a shared memory region.
+ *
+ * PUBLIC: int __os_r_detach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_detach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGION *rp;
+
+ rp = infop->rp;
+
+ /* If a region is private, free the memory. */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ __os_free(infop->addr, rp->size);
+ return (0);
+ }
+
+ /* If the user replaced the map call, call through their interface. */
+ if (__db_jump.j_unmap != NULL)
+ return (__db_jump.j_unmap(infop->addr, rp->size));
+
+ return (__os_r_sysdetach(dbenv, infop, destroy));
+}
diff --git a/bdb/os/os_rename.c b/bdb/os/os_rename.c
new file mode 100644
index 00000000000..8108bba67d9
--- /dev/null
+++ b/bdb/os/os_rename.c
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rename.c,v 11.6 2000/04/14 16:56:33 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_rename --
+ * Rename a file.
+ *
+ * PUBLIC: int __os_rename __P((DB_ENV *, const char *, const char *));
+ */
+int
+__os_rename(dbenv, old, new)
+ DB_ENV *dbenv;
+ const char *old, *new;
+{
+ int ret;
+
+ ret = __db_jump.j_rename != NULL ?
+ __db_jump.j_rename(old, new) : rename(old, new);
+
+ if (ret == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "Rename %s %s: %s", old, new, strerror(ret));
+ }
+
+ return (ret);
+}
diff --git a/bdb/os/os_root.c b/bdb/os/os_root.c
new file mode 100644
index 00000000000..753285c1be6
--- /dev/null
+++ b/bdb/os/os_root.c
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_root.c,v 11.4 2000/02/14 03:00:05 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_isroot --
+ * Return if user has special permissions.
+ *
+ * PUBLIC: int __os_isroot __P((void));
+ */
+int
+__os_isroot()
+{
+#ifdef HAVE_GETUID
+ return (getuid() == 0);
+#else
+ return (0);
+#endif
+}
diff --git a/bdb/os/os_rpath.c b/bdb/os/os_rpath.c
new file mode 100644
index 00000000000..75d394ef210
--- /dev/null
+++ b/bdb/os/os_rpath.c
@@ -0,0 +1,69 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rpath.c,v 11.5 2000/06/30 13:40:30 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#ifdef HAVE_VXWORKS
+#include "iosLib.h"
+#endif
+
+/*
+ * __db_rpath --
+ * Return the last path separator in the path or NULL if none found.
+ *
+ * PUBLIC: char *__db_rpath __P((const char *));
+ */
+char *
+__db_rpath(path)
+ const char *path;
+{
+ const char *s, *last;
+#ifdef HAVE_VXWORKS
+ DEV_HDR *dummy;
+ char *ptail;
+
+ /*
+ * VxWorks devices can be rooted at any name. We want to
+ * skip over the device name and not take into account any
+ * PATH_SEPARATOR characters that might be in that name.
+ *
+ * XXX [#2393]
+ * VxWorks supports having a filename directly follow a device
+ * name with no separator. I.e. to access a file 'xxx' in
+ * the top level directory of a device mounted at "mydrive"
+ * you could say "mydrivexxx" or "mydrive/xxx" or "mydrive\xxx".
+ * We do not support the first usage here.
+ * XXX
+ */
+ if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
+ s = path;
+ else
+ s = ptail;
+#else
+ s = path;
+#endif
+
+ last = NULL;
+ if (PATH_SEPARATOR[1] != '\0') {
+ for (; s[0] != '\0'; ++s)
+ if (strchr(PATH_SEPARATOR, s[0]) != NULL)
+ last = s;
+ } else
+ for (; s[0] != '\0'; ++s)
+ if (s[0] == PATH_SEPARATOR[0])
+ last = s;
+ return ((char *)last);
+}
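A standalone sketch of the multi-character separator scan __db_rpath does above, here with a hard-coded two-character separator set standing in for PATH_SEPARATOR; last_separator() is a hypothetical name.

#include <stdio.h>
#include <string.h>

/* Return the last occurrence of any separator character, or NULL. */
static const char *
last_separator(const char *path, const char *seps)
{
	const char *last, *s;

	last = NULL;
	for (s = path; *s != '\0'; ++s)
		if (strchr(seps, *s) != NULL)
			last = s;
	return (last);
}

int
main(void)
{
	const char *p;

	/* Accept both '/' and '\\', as a Windows build of DB would. */
	p = last_separator("C:\\temp/env\\__db.001", "\\/");
	printf("%s\n", p != NULL ? p + 1 : "(none)");	/* Prints "__db.001". */
	return (0);
}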
diff --git a/bdb/os/os_rw.c b/bdb/os/os_rw.c
new file mode 100644
index 00000000000..7e8e1255d6b
--- /dev/null
+++ b/bdb/os/os_rw.c
@@ -0,0 +1,147 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rw.c,v 11.15 2000/11/15 19:25:39 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_io --
+ * Do an I/O.
+ *
+ * PUBLIC: int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+ */
+int
+__os_io(dbenv, db_iop, op, niop)
+ DB_ENV *dbenv;
+ DB_IO *db_iop;
+ int op;
+ size_t *niop;
+{
+ int ret;
+
+#if defined(HAVE_PREAD) && defined(HAVE_PWRITE)
+ switch (op) {
+ case DB_IO_READ:
+ if (__db_jump.j_read != NULL)
+ goto slow;
+ *niop = pread(db_iop->fhp->fd, db_iop->buf,
+ db_iop->bytes, (off_t)db_iop->pgno * db_iop->pagesize);
+ break;
+ case DB_IO_WRITE:
+ if (__db_jump.j_write != NULL)
+ goto slow;
+ *niop = pwrite(db_iop->fhp->fd, db_iop->buf,
+ db_iop->bytes, (off_t)db_iop->pgno * db_iop->pagesize);
+ break;
+ }
+ if (*niop == (size_t)db_iop->bytes)
+ return (0);
+slow:
+#endif
+ MUTEX_THREAD_LOCK(dbenv, db_iop->mutexp);
+
+ if ((ret = __os_seek(dbenv, db_iop->fhp,
+ db_iop->pagesize, db_iop->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ switch (op) {
+ case DB_IO_READ:
+ ret = __os_read(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ case DB_IO_WRITE:
+ ret = __os_write(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ }
+
+err: MUTEX_THREAD_UNLOCK(dbenv, db_iop->mutexp);
+
+ return (ret);
+
+}
+
+/*
+ * __os_read --
+ * Read from a file handle.
+ *
+ * PUBLIC: int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_read(dbenv, fhp, addr, len, nrp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nrp;
+{
+ size_t offset;
+ ssize_t nr;
+ int ret;
+ u_int8_t *taddr;
+
+ for (taddr = addr,
+ offset = 0; offset < len; taddr += nr, offset += nr) {
+ if ((nr = __db_jump.j_read != NULL ?
+ __db_jump.j_read(fhp->fd, taddr, len - offset) :
+ read(fhp->fd, taddr, len - offset)) < 0) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "read: 0x%x, %lu: %s", taddr,
+ (u_long)len-offset, strerror(ret));
+ return (ret);
+ }
+ if (nr == 0)
+ break;
+ }
+ *nrp = taddr - (u_int8_t *)addr;
+ return (0);
+}
+
+/*
+ * __os_write --
+ * Write to a file handle.
+ *
+ * PUBLIC: int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_write(dbenv, fhp, addr, len, nwp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nwp;
+{
+ size_t offset;
+ ssize_t nw;
+ int ret;
+ u_int8_t *taddr;
+
+ for (taddr = addr,
+ offset = 0; offset < len; taddr += nw, offset += nw)
+ if ((nw = __db_jump.j_write != NULL ?
+ __db_jump.j_write(fhp->fd, taddr, len - offset) :
+ write(fhp->fd, taddr, len - offset)) < 0) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "write: 0x%x, %lu: %s", taddr,
+ (u_long)len-offset, strerror(ret));
+ return (ret);
+ }
+ *nwp = len;
+ return (0);
+}
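A standalone sketch of the short-read loop used by __os_read, combined with the pread(2) fast path __os_io prefers so no seek mutex is needed; read_fully() is a hypothetical helper and the file read is just an example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Read exactly len bytes at the given offset, looping on short reads,
 * in the style of the __os_read loop above but using pread(2).
 */
static ssize_t
read_fully(int fd, void *buf, size_t len, off_t offset)
{
	size_t done;
	ssize_t nr;
	char *p;

	for (p = buf, done = 0; done < len; p += nr, done += nr) {
		nr = pread(fd, p, len - done, offset + (off_t)done);
		if (nr < 0)
			return (-1);	/* errno is set. */
		if (nr == 0)
			break;		/* EOF. */
	}
	return ((ssize_t)done);
}

int
main(void)
{
	char page[4096];
	int fd;
	ssize_t n;

	if ((fd = open("/etc/hosts", O_RDONLY)) == -1) {
		perror("open");
		return (1);
	}
	n = read_fully(fd, page, sizeof(page), 0);
	printf("read %zd bytes\n", n);
	(void)close(fd);
	return (0);
}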
diff --git a/bdb/os/os_seek.c b/bdb/os/os_seek.c
new file mode 100644
index 00000000000..1c4dc2238e1
--- /dev/null
+++ b/bdb/os/os_seek.c
@@ -0,0 +1,76 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_seek.c,v 11.12 2000/11/30 00:58:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_seek --
+ * Seek to a page/byte offset in the file.
+ *
+ * PUBLIC: int __os_seek __P((DB_ENV *,
+ * PUBLIC: DB_FH *, size_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK));
+ */
+int
+__os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t pgsize;
+ db_pgno_t pageno;
+ u_int32_t relative;
+ int isrewind;
+ DB_OS_SEEK db_whence;
+{
+ off_t offset;
+ int ret, whence;
+
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case DB_OS_SEEK_END:
+ whence = SEEK_END;
+ break;
+ case DB_OS_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (__db_jump.j_seek != NULL)
+ ret = __db_jump.j_seek(fhp->fd,
+ pgsize, pageno, relative, isrewind, whence);
+ else {
+ offset = (off_t)pgsize * pageno + relative;
+ if (isrewind)
+ offset = -offset;
+ ret =
+ lseek(fhp->fd, offset, whence) == -1 ? __os_get_errno() : 0;
+ }
+
+ if (ret != 0)
+ __db_err(dbenv, "seek: %lu %d %d: %s",
+ (u_long)pgsize * pageno + relative,
+ isrewind, db_whence, strerror(ret));
+
+ return (ret);
+}
diff --git a/bdb/os/os_sleep.c b/bdb/os/os_sleep.c
new file mode 100644
index 00000000000..137cd73b708
--- /dev/null
+++ b/bdb/os/os_sleep.c
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_sleep.c,v 11.7 2000/04/07 14:26:36 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+
+#ifdef HAVE_VXWORKS
+#include <sys/times.h>
+#include <time.h>
+#else
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif /* HAVE_SYS_TIME_H */
+#endif /* TIME_WITH_SYS_TIME */
+#endif /* HAVE_VXWORKS */
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_sleep --
+ * Yield the processor for a period of time.
+ *
+ * PUBLIC: int __os_sleep __P((DB_ENV *, u_long, u_long));
+ */
+int
+__os_sleep(dbenv, secs, usecs)
+ DB_ENV *dbenv;
+ u_long secs, usecs; /* Seconds and microseconds. */
+{
+ struct timeval t;
+ int ret;
+
+ /* Don't require that the values be normalized. */
+ for (; usecs >= 1000000; usecs -= 1000000)
+ ++secs;
+
+ if (__db_jump.j_sleep != NULL)
+ return (__db_jump.j_sleep(secs, usecs));
+
+ /*
+ * It's important that we yield the processor here so that other
+ * processes or threads are permitted to run.
+ */
+ t.tv_sec = secs;
+ t.tv_usec = usecs;
+ ret = select(0, NULL, NULL, NULL, &t) == -1 ? __os_get_errno() : 0;
+
+ if (ret != 0)
+ __db_err(dbenv, "select: %s", strerror(ret));
+
+ return (ret);
+}
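A standalone sketch of the select(2)-with-no-descriptors idiom __os_sleep relies on above for a portable, processor-yielding sub-second sleep; yield_sleep() is a hypothetical name.

#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>

/* Sleep for secs/usecs by selecting on no descriptors. */
static int
yield_sleep(unsigned long secs, unsigned long usecs)
{
	struct timeval t;

	/* Normalize, the same way __os_sleep does. */
	for (; usecs >= 1000000; usecs -= 1000000)
		++secs;

	t.tv_sec = (time_t)secs;
	t.tv_usec = (long)usecs;
	return (select(0, NULL, NULL, NULL, &t) == -1 ? -1 : 0);
}

int
main(void)
{
	printf("sleeping 250ms...\n");
	(void)yield_sleep(0, 250000);
	return (0);
}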
diff --git a/bdb/os/os_spin.c b/bdb/os/os_spin.c
new file mode 100644
index 00000000000..b0800b98830
--- /dev/null
+++ b/bdb/os/os_spin.c
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_spin.c,v 11.5 2000/03/30 01:46:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+#include <sys/pstat.h>
+#endif
+
+#include <limits.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+/*
+ * __os_pstat_getdynamic --
+ * HP/UX.
+ */
+static int
+__os_pstat_getdynamic()
+{
+ struct pst_dynamic psd;
+
+ return (pstat_getdynamic(&psd,
+ sizeof(psd), (size_t)1, 0) == -1 ? 1 : psd.psd_proc_cnt);
+}
+#endif
+
+#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
+/*
+ * __os_sysconf --
+ * Solaris, Linux.
+ */
+static int
+__os_sysconf()
+{
+ int nproc;
+
+ return ((nproc = sysconf(_SC_NPROCESSORS_ONLN)) > 1 ? nproc : 1);
+}
+#endif
+
+/*
+ * __os_spin --
+ * Return the number of default spins before blocking.
+ *
+ * PUBLIC: int __os_spin __P((void));
+ */
+int
+__os_spin()
+{
+ /*
+ * If the application specified a value or we've already figured it
+ * out, return it.
+ *
+ * XXX
+ * We don't want to repeatedly call the underlying function because
+ * it can be expensive (e.g., requiring multiple filesystem accesses
+ * under Debian Linux).
+ */
+ if (DB_GLOBAL(db_tas_spins) != 0)
+ return (DB_GLOBAL(db_tas_spins));
+
+ DB_GLOBAL(db_tas_spins) = 1;
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+ DB_GLOBAL(db_tas_spins) = __os_pstat_getdynamic();
+#endif
+#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
+ DB_GLOBAL(db_tas_spins) = __os_sysconf();
+#endif
+
+ /*
+ * Spin 50 times per processor, we have anecdotal evidence that this
+ * is a reasonable value.
+ */
+ if (DB_GLOBAL(db_tas_spins) != 1)
+ DB_GLOBAL(db_tas_spins) *= 50;
+
+ return (DB_GLOBAL(db_tas_spins));
+}
+
+/*
+ * __os_yield --
+ * Yield the processor.
+ *
+ * PUBLIC: void __os_yield __P((DB_ENV*, u_long));
+ */
+void
+__os_yield(dbenv, usecs)
+ DB_ENV *dbenv;
+ u_long usecs;
+{
+ if (__db_jump.j_yield != NULL && __db_jump.j_yield() == 0)
+ return;
+ __os_sleep(dbenv, 0, usecs);
+}
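A standalone sketch of the spin-count heuristic above: one spin on a uniprocessor, otherwise 50 spins per online processor as reported by sysconf(), cached after the first call; default_spins() is a hypothetical name.

#include <stdio.h>
#include <unistd.h>

/* Default test-and-set spin count: 50 per online CPU, cached. */
static long
default_spins(void)
{
	static long spins;	/* 0 until computed. */
	long nproc;

	if (spins != 0)
		return (spins);

	nproc = 1;
#if defined(_SC_NPROCESSORS_ONLN)
	if ((nproc = sysconf(_SC_NPROCESSORS_ONLN)) < 1)
		nproc = 1;
#endif
	spins = nproc == 1 ? 1 : nproc * 50;
	return (spins);
}

int
main(void)
{
	printf("default spins: %ld\n", default_spins());
	return (0);
}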
diff --git a/bdb/os/os_stat.c b/bdb/os/os_stat.c
new file mode 100644
index 00000000000..1590e8ecd77
--- /dev/null
+++ b/bdb/os/os_stat.c
@@ -0,0 +1,108 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_stat.c,v 11.8 2000/10/27 20:32:02 dda Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_exists --
+ * Return if the file exists.
+ *
+ * PUBLIC: int __os_exists __P((const char *, int *));
+ */
+int
+__os_exists(path, isdirp)
+ const char *path;
+ int *isdirp;
+{
+ struct stat sb;
+
+ if (__db_jump.j_exists != NULL)
+ return (__db_jump.j_exists(path, isdirp));
+
+#ifdef HAVE_VXWORKS
+ if (stat((char *)path, &sb) != 0)
+#else
+ if (stat(path, &sb) != 0)
+#endif
+ return (__os_get_errno());
+
+#if !defined(S_ISDIR) || defined(STAT_MACROS_BROKEN)
+#ifdef DB_WIN32
+#define S_ISDIR(m) (_S_IFDIR & (m))
+#else
+#define S_ISDIR(m) (((m) & 0170000) == 0040000)
+#endif
+#endif
+ if (isdirp != NULL)
+ *isdirp = S_ISDIR(sb.st_mode);
+
+ return (0);
+}
+
+/*
+ * __os_ioinfo --
+ * Return file size and I/O size; abstracted to make it easier
+ * to replace.
+ *
+ * PUBLIC: int __os_ioinfo __P((DB_ENV *, const char *,
+ * PUBLIC: DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t *mbytesp, *bytesp, *iosizep;
+{
+ int ret;
+ struct stat sb;
+
+ if (__db_jump.j_ioinfo != NULL)
+ return (__db_jump.j_ioinfo(path,
+ fhp->fd, mbytesp, bytesp, iosizep));
+
+ if (fstat(fhp->fd, &sb) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "fstat: %s", strerror(ret));
+ return (ret);
+ }
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = sb.st_size / MEGABYTE;
+ if (bytesp != NULL)
+ *bytesp = sb.st_size % MEGABYTE;
+
+ /*
+ * Return the underlying filesystem blocksize, if available.
+ *
+ * XXX
+ * Check for a 0 size -- the HP MPE/iX architecture has st_blksize,
+ * but it's always 0.
+ */
+#ifdef HAVE_ST_BLKSIZE
+ if (iosizep != NULL && (*iosizep = sb.st_blksize) == 0)
+ *iosizep = DB_DEF_IOSIZE;
+#else
+ if (iosizep != NULL)
+ *iosizep = DB_DEF_IOSIZE;
+#endif
+ return (0);
+}
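A standalone sketch of what __os_ioinfo reports: the file size split into whole megabytes plus a byte remainder, and the filesystem block size as the preferred I/O size, with a fallback when st_blksize is zero. The MB constant and the 8KB fallback are assumptions for the sketch, not necessarily DB's values.

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#define MB	(1024 * 1024)	/* Presumably what MEGABYTE expands to. */

int
main(void)
{
	struct stat sb;
	unsigned long mbytes, bytes, iosize;
	int fd;

	if ((fd = open("/etc/hosts", O_RDONLY)) == -1 || fstat(fd, &sb) == -1) {
		perror("stat");
		return (1);
	}

	/* Report the size as whole megabytes plus a byte remainder. */
	mbytes = (unsigned long)(sb.st_size / MB);
	bytes = (unsigned long)(sb.st_size % MB);

	/* Prefer the filesystem block size; fall back if it's zero. */
	iosize = sb.st_blksize != 0 ?
	    (unsigned long)sb.st_blksize : 8 * 1024;	/* Arbitrary fallback. */

	printf("%lu MB + %lu bytes, iosize %lu\n", mbytes, bytes, iosize);
	(void)close(fd);
	return (0);
}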
diff --git a/bdb/os/os_tmpdir.c b/bdb/os/os_tmpdir.c
new file mode 100644
index 00000000000..0dff5c5b7f0
--- /dev/null
+++ b/bdb/os/os_tmpdir.c
@@ -0,0 +1,119 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_tmpdir.c,v 11.16 2001/01/08 20:42:06 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef macintosh
+#include <TFileSpec.h>
+#endif
+
+/*
+ * __os_tmpdir --
+ * Set the temporary directory path.
+ *
+ * The order of items in the list structure and the order of checks in
+ * the environment are documented.
+ *
+ * PUBLIC: int __os_tmpdir __P((DB_ENV *, u_int32_t));
+ */
+int
+__os_tmpdir(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ /*
+ * !!!
+ * Don't change this to:
+ *
+ * static const char * const list[]
+ *
+ * because it creates a text relocation in position independent code.
+ */
+ static const char * list[] = {
+ "/var/tmp",
+ "/usr/tmp",
+ "/temp", /* Windows. */
+ "/tmp",
+ "C:/temp", /* Windows. */
+ "C:/tmp", /* Windows. */
+ NULL
+ };
+ const char * const *lp, *p;
+
+ /* Use the environment if it's permitted and initialized. */
+ if (LF_ISSET(DB_USE_ENVIRON) ||
+ (LF_ISSET(DB_USE_ENVIRON_ROOT) && __os_isroot())) {
+ if ((p = getenv("TMPDIR")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal TMPDIR environment variable");
+ return (EINVAL);
+ }
+ /* Windows */
+ if (p == NULL && (p = getenv("TEMP")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal TEMP environment variable");
+ return (EINVAL);
+ }
+ /* Windows */
+ if (p == NULL && (p = getenv("TMP")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal TMP environment variable");
+ return (EINVAL);
+ }
+ /* Macintosh */
+ if (p == NULL &&
+ (p = getenv("TempFolder")) != NULL && p[0] == '\0') {
+ __db_err(dbenv,
+ "illegal TempFolder environment variable");
+ return (EINVAL);
+ }
+ if (p != NULL)
+ return (__os_strdup(dbenv, p, &dbenv->db_tmp_dir));
+ }
+
+#ifdef macintosh
+ /* Get the path to the temporary folder. */
+ {FSSpec spec;
+
+ if (!Special2FSSpec(kTemporaryFolderType,
+ kOnSystemDisk, 0, &spec))
+ return (__os_strdup(dbenv,
+ FSp2FullPath(&spec), &dbenv->db_tmp_dir));
+ }
+#endif
+#ifdef DB_WIN32
+ /* Get the path to the temporary directory. */
+ {int isdir, len;
+ char *eos, temp[MAXPATHLEN + 1];
+
+ if ((len = GetTempPath(sizeof(temp) - 1, temp)) > 2) {
+ eos = &temp[len];
+ *eos-- = '\0';
+ if (*eos == '\\' || *eos == '/')
+ *eos = '\0';
+ if (__os_exists(temp, &isdir) == 0 && isdir != 0)
+ return (__os_strdup(dbenv,
+ temp, &dbenv->db_tmp_dir));
+ }
+ }
+#endif
+
+ /* Step through the static list looking for a possibility. */
+ for (lp = list; *lp != NULL; ++lp)
+ if (__os_exists(*lp, NULL) == 0)
+ return (__os_strdup(dbenv, *lp, &dbenv->db_tmp_dir));
+ return (0);
+}
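A standalone sketch of the lookup order __os_tmpdir documents above: environment variables first, then a static list of likely directories. It is simplified in that empty environment values are skipped rather than treated as errors the way the DB code does; pick_tmpdir() is a hypothetical name.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Pick a temporary directory the way __os_tmpdir does, simplified. */
static const char *
pick_tmpdir(void)
{
	static const char *candidates[] =
	    { "/var/tmp", "/usr/tmp", "/temp", "/tmp", NULL };
	const char *envs[] = { "TMPDIR", "TEMP", "TMP", NULL };
	struct stat sb;
	const char *p;
	int i;

	/* Environment variables win; empty values are skipped. */
	for (i = 0; envs[i] != NULL; ++i)
		if ((p = getenv(envs[i])) != NULL && p[0] != '\0')
			return (p);

	/* Otherwise take the first existing directory from the list. */
	for (i = 0; candidates[i] != NULL; ++i)
		if (stat(candidates[i], &sb) == 0 && S_ISDIR(sb.st_mode))
			return (candidates[i]);

	return (NULL);
}

int
main(void)
{
	const char *dir = pick_tmpdir();

	printf("temporary directory: %s\n", dir != NULL ? dir : "(none)");
	return (0);
}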
diff --git a/bdb/os/os_unlink.c b/bdb/os/os_unlink.c
new file mode 100644
index 00000000000..56c401fe342
--- /dev/null
+++ b/bdb/os/os_unlink.c
@@ -0,0 +1,106 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_unlink.c,v 11.13 2000/11/30 00:58:42 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_unlink --
+ * Remove a file.
+ *
+ * PUBLIC: int __os_unlink __P((DB_ENV *, const char *));
+ */
+int
+__os_unlink(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+ int ret;
+
+ ret = __db_jump.j_unlink != NULL ?
+ __db_jump.j_unlink(path) :
+#ifdef HAVE_VXWORKS
+ unlink((char *)path);
+#else
+ unlink(path);
+#endif
+ if (ret == -1) {
+ ret = __os_get_errno();
+ /*
+ * XXX
+ * We really shouldn't be looking at this value ourselves,
+ * but ENOENT usually signals that a file is missing, and
+ * we attempt to unlink things (such as v. 2.x environment
+ * regions, in DB_ENV->remove) that we're expecting not to
+ * be there. Reporting errors in these cases is annoying.
+ */
+#ifdef HAVE_VXWORKS
+ /*
+ * XXX
+ * The results of unlink are file system driver specific
+ * on VxWorks. In the case of removing a file that did
+ * not exist, some, at least, return an error, but with
+ * an errno of 0, not ENOENT.
+ *
+ * Code below falls through to original if-statement only
+ * if we didn't get a "successful" error.
+ */
+ if (ret != 0)
+ /* FALLTHROUGH */
+#endif
+ if (ret != ENOENT)
+ __db_err(dbenv, "Unlink: %s: %s", path, strerror(ret));
+ }
+
+ return (ret);
+}
+
+/*
+ * __os_region_unlink --
+ * Remove a shared memory object file.
+ *
+ * PUBLIC: int __os_region_unlink __P((DB_ENV *, const char *));
+ */
+int
+__os_region_unlink(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+#ifdef HAVE_QNX
+ int ret;
+ char *newname;
+
+ if ((ret = __os_shmname(dbenv, path, &newname)) != 0)
+ goto err;
+
+ if ((ret = shm_unlink(newname)) != 0) {
+ ret = __os_get_errno();
+ if (ret != ENOENT)
+ __db_err(dbenv, "Shm_unlink: %s: %s",
+ newname, strerror(ret));
+ }
+err:
+ if (newname != NULL)
+ __os_free(newname, 0);
+ return (ret);
+#else
+ return (__os_unlink(dbenv, path));
+#endif
+}
diff --git a/bdb/os_vxworks/os_abs.c b/bdb/os_vxworks/os_abs.c
new file mode 100644
index 00000000000..162a556d3fa
--- /dev/null
+++ b/bdb/os_vxworks/os_abs.c
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_abs.c,v 1.4 2000/08/09 19:09:48 sue Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "iosLib.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ DEV_HDR *dummy;
+ char *ptail;
+
+ /*
+ * VxWorks devices can be rooted at any name at all.
+ * Use iosDevFind() to see if name matches any of our devices.
+ */
+ if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
+ return (0);
+ /*
+ * If the routine used the default device, then we are not
+ * an abs path.
+ */
+ if (ptail == path)
+ return (0);
+ /*
+ * If the path starts with a '/', then we are an absolute path,
+ * using the host machine, otherwise we are not.
+ */
+ return (path[0] == '/');
+}
diff --git a/bdb/os_vxworks/os_finit.c b/bdb/os_vxworks/os_finit.c
new file mode 100644
index 00000000000..305f52f0996
--- /dev/null
+++ b/bdb/os_vxworks/os_finit.c
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_finit.c,v 1.1 2000/06/21 20:05:18 sue Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fpinit --
+ * Initialize a page in a regular file.
+ *
+ * PUBLIC: int __os_fpinit __P((DB_ENV *, DB_FH *, db_pgno_t, int, int));
+ */
+int
+__os_fpinit(dbenv, fhp, pgno, pagecount, pagesize)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ db_pgno_t pgno;
+ int pagecount, pagesize;
+{
+ size_t nw, totalbytes, curbytes;
+ int ret;
+ char buf[1024];
+
+ /*
+ * Some VxWorks FS drivers do not zero-fill pages that were never
+ * explicitly written to the file; they give you random garbage,
+ * and that breaks DB.
+ */
+ if ((ret = __os_seek(dbenv,
+ fhp, pagesize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ return (ret);
+
+ memset(buf, 0, sizeof(buf));
+ totalbytes = pagecount * pagesize;
+
+ while (totalbytes > 0) {
+ if (totalbytes > sizeof(buf))
+ curbytes = sizeof(buf);
+ else
+ curbytes = totalbytes;
+ if ((ret = __os_write(dbenv, fhp, buf, curbytes, &nw)) != 0)
+ return (ret);
+ if (nw != curbytes)
+ return (EIO);
+ totalbytes -= curbytes;
+ }
+ return (0);
+}
diff --git a/bdb/os_vxworks/os_map.c b/bdb/os_vxworks/os_map.c
new file mode 100644
index 00000000000..7397995d1d1
--- /dev/null
+++ b/bdb/os_vxworks/os_map.c
@@ -0,0 +1,440 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * This code is derived from software contributed to Sleepycat Software by
+ * Frederick G.M. Roeber of Netscape Communications Corp.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_map.c,v 1.14 2000/12/04 19:01:43 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+/*
+ * DB uses memory-mapped files for two things:
+ * faster access of read-only databases, and
+ * shared memory for process synchronization and locking.
+ * The code carefully does not mix the two uses. The first-case uses are
+ * actually written such that memory-mapping isn't really required -- it's
+ * merely a convenience -- so we don't have to worry much about it. In the
+ * second case, it's solely used as a shared memory mechanism, so that's
+ * all we have to replace.
+ *
+ * All memory in VxWorks is shared, and a task can allocate memory and keep
+ * notes. So I merely have to allocate memory, remember the "filename" for
+ * that memory, and issue small-integer segment IDs which index the list of
+ * these shared-memory segments. Subsequent opens are checked against the
+ * list of already open segments.
+ */
+typedef struct {
+ void *segment; /* Segment address. */
+ u_int32_t size; /* Segment size. */
+ char *name; /* Segment name. */
+ long segid; /* Segment ID. */
+} os_segdata_t;
+
+static os_segdata_t *__os_segdata; /* Segment table. */
+static int __os_segdata_size; /* Segment table size. */
+
+#define OS_SEGDATA_STARTING_SIZE 16
+#define OS_SEGDATA_INCREMENT 16
+
+static int __os_segdata_allocate
+ __P((DB_ENV *, const char *, REGINFO *, REGION *));
+static int __os_segdata_find_byname
+ __P((DB_ENV *, const char *, REGINFO *, REGION *));
+static int __os_segdata_init __P((DB_ENV *));
+static int __os_segdata_new __P((DB_ENV *, int *));
+static int __os_segdata_release __P((DB_ENV *, REGION *, int));
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ *
+ * PUBLIC: int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ int ret;
+
+ if (__os_segdata == NULL)
+ __os_segdata_init(dbenv);
+
+ DB_BEGIN_SINGLE_THREAD;
+
+ /* Try to find an already existing segment. */
+ ret = __os_segdata_find_byname(dbenv, infop->name, infop, rp);
+
+ /*
+ * If we are trying to join a region, it is easy: either we
+ * found it and we return, or we didn't find it and we return
+ * an error that it doesn't exist.
+ */
+ if (!F_ISSET(infop, REGION_CREATE)) {
+ if (ret != 0) {
+ __db_err(dbenv, "segment %s does not exist",
+ infop->name);
+ ret = EAGAIN;
+ }
+ goto out;
+ }
+
+ /*
+ * If we get here, we are trying to create the region.
+ * There are several things to consider:
+ * - if we have an error (not a found or not-found value), return.
+ * - they better have shm_key set.
+ * - if the region is already there (ret == 0 from above),
+ * assume the application crashed and we're restarting.
+ * Delete the old region.
+ * - try to create the region.
+ */
+ if (ret != 0 && ret != ENOENT)
+ goto out;
+
+ if (dbenv->shm_key == INVALID_REGION_SEGID) {
+ __db_err(dbenv, "no base shared memory ID specified");
+ ret = EAGAIN;
+ goto out;
+ }
+ if (ret == 0 && __os_segdata_release(dbenv, rp, 1) != 0) {
+ __db_err(dbenv,
+ "key: %ld: shared memory region already exists",
+ dbenv->shm_key + (infop->id - 1));
+ ret = EAGAIN;
+ goto out;
+ }
+
+ ret = __os_segdata_allocate(dbenv, infop->name, infop, rp);
+out:
+ DB_END_SINGLE_THREAD;
+ return (ret);
+}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared region.
+ *
+ * PUBLIC: int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ /*
+ * If just detaching, there is no mapping to discard.
+ * If destroying, remove the region.
+ */
+ if (destroy)
+ return (__os_segdata_release(dbenv, infop->rp, 0));
+ return (0);
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ *
+ * PUBLIC: int __os_mapfile __P((DB_ENV *,
+ * PUBLIC: char *, DB_FH *, size_t, int, void **));
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addrp;
+{
+ /* We cannot map in regular files in VxWorks. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(path, NULL);
+ COMPQUIET(fhp, NULL);
+ COMPQUIET(is_rdonly, 0);
+ COMPQUIET(len, 0);
+ COMPQUIET(addrp, NULL);
+ return (EINVAL);
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ *
+ * PUBLIC: int __os_unmapfile __P((DB_ENV *, void *, size_t));
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* We cannot map in regular files in VxWorks. */
+ COMPQUIET(addr, NULL);
+ COMPQUIET(len, 0);
+ return (EINVAL);
+}
+
+/*
+ * __os_segdata_init --
+ * Initializes the library's table of shared memory segments.
+ * Called once on the first time through __os_segdata_new().
+ */
+static int
+__os_segdata_init(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if (__os_segdata != NULL) {
+ __db_err(dbenv, "shared memory segment already exists");
+ return (EEXIST);
+ }
+
+ /*
+ * The lock init call returns a locked lock.
+ */
+ DB_BEGIN_SINGLE_THREAD;
+ __os_segdata_size = OS_SEGDATA_STARTING_SIZE;
+ ret = __os_calloc(dbenv,
+ __os_segdata_size, sizeof(os_segdata_t), &__os_segdata);
+ DB_END_SINGLE_THREAD;
+ return (ret);
+}
+
+/*
+ * __os_segdata_destroy --
+ * Destroys the library's table of shared memory segments. It also
+ * frees all linked data: the segments themselves, and their names.
+ * Currently not called. This function should be called if the
+ * user creates a function to unload or shutdown.
+ *
+ * PUBLIC: int __os_segdata_destroy __P((void));
+ */
+int
+__os_segdata_destroy()
+{
+ os_segdata_t *p;
+ int i;
+
+ if (__os_segdata == NULL)
+ return (0);
+
+ DB_BEGIN_SINGLE_THREAD;
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->name != NULL) {
+ __os_freestr(p->name);
+ p->name = NULL;
+ }
+ if (p->segment != NULL) {
+ __os_free(p->segment, p->size);
+ p->segment = NULL;
+ }
+ p->size = 0;
+ }
+
+ __os_free(__os_segdata, __os_segdata_size * sizeof(os_segdata_t));
+ __os_segdata = NULL;
+ __os_segdata_size = 0;
+ DB_END_SINGLE_THREAD;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_allocate --
+ * Creates a new segment of the specified size, optionally with the
+ * specified name.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
+static int
+__os_segdata_allocate(dbenv, name, infop, rp)
+ DB_ENV *dbenv;
+ const char *name;
+ REGINFO *infop;
+ REGION *rp;
+{
+ os_segdata_t *p;
+ int id, ret;
+
+ if ((ret = __os_segdata_new(dbenv, &id)) != 0)
+ return (ret);
+
+ p = &__os_segdata[id];
+ if ((ret = __os_calloc(dbenv, 1, rp->size, &p->segment)) != 0)
+ return (ret);
+ if ((ret = __os_strdup(dbenv, name, &p->name)) != 0) {
+ __os_free(p->segment, rp->size);
+ p->segment = NULL;
+ return (ret);
+ }
+ p->size = rp->size;
+ p->segid = dbenv->shm_key + infop->id - 1;
+
+ infop->addr = p->segment;
+ rp->segid = id;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_new --
+ *	Finds a new segdata slot. Does not initialise it, so the id returned
+ * is only valid until you call this again.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
+static int
+__os_segdata_new(dbenv, segidp)
+ DB_ENV *dbenv;
+ int *segidp;
+{
+ os_segdata_t *p;
+ int i, newsize, ret;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->segment == NULL) {
+ *segidp = i;
+ return (0);
+ }
+ }
+
+ /*
+ * No more free slots, expand.
+ */
+ newsize = __os_segdata_size + OS_SEGDATA_INCREMENT;
+ if ((ret = __os_realloc(dbenv, newsize * sizeof(os_segdata_t),
+ NULL, &__os_segdata)) != 0)
+ return (ret);
+ memset(&__os_segdata[__os_segdata_size],
+ 0, OS_SEGDATA_INCREMENT * sizeof(os_segdata_t));
+
+ *segidp = __os_segdata_size;
+ __os_segdata_size = newsize;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_find_byname --
+ * Finds a segment by its name and shm_key.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
+static int
+__os_segdata_find_byname(dbenv, name, infop, rp)
+ DB_ENV *dbenv;
+ const char *name;
+ REGINFO *infop;
+ REGION *rp;
+{
+ os_segdata_t *p;
+ long segid;
+ int i;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ if (name == NULL) {
+ __db_err(dbenv, "no segment name given");
+ return (EAGAIN);
+ }
+
+ /*
+ * If we are creating the region, compute the segid.
+ * If we are joining the region, we use the segid in the
+ * index we are given.
+ */
+ if (F_ISSET(infop, REGION_CREATE))
+ segid = dbenv->shm_key + (infop->id - 1);
+ else {
+ if (rp->segid >= __os_segdata_size ||
+ rp->segid == INVALID_REGION_SEGID) {
+ __db_err(dbenv, "Invalid segment id given");
+ return (EAGAIN);
+ }
+ segid = __os_segdata[rp->segid].segid;
+ }
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->name != NULL && strcmp(name, p->name) == 0 &&
+ p->segid == segid) {
+ infop->addr = p->segment;
+ rp->segid = i;
+ return (0);
+ }
+ }
+ return (ENOENT);
+}
+
+/*
+ * __os_segdata_release --
+ * Free a segdata entry.
+ */
+static int
+__os_segdata_release(dbenv, rp, is_locked)
+ DB_ENV *dbenv;
+ REGION *rp;
+ int is_locked;
+{
+ os_segdata_t *p;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ if (rp->segid < 0 || rp->segid >= __os_segdata_size) {
+ __db_err(dbenv, "segment id %ld out of range", rp->segid);
+ return (EINVAL);
+ }
+
+ if (is_locked == 0)
+ DB_BEGIN_SINGLE_THREAD;
+ p = &__os_segdata[rp->segid];
+ if (p->name != NULL) {
+ __os_freestr(p->name);
+ p->name = NULL;
+ }
+ if (p->segment != NULL) {
+ __os_free(p->segment, p->size);
+ p->segment = NULL;
+ }
+ p->size = 0;
+ if (is_locked == 0)
+ DB_END_SINGLE_THREAD;
+
+ /* Any shrink-table logic could go here */
+
+ return (0);
+}
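
The create-versus-join logic above (look the segment up by name, discard a stale entry when creating, then allocate a fresh slot) can be condensed into a small self-contained table. The sketch below is editorial and not part of the Berkeley DB diff; seg_attach, seg_find, and seg_table are invented names, and the real code additionally serializes every step with DB_BEGIN_SINGLE_THREAD/DB_END_SINGLE_THREAD and grows its table on demand.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature of the per-process segment table used above. */
struct seg {
	char   *name;		/* NULL means the slot is free. */
	void   *mem;
	size_t  size;
};
static struct seg seg_table[16];

static struct seg *
seg_find(const char *name)	/* Return the slot matching name, or NULL. */
{
	size_t i;

	for (i = 0; i < sizeof(seg_table) / sizeof(seg_table[0]); i++)
		if (seg_table[i].name != NULL &&
		    strcmp(seg_table[i].name, name) == 0)
			return (&seg_table[i]);
	return (NULL);
}

static int
seg_attach(const char *name, size_t size, int create, void **addrp)
{
	struct seg *sp;
	size_t i;

	if ((sp = seg_find(name)) != NULL) {
		if (!create) {			/* Join an existing segment. */
			*addrp = sp->mem;
			return (0);
		}
		free(sp->mem);			/* Creating: drop the stale entry. */
		free(sp->name);
		memset(sp, 0, sizeof(*sp));
	} else if (!create)
		return (ENOENT);		/* Nothing to join. */

	for (i = 0; i < sizeof(seg_table) / sizeof(seg_table[0]); i++) {
		sp = &seg_table[i];
		if (sp->name != NULL)
			continue;
		if ((sp->mem = calloc(1, size)) == NULL)
			return (ENOMEM);
		if ((sp->name = strdup(name)) == NULL) {
			free(sp->mem);
			sp->mem = NULL;
			return (ENOMEM);
		}
		sp->size = size;
		*addrp = sp->mem;
		return (0);
	}
	return (ENOMEM);			/* Table full. */
}
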
diff --git a/bdb/os_win32/os_abs.c b/bdb/os_win32/os_abs.c
new file mode 100644
index 00000000000..7b1e3fd05d5
--- /dev/null
+++ b/bdb/os_win32/os_abs.c
@@ -0,0 +1,33 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_abs.c,v 11.3 2000/02/14 03:00:06 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ /*
+ * !!!
+ * Check for drive specifications, e.g., "C:". In addition, the path
+ * separator used by the win32 DB (PATH_SEPARATOR) is \; look for both
+ * / and \ since these are user-input paths.
+ */
+ if (isalpha(path[0]) && path[1] == ':')
+ path += 2;
+ return (path[0] == '/' || path[0] == '\\');
+}
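
As a quick illustration of the drive-letter handling documented above, here is a tiny standalone harness (editorial, not part of the diff; is_abs simply mirrors the check in __os_abspath):

#include <ctype.h>
#include <stdio.h>

static int
is_abs(const char *path)	/* Same test as __os_abspath above. */
{
	if (isalpha((unsigned char)path[0]) && path[1] == ':')
		path += 2;	/* Skip an optional "X:" drive prefix. */
	return (path[0] == '/' || path[0] == '\\');
}

int
main(void)
{
	printf("%d %d %d %d\n",
	    is_abs("C:\\db\\env"),	/* 1: drive plus separator */
	    is_abs("C:data"),		/* 0: drive-relative path */
	    is_abs("/tmp/env"),		/* 1 */
	    is_abs("foo.db"));		/* 0 */
	return (0);
}
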
diff --git a/bdb/os_win32/os_dir.c b/bdb/os_win32/os_dir.c
new file mode 100644
index 00000000000..d37b7601051
--- /dev/null
+++ b/bdb/os_win32/os_dir.c
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_dir.c,v 11.4 2000/03/28 21:50:17 ubell Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_dirlist --
+ * Return a list of the files in a directory.
+ */
+int
+__os_dirlist(dbenv, dir, namesp, cntp)
+ DB_ENV *dbenv;
+ const char *dir;
+ char ***namesp;
+ int *cntp;
+{
+ struct _finddata_t fdata;
+ long dirhandle;
+ int arraysz, cnt, finished, ret;
+ char **names, filespec[MAXPATHLEN];
+
+ if (__db_jump.j_dirlist != NULL)
+ return (__db_jump.j_dirlist(dir, namesp, cntp));
+
+ (void)snprintf(filespec, sizeof(filespec), "%s/*", dir);
+ if ((dirhandle = _findfirst(filespec, &fdata)) == -1)
+ return (__os_get_errno());
+
+ names = NULL;
+ finished = 0;
+ for (arraysz = cnt = 0; finished != 1; ++cnt) {
+ if (cnt >= arraysz) {
+ arraysz += 100;
+ if ((ret = __os_realloc(dbenv,
+ arraysz * sizeof(names[0]), NULL, &names)) != 0)
+ goto nomem;
+ }
+ if ((ret = __os_strdup(dbenv, fdata.name, &names[cnt])) != 0)
+ goto nomem;
+ if (_findnext(dirhandle,&fdata) != 0)
+ finished = 1;
+ }
+ _findclose(dirhandle);
+
+ *namesp = names;
+ *cntp = cnt;
+ return (0);
+
+nomem: if (names != NULL)
+ __os_dirfree(names, cnt);
+ return (ret);
+}
+
+/*
+ * __os_dirfree --
+ * Free the list of files.
+ */
+void
+__os_dirfree(names, cnt)
+ char **names;
+ int cnt;
+{
+ if (__db_jump.j_dirfree != NULL) {
+ __db_jump.j_dirfree(names, cnt);
+ return;
+ }
+
+ while (cnt > 0)
+ __os_free(names[--cnt], 0);
+ __os_free(names, 0);
+}
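
The _findfirst/_findnext scan above can be reduced to the following standalone sketch (editorial; assumes a Windows CRT with <io.h>, and list_dir is an invented name). The grow-by-100 array and __os_strdup bookkeeping of the real routine are omitted to keep the shape of the loop visible.

#include <io.h>
#include <stdint.h>
#include <stdio.h>

static int
list_dir(const char *dir)
{
	struct _finddata_t fdata;
	intptr_t dirhandle;
	char filespec[260];

	/* Scan everything matching "dir/*". */
	(void)snprintf(filespec, sizeof(filespec), "%s/*", dir);
	if ((dirhandle = _findfirst(filespec, &fdata)) == -1)
		return (-1);		/* The CRT sets errno for us. */

	do
		printf("%s\n", fdata.name);
	while (_findnext(dirhandle, &fdata) == 0);

	_findclose(dirhandle);
	return (0);
}
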
diff --git a/bdb/os_win32/os_errno.c b/bdb/os_win32/os_errno.c
new file mode 100644
index 00000000000..8324826b6f9
--- /dev/null
+++ b/bdb/os_win32/os_errno.c
@@ -0,0 +1,146 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_errno.c,v 11.5 2000/11/30 00:58:43 ubell Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_get_errno --
+ * Return the value of errno.
+ */
+int
+__os_get_errno()
+{
+ /* This routine must be able to return the same value repeatedly. */
+ return (errno);
+}
+
+/*
+ * __os_set_errno --
+ * Set the value of errno.
+ */
+void
+__os_set_errno(evalue)
+ int evalue;
+{
+ errno = evalue;
+}
+
+/*
+ * __os_win32_errno --
+ * Return the last Windows error as an errno.
+ * We give generic error returns:
+ *
+ * EFAULT means Win* call failed,
+ * and GetLastError provided no extra info.
+ *
+ * EIO means error on Win* call.
+ * and we were unable to provide a meaningful errno for this Windows
+ * error. More information is only available by setting a breakpoint
+ * here.
+ *
+ * PUBLIC: #if defined(DB_WIN32)
+ * PUBLIC: int __os_win32_errno __P((void));
+ * PUBLIC: #endif
+ */
+int
+__os_win32_errno(void)
+{
+ DWORD last_error;
+ int ret;
+
+ /*
+ * It's possible that errno was set after the error.
+ * The caller must take care to set it to 0 before
+ * any system operation.
+ */
+ if (__os_get_errno() != 0)
+ return (__os_get_errno());
+
+ last_error = GetLastError();
+
+ /*
+ * Take our best guess at translating some of the Windows error
+ * codes. We really care about only a few of these.
+ */
+ switch (last_error) {
+ case ERROR_FILE_NOT_FOUND:
+ case ERROR_INVALID_DRIVE:
+ case ERROR_PATH_NOT_FOUND:
+ ret = ENOENT;
+ break;
+
+ case ERROR_NO_MORE_FILES:
+ case ERROR_TOO_MANY_OPEN_FILES:
+ ret = EMFILE;
+ break;
+
+ case ERROR_ACCESS_DENIED:
+ ret = EPERM;
+ break;
+
+ case ERROR_INVALID_HANDLE:
+ ret = EBADF;
+ break;
+
+ case ERROR_NOT_ENOUGH_MEMORY:
+ ret = ENOMEM;
+ break;
+
+ case ERROR_DISK_FULL:
+		ret = ENOSPC;
+		break;
+
+ case ERROR_ARENA_TRASHED:
+ case ERROR_BAD_COMMAND:
+ case ERROR_BAD_ENVIRONMENT:
+ case ERROR_BAD_FORMAT:
+ case ERROR_GEN_FAILURE:
+ case ERROR_INVALID_ACCESS:
+ case ERROR_INVALID_BLOCK:
+ case ERROR_INVALID_DATA:
+ case ERROR_READ_FAULT:
+ case ERROR_WRITE_FAULT:
+ ret = EFAULT;
+ break;
+
+ case ERROR_FILE_EXISTS:
+ ret = EEXIST;
+ break;
+
+ case ERROR_NOT_SAME_DEVICE:
+ ret = EXDEV;
+ break;
+
+ case ERROR_WRITE_PROTECT:
+ ret = EACCES;
+ break;
+
+ case ERROR_NOT_READY:
+ ret = EBUSY;
+ break;
+
+ case ERROR_LOCK_VIOLATION:
+ case ERROR_SHARING_VIOLATION:
+ ret = EBUSY;
+ break;
+
+ case 0:
+ ret = EFAULT;
+ break;
+
+ default:
+ ret = EIO; /* Generic error. */
+ break;
+ }
+
+ return (ret);
+}
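
The calling convention implied by the comment above (zero errno first, make the Win32 call, and only fall back to GetLastError() if errno is still zero) looks like this in practice. The sketch is editorial; my_win32_errno is an invented stand-in for __os_win32_errno and translates only a handful of codes.

#include <errno.h>
#include <windows.h>

static int
my_win32_errno(void)
{
	if (errno != 0)			/* Prefer an errno set by the CRT. */
		return (errno);
	switch (GetLastError()) {
	case ERROR_FILE_NOT_FOUND:
	case ERROR_PATH_NOT_FOUND:
		return (ENOENT);
	case ERROR_ACCESS_DENIED:
		return (EPERM);
	case ERROR_DISK_FULL:
		return (ENOSPC);
	default:
		return (EIO);		/* Generic error. */
	}
}

static int
remove_file(const char *name)
{
	errno = 0;			/* Callers clear errno first. */
	if (!DeleteFileA(name))
		return (my_win32_errno());
	return (0);
}
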
diff --git a/bdb/os_win32/os_fid.c b/bdb/os_win32/os_fid.c
new file mode 100644
index 00000000000..c66ac52102b
--- /dev/null
+++ b/bdb/os_win32/os_fid.c
@@ -0,0 +1,145 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_fid.c,v 11.7 2000/10/26 14:18:08 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+#define SERIAL_INIT 0
+static u_int32_t fid_serial = SERIAL_INIT;
+
+/*
+ * __os_fileid --
+ * Return a unique identifier for a file.
+ */
+int
+__os_fileid(dbenv, fname, unique_okay, fidp)
+ DB_ENV *dbenv;
+ const char *fname;
+ int unique_okay;
+ u_int8_t *fidp;
+{
+ size_t i;
+ u_int32_t tmp;
+ u_int8_t *p;
+ int ret;
+
+ /*
+ * The documentation for GetFileInformationByHandle() states that the
+ * inode-type numbers are not constant between processes. Actually,
+	 * they are: they're the NTFS MFT indexes. So, this works on NTFS,
+ * but perhaps not on other platforms, and perhaps not over a network.
+ * Can't think of a better solution right now.
+ */
+ DB_FH fh;
+ HANDLE handle;
+ BY_HANDLE_FILE_INFORMATION fi;
+ BOOL retval = FALSE;
+
+ /* Clear the buffer. */
+ memset(fidp, 0, DB_FILE_ID_LEN);
+
+ /*
+ * Initialize/increment the serial number we use to help avoid
+ * fileid collisions. Note that we don't bother with locking;
+ * it's unpleasant to do from down in here, and if we race on
+ * this no real harm will be done, since the finished fileid
+ * has so many other components.
+ *
+ * We increment by 100000 on each call as a simple way of
+ * randomizing; simply incrementing seems potentially less useful
+ * if pids are also simply incremented, since this is process-local
+ * and we may be one of a set of processes starting up. 100000
+ * pushes us out of pid space on most platforms, and has few
+ * interesting properties in base 2.
+ */
+ if (fid_serial == SERIAL_INIT)
+ fid_serial = (u_int32_t)getpid();
+ else
+ fid_serial += 100000;
+
+ /*
+ * First we open the file, because we're not given a handle to it.
+ * If we can't open it, we're in trouble.
+ */
+ if ((ret = __os_open(dbenv, fname, DB_OSO_RDONLY, _S_IREAD, &fh)) != 0)
+ return (ret);
+
+ /* File open, get its info */
+ handle = (HANDLE)_get_osfhandle(fh.fd);
+ if (handle == INVALID_HANDLE_VALUE)
+ ret = __os_win32_errno();
+ else
+ if ((retval = GetFileInformationByHandle(handle, &fi)) == FALSE)
+ ret = __os_win32_errno();
+ __os_closehandle(&fh);
+
+ if (handle == INVALID_HANDLE_VALUE || retval == FALSE)
+ return (ret);
+
+ /*
+ * We want the three 32-bit words which tell us the volume ID and
+ * the file ID. We make a crude attempt to copy the bytes over to
+	 * the caller's buffer.
+ *
+ * We don't worry about byte sexing or the actual variable sizes.
+ *
+ * When this routine is called from the DB access methods, it's only
+ * called once -- whatever ID is generated when a database is created
+ * is stored in the database file's metadata, and that is what is
+ * saved in the mpool region's information to uniquely identify the
+ * file.
+ *
+ * When called from the mpool layer this routine will be called each
+ * time a new thread of control wants to share the file, which makes
+ * things tougher. As far as byte sexing goes, since the mpool region
+ * lives on a single host, there's no issue of that -- the entire
+ * region is byte sex dependent. As far as variable sizes go, we make
+ * the simplifying assumption that 32-bit and 64-bit processes will
+ * get the same 32-bit values if we truncate any returned 64-bit value
+ * to a 32-bit value.
+ */
+ tmp = (u_int32_t)fi.nFileIndexLow;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ tmp = (u_int32_t)fi.nFileIndexHigh;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ if (unique_okay) {
+ /*
+ * Use the system time to try to get a unique value
+ * within this process. A millisecond counter
+ * overflows 32 bits in about 49 days. So we use 8
+ * bytes, and don't bother with the volume ID, which
+ * is not very useful for our purposes.
+ */
+ SYSTEMTIME st;
+
+ GetSystemTime(&st);
+ tmp = (st.wYear - 1900) * 12 + (st.wMonth - 1);
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ tmp = ((((st.wDay - 1) * 24 + st.wHour) * 60 +
+ st.wMinute) * 60 + st.wSecond) * 1000 +
+ st.wMilliseconds;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ for (p = (u_int8_t *)&fid_serial, i = sizeof(u_int32_t);
+ i > 0; --i)
+ *fidp++ = *p++;
+ } else {
+ tmp = (u_int32_t)fi.dwVolumeSerialNumber;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ }
+
+ return (0);
+}
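
The byte-copy loops above simply append the in-memory representation of successive 32-bit words to the caller's buffer; memcpy expresses the same idea more compactly. This is an editorial sketch (pack_fileid and MY_FILE_ID_LEN are invented names standing in for the real routine and DB_FILE_ID_LEN); byte order does not matter because, as the comment notes, the ID is only ever compared on the host that produced it.

#include <stdint.h>
#include <string.h>

#define MY_FILE_ID_LEN	20	/* Stand-in for DB_FILE_ID_LEN. */

static void
pack_fileid(uint8_t *fidp,
    uint32_t index_low, uint32_t index_high, uint32_t volume_or_time)
{
	memset(fidp, 0, MY_FILE_ID_LEN);
	memcpy(fidp, &index_low, sizeof(uint32_t));		/* nFileIndexLow */
	memcpy(fidp + 4, &index_high, sizeof(uint32_t));	/* nFileIndexHigh */
	memcpy(fidp + 8, &volume_or_time, sizeof(uint32_t));
	/* The remaining bytes stay zero, or carry time and serial data
	 * when unique_okay is set in the real routine. */
}
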
diff --git a/bdb/os_win32/os_finit.c b/bdb/os_win32/os_finit.c
new file mode 100644
index 00000000000..61d2a33c7b4
--- /dev/null
+++ b/bdb/os_win32/os_finit.c
@@ -0,0 +1,60 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_finit.c,v 11.9 2000/03/29 20:50:52 ubell Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fpinit --
+ * Initialize a page in a regular file.
+ *
+ * PUBLIC: int __os_fpinit __P((DB_ENV *, DB_FH *, db_pgno_t, int, int));
+ */
+int
+__os_fpinit(dbenv, fhp, pgno, pagecount, pagesize)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ db_pgno_t pgno;
+ int pagecount, pagesize;
+{
+ size_t nw, totalbytes, curbytes;
+ int ret;
+ char buf[1024];
+
+ /*
+ * Windows/NT zero-fills pages that were never explicitly written to
+ * the file. Windows 95/98 gives you random garbage, and that breaks
+ * DB.
+ */
+ if (__os_is_winnt())
+ return (0);
+
+ if ((ret = __os_seek(dbenv,
+ fhp, pagesize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ return (ret);
+
+ memset(buf, 0, sizeof(buf));
+ totalbytes = pagecount * pagesize;
+
+ while (totalbytes > 0) {
+ if (totalbytes > sizeof(buf))
+ curbytes = sizeof(buf);
+ else
+ curbytes = totalbytes;
+ if ((ret = __os_write(dbenv, fhp, buf, curbytes, &nw)) != 0)
+ return (ret);
+ if (nw != curbytes)
+ return (EIO);
+ totalbytes -= curbytes;
+ }
+ return (0);
+}
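
The chunked zero-fill in __os_fpinit, restated against stdio so it can be compiled anywhere (editorial sketch; zero_fill is an invented helper): seek to the page and write the requested number of zero bytes in 1KB slices.

#include <stdio.h>
#include <string.h>

static int
zero_fill(FILE *fp, long offset, size_t nbytes)
{
	char buf[1024];
	size_t chunk;

	if (fseek(fp, offset, SEEK_SET) != 0)
		return (-1);
	memset(buf, 0, sizeof(buf));
	while (nbytes > 0) {
		chunk = nbytes > sizeof(buf) ? sizeof(buf) : nbytes;
		if (fwrite(buf, 1, chunk, fp) != chunk)
			return (-1);		/* Short write. */
		nbytes -= chunk;
	}
	return (0);
}
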
diff --git a/bdb/os_win32/os_map.c b/bdb/os_win32/os_map.c
new file mode 100644
index 00000000000..d7b2839ed29
--- /dev/null
+++ b/bdb/os_win32/os_map.c
@@ -0,0 +1,310 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_map.c,v 11.22 2000/10/26 14:18:08 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "os_jump.h"
+
+static int __os_map
+ __P((DB_ENV *, char *, REGINFO *, DB_FH *, size_t, int, int, int, void **));
+static int __os_unique_name __P((char *, int, char *));
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ DB_FH fh;
+ int is_system, ret;
+
+ /*
+ * Try to open/create the file. We DO NOT need to ensure that multiple
+ * threads/processes attempting to simultaneously create the region are
+ * properly ordered, our caller has already taken care of that.
+ */
+ if ((ret = __os_open(dbenv, infop->name,
+ F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE: 0,
+ infop->mode, &fh)) != 0) {
+ __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * On Windows/9X, files that are opened by multiple processes do not
+ * share data correctly. For this reason, the DB_SYSTEM_MEM flag is
+ * implied for any application that does not specify the DB_PRIVATE
+ * flag.
+ */
+ is_system = F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) ||
+ (!F_ISSET(dbenv, DB_ENV_PRIVATE) && __os_is_winnt() == 0);
+
+ /*
+ * Map the file in. If we're creating an in-system-memory region,
+ * specify a segment ID (which is never used again) so that the
+ * calling code writes out the REGENV_REF structure to the primary
+ * environment file.
+ */
+ ret = __os_map(dbenv, infop->name, infop, &fh, rp->size,
+ 1, is_system, 0, &infop->addr);
+ if (ret == 0 && is_system == 1)
+ rp->segid = 1;
+
+ (void)__os_closehandle(&fh);
+
+ return (ret);
+}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared memory region.
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ int ret, t_ret;
+
+ if (infop->wnt_handle != NULL) {
+ (void)CloseHandle(*((HANDLE*)(infop->wnt_handle)));
+ __os_free(infop->wnt_handle, sizeof(HANDLE));
+ }
+
+ __os_set_errno(0);
+ ret = !UnmapViewOfFile(infop->addr) ? __os_win32_errno() : 0;
+ if (ret != 0)
+ __db_err(dbenv, "UnmapViewOfFile: %s", strerror(ret));
+
+ if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) && destroy &&
+ (t_ret = __os_unlink(dbenv, infop->name)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addr)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addr;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (__db_jump.j_map != NULL)
+ return (__db_jump.j_map(path, len, 0, is_rdonly, addr));
+
+ return (__os_map(dbenv, path, NULL, fhp, len, 0, 0, is_rdonly, addr));
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (__db_jump.j_unmap != NULL)
+ return (__db_jump.j_unmap(addr, len));
+
+ __os_set_errno(0);
+ return (!UnmapViewOfFile(addr) ? __os_win32_errno() : 0);
+}
+
+/*
+ * __os_unique_name --
+ * Create a unique identifying name from a pathname (may be absolute or
+ * relative) and/or a file descriptor.
+ *
+ * The name returned must be unique (different files map to different
+ * names), and repeatable (same files, map to same names). It's not
+ * so easy to do by name. Should handle not only:
+ *
+ * foo.bar == ./foo.bar == c:/whatever_path/foo.bar
+ *
+ * but also understand that:
+ *
+ * foo.bar == Foo.Bar (FAT file system)
+ * foo.bar != Foo.Bar (NTFS)
+ *
+ * The best solution is to use the identifying number in the file
+ * information structure (similar to UNIX inode #).
+ */
+static int
+__os_unique_name(orig_path, fd, result_path)
+ char *orig_path, *result_path;
+ int fd;
+{
+ BY_HANDLE_FILE_INFORMATION fileinfo;
+
+ __os_set_errno(0);
+ if (!GetFileInformationByHandle(
+ (HANDLE)_get_osfhandle(fd), &fileinfo))
+ return (__os_win32_errno());
+ (void)sprintf(result_path, "%ld.%ld.%ld",
+ fileinfo.dwVolumeSerialNumber,
+ fileinfo.nFileIndexHigh, fileinfo.nFileIndexLow);
+ return (0);
+}
+
+/*
+ * __os_map --
+ * The mmap(2) function for Windows.
+ */
+static int
+__os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ char *path;
+ DB_FH *fhp;
+ int is_region, is_system, is_rdonly;
+ size_t len;
+ void **addr;
+{
+ HANDLE hMemory;
+ REGENV *renv;
+ int ret;
+ void *pMemory;
+ char shmem_name[MAXPATHLEN];
+ int use_pagefile;
+
+ ret = 0;
+ if (infop != NULL)
+ infop->wnt_handle = NULL;
+
+ use_pagefile = is_region && is_system;
+
+ /*
+ * If creating a region in system space, get a matching name in the
+ * paging file namespace.
+ */
+ if (use_pagefile) {
+ (void)strcpy(shmem_name, "__db_shmem.");
+ if ((ret = __os_unique_name(path, fhp->fd,
+ &shmem_name[strlen(shmem_name)])) != 0)
+ return (ret);
+ }
+
+ /*
+ * XXX
+ * DB: We have not implemented copy-on-write here.
+ *
+ * XXX
+ * DB: This code will fail if the library is ever compiled on a 64-bit
+ * machine.
+ *
+ * XXX
+	 * If this is a region in system memory, let's try opening it using
+ * OpenFileMapping() first. Why, oh why are we doing this?
+ *
+ * Well, we might be asking the OS for a handle to a pre-existing
+ * memory section, or we might be the first to get here and want the
+ * section created. CreateFileMapping() sounds like it will do both
+ * jobs. But, not so. It seems to mess up making the commit charge to
+ * the process. It thinks, incorrectly, that when we want to join a
+ * previously existing section, that it should make a commit charge
+ * for the whole section. In fact, there is no new committed memory
+ * whatever. The call can fail if there is insufficient memory free
+ * to handle the erroneous commit charge. So, we find that the bogus
+ * commit is not made if we call OpenFileMapping(). So we do that
+ * first, and only call CreateFileMapping() if we're really creating
+ * the section.
+ */
+ hMemory = NULL;
+ __os_set_errno(0);
+ if (use_pagefile)
+ hMemory = OpenFileMapping(
+ is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS,
+ 0,
+ shmem_name);
+
+ if (hMemory == NULL)
+ hMemory = CreateFileMapping(
+ use_pagefile ?
+ (HANDLE)0xFFFFFFFF : (HANDLE)_get_osfhandle(fhp->fd),
+ 0,
+ is_rdonly ? PAGE_READONLY : PAGE_READWRITE,
+ 0, len,
+ use_pagefile ? shmem_name : NULL);
+ if (hMemory == NULL) {
+ __db_err(dbenv,
+ "OpenFileMapping: %s", strerror(__os_win32_errno()));
+ return (__os_win32_errno());
+ }
+
+ pMemory = MapViewOfFile(hMemory,
+ (is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS), 0, 0, len);
+ if (pMemory == NULL) {
+ __db_err(dbenv,
+ "MapViewOfFile: %s", strerror(__os_win32_errno()));
+ return (__os_win32_errno());
+ }
+
+ /*
+ * XXX
+ * It turns out that the kernel object underlying the named section
+ * is reference counted, but that the call to MapViewOfFile() above
+ * does NOT increment the reference count! So, if we close the handle
+ * here, the kernel deletes the object from the kernel namespace.
+ * When a second process comes along to join the region, the kernel
+ * happily creates a new object with the same name, but completely
+ * different identity. The two processes then have distinct isolated
+ * mapped sections, not at all what was wanted. Not closing the handle
+ * here fixes this problem. We carry the handle around in the region
+ * structure so we can close it when unmap is called. Ignore malloc
+ * errors, it just means we leak the memory.
+ */
+ if (use_pagefile && infop != NULL) {
+ if (__os_malloc(NULL,
+ sizeof(HANDLE), NULL, &infop->wnt_handle) == 0)
+ memcpy(infop->wnt_handle, &hMemory, sizeof(HANDLE));
+ } else
+ CloseHandle(hMemory);
+
+ if (is_region) {
+ /*
+ * XXX
+ * Windows/95 zeroes anonymous memory regions at last close.
+ * This means that the backing file can exist and reference
+ * the region, but the region itself is no longer initialized.
+ * If the caller is capable of creating the region, update
+ * the REGINFO structure so that they do so.
+ */
+ renv = (REGENV *)pMemory;
+ if (renv->magic == 0)
+ if (F_ISSET(infop, REGION_CREATE_OK))
+ F_SET(infop, REGION_CREATE);
+ else {
+ (void)UnmapViewOfFile(pMemory);
+ pMemory = NULL;
+ ret = EAGAIN;
+ }
+ }
+
+ *addr = pMemory;
+ return (ret);
+}
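
The open-before-create dance described in the long comment above reduces to the pattern below (editorial sketch; attach_section is an invented name, INVALID_HANDLE_VALUE plays the role of the (HANDLE)0xFFFFFFFF pagefile marker, and a real caller must keep the returned handle open until it unmaps the view, exactly as the code above stores it in wnt_handle).

#include <windows.h>

static void *
attach_section(const char *name, DWORD len, HANDLE *handlep, int *createdp)
{
	HANDLE h;
	void *p;

	*createdp = 0;
	/* Joining an existing section this way avoids the bogus commit
	 * charge that CreateFileMapping() would make. */
	h = OpenFileMappingA(FILE_MAP_ALL_ACCESS, FALSE, name);
	if (h == NULL) {
		h = CreateFileMappingA(INVALID_HANDLE_VALUE,
		    NULL, PAGE_READWRITE, 0, len, name);
		if (h == NULL)
			return (NULL);
		*createdp = 1;
	}
	if ((p = MapViewOfFile(h, FILE_MAP_ALL_ACCESS, 0, 0, len)) == NULL) {
		CloseHandle(h);
		return (NULL);
	}
	*handlep = h;		/* Close only after UnmapViewOfFile(). */
	return (p);
}
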
diff --git a/bdb/os_win32/os_open.c b/bdb/os_win32/os_open.c
new file mode 100644
index 00000000000..7ecd96126df
--- /dev/null
+++ b/bdb/os_win32/os_open.c
@@ -0,0 +1,201 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_open.c,v 11.9 2000/11/30 00:58:43 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+int __os_win32_errno __P((void));
+
+/*
+ * __os_open --
+ * Open a file descriptor.
+ */
+int
+__os_open(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB_FH *fhp;
+{
+ DWORD bytesWritten;
+ HANDLE wh;
+ u_int32_t log_size;
+ int access, attr, oflags, share, createflag;
+ int ret, nrepeat;
+
+ /*
+ * The "public" interface to the __os_open routine passes around POSIX
+ * 1003.1 flags, not DB flags. If the user has defined their own open
+ * interface, use the POSIX flags.
+ */
+ if (__db_jump.j_open != NULL) {
+ oflags = O_BINARY | O_NOINHERIT;
+
+ if (LF_ISSET(DB_OSO_CREATE))
+ oflags |= O_CREAT;
+
+ if (LF_ISSET(DB_OSO_EXCL))
+ oflags |= O_EXCL;
+
+ if (LF_ISSET(DB_OSO_RDONLY))
+ oflags |= O_RDONLY;
+ else
+ oflags |= O_RDWR;
+
+ if (LF_ISSET(DB_OSO_SEQ))
+ oflags |= _O_SEQUENTIAL;
+ else
+ oflags |= _O_RANDOM;
+
+ if (LF_ISSET(DB_OSO_TEMP))
+ oflags |= _O_TEMPORARY;
+
+ if (LF_ISSET(DB_OSO_TRUNC))
+ oflags |= O_TRUNC;
+
+ return (__os_openhandle(dbenv, name, oflags, mode, fhp));
+ }
+
+ if (LF_ISSET(DB_OSO_LOG))
+ log_size = fhp->log_size; /* XXX: Gag. */
+
+ memset(fhp, 0, sizeof(*fhp));
+
+ /*
+ * Otherwise, use the Windows/32 CreateFile interface so that we can
+ * play magic games with log files to get data flush effects similar
+ * to the POSIX O_DSYNC flag.
+ *
+ * !!!
+ * We currently ignore the 'mode' argument. It would be possible
+ * to construct a set of security attributes that we could pass to
+	 * CreateFile that would accurately represent the mode. In the worst
+ * case, this would require looking up user and all group names and
+ * creating an entry for each. Alternatively, we could call the
+ * _chmod (partial emulation) function after file creation, although
+ * this leaves us with an obvious race. However, these efforts are
+ * largely meaningless on FAT, the most common file system, which
+ * only has a "readable" and "writeable" flag, applying to all users.
+ */
+ wh = INVALID_HANDLE_VALUE;
+
+ access = GENERIC_READ;
+ if (!LF_ISSET(DB_OSO_RDONLY))
+ access |= GENERIC_WRITE;
+
+ share = FILE_SHARE_READ | FILE_SHARE_WRITE;
+ attr = FILE_ATTRIBUTE_NORMAL;
+
+ /*
+	 * Reproduce POSIX 1003.1 semantics: if O_CREAT and O_EXCL are both
+ * specified, fail, returning EEXIST, unless we create the file.
+ */
+ if (LF_ISSET(DB_OSO_CREATE) && LF_ISSET(DB_OSO_EXCL))
+		createflag = CREATE_NEW;	/* create only if !exist */
+ else if (!LF_ISSET(DB_OSO_CREATE) && LF_ISSET(DB_OSO_TRUNC))
+ createflag = TRUNCATE_EXISTING; /* truncate, fail if !exist */
+ else if (LF_ISSET(DB_OSO_TRUNC))
+ createflag = CREATE_ALWAYS; /* create and truncate */
+ else if (LF_ISSET(DB_OSO_CREATE))
+ createflag = OPEN_ALWAYS; /* open or create */
+ else
+ createflag = OPEN_EXISTING; /* open only if existing */
+
+ if (LF_ISSET(DB_OSO_LOG)) {
+ F_SET(fhp, DB_FH_NOSYNC);
+ attr |= FILE_FLAG_WRITE_THROUGH;
+ }
+
+ if (LF_ISSET(DB_OSO_SEQ))
+ attr |= FILE_FLAG_SEQUENTIAL_SCAN;
+ else
+ attr |= FILE_FLAG_RANDOM_ACCESS;
+
+ if (LF_ISSET(DB_OSO_TEMP))
+ attr |= FILE_FLAG_DELETE_ON_CLOSE;
+
+ for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
+ ret = 0;
+ __os_set_errno(0);
+ wh = CreateFile(name, access, share, NULL, createflag, attr, 0);
+ if (wh == INVALID_HANDLE_VALUE) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_win32_errno();
+ if (ret == ENFILE || ret == EMFILE || ret == ENOSPC) {
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ continue;
+ }
+ goto err;
+ }
+ break;
+ }
+
+ /*
+ * Special handling needed for log files. To get Windows to not update
+ * the MFT metadata on each write, extend the file to its maximum size.
+ * Windows will allocate all the data blocks and store them in the MFT
+ * (inode) area. In addition, flush the MFT area to disk.
+ * This strategy only works for Win/NT; Win/9X does not
+ * guarantee that the logs will be zero filled.
+ */
+ if (LF_ISSET(DB_OSO_LOG) && log_size != 0 &&
+ __os_is_winnt()) {
+ if (SetFilePointer(wh,
+ log_size - 1, NULL, FILE_BEGIN) == (DWORD)-1)
+ goto err;
+ if (WriteFile(wh, "\x00", 1, &bytesWritten, NULL) == 0)
+ goto err;
+ if (bytesWritten != 1)
+ goto err;
+ if (SetEndOfFile(wh) == 0)
+ goto err;
+ if (SetFilePointer(wh, 0, NULL, FILE_BEGIN) == (DWORD)-1)
+ goto err;
+ if (FlushFileBuffers(wh) == 0)
+ goto err;
+ }
+
+ /*
+ * We acquire a POSIX file descriptor as this allows us to use the
+ * general UNIX I/O routines instead of writing Windows specific
+ * ones. Closing that file descriptor is sufficient to close the
+ * Windows HANDLE.
+ */
+ fhp->fd =
+ _open_osfhandle((long)wh, LF_ISSET(DB_OSO_RDONLY) ? O_RDONLY : 0);
+ fhp->handle = wh;
+ F_SET(fhp, DB_FH_VALID);
+
+ return (0);
+
+err: if (ret == 0)
+ ret = __os_win32_errno();
+ if (wh != INVALID_HANDLE_VALUE)
+ (void)CloseHandle(wh);
+ return (ret);
+}
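
The DB_OSO_* to CreateFile disposition mapping above, isolated as a pure function for reference (editorial sketch; open_disposition is an invented name and the arguments correspond to DB_OSO_CREATE, DB_OSO_EXCL and DB_OSO_TRUNC).

#include <windows.h>

static DWORD
open_disposition(int create, int excl, int trunc)
{
	if (create && excl)
		return (CREATE_NEW);		/* Fail if the file exists. */
	if (!create && trunc)
		return (TRUNCATE_EXISTING);	/* Fail if it does not exist. */
	if (trunc)
		return (CREATE_ALWAYS);		/* Create and truncate. */
	if (create)
		return (OPEN_ALWAYS);		/* Open or create. */
	return (OPEN_EXISTING);			/* Open only if it exists. */
}
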
diff --git a/bdb/os_win32/os_rename.c b/bdb/os_win32/os_rename.c
new file mode 100644
index 00000000000..c824820462c
--- /dev/null
+++ b/bdb/os_win32/os_rename.c
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rename.c,v 1.2 2000/06/13 19:52:19 dda Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_rename --
+ * Rename a file.
+ */
+int
+__os_rename(dbenv, old, new)
+ DB_ENV *dbenv;
+ const char *old, *new;
+{
+ int ret;
+
+ ret = 0;
+ if (__db_jump.j_rename != NULL) {
+ if (__db_jump.j_rename(old, new) == -1)
+ ret = __os_get_errno();
+ }
+ else {
+ /* Normally we would use a single MoveFileEx call with
+ * MOVEFILE_REPLACE_EXISTING flag to simulate Unix rename().
+ * But if the target file exists, and the two files' 8.3
+ * names are identical, a Windows bug causes the target file
+ * to be deleted, but the original file will not be renamed,
+ * and an ENOENT error will be returned. (See MSDN for a
+ * description of the bug).
+ *
+ * After the failed call, a MoveFile seems to perform
+ * the rename correctly (even another call to MoveFileEx
+ * does not)! The expense of this extra call only occurs
+ * on systems with the bug: Windows/98, for one, but
+ * apparently not Windows/NT and Windows/2000.
+ */
+ if (MoveFileEx(old, new, MOVEFILE_REPLACE_EXISTING) != TRUE)
+ ret = __os_win32_errno();
+ if (ret == ENOENT && MoveFile(old, new) == TRUE)
+ ret = 0;
+ }
+ if (ret != 0)
+ __db_err(dbenv, "Rename %s %s: %s", old, new, strerror(ret));
+
+ return (ret);
+}
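
The fallback described in the comment, stripped to its two Win32 calls (editorial sketch; rename_replace is an invented name and the error check stands in for the ENOENT test the real code performs after translation).

#include <windows.h>

static int
rename_replace(const char *oldname, const char *newname)
{
	if (MoveFileExA(oldname, newname, MOVEFILE_REPLACE_EXISTING))
		return (0);
	/* Work around the 8.3-name bug: the failed call may have removed
	 * the target, after which a plain MoveFile succeeds. */
	if (GetLastError() == ERROR_FILE_NOT_FOUND &&
	    MoveFileA(oldname, newname))
		return (0);
	return (-1);
}
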
diff --git a/bdb/os_win32/os_seek.c b/bdb/os_win32/os_seek.c
new file mode 100644
index 00000000000..8cf3c98aa91
--- /dev/null
+++ b/bdb/os_win32/os_seek.c
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_seek.c,v 11.8 2000/05/17 19:30:19 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_seek --
+ * Seek to a page/byte offset in the file.
+ */
+int
+__os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t pgsize;
+ db_pgno_t pageno;
+ u_int32_t relative;
+ int isrewind;
+ DB_OS_SEEK db_whence;
+{
+ __int64 offset;
+ int ret, whence;
+
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case DB_OS_SEEK_END:
+ whence = SEEK_END;
+ break;
+ case DB_OS_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (__db_jump.j_seek != NULL)
+ ret = __db_jump.j_seek(fhp->fd, pgsize, pageno,
+ relative, isrewind, whence);
+ else {
+ offset = (__int64)pgsize * pageno + relative;
+ if (isrewind)
+ offset = -offset;
+ ret = _lseeki64(
+ fhp->fd, offset, whence) == -1 ? __os_get_errno() : 0;
+ }
+
+ if (ret != 0)
+ __db_err(dbenv, "seek: %lu %d %d: %s",
+ (u_long)pgsize * pageno + relative,
+ isrewind, db_whence, strerror(ret));
+
+ return (ret);
+}
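
The point of the __int64 cast above is to widen before multiplying, so page-number times page-size cannot overflow 32-bit arithmetic; a portable restatement (editorial sketch, seek_offset is an invented helper):

#include <stdint.h>
#include <stddef.h>

static int64_t
seek_offset(size_t pgsize, uint32_t pageno, uint32_t relative, int isrewind)
{
	int64_t offset;

	/* Widen first: (int64_t)pgsize * pageno is a 64-bit product. */
	offset = (int64_t)pgsize * pageno + relative;
	return (isrewind ? -offset : offset);
}
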
diff --git a/bdb/os_win32/os_sleep.c b/bdb/os_win32/os_sleep.c
new file mode 100644
index 00000000000..f0248a583de
--- /dev/null
+++ b/bdb/os_win32/os_sleep.c
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_sleep.c,v 11.4 2000/03/30 01:46:43 ubell Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_sleep --
+ * Yield the processor for a period of time.
+ */
+int
+__os_sleep(dbenv, secs, usecs)
+ DB_ENV *dbenv;
+ u_long secs, usecs; /* Seconds and microseconds. */
+{
+ COMPQUIET(dbenv, NULL);
+
+ /* Don't require that the values be normalized. */
+ for (; usecs >= 1000000; ++secs, usecs -= 1000000)
+ ;
+
+ if (__db_jump.j_sleep != NULL)
+ return (__db_jump.j_sleep(secs, usecs));
+
+ /*
+ * It's important that we yield the processor here so that other
+ * processes or threads are permitted to run.
+ */
+ Sleep(secs * 1000 + usecs / 1000);
+ return (0);
+}
diff --git a/bdb/os_win32/os_spin.c b/bdb/os_win32/os_spin.c
new file mode 100644
index 00000000000..f250c523d14
--- /dev/null
+++ b/bdb/os_win32/os_spin.c
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_spin.c,v 11.6 2000/05/17 19:30:19 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_spin --
+ * Return the number of default spins before blocking.
+ */
+int
+__os_spin()
+{
+ SYSTEM_INFO SystemInfo;
+
+ /*
+ * If the application specified a value or we've already figured it
+ * out, return it.
+ */
+ if (DB_GLOBAL(db_tas_spins) != 0)
+ return (DB_GLOBAL(db_tas_spins));
+
+ /* Get the number of processors */
+ GetSystemInfo(&SystemInfo);
+
+ /*
+ * Spin 50 times per processor -- we have anecdotal evidence that this
+ * is a reasonable value.
+ */
+ if (SystemInfo.dwNumberOfProcessors > 1)
+ DB_GLOBAL(db_tas_spins) = 50 * SystemInfo.dwNumberOfProcessors;
+ else
+ DB_GLOBAL(db_tas_spins) = 1;
+ return (DB_GLOBAL(db_tas_spins));
+}
+
+/*
+ * __os_yield --
+ * Yield the processor.
+ */
+void
+__os_yield(dbenv, usecs)
+ DB_ENV *dbenv;
+ u_long usecs;
+{
+ if (__db_jump.j_yield != NULL && __db_jump.j_yield() == 0)
+ return;
+ __os_sleep(dbenv, 0, usecs);
+}
diff --git a/bdb/os_win32/os_type.c b/bdb/os_win32/os_type.c
new file mode 100644
index 00000000000..a82fc4b1d4e
--- /dev/null
+++ b/bdb/os_win32/os_type.c
@@ -0,0 +1,35 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_type.c,v 11.3 2000/02/14 03:00:07 bostic Exp $";
+#endif /* not lint */
+
+/*
+ * __os_is_winnt --
+ * Return 1 if Windows/NT, otherwise 0.
+ *
+ * PUBLIC: int __os_is_winnt __P((void));
+ */
+int
+__os_is_winnt()
+{
+ static int __os_type = -1;
+
+ /*
+ * The value of __os_type is computed only once, and cached to
+ * avoid the overhead of repeated calls to GetVersion().
+ */
+ if (__os_type == -1)
+ if ((GetVersion() & 0x80000000) == 0)
+ __os_type = 1;
+ else
+ __os_type = 0;
+ return (__os_type);
+}
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB.pm b/bdb/perl.BerkeleyDB/BerkeleyDB.pm
new file mode 100644
index 00000000000..cc172a2bd22
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/BerkeleyDB.pm
@@ -0,0 +1,1227 @@
+
+package BerkeleyDB;
+
+
+# Copyright (c) 1997-2001 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+#
+
+# The documentation for this module is at the bottom of this file,
+# after the line __END__.
+
+BEGIN { require 5.004_04 }
+
+use strict;
+use Carp;
+use vars qw($VERSION @ISA @EXPORT $AUTOLOAD);
+
+$VERSION = '0.13';
+
+require Exporter;
+require DynaLoader;
+require AutoLoader;
+use IO ;
+
+@ISA = qw(Exporter DynaLoader);
+# Items to export into the caller's namespace by default. Note: do not export
+# names by default without a very good reason. Use EXPORT_OK instead.
+# Do not simply export all your public functions/methods/constants.
+@EXPORT = qw(
+
+ DB_AFTER
+ DB_APPEND
+ DB_ARCH_ABS
+ DB_ARCH_DATA
+ DB_ARCH_LOG
+ DB_BEFORE
+ DB_BTREE
+ DB_BTREEMAGIC
+ DB_BTREEOLDVER
+ DB_BTREEVERSION
+ DB_CHECKPOINT
+ DB_CONSUME
+ DB_CREATE
+ DB_CURLSN
+ DB_CURRENT
+ DB_DBT_MALLOC
+ DB_DBT_PARTIAL
+ DB_DBT_USERMEM
+ DB_DELETED
+ DB_DELIMITER
+ DB_DUP
+ DB_DUPSORT
+ DB_ENV_APPINIT
+ DB_ENV_STANDALONE
+ DB_ENV_THREAD
+ DB_EXCL
+ DB_FILE_ID_LEN
+ DB_FIRST
+ DB_FIXEDLEN
+ DB_FLUSH
+ DB_FORCE
+ DB_GET_BOTH
+ DB_GET_RECNO
+ DB_HASH
+ DB_HASHMAGIC
+ DB_HASHOLDVER
+ DB_HASHVERSION
+ DB_INCOMPLETE
+ DB_INIT_CDB
+ DB_INIT_LOCK
+ DB_INIT_LOG
+ DB_INIT_MPOOL
+ DB_INIT_TXN
+ DB_JOIN_ITEM
+ DB_KEYEMPTY
+ DB_KEYEXIST
+ DB_KEYFIRST
+ DB_KEYLAST
+ DB_LAST
+ DB_LOCKMAGIC
+ DB_LOCKVERSION
+ DB_LOCK_CONFLICT
+ DB_LOCK_DEADLOCK
+ DB_LOCK_DEFAULT
+ DB_LOCK_GET
+ DB_LOCK_NORUN
+ DB_LOCK_NOTGRANTED
+ DB_LOCK_NOTHELD
+ DB_LOCK_NOWAIT
+ DB_LOCK_OLDEST
+ DB_LOCK_RANDOM
+ DB_LOCK_RIW_N
+ DB_LOCK_RW_N
+ DB_LOCK_YOUNGEST
+ DB_LOGMAGIC
+ DB_LOGOLDVER
+ DB_MAX_PAGES
+ DB_MAX_RECORDS
+ DB_MPOOL_CLEAN
+ DB_MPOOL_CREATE
+ DB_MPOOL_DIRTY
+ DB_MPOOL_DISCARD
+ DB_MPOOL_LAST
+ DB_MPOOL_NEW
+ DB_MPOOL_PRIVATE
+ DB_MUTEXDEBUG
+ DB_MUTEXLOCKS
+ DB_NEEDSPLIT
+ DB_NEXT
+ DB_NEXT_DUP
+ DB_NOMMAP
+ DB_NOOVERWRITE
+ DB_NOSYNC
+ DB_NOTFOUND
+ DB_PAD
+ DB_PAGEYIELD
+ DB_POSITION
+ DB_PREV
+ DB_PRIVATE
+ DB_QUEUE
+ DB_RDONLY
+ DB_RECNO
+ DB_RECNUM
+ DB_RECORDCOUNT
+ DB_RECOVER
+ DB_RECOVER_FATAL
+ DB_REGISTERED
+ DB_RENUMBER
+ DB_RMW
+ DB_RUNRECOVERY
+ DB_SEQUENTIAL
+ DB_SET
+ DB_SET_RANGE
+ DB_SET_RECNO
+ DB_SNAPSHOT
+ DB_SWAPBYTES
+ DB_TEMPORARY
+ DB_THREAD
+ DB_TRUNCATE
+ DB_TXNMAGIC
+ DB_TXNVERSION
+ DB_TXN_BACKWARD_ROLL
+ DB_TXN_CKP
+ DB_TXN_FORWARD_ROLL
+ DB_TXN_LOCK_2PL
+ DB_TXN_LOCK_MASK
+ DB_TXN_LOCK_OPTIMIST
+ DB_TXN_LOCK_OPTIMISTIC
+ DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO
+ DB_TXN_LOG_UNDO
+ DB_TXN_LOG_UNDOREDO
+ DB_TXN_NOSYNC
+ DB_TXN_NOWAIT
+ DB_TXN_OPENFILES
+ DB_TXN_REDO
+ DB_TXN_SYNC
+ DB_TXN_UNDO
+ DB_USE_ENVIRON
+ DB_USE_ENVIRON_ROOT
+ DB_VERSION_MAJOR
+ DB_VERSION_MINOR
+ DB_VERSION_PATCH
+ DB_WRITECURSOR
+ );
+
+sub AUTOLOAD {
+ # This AUTOLOAD is used to 'autoload' constants from the constant()
+ # XS function. If a constant is not found then control is passed
+ # to the AUTOLOAD in AutoLoader.
+
+ my $constname;
+ ($constname = $AUTOLOAD) =~ s/.*:://;
+ my $val = constant($constname, @_ ? $_[0] : 0);
+ if ($! != 0) {
+ if ($! =~ /Invalid/) {
+ $AutoLoader::AUTOLOAD = $AUTOLOAD;
+ goto &AutoLoader::AUTOLOAD;
+ }
+ else {
+ croak "Your vendor has not defined BerkeleyDB macro $constname";
+ }
+ }
+ eval "sub $AUTOLOAD { $val }";
+ goto &$AUTOLOAD;
+}
+
+bootstrap BerkeleyDB $VERSION;
+
+# Preloaded methods go here.
+
+
+sub ParseParameters($@)
+{
+ my ($default, @rest) = @_ ;
+ my (%got) = %$default ;
+ my (@Bad) ;
+ my ($key, $value) ;
+ my $sub = (caller(1))[3] ;
+ my %options = () ;
+ local ($Carp::CarpLevel) = 1 ;
+
+ # allow the options to be passed as a hash reference or
+ # as the complete hash.
+ if (@rest == 1) {
+
+ croak "$sub: parameter is not a reference to a hash"
+ if ref $rest[0] ne "HASH" ;
+
+ %options = %{ $rest[0] } ;
+ }
+ elsif (@rest >= 2) {
+ %options = @rest ;
+ }
+
+ while (($key, $value) = each %options)
+ {
+ $key =~ s/^-// ;
+
+ if (exists $default->{$key})
+ { $got{$key} = $value }
+ else
+ { push (@Bad, $key) }
+ }
+
+ if (@Bad) {
+ my ($bad) = join(", ", @Bad) ;
+        croak "unknown key value(s): $bad" ;
+ }
+
+ return \%got ;
+}
+
+use UNIVERSAL qw( isa ) ;
+
+sub env_remove
+{
+ # Usage:
+ #
+ # $env = new BerkeleyDB::Env
+ # [ -Home => $path, ]
+ # [ -Config => { name => value, name => value }
+ # [ -Flags => DB_INIT_LOCK| ]
+ # ;
+
+ my $got = BerkeleyDB::ParseParameters({
+ Home => undef,
+ Flags => 0,
+ Config => undef,
+ }, @_) ;
+
+ if (defined $got->{ErrFile}) {
+ if (!isaFilehandle($got->{ErrFile})) {
+ my $handle = new IO::File ">$got->{ErrFile}"
+ or croak "Cannot open file $got->{ErrFile}: $!\n" ;
+ $got->{ErrFile} = $handle ;
+ }
+ }
+
+
+ if (defined $got->{Config}) {
+ croak("Config parameter must be a hash reference")
+ if ! ref $got->{Config} eq 'HASH' ;
+
+ @BerkeleyDB::a = () ;
+ my $k = "" ; my $v = "" ;
+ while (($k, $v) = each %{$got->{Config}}) {
+ push @BerkeleyDB::a, "$k\t$v" ;
+ }
+
+ $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef)
+ if @BerkeleyDB::a ;
+ }
+
+ return _env_remove($got) ;
+}
+
+sub db_remove
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ return _db_remove($got);
+}
+
+package BerkeleyDB::Env ;
+
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+use vars qw( %valid_config_keys ) ;
+
+sub isaFilehandle
+{
+ my $fh = shift ;
+
+ return ((isa($fh,'GLOB') or isa(\$fh,'GLOB')) and defined fileno($fh) )
+
+}
+
+%valid_config_keys = map { $_, 1 } qw( DB_DATA_DIR DB_LOG_DIR DB_TEMP_DIR ) ;
+
+sub new
+{
+ # Usage:
+ #
+ # $env = new BerkeleyDB::Env
+ # [ -Home => $path, ]
+ # [ -Mode => mode, ]
+ # [ -Config => { name => value, name => value }
+ # [ -ErrFile => filename or filehandle, ]
+ # [ -ErrPrefix => "string", ]
+ # [ -Flags => DB_INIT_LOCK| ]
+ # [ -Cachesize => number ]
+ # [ -LockDetect => ]
+ # [ -Verbose => boolean ]
+ # ;
+
+ my $pkg = shift ;
+ my $got = BerkeleyDB::ParseParameters({
+ Home => undef,
+ Server => undef,
+ Mode => 0666,
+ ErrFile => undef,
+ ErrPrefix => undef,
+ Flags => 0,
+ Cachesize => 0,
+ LockDetect => 0,
+ Verbose => 0,
+ Config => undef,
+ }, @_) ;
+
+ if (defined $got->{ErrFile}) {
+ if (!isaFilehandle($got->{ErrFile})) {
+ my $handle = new IO::File ">$got->{ErrFile}"
+ or croak "Cannot open file $got->{ErrFile}: $!\n" ;
+ $got->{ErrFile} = $handle ;
+ }
+ }
+
+
+ my %config ;
+ if (defined $got->{Config}) {
+ croak("Config parameter must be a hash reference")
+ if ! ref $got->{Config} eq 'HASH' ;
+
+ %config = %{ $got->{Config} } ;
+ @BerkeleyDB::a = () ;
+ my $k = "" ; my $v = "" ;
+ while (($k, $v) = each %config) {
+ if ($BerkeleyDB::db_version >= 3.1 && ! $valid_config_keys{$k} ) {
+ $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
+ croak $BerkeleyDB::Error ;
+ }
+ push @BerkeleyDB::a, "$k\t$v" ;
+ }
+
+ $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef)
+ if @BerkeleyDB::a ;
+ }
+
+ my ($addr) = _db_appinit($pkg, $got) ;
+ my $obj ;
+ $obj = bless [$addr] , $pkg if $addr ;
+ if ($obj && $BerkeleyDB::db_version >= 3.1 && keys %config) {
+ my ($k, $v);
+ while (($k, $v) = each %config) {
+ if ($k eq 'DB_DATA_DIR')
+ { $obj->set_data_dir($v) }
+ elsif ($k eq 'DB_LOG_DIR')
+ { $obj->set_lg_dir($v) }
+ elsif ($k eq 'DB_TEMP_DIR')
+ { $obj->set_tmp_dir($v) }
+ else {
+ $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
+ croak $BerkeleyDB::Error
+ }
+ }
+ }
+ return $obj ;
+}
+
+
+sub TxnMgr
+{
+ my $env = shift ;
+ my ($addr) = $env->_TxnMgr() ;
+ my $obj ;
+ $obj = bless [$addr, $env] , "BerkeleyDB::TxnMgr" if $addr ;
+ return $obj ;
+}
+
+sub txn_begin
+{
+ my $env = shift ;
+ my ($addr) = $env->_txn_begin(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $env] , "BerkeleyDB::Txn" if $addr ;
+ return $obj ;
+}
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::Hash ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Hash specific
+ Ffactor => 0,
+ Nelem => 0,
+ Hash => undef,
+ DupCompare => undef,
+
+ # BerkeleyDB specific
+ ReadKey => undef,
+ WriteKey => undef,
+ ReadValue => undef,
+ WriteValue => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr) = _db_open_hash($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*TIEHASH = \&new ;
+
+
+package BerkeleyDB::Btree ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Btree specific
+ Minkey => 0,
+ Compare => undef,
+ DupCompare => undef,
+ Prefix => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr) = _db_open_btree($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Btree::TIEHASH = \&BerkeleyDB::Btree::new ;
+
+
+package BerkeleyDB::Recno ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Recno specific
+ Delim => undef,
+ Len => undef,
+ Pad => undef,
+ Source => undef,
+ ArrayBase => 1, # lowest index in array
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("Tie needs a reference to an array")
+ if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+
+ croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
+ if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+
+
+ $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
+
+ my ($addr) = _db_open_recno($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Recno::TIEARRAY = \&BerkeleyDB::Recno::new ;
+*BerkeleyDB::Recno::db_stat = \&BerkeleyDB::Btree::db_stat ;
+
+package BerkeleyDB::Queue ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Queue specific
+ Len => undef,
+ Pad => undef,
+ ArrayBase => 1, # lowest index in array
+ ExtentSize => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("Tie needs a reference to an array")
+ if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+
+ croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
+ if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+
+
+ my ($addr) = _db_open_queue($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Queue::TIEARRAY = \&BerkeleyDB::Queue::new ;
+
+## package BerkeleyDB::Text ;
+##
+## use vars qw(@ISA) ;
+## @ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+## use UNIVERSAL qw( isa ) ;
+## use Carp ;
+##
+## sub new
+## {
+## my $self = shift ;
+## my $got = BerkeleyDB::ParseParameters(
+## {
+## # Generic Stuff
+## Filename => undef,
+## #Flags => BerkeleyDB::DB_CREATE(),
+## Flags => 0,
+## Property => 0,
+## Mode => 0666,
+## Cachesize => 0,
+## Lorder => 0,
+## Pagesize => 0,
+## Env => undef,
+## #Tie => undef,
+## Txn => undef,
+##
+## # Recno specific
+## Delim => undef,
+## Len => undef,
+## Pad => undef,
+## Btree => undef,
+## }, @_) ;
+##
+## croak("Env not of type BerkeleyDB::Env")
+## if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+##
+## croak("Txn not of type BerkeleyDB::Txn")
+## if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+##
+## croak("-Tie needs a reference to an array")
+## if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+##
+## # rearange for recno
+## $got->{Source} = $got->{Filename} if defined $got->{Filename} ;
+## delete $got->{Filename} ;
+## $got->{Fname} = $got->{Btree} if defined $got->{Btree} ;
+## return BerkeleyDB::Recno::_db_open_recno($self, $got);
+## }
+##
+## *BerkeleyDB::Text::TIEARRAY = \&BerkeleyDB::Text::new ;
+## *BerkeleyDB::Text::db_stat = \&BerkeleyDB::Btree::db_stat ;
+
+package BerkeleyDB::Unknown ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr, $type) = _db_open_unknown($got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr], "BerkeleyDB::$type" ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+
+package BerkeleyDB::_tiedHash ;
+
+use Carp ;
+
+#sub TIEHASH
+#{
+# my $self = shift ;
+# my $db_object = shift ;
+#
+#print "Tiehash REF=[$self] [" . (ref $self) . "]\n" ;
+#
+# return bless { Obj => $db_object}, $self ;
+#}
+
+sub Tie
+{
+ # Usage:
+ #
+ # $db->Tie \%hash ;
+ #
+
+ my $self = shift ;
+
+ #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
+
+ croak("usage \$x->Tie \\%hash\n") unless @_ ;
+ my $ref = shift ;
+
+ croak("Tie needs a reference to a hash")
+ if defined $ref and $ref !~ /HASH/ ;
+
+ #tie %{ $ref }, ref($self), $self ;
+ tie %{ $ref }, "BerkeleyDB::_tiedHash", $self ;
+ return undef ;
+}
+
+
+sub TIEHASH
+{
+ my $self = shift ;
+ my $db_object = shift ;
+ #return bless $db_object, 'BerkeleyDB::Common' ;
+ return $db_object ;
+}
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ $self->db_put($key, $value) ;
+}
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) ;
+
+ return $value ;
+}
+
+sub EXISTS
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) == 0 ;
+}
+
+sub DELETE
+{
+ my $self = shift ;
+ my $key = shift ;
+ $self->db_del($key) ;
+}
+
+sub CLEAR
+{
+ my $self = shift ;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ while ($cursor->c_get($key, $value, BerkeleyDB::DB_PREV()) == 0)
+ { $cursor->c_del() }
+ #1 while $cursor->c_del() == 0 ;
+ # cursor will self-destruct
+}
+
+#sub DESTROY
+#{
+# my $self = shift ;
+# print "BerkeleyDB::_tieHash::DESTROY\n" ;
+# $self->{Cursor}->c_close() if $self->{Cursor} ;
+#}
+
+package BerkeleyDB::_tiedArray ;
+
+use Carp ;
+
+sub Tie
+{
+ # Usage:
+ #
+ # $db->Tie \@array ;
+ #
+
+ my $self = shift ;
+
+ #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
+
+    croak("usage \$x->Tie \\\@array\n") unless @_ ;
+ my $ref = shift ;
+
+ croak("Tie needs a reference to an array")
+ if defined $ref and $ref !~ /ARRAY/ ;
+
+ #tie %{ $ref }, ref($self), $self ;
+ tie @{ $ref }, "BerkeleyDB::_tiedArray", $self ;
+ return undef ;
+}
+
+
+#sub TIEARRAY
+#{
+# my $self = shift ;
+# my $db_object = shift ;
+#
+#print "Tiearray REF=[$self] [" . (ref $self) . "]\n" ;
+#
+# return bless { Obj => $db_object}, $self ;
+#}
+
+sub TIEARRAY
+{
+ my $self = shift ;
+ my $db_object = shift ;
+ #return bless $db_object, 'BerkeleyDB::Common' ;
+ return $db_object ;
+}
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ $self->db_put($key, $value) ;
+}
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) ;
+
+ return $value ;
+}
+
+*CLEAR = \&BerkeleyDB::_tiedHash::CLEAR ;
+*FIRSTKEY = \&BerkeleyDB::_tiedHash::FIRSTKEY ;
+*NEXTKEY = \&BerkeleyDB::_tiedHash::NEXTKEY ;
+
+sub EXTEND {} # don't do anything with EXTEND
+
+
+sub SHIFT
+{
+ my $self = shift;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) != 0 ;
+ return undef if $cursor->c_del() != 0 ;
+
+ return $value ;
+}
+
+
+sub UNSHIFT
+{
+ my $self = shift;
+ croak "unshift is unsupported with Queue databases"
+ if $self->type == BerkeleyDB::DB_QUEUE() ;
+ if (@_)
+ {
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ if ($cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) == 0)
+ {
+ foreach $value (reverse @_)
+ {
+ $key = 0 ;
+ $cursor->c_put($key, $value, BerkeleyDB::DB_BEFORE()) ;
+ }
+ }
+ }
+}
+
+sub PUSH
+{
+ my $self = shift;
+ if (@_)
+ {
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ if ($cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) == 0)
+ {
+ foreach $value (@_)
+ {
+ ++ $key ;
+ $self->db_put($key, $value) ;
+ }
+ }
+
+# can use this when DB_APPEND is fixed.
+# foreach $value (@_)
+# {
+# my $status = $cursor->c_put($key, $value, BerkeleyDB::DB_AFTER()) ;
+#print "[$status]\n" ;
+# }
+ }
+}
+
+sub POP
+{
+ my $self = shift;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) != 0 ;
+ return undef if $cursor->c_del() != 0 ;
+
+ return $value ;
+}
+
+sub SPLICE
+{
+ my $self = shift;
+ croak "SPLICE is not implemented yet" ;
+}
+
+*shift = \&SHIFT ;
+*unshift = \&UNSHIFT ;
+*push = \&PUSH ;
+*pop = \&POP ;
+*clear = \&CLEAR ;
+*length = \&FETCHSIZE ;
+
+sub STORESIZE
+{
+ croak "STORESIZE is not implemented yet" ;
+#print "STORESIZE @_\n" ;
+# my $self = shift;
+# my $length = shift ;
+# my $current_length = $self->FETCHSIZE() ;
+#print "length is $current_length\n";
+#
+# if ($length < $current_length) {
+#print "Make smaller $length < $current_length\n" ;
+# my $key ;
+# for ($key = $current_length - 1 ; $key >= $length ; -- $key)
+# { $self->db_del($key) }
+# }
+# elsif ($length > $current_length) {
+#print "Make larger $length > $current_length\n" ;
+# $self->db_put($length-1, "") ;
+# }
+# else { print "stay the same\n" }
+
+}
+
+
+
+#sub DESTROY
+#{
+# my $self = shift ;
+# print "BerkeleyDB::_tieArray::DESTROY\n" ;
+#}
+
+
+package BerkeleyDB::Common ;
+
+
+use Carp ;
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+sub Txn
+{
+ my $self = shift ;
+ my $txn = shift ;
+ #print "BerkeleyDB::Common::Txn db [$self] txn [$txn]\n" ;
+ if ($txn) {
+ $self->_Txn($txn) ;
+ push @{ $txn }, $self ;
+ }
+ else {
+ $self->_Txn() ;
+ }
+ #print "end BerkeleyDB::Common::Txn \n";
+}
+
+
+sub get_dup
+{
+ croak "Usage: \$db->get_dup(key [,flag])\n"
+ unless @_ == 2 or @_ == 3 ;
+
+ my $db = shift ;
+ my $key = shift ;
+ my $flag = shift ;
+ my $value = 0 ;
+ my $origkey = $key ;
+ my $wantarray = wantarray ;
+ my %values = () ;
+ my @values = () ;
+ my $counter = 0 ;
+ my $status = 0 ;
+ my $cursor = $db->db_cursor() ;
+
+    # iterate through the database until either the end is reached ($status != 0)
+    # or a different key is encountered ($key ne $origkey).
+ for ($status = $cursor->c_get($key, $value, BerkeleyDB::DB_SET()) ;
+ $status == 0 and $key eq $origkey ;
+ $status = $cursor->c_get($key, $value, BerkeleyDB::DB_NEXT()) ) {
+ # save the value or count number of matches
+ if ($wantarray) {
+ if ($flag)
+ { ++ $values{$value} }
+ else
+ { push (@values, $value) }
+ }
+ else
+ { ++ $counter }
+
+ }
+
+ return ($wantarray ? ($flag ? %values : @values) : $counter) ;
+}
+
+sub db_cursor
+{
+ my $db = shift ;
+ my ($addr) = $db->_db_cursor(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+sub db_join
+{
+ croak 'Usage: $db->BerkeleyDB::Common::db_join([cursors], flags=0)'
+ if @_ < 2 || @_ > 3 ;
+ my $db = shift ;
+ my ($addr) = $db->_db_join(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+package BerkeleyDB::Cursor ;
+
+sub c_close
+{
+ my $cursor = shift ;
+ $cursor->[1] = "" ;
+ return $cursor->_c_close() ;
+}
+
+sub c_dup
+{
+ my $cursor = shift ;
+ my ($addr) = $cursor->_c_dup(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $cursor->[1]] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::TxnMgr ;
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+sub txn_begin
+{
+ my $txnmgr = shift ;
+ my ($addr) = $txnmgr->_txn_begin(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $txnmgr] , "BerkeleyDB::Txn" if $addr ;
+ return $obj ;
+}
+
+package BerkeleyDB::Txn ;
+
+sub Txn
+{
+ my $self = shift ;
+ my $db ;
+ # keep a reference to each db in the txn object
+ foreach $db (@_) {
+ $db->_Txn($self) ;
+ push @{ $self}, $db ;
+ }
+}
+
+sub txn_commit
+{
+ my $self = shift ;
+ $self->disassociate() ;
+ my $status = $self->_txn_commit() ;
+ return $status ;
+}
+
+sub txn_abort
+{
+ my $self = shift ;
+ $self->disassociate() ;
+ my $status = $self->_txn_abort() ;
+ return $status ;
+}
+
+sub disassociate
+{
+ my $self = shift ;
+ my $db ;
+ while ( @{ $self } > 2) {
+ $db = pop @{ $self } ;
+ $db->Txn() ;
+ }
+ #print "end disassociate\n" ;
+}
+
+
+sub DESTROY
+{
+ my $self = shift ;
+
+ $self->disassociate() ;
+    # now close the transaction itself
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::Term ;
+
+END
+{
+ close_everything() ;
+}
+
+
+package BerkeleyDB ;
+
+
+
+# Autoload methods go after =cut, and are processed by the autosplit program.
+
+1;
+__END__
+
+
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB.pod b/bdb/perl.BerkeleyDB/BerkeleyDB.pod
new file mode 100644
index 00000000000..2c5c3feb51e
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/BerkeleyDB.pod
@@ -0,0 +1,1751 @@
+=head1 NAME
+
+BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
+
+=head1 SYNOPSIS
+
+ use BerkeleyDB;
+
+ $env = new BerkeleyDB::Env [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
+ $db = new BerkeleyDB::Hash [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
+ $db = new BerkeleyDB::Btree [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Recno', [OPTIONS] ;
+ $db = new BerkeleyDB::Recno [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Queue', [OPTIONS] ;
+ $db = new BerkeleyDB::Queue [OPTIONS] ;
+
+ $db = new BerkeleyDB::Unknown [OPTIONS] ;
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+
+ $hash{$key} = $value ;
+ $value = $hash{$key} ;
+ each %hash ;
+ keys %hash ;
+ values %hash ;
+
+ $status = $db->db_get()
+ $status = $db->db_put() ;
+ $status = $db->db_del() ;
+ $status = $db->db_sync() ;
+ $status = $db->db_close() ;
+ $hash_ref = $db->db_stat() ;
+ $status = $db->db_key_range();
+ $type = $db->type() ;
+ $status = $db->status() ;
+ $boolean = $db->byteswapped() ;
+
+ ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+ ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+ $cursor = $db->db_cursor([$flags]) ;
+ $newcursor = $cursor->c_dup([$flags]);
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_put() ;
+ $status = $cursor->c_del() ;
+ $status = $cursor->c_count() ;
+ $status = $cursor->status() ;
+ $status = $cursor->c_close() ;
+
+ $cursor = $db->db_join() ;
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_close() ;
+
+ $status = $env->txn_checkpoint()
+ $hash_ref = $env->txn_stat()
+ $status = $env->setmutexlocks()
+
+ $txn = $env->txn_begin() ;
+ $status = $txn->txn_prepare()
+ $status = $txn->txn_commit()
+ $status = $txn->txn_abort()
+ $status = $txn->txn_id()
+
+ $BerkeleyDB::Error
+ $BerkeleyDB::db_version
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ # deprecated, but supported
+ $txn_mgr = $env->TxnMgr();
+ $status = $txn_mgr->txn_checkpoint()
+ $hash_ref = $txn_mgr->txn_stat()
+ $txn = $txn_mgr->txn_begin() ;
+
+=head1 DESCRIPTION
+
+B<NOTE: This document is still under construction. Expect it to be
+incomplete in places.>
+
+This Perl module provides an interface to most of the functionality
+available in Berkeley DB versions 2 and 3. In general it is safe to assume
+that the interface provided here is identical to the Berkeley DB
+interface. The main changes have been to make the Berkeley DB API work
+in a Perl way. Note that if you are using Berkeley DB 2.x, the new
+features available in Berkeley DB 3.x are not available via this module.
+
+The reader is expected to be familiar with the Berkeley DB
+documentation. Where the interface provided here is identical to the
+Berkeley DB library and the... TODO
+
+The B<db_appinit>, B<db_cursor>, B<db_open> and B<db_txn> man pages are
+particularly relevant.
+
+The interface to Berkeley DB is implemented with a number of Perl
+classes.
+
+=head1 ENV CLASS
+
+The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
+function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
+B<DBENV-E<gt>open> in Berkeley DB 3.x. Its purpose is to initialise a
+number of sub-systems that can then be used in a consistent way in all
+the databases that make use of the environment.
+
+If you don't intend using transactions, locking or logging, then you
+shouldn't need to make use of B<BerkeleyDB::Env>.
+
+=head2 Synopsis
+
+ $env = new BerkeleyDB::Env
+ [ -Home => $path, ]
+ [ -Server => $name, ]
+ [ -CacheSize => $number, ]
+ [ -Config => { name => value, name => value }, ]
+ [ -ErrFile => filename or filehandle, ]
+ [ -ErrPrefix => "string", ]
+ [ -Flags => number, ]
+ [ -LockDetect => number, ]
+ [ -Verbose => boolean, ]
+
+=over 5
+
+All the parameters to the BerkeleyDB::Env constructor are optional.
+
+=item -Home
+
+If present, this parameter should point to an existing directory. Any
+files that I<aren't> specified with an absolute path in the sub-systems
+that are initialised by the BerkeleyDB::Env class will be assumed to
+live in the B<Home> directory.
+
+For example, in the code fragment below the database "fred.db" will be
+opened in the directory "/home/databases" because it was specified as a
+relative path, but "joe.db" will be opened in "/other" because it was
+part of an absolute path.
+
+ $env = new BerkeleyDB::Env
+ -Home => "/home/databases"
+ ...
+
+ $db1 = new BerkeleyDB::Hash
+ -Filename = "fred.db",
+ -Env => $env
+ ...
+
+ $db2 = new BerkeleyDB::Hash
+ -Filename = "/other/joe.db",
+ -Env => $env
+ ...
+
+=item -Server
+
+If present, this parameter should be the hostname of a server that is running
+the Berkeley DB RPC server. All databases will be accessed via the RPC server.
+
+=item -Cachesize
+
+If present, this parameter sets the size of the environment's shared memory
+buffer pool.
+
+=item -Config
+
+This is a variation on the C<-Home> parameter, but it allows finer
+control of where specific types of files will be stored.
+
+The parameter expects a reference to a hash. Valid keys are:
+B<DB_DATA_DIR>, B<DB_LOG_DIR> and B<DB_TMP_DIR>
+
+The code below shows an example of how it can be used.
+
+ $env = new BerkeleyDB::Env
+ -Config => { DB_DATA_DIR => "/home/databases",
+ DB_LOG_DIR => "/home/logs",
+ DB_TMP_DIR => "/home/tmp"
+ }
+ ...
+
+=item -ErrFile
+
+Expects either the name of a file or a reference to a filehandle. Any
+errors generated internally by Berkeley DB will be logged to this file.
+
+=item -ErrPrefix
+
+Allows a prefix to be added to the error messages before they are sent
+to B<-ErrFile>.
+
+=item -Flags
+
+The B<Flags> parameter specifies both which sub-systems to initialise,
+as well as a number of environment-wide options.
+See the Berkeley DB documentation for more details of these options.
+
+Any of the following can be specified by OR'ing them:
+
+B<DB_CREATE>
+
+If any of the files specified do not already exist, create them.
+
+B<DB_INIT_CDB>
+
+Initialise the Concurrent Access Methods
+
+B<DB_INIT_LOCK>
+
+Initialise the Locking sub-system.
+
+B<DB_INIT_LOG>
+
+Initialise the Logging sub-system.
+
+B<DB_INIT_MPOOL>
+
+Initialise the ...
+
+B<DB_INIT_TXN>
+
+Initialise the ...
+
+B<DB_MPOOL_PRIVATE>
+
+Initialise the ...
+
+B<DB_INIT_MPOOL> is also specified.
+
+Initialise the ...
+
+B<DB_NOMMAP>
+
+Initialise the ...
+
+B<DB_RECOVER>
+
+
+
+B<DB_RECOVER_FATAL>
+
+B<DB_THREAD>
+
+B<DB_TXN_NOSYNC>
+
+B<DB_USE_ENVIRON>
+
+B<DB_USE_ENVIRON_ROOT>
+
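+For example, an environment intended for transactional use might combine
+the flags like this (a sketch only; choose the sub-systems you actually
+need):
+
+    $env = new BerkeleyDB::Env
+        -Home  => "/home/databases",
+        -Flags => DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
+                  DB_INIT_LOG | DB_INIT_TXN
+        ...
+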
+=item -LockDetect
+
+Specifies what to do when a lock conflict occurs. The value should be one of
+
+B<DB_LOCK_DEFAULT>
+
+B<DB_LOCK_OLDEST>
+
+B<DB_LOCK_RANDOM>
+
+B<DB_LOCK_YOUNGEST>
+
+=item -Verbose
+
+Add extra debugging information to the messages sent to B<-ErrFile>.
+
+=back
+
+=head2 Methods
+
+The environment class has the following methods:
+
+=over 5
+
+=item $env->errPrefix("string") ;
+
+This method is identical to the B<-ErrPrefix> flag. It allows the
+error prefix string to be changed dynamically.
+
+=item $txn = $env->TxnMgr()
+
+Constructor for creating a B<TxnMgr> object.
+See L<"TRANSACTIONS"> for more details of using transactions.
+
+This method is deprecated. Access the transaction methods using the B<txn_>
+methods below from the environment object directly.
+
+=item $env->txn_begin()
+
+TODO
+
+=item $env->txn_stat()
+
+TODO
+
+=item $env->txn_checkpoint()
+
+TODO
+
+=item $env->status()
+
+Returns the status of the last BerkeleyDB::Env method.
+
+=item $env->setmutexlocks()
+
+Only available in Berkeley DB 3.0 or greater. Calls
+B<db_env_set_mutexlocks> when used with Berkeley DB 3.1.x. When used with
+Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
+
+=back
+
+=head2 Examples
+
+TODO.
+
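+Until this section is written, here is a minimal, untested sketch that
+creates an environment and then opens a database inside it (the directory
+and file names are only for illustration):
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $env = new BerkeleyDB::Env
+        -Home  => "/home/databases",
+        -Flags => DB_CREATE | DB_INIT_MPOOL
+        or die "cannot create environment: $BerkeleyDB::Error\n" ;
+
+    my $db = new BerkeleyDB::Hash
+        -Filename => "fred.db",
+        -Flags    => DB_CREATE,
+        -Env      => $env
+        or die "cannot open fred.db: $! $BerkeleyDB::Error\n" ;
+
+    $db->db_put("apple", "red") ;
+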
+=head1 THE DATABASE CLASSES
+
+B<BerkeleyDB> supports the following database formats:
+
+=over 5
+
+=item B<BerkeleyDB::Hash>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using B<BerkeleyDB::Hash> are not compatible with any
+of the other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most applications,
+is built into BerkeleyDB. If you do need to use your own hashing algorithm
+it is possible to write your own in Perl and have B<BerkeleyDB> use
+it instead.
+
+=item B<BerkeleyDB::Btree>
+
+The Btree format allows arbitrary key/value pairs to be stored in a
+B+tree.
+
+As with the B<BerkeleyDB::Hash> format, it is possible to provide a
+user defined Perl routine to perform the comparison of keys. By default,
+though, the keys are stored in lexical order.
+
+=item B<BerkeleyDB::Recno>
+
+TODO.
+
+
+=item B<BerkeleyDB::Queue>
+
+TODO.
+
+=item B<BerkeleyDB::Unknown>
+
+This isn't a database format at all. It is used when you want to open an
+existing Berkeley DB database without having to know what type it is.
+
+=back
+
+
+Each of the database formats described above is accessed via a
+corresponding B<BerkeleyDB> class. These will be described in turn in
+the next sections.
+
+=head1 BerkeleyDB::Hash
+
+Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Hash
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Hash',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+
+When the "tie" interface is used, reading from and writing to the database
+is achieved via the tied hash. In this case the database operates like
+a Perl associative array that happens to be stored on disk.
+
+In addition to the high-level tied hash interface, it is possible to
+make use of the underlying methods provided by Berkeley DB
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Hash> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Ffactor
+
+=item -Nelem
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Hash
+
+Allows you to provide a user defined hash function. If not specified,
+a default hash function is used. Here is a template for a user-defined
+hash function
+
+ sub hash
+ {
+ my ($data) = shift ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Hash => \&hash,
+ ...
+
+See L<"Changing the hash"> for an example.
+
+=item -DupCompare
+
+Used in conjunction with the B<DB_DUPSORT> flag.
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Property => DB_DUP|DB_DUPSORT,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+
+=head2 Methods
+
+B<BerkeleyDB::Hash> only supports the standard database methods.
+See L<COMMON DATABASE METHODS>.
+
+=head2 A Simple Tied Hash Example
+
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+Here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys retrieved from a
+Hash database are in an apparently random order.
+
+=head2 Another Simple Hash Example
+
+Do the same as the previous example but not using tie.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+=head2 Duplicate keys
+
+The code below is a variation on the examples above. This time the hash has
+been inverted. The key this time is colour and the value is the fruit name.
+The B<DB_DUP> flag has been specified to allow duplicates.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+Here is the output:
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> banana
+ green -> apple
+
+=head2 Sorting Duplicate Keys
+
+In the previous example, when there were duplicate keys, the values were
+stored in the order they were added. The code below is
+identical to the previous example except the B<DB_DUPSORT> flag is
+specified.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+Notice that in the output below the duplicate values are sorted.
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> apple
+ green -> banana
+
+=head2 Custom Sorting Duplicate Keys
+
+Another variation
+
+TODO
+
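+Until then, here is an untested sketch of the idea: the B<-DupCompare>
+option described earlier accepts any comparison routine, for example one
+that orders the duplicate values by length before falling back to a
+string compare (C<$filename> is as in the earlier examples):
+
+    my $db = new BerkeleyDB::Hash
+        -Filename   => $filename,
+        -Flags      => DB_CREATE,
+        -Property   => DB_DUP | DB_DUPSORT,
+        -DupCompare => sub { length $_[0] <=> length $_[1]
+                                or $_[0] cmp $_[1] }
+        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+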
+=head2 Changing the hash
+
+TODO
+
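+As a stop-gap, here is an untested sketch based on the B<-Hash> template
+shown earlier. The hashing routine itself is purely illustrative -- any
+routine that maps the key data to an unsigned 32-bit number will do:
+
+    sub my_hash
+    {
+        my $data = shift ;
+        my $hash = 0 ;
+        # a simple additive hash, for illustration only
+        foreach my $byte (unpack "C*", $data)
+            { $hash = ($hash * 33 + $byte) % 0xFFFFFFFF }
+        return $hash ;
+    }
+
+    tie %h, "BerkeleyDB::Hash",
+        -Filename => $filename,
+        -Flags    => DB_CREATE,
+        -Hash     => \&my_hash
+        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+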
+=head2 Using db_stat
+
+TODO
+
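+In the meantime, the short sketch below simply dumps whatever fields
+B<db_stat> returns (C<$db> is assumed to be an open B<BerkeleyDB::Hash>
+object; the field names themselves are described in the Berkeley DB
+documentation):
+
+    my $ref = $db->db_stat() ;
+    while (my ($field, $value) = each %$ref)
+        { print "$field -> $value\n" }
+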
+=head1 BerkeleyDB::Btree
+
+Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+
+ $db = new BerkeleyDB::Btree
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Btree',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Btree> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Minkey
+
+TODO
+
+=item -Compare
+
+Allows you to override the default sort order used in the database. See
+L<"Changing the sort order"> for an example.
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -Compare => \&compare,
+ ...
+
+=item -Prefix
+
+ sub prefix
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -Prefix => \&prefix,
+ ...
+
+=item -DupCompare
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+=head2 Methods
+
+B<BerkeleyDB::Btree> supports the following database methods.
+See also L<COMMON DATABASE METHODS>.
+
+All the methods below return 0 to indicate success.
+
+=over 5
+
+=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
+
+Given a key, C<$key>, this method returns the proportion of keys less than
+C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
+proportion greater than C<$key> in C<$greater>.
+
+The proportion is returned as a double in the range 0.0 to 1.0.
+
+=back
+
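+Here is a short, untested sketch of how it might be called (the key
+"Smith" and an open database C<$db> are assumed):
+
+    my ($less, $equal, $greater) ;
+    $db->db_key_range("Smith", $less, $equal, $greater) == 0
+        or die "db_key_range failed: $BerkeleyDB::Error\n" ;
+    print "$less of the keys sort before the key, ",
+          "$equal match it and $greater sort after it\n" ;
+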
+=head2 A Simple Btree Example
+
+The code below is a simple example of using a btree database.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above. The keys have been sorted using
+Berkeley DB's default sorting algorithm.
+
+ Smith
+ Wall
+ mouse
+
+
+=head2 Changing the sort order
+
+It is possible to supply your own sorting algorithm if the one that Berkeley
+DB used isn't suitable. The code below is identical to the previous example
+except for the case insensitive compare function.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Using db_stat
+
+TODO
+
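+Until this section is written, note that B<db_stat> returns a hash
+reference whose fields carry the C<bt_> prefix for a Btree database (see
+L<COMMON DATABASE METHODS>). A minimal sketch, assuming an open Btree
+database in C<$db>:
+
+    my $ref = $db->db_stat() ;
+    print "Btree version is $ref->{'bt_version'}\n" ;
+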
+=head1 BerkeleyDB::Recno
+
+Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Recno
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Recno',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+=head2 A Recno Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+    # use a negative index
+    print "The last element is $h[-1]\n" ;
+    print "The 2nd last element is $h[-2]\n" ;
+
+ untie @h ;
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head1 BerkeleyDB::Queue
+
+Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
+type B<DB_QUEUE> in Berkeley DB 3.x. This database format isn't available if
+you use Berkeley DB 2.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Queue
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -ExtentSize => number, ]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Queue',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+
+
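+There is no worked example for this format yet. The untested sketch below
+ties a queue database to an array; note that queue databases hold
+fixed-length records, so a record length (B<-Len>) and, optionally, a pad
+byte (B<-Pad>) are supplied. The filename is made up for illustration:
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my @queue ;
+    tie @queue, 'BerkeleyDB::Queue',
+        -Filename => "queue.db",
+        -Flags    => DB_CREATE,
+        -Len      => 20,
+        -Pad      => " "
+        or die "Cannot open queue.db: $! $BerkeleyDB::Error\n" ;
+
+    $queue[0] = "first" ;
+    $queue[1] = "second" ;
+    push @queue, "third" ;
+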
+=head1 BerkeleyDB::Unknown
+
+This class is used to open an existing database.
+
+Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
+Berkeley DB 3.x.
+
+The constructor looks like this:
+
+ $db = new BerkeleyDB::Unknown
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+
+
+=head2 An example
+
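+Until this section is written, here is a minimal, untested sketch that
+opens an existing database of unknown type and reports what it turned out
+to be (the filename is made up):
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $db = new BerkeleyDB::Unknown
+        -Filename => "any.db"
+        or die "Cannot open any.db: $! $BerkeleyDB::Error\n" ;
+
+    my $type = $db->type() ;
+    print "it is a Btree database\n" if $type == DB_BTREE ;
+    print "it is a Hash database\n"  if $type == DB_HASH ;
+    print "it is a Recno database\n" if $type == DB_RECNO ;
+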
+=head1 COMMON OPTIONS
+
+All database access class constructors support the common set of
+options defined below. All are optional.
+
+=over 5
+
+=item -Filename
+
+The database filename. If no filename is specified, a temporary file will
+be created and removed once the program terminates.
+
+=item -Subname
+
+Specifies the name of the sub-database to open.
+This option is only valid if you are using Berkeley DB 3.x.
+
+=item -Flags
+
+Specify how the database will be opened/created. The valid flags are:
+
+B<DB_CREATE>
+
+Create any underlying files, as necessary. If the files do not already
+exist and the B<DB_CREATE> flag is not specified, the call will fail.
+
+B<DB_NOMMAP>
+
+Not supported by BerkeleyDB.
+
+B<DB_RDONLY>
+
+Opens the database in read-only mode.
+
+B<DB_THREAD>
+
+Not supported by BerkeleyDB.
+
+B<DB_TRUNCATE>
+
+If the database file already exists, remove all the data before
+opening it.
+
+=item -Mode
+
+Determines the file protection when the database is created. Defaults
+to 0666.
+
+=item -Cachesize
+
+=item -Lorder
+
+=item -Pagesize
+
+=item -Env
+
+When working under a Berkeley DB environment, this parameter should be set
+to the B<BerkeleyDB::Env> object returned when the environment was created.
+
+Defaults to no environment.
+
+=item -Txn
+
+TODO.
+
+=back
+
+=head1 COMMON DATABASE METHODS
+
+All the database interfaces support the common set of methods defined
+below.
+
+All the methods below return 0 to indicate success.
+
+=head2 $status = $db->db_get($key, $value [, $flags])
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. If it exists, the value read from the database is
+returned in the C<$value> parameter.
+
+The B<$flags> parameter is optional. If present, it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_GET_BOTH>
+
+When the B<DB_GET_BOTH> flag is specified, B<db_get> checks for the
+existence of B<both> the C<$key> B<and> C<$value> in the database.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO
+
+=back
+
+
+=head2 $status = $db->db_put($key, $value [, $flags])
+
+Stores a key/value pair in the database.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_APPEND>
+
+This flag is only applicable when accessing a B<BerkeleyDB::Recno>
+database.
+
+TODO.
+
+
+=item B<DB_NOOVERWRITE>
+
+If this flag is specified and C<$key> already exists in the database,
+the call to B<db_put> will return B<DB_KEYEXIST>.
+
+=back
+
+=head2 $status = $db->db_del($key [, $flags])
+
+Deletes a key/value pair in the database associated with C<$key>.
+If duplicate keys are enabled in the database, B<db_del> will delete
+B<all> key/value pairs with key C<$key>.
+
+The B<$flags> parameter is optional and is currently unused.
+
+=head2 $status = $db->db_sync()
+
+If any parts of the database are in memory, write them to the database.
+
+=head2 $cursor = $db->db_cursor([$flags])
+
+Creates a cursor object. This is used to access the contents of the
+database sequentially. See L<CURSORS> for details of the methods
+available when working with cursors.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+
+TODO
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+TODO
+
+=head2 $db->byteswapped()
+
+TODO
+
+=head2 $db->type()
+
+Returns the type of the database. The possible return code are B<DB_HASH>
+for a B<BerkeleyDB::Hash> database, B<DB_BTREE> for a B<BerkeleyDB::Btree>
+database and B<DB_RECNO> for a B<BerkeleyDB::Recno> database. This method
+is typically used when a database has been opened with
+B<BerkeleyDB::Unknown>.
+
+=head2 $ref = $db->db_stat()
+
+Returns a reference to an associative array containing information about
+the database. The keys of the associative array correspond directly to the
+names of the fields defined in the Berkeley DB documentation. For example,
+in the DB documentation, the field B<bt_version> stores the version of the
+Btree database. Assuming you called B<db_stat> on a Btree database the
+equivalent field would be accessed as follows:
+
+ $version = $ref->{'bt_version'} ;
+
+If you are using Berkeley DB 3.x, this method will work with all database
+formats. When DB 2.x is used, it only works with B<BerkeleyDB::Btree>.
+
+=head2 $status = $db->status()
+
+Returns the status of the last C<$db> method called.
+
+=head1 CURSORS
+
+A cursor is used whenever you want to access the contents of a database
+in sequential order.
+A cursor object is created with the C<db_cursor> method.
+
+A cursor object has the following methods available:
+
+=head2 $newcursor = $cursor->c_dup($flags)
+
+Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
+
+The C<$flags> parameter is optional and can take the following value:
+
+=over 5
+
+=item DB_POSITION
+
+When present this flag will position the new cursor at the same place as the
+existing cursor.
+
+=back
+
+=head2 $status = $cursor->c_get($key, $value, $flags)
+
+Reads a key/value pair from the database, returning the data in C<$key>
+and C<$value>. The key/value pair actually read is controlled by the
+C<$flags> parameter, which can take B<one> of the following values:
+
+=over 5
+
+=item B<DB_FIRST>
+
+Set the cursor to point to the first key/value pair in the
+database. Return the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_LAST>
+
+Set the cursor to point to the last key/value pair in the database. Return
+the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_NEXT>
+
+If the cursor is already pointing to a key/value pair, it will be
+incremented to point to the next key/value pair and return its contents.
+
+If the cursor isn't initialised, B<DB_NEXT> works just like B<DB_FIRST>.
+
+If the cursor is already positioned at the last key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_NEXT_DUP>
+
+This flag is only valid when duplicate keys have been enabled in
+a database.
+If the cursor is already pointing to a key/value pair and the key of
+the next key/value pair is identical, the cursor will be incremented to
+point to it and their contents returned.
+
+=item B<DB_PREV>
+
+If the cursor is already pointing to a key/value pair, it will be
+decremented to point to the previous key/value pair and return its
+contents.
+
+If the cursor isn't initialised, B<DB_PREV> works just like B<DB_LAST>.
+
+If the cursor is already positioned at the first key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_CURRENT>
+
+If the cursor has been set to point to a key/value pair, return their
+contents.
+If the key/value pair referenced by the cursor has been deleted, B<c_get>
+will return B<DB_KEYEMPTY>.
+
+=item B<DB_SET>
+
+Set the cursor to point to the key/value pair referenced by B<$key>
+and return the value in B<$value>.
+
+=item B<DB_SET_RANGE>
+
+This flag is a variation on the B<DB_SET> flag. As well as returning
+the value, it also returns the key, via B<$key>.
+When used with a B<BerkeleyDB::Btree> database the key matched by B<c_get>
+will be the shortest key (in length) which is greater than or equal to
+the key supplied, via B<$key>. This allows partial key searches.
+See ??? for an example of how to use this flag.
+
+=item B<DB_GET_BOTH>
+
+Another variation on B<DB_SET>. This one returns both the key and
+the value.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=item B<DB_GET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 $status = $cursor->c_put($key, $value, $flags)
+
+Stores the key/value pair in the database. The position that the data is
+stored in the database is controlled by the C<$flags> parameter, which
+must take B<one> of the following values:
+
+=over 5
+
+=item B<DB_AFTER>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately after the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+
+=item B<DB_BEFORE>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately before the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+=item B<DB_CURRENT>
+
+If the cursor has been initialised, replace the value of the key/value
+pair stored in the database with the contents of B<$value>.
+
+=item B<DB_KEYFIRST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the first entry in
+the duplicates for the particular key.
+
+=item B<DB_KEYLAST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the last entry in
+the duplicates for the particular key.
+
+=back
+
+=head2 $status = $cursor->c_del([$flags])
+
+This method deletes the key/value pair associated with the current cursor
+position. The cursor position will not be changed by this operation, so
+any subsequent cursor operation must first initialise the cursor to
+point to a valid key/value pair.
+
+If the key/value pair associated with the cursor has already been
+deleted, B<c_del> will return B<DB_KEYEMPTY>.
+
+The B<$flags> parameter is not used at present.
+
+=head2 $status = $cursor->c_count($cnt [, $flags])
+
+Stores the number of duplicates at the current cursor position in B<$cnt>.
+
+The B<$flags> parameter is not used at present. This method needs
+Berkeley DB 3.1 or better.
+
+=head2 $status = $cursor->status()
+
+Returns the status of the last cursor method as a dual type.
+
+=head2 Cursor Examples
+
+TODO
+
+Iterating from first to last, then in reverse.
+
+examples of each of the flags.
+
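+As a placeholder for the first of those, here is an untested sketch that
+walks an open database (C<$db>) from first to last and then from last to
+first; a fresh cursor is used for the reverse pass so that B<DB_PREV>
+starts from the end:
+
+    my ($k, $v) = ("", "") ;
+
+    # first to last
+    my $cursor = $db->db_cursor() ;
+    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+        { print "$k -> $v\n" }
+    undef $cursor ;
+
+    # last to first
+    $cursor = $db->db_cursor() ;
+    while ($cursor->c_get($k, $v, DB_PREV) == 0)
+        { print "$k -> $v\n" }
+    undef $cursor ;
+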
+=head1 JOIN
+
+Join support for BerkeleyDB is in progress. Watch this space.
+
+TODO
+
+=head1 TRANSACTIONS
+
+TODO.
+
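+Pending proper documentation, the untested sketch below pulls together the
+pieces described elsewhere in this document: a transaction is started with
+B<txn_begin>, attached to a database via the B<-Txn> option, and then
+either committed or aborted. The directory and file names are made up:
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $env = new BerkeleyDB::Env
+        -Home  => "/home/databases",
+        -Flags => DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
+                  DB_INIT_LOG | DB_INIT_TXN
+        or die "cannot create environment: $BerkeleyDB::Error\n" ;
+
+    my $txn = $env->txn_begin() ;
+
+    my $db = new BerkeleyDB::Btree
+        -Filename => "accounts.db",
+        -Flags    => DB_CREATE,
+        -Env      => $env,
+        -Txn      => $txn
+        or die "cannot open accounts.db: $! $BerkeleyDB::Error\n" ;
+
+    my $status = $db->db_put("fred", "100") ;
+
+    if ($status == 0)
+        { $txn->txn_commit() }
+    else
+        { $txn->txn_abort() }
+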
+=head1 DBM Filters
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a DBM
+database. All of the database classes (BerkeleyDB::Hash,
+BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
+
+There are four methods associated with DBM Filters. All work
+identically, and each is used to install (or uninstall) a single DBM
+Filter. Each expects a single parameter, namely a reference to a sub.
+The only difference between them is the place that the filter is
+installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
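+For example (a sketch; C<$db> is assumed to be an open database that
+already has a store-key filter installed):
+
+    # remove the filter, remembering it in case it is needed again
+    my $old_filter = $db->filter_store_key(undef) ;
+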
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database that you need
+to share with a third-party C application. The C application assumes
+that I<all> keys and values are NULL terminated. Unfortunately when
+Perl writes to DBM databases it doesn't use NULL termination, so your
+Perl application will have to manage NULL termination itself. When you
+write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL termination issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 Using BerkeleyDB with MLDBM
+
+Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
+module. The code fragment below shows how to associate MLDBM with
+BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
+BerkeleyDB::Btree with BerkeleyDB::Hash.
+
+ use strict ;
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = 'testmldbm' ;
+ my %o ;
+
+ unlink $filename ;
+    tie %o, 'MLDBM', -Filename => $filename,
+                     -Flags    => DB_CREATE
+        or die "Cannot open database '$filename': $!\n";
+
+See the MLDBM documentation for information on how to use the module
+and for details of its limitations.
+
+=head1 EXAMPLES
+
+TODO.
+
+=head1 HINTS & TIPS
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings
+are not. See L<An Example -- the NULL termination problem.> in the DBM
+FILTERS section for a generic way to work around this problem.
+
+
+=head2 The untie Gotcha
+
+TODO
+
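+Although this section has still to be written, the examples above already
+follow the pattern that avoids the problem: the object returned by C<tie>
+holds a reference to the underlying database, so if that object is still
+in scope the database will not be closed when you C<untie> the hash. A
+sketch of the safe ordering:
+
+    my $db = tie %hash, 'BerkeleyDB::Hash',
+        -Filename => $filename,
+        -Flags    => DB_CREATE
+        or die "Cannot open $filename: $!\n" ;
+
+    # ... use $db and %hash ...
+
+    undef $db ;      # drop the extra reference first
+    untie %hash ;    # now the database is really closed
+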
+=head1 COMMON QUESTIONS
+
+This section attempts to answer some of the more common questions that
+I get asked.
+
+
+=head2 Relationship with DB_File
+
+Before Berkeley DB 2.x was written there was only one Perl module that
+interfaced to Berkeley DB. That module is called B<DB_File>. Although
+B<DB_File> can be built with Berkeley DB 1.x, 2.x or 3.x, it only provides
+an interface to the functionality available in Berkeley DB 1.x. That
+means that it doesn't support transactions, locking or any of the other
+new features available in DB 2.x or 3.x.
+
+=head2 How do I store Perl data structures with BerkeleyDB?
+
+See L<Using BerkeleyDB with MLDBM>.
+
+=head1 HISTORY
+
+See the Changes file.
+
+=head1 AVAILABILITY
+
+The most recent version of B<BerkeleyDB> can always be found
+on CPAN (see L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/BerkeleyDB>.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<BerkeleyDB> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of Berkeley
+ DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of BerkeleyDB.
+See L<"AUTHOR"> for details.
+
+
+=head1 AUTHOR
+
+Paul Marquess E<lt>Paul.Marquess@btinternet.comE<gt>.
+
+Questions about Berkeley DB may be addressed to E<lt>db@sleepycat.comE<gt>.
+
+=head1 SEE ALSO
+
+perl(1), DB_File, Berkeley DB.
+
+=cut
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB.pod.P b/bdb/perl.BerkeleyDB/BerkeleyDB.pod.P
new file mode 100644
index 00000000000..2bcff2d99d1
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/BerkeleyDB.pod.P
@@ -0,0 +1,1518 @@
+=head1 NAME
+
+BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
+
+=head1 SYNOPSIS
+
+ use BerkeleyDB;
+
+ $env = new BerkeleyDB::Env [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
+ $db = new BerkeleyDB::Hash [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
+ $db = new BerkeleyDB::Btree [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Recno', [OPTIONS] ;
+ $db = new BerkeleyDB::Recno [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Queue', [OPTIONS] ;
+ $db = new BerkeleyDB::Queue [OPTIONS] ;
+
+ $db = new BerkeleyDB::Unknown [OPTIONS] ;
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+
+ $hash{$key} = $value ;
+ $value = $hash{$key} ;
+ each %hash ;
+ keys %hash ;
+ values %hash ;
+
+ $status = $db->db_get()
+ $status = $db->db_put() ;
+ $status = $db->db_del() ;
+ $status = $db->db_sync() ;
+ $status = $db->db_close() ;
+ $hash_ref = $db->db_stat() ;
+ $status = $db->db_key_range();
+ $type = $db->type() ;
+ $status = $db->status() ;
+ $boolean = $db->byteswapped() ;
+
+ ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+ ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+ $cursor = $db->db_cursor([$flags]) ;
+ $newcursor = $cursor->c_dup([$flags]);
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_put() ;
+ $status = $cursor->c_del() ;
+ $status = $cursor->c_count() ;
+ $status = $cursor->status() ;
+ $status = $cursor->c_close() ;
+
+ $cursor = $db->db_join() ;
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_close() ;
+
+ $status = $env->txn_checkpoint()
+ $hash_ref = $env->txn_stat()
+ $status = $env->setmutexlocks()
+
+ $txn = $env->txn_begin() ;
+ $status = $txn->txn_prepare()
+ $status = $txn->txn_commit()
+ $status = $txn->txn_abort()
+ $status = $txn->txn_id()
+
+ $BerkeleyDB::Error
+ $BerkeleyDB::db_version
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ # deprecated, but supported
+ $txn_mgr = $env->TxnMgr();
+ $status = $txn_mgr->txn_checkpoint()
+ $hash_ref = $txn_mgr->txn_stat()
+ $txn = $txn_mgr->txn_begin() ;
+
+=head1 DESCRIPTION
+
+B<NOTE: This document is still under construction. Expect it to be
+incomplete in places.>
+
+This Perl module provides an interface to most of the functionality
+available in Berkeley DB versions 2 and 3. In general it is safe to assume
+that the interface provided here is identical to the Berkeley DB
+interface. The main changes have been to make the Berkeley DB API work
+in a Perl way. Note that if you are using Berkeley DB 2.x, the new
+features available in Berkeley DB 3.x are not available via this module.
+
+The reader is expected to be familiar with the Berkeley DB
+documentation. Where the interface provided here is identical to the
+Berkeley DB library and the... TODO
+
+The B<db_appinit>, B<db_cursor>, B<db_open> and B<db_txn> man pages are
+particularly relevant.
+
+The interface to Berkeley DB is implemented with a number of Perl
+classes.
+
+=head1 ENV CLASS
+
+The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
+function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
+B<DBENV-E<gt>open> in Berkeley DB 3.x. Its purpose is to initialise a
+number of sub-systems that can then be used in a consistent way in all
+the databases that make use of the environment.
+
+If you don't intend using transactions, locking or logging, then you
+shouldn't need to make use of B<BerkeleyDB::Env>.
+
+=head2 Synopsis
+
+ $env = new BerkeleyDB::Env
+ [ -Home => $path, ]
+ [ -Server => $name, ]
+ [ -CacheSize => $number, ]
+ [ -Config => { name => value, name => value }, ]
+ [ -ErrFile => filename or filehandle, ]
+ [ -ErrPrefix => "string", ]
+ [ -Flags => number, ]
+ [ -LockDetect => number, ]
+ [ -Verbose => boolean, ]
+
+=over 5
+
+All the parameters to the BerkeleyDB::Env constructor are optional.
+
+=item -Home
+
+If present, this parameter should point to an existing directory. Any
+files that I<aren't> specified with an absolute path in the sub-systems
+that are initialised by the BerkeleyDB::Env class will be assumed to
+live in the B<Home> directory.
+
+For example, in the code fragment below the database "fred.db" will be
+opened in the directory "/home/databases" because it was specified as a
+relative path, but "joe.db" will be opened in "/other" because it was
+part of an absolute path.
+
+ $env = new BerkeleyDB::Env
+ -Home => "/home/databases"
+ ...
+
+ $db1 = new BerkeleyDB::Hash
+ -Filename = "fred.db",
+ -Env => $env
+ ...
+
+ $db2 = new BerkeleyDB::Hash
+ -Filename = "/other/joe.db",
+ -Env => $env
+ ...
+
+=item -Server
+
+If present, this parameter should be the hostname of a server that is running
+the Berkeley DB RPC server. All databases will be accessed via the RPC server.
+
+=item -Cachesize
+
+If present, this parameter sets the size of the environment's shared memory
+buffer pool.
+
+=item -Config
+
+This is a variation on the C<-Home> parameter, but it allows finer
+control of where specific types of files will be stored.
+
+The parameter expects a reference to a hash. Valid keys are:
+B<DB_DATA_DIR>, B<DB_LOG_DIR> and B<DB_TMP_DIR>
+
+The code below shows an example of how it can be used.
+
+ $env = new BerkeleyDB::Env
+ -Config => { DB_DATA_DIR => "/home/databases",
+ DB_LOG_DIR => "/home/logs",
+ DB_TMP_DIR => "/home/tmp"
+ }
+ ...
+
+=item -ErrFile
+
+Expects either the name of a file or a reference to a filehandle. Any
+errors generated internally by Berkeley DB will be logged to this file.
+
+=item -ErrPrefix
+
+Allows a prefix to be added to the error messages before they are sent
+to B<-ErrFile>.
+
+=item -Flags
+
+The B<Flags> parameter specifies both which sub-systems to initialise,
+as well as a number of environment-wide options.
+See the Berkeley DB documentation for more details of these options.
+
+Any of the following can be specified by OR'ing them:
+
+B<DB_CREATE>
+
+If any of the files specified do not already exist, create them.
+
+B<DB_INIT_CDB>
+
+Initialise the Concurrent Access Methods
+
+B<DB_INIT_LOCK>
+
+Initialise the Locking sub-system.
+
+B<DB_INIT_LOG>
+
+Initialise the Logging sub-system.
+
+B<DB_INIT_MPOOL>
+
+Initialise the ...
+
+B<DB_INIT_TXN>
+
+Initialise the ...
+
+B<DB_MPOOL_PRIVATE>
+
+Initialise the ...
+
+B<DB_INIT_MPOOL> is also specified.
+
+Initialise the ...
+
+B<DB_NOMMAP>
+
+Initialise the ...
+
+B<DB_RECOVER>
+
+
+
+B<DB_RECOVER_FATAL>
+
+B<DB_THREAD>
+
+B<DB_TXN_NOSYNC>
+
+B<DB_USE_ENVIRON>
+
+B<DB_USE_ENVIRON_ROOT>
+
+=item -LockDetect
+
+Specifies what to do when a lock conflict occurs. The value should be one of
+
+B<DB_LOCK_DEFAULT>
+
+B<DB_LOCK_OLDEST>
+
+B<DB_LOCK_RANDOM>
+
+B<DB_LOCK_YOUNGEST>
+
+=item -Verbose
+
+Add extra debugging information to the messages sent to B<-ErrFile>.
+
+=back
+
+=head2 Methods
+
+The environment class has the following methods:
+
+=over 5
+
+=item $env->errPrefix("string") ;
+
+This method is identical to the B<-ErrPrefix> flag. It allows the
+error prefix string to be changed dynamically.
+
+=item $txn = $env->TxnMgr()
+
+Constructor for creating a B<TxnMgr> object.
+See L<"TRANSACTIONS"> for more details of using transactions.
+
+This method is deprecated. Call the B<txn_> methods below on the
+environment object directly.
+
+=item $env->txn_begin()
+
+TODO
+
+=item $env->txn_stat()
+
+TODO
+
+=item $env->txn_checkpoint()
+
+TODO
+
+=item $env->status()
+
+Returns the status of the last BerkeleyDB::Env method.
+
+=item $env->setmutexlocks()
+
+Only available in Berkeley DB 3.0 or greater. Calls
+B<db_env_set_mutexlocks> when used with Berkeley DB 3.1.x. When used with
+Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
+
+=back
+
+=head2 Examples
+
+TODO.
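+
+In the meantime, here is a minimal sketch that uses only the options
+described above. The file names are hypothetical and the flags simply
+enable all of the sub-systems mentioned earlier.
+
+    use BerkeleyDB ;
+
+    $env = new BerkeleyDB::Env
+        -Home      => "/home/databases",
+        -ErrFile   => "/home/databases/errors.log",
+        -ErrPrefix => "myprog",
+        -Flags     => DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
+                      DB_INIT_LOG | DB_INIT_TXN
+      or die "cannot create environment: $BerkeleyDB::Error\n" ;
+
+    $db = new BerkeleyDB::Hash
+        -Filename => "fred.db",
+        -Flags    => DB_CREATE,
+        -Env      => $env
+      or die "cannot open database: $BerkeleyDB::Error\n" ;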
+
+=head1 THE DATABASE CLASSES
+
+B<BerkeleyDB> supports the following database formats:
+
+=over 5
+
+=item B<BerkeleyDB::Hash>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using B<BerkeleyDB::Hash> are not compatible with any
+of the other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most applications,
+is built into BerkeleyDB. If you do need a different hashing algorithm,
+it is possible to write your own in Perl and have B<BerkeleyDB> use
+it instead.
+
+=item B<BerkeleyDB::Btree>
+
+The Btree format allows arbitrary key/value pairs to be stored in a
+B+tree.
+
+As with the B<BerkeleyDB::Hash> format, it is possible to provide a
+user defined Perl routine to perform the comparison of keys. By default,
+though, the keys are stored in lexical order.
+
+=item B<BerkeleyDB::Recno>
+
+TODO.
+
+
+=item B<BerkeleyDB::Queue>
+
+TODO.
+
+=item B<BerkeleyDB::Unknown>
+
+This isn't a database format at all. It is used when you want to open an
+existing Berkeley DB database without having to know what type it is.
+
+=back
+
+
+Each of the database formats described above is accessed via a
+corresponding B<BerkeleyDB> class. These will be described in turn in
+the next sections.
+
+=head1 BerkeleyDB::Hash
+
+Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Hash
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Hash',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+
+When the "tie" interface is used, reading from and writing to the database
+is achieved via the tied hash. In this case the database operates like
+a Perl associative array that happens to be stored on disk.
+
+In addition to the high-level tied hash interface, it is possible to
+make use of the underlying methods provided by Berkeley DB directly, as
+the fragments below show.
+
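+Both fragments below store and then retrieve a single key/value pair; the
+first uses the tied hash, the second the native B<db_put>/B<db_get>
+methods. The filename is hypothetical.
+
+    # the tied interface
+    tie %h, 'BerkeleyDB::Hash',
+        -Filename => "fruit.db",
+        -Flags    => DB_CREATE
+      or die "cannot open database: $BerkeleyDB::Error\n" ;
+
+    $h{"apple"} = "red" ;
+    print "apple is $h{'apple'}\n" ;
+    untie %h ;
+
+    # the method interface
+    $db = new BerkeleyDB::Hash
+        -Filename => "fruit.db",
+        -Flags    => DB_CREATE
+      or die "cannot open database: $BerkeleyDB::Error\n" ;
+
+    $db->db_put("apple", "red") ;
+    $db->db_get("apple", $colour) ;
+    print "apple is $colour\n" ;
+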
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Hash> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The flags are
+constructed by logically OR'ing together one or more of the following
+values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Ffactor
+
+=item -Nelem
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Hash
+
+Allows you to provide a user defined hash function. If not specified,
+a default hash function is used. Here is a template for a user-defined
+hash function:
+
+ sub hash
+ {
+ my ($data) = shift ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Hash => \&hash,
+ ...
+
+See L<"Changing the hash"> for an example.
+
+=item -DupCompare
+
+Used in conjunction with the B<DB_DUPSORT> flag.
+
+ sub compare
+ {
+        my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Property => DB_DUP|DB_DUPSORT,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+
+=head2 Methods
+
+B<BerkeleyDB::Hash> only supports the standard database methods.
+See L<COMMON DATABASE METHODS>.
+
+=head2 A Simple Tied Hash Example
+
+## simpleHash
+
+here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys retrieved from a
+Hash database come back in an apparently random order.
+
+=head2 Another Simple Hash Example
+
+This does the same as the previous example, but without using tie.
+
+## simpleHash2
+
+=head2 Duplicate keys
+
+The code below is a variation on the examples above. This time the hash has
+been inverted. The key this time is colour and the value is the fruit name.
+The B<DB_DUP> flag has been specified to allow duplicates.
+
+##dupHash
+
+here is the output:
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> banana
+ green -> apple
+
+=head2 Sorting Duplicate Keys
+
+In the previous example, when there were duplicate keys, the values were
+stored in the order in which they were created. The code below is
+identical to the previous example except the B<DB_DUPSORT> flag is
+specified.
+
+##dupSortHash
+
+Notice that in the output below the duplicate values are sorted.
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> apple
+ green -> banana
+
+=head2 Custom Sorting Duplicate Keys
+
+Another variation
+
+TODO
+
+=head2 Changing the hash
+
+TODO
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Btree
+
+Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+
+ $db = new BerkeleyDB::Btree
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Btree',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Btree> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The flags are
+constructed by logically OR'ing together one or more of the following
+values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Minkey
+
+TODO
+
+=item -Compare
+
+Allows you to override the default sort order used in the database. See
+L<"Changing the sort order"> for an example.
+
+ sub compare
+ {
+        my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -Compare => \&compare,
+ ...
+
+=item -Prefix
+
+ sub prefix
+ {
+        my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -Prefix => \&prefix,
+ ...
+
+=item -DupCompare
+
+ sub compare
+ {
+        my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+=head2 Methods
+
+B<BerkeleyDB::Btree> supports the following database methods.
+See also L<COMMON DATABASE METHODS>.
+
+All the methods below return 0 to indicate success.
+
+=over 5
+
+=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
+
+Given a key, C<$key>, this method returns the proportion of keys less than
+C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
+proportion greater than C<$key> in C<$greater>.
+
+The proportion is returned as a double in the range 0.0 to 1.0.
+
+=back
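+
+Here is a minimal sketch of C<db_key_range> in use; the key is taken from
+the Btree example below and the database is assumed to be open already.
+
+    my ($less, $equal, $greater) ;
+    $status = $db->db_key_range("Smith", $less, $equal, $greater) ;
+    if ($status == 0) {
+        printf "%.0f%% of the keys sort before 'Smith'\n", 100 * $less ;
+    }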
+
+=head2 A Simple Btree Example
+
+The code below is a simple example of using a btree database.
+
+## btreeSimple
+
+Here is the output from the code above. The keys have been sorted using
+Berkeley DB's default sorting algorithm.
+
+ Smith
+ Wall
+ mouse
+
+
+=head2 Changing the sort order
+
+It is possible to supply your own sorting algorithm if the one that Berkeley
+DB uses isn't suitable. The code below is identical to the previous example
+except for the case insensitive compare function.
+
+## btreeSortOrder
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Using db_stat
+
+TODO
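+
+In the meantime, here is a minimal sketch. The field names are the ones
+given in the Berkeley DB documentation; C<bt_version> is described under
+L<COMMON DATABASE METHODS>.
+
+    $ref = $db->db_stat() ;
+    print "Btree version is $ref->{'bt_version'}\n" ;
+    print "$_ -> $ref->{$_}\n" for sort keys %$ref ;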
+
+=head1 BerkeleyDB::Recno
+
+Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Recno
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Recno',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+=head2 A Recno Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+## simpleRecno
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head1 BerkeleyDB::Queue
+
+Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
+type B<DB_QUEUE> in Berkeley DB 3.x. This database format isn't available if
+you use Berkeley DB 2.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Queue
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -ExtentSize => number, ]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Queue',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+
+
+=head1 BerkeleyDB::Unknown
+
+This class is used to open an existing database.
+
+Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
+Berkeley DB 3.x.
+
+The constructor looks like this:
+
+ $db = new BerkeleyDB::Unknown
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+
+
+=head2 An example
+
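+Here is a minimal sketch; it opens an existing database of unknown type
+read-only (the filename is hypothetical) and reports what kind of database
+it turned out to be.
+
+    $db = new BerkeleyDB::Unknown
+        -Filename => "mystery.db",
+        -Flags    => DB_RDONLY
+      or die "cannot open database: $BerkeleyDB::Error\n" ;
+
+    $type = $db->type() ;
+    print "Hash database\n"  if $type == DB_HASH ;
+    print "Btree database\n" if $type == DB_BTREE ;
+    print "Recno database\n" if $type == DB_RECNO ;
+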
+=head1 COMMON OPTIONS
+
+All database access class constructors support the common set of
+options defined below. All are optional.
+
+=over 5
+
+=item -Filename
+
+The database filename. If no filename is specified, a temporary file will
+be created and removed once the program terminates.
+
+=item -Subname
+
+Specifies the name of the sub-database to open.
+This option is only valid if you are using Berkeley DB 3.x.
+
+=item -Flags
+
+Specify how the database will be opened/created. The valid flags are:
+
+B<DB_CREATE>
+
+Create any underlying files, as necessary. If the files do not already
+exist and the B<DB_CREATE> flag is not specified, the call will fail.
+
+B<DB_NOMMAP>
+
+Not supported by BerkeleyDB.
+
+B<DB_RDONLY>
+
+Opens the database in read-only mode.
+
+B<DB_THREAD>
+
+Not supported by BerkeleyDB.
+
+B<DB_TRUNCATE>
+
+If the database file already exists, remove all the data before
+opening it.
+
+=item -Mode
+
+Determines the file protection when the database is created. Defaults
+to 0666.
+
+=item -Cachesize
+
+Sets the size (in bytes) of the database cache.
+
+=item -Lorder
+
+Sets the byte order of the integers stored in the database meta-data. A
+value of 0 means use the host byte order.
+
+=item -Pagesize
+
+Sets the size (in bytes) of the pages used to hold items in the database.
+
+=item -Env
+
+When working under a Berkeley DB environment, this parameter should be
+set to the B<BerkeleyDB::Env> object for that environment.
+
+Defaults to no environment.
+
+=item -Txn
+
+TODO.
+
+=back
+
+=head1 COMMON DATABASE METHODS
+
+All the database interfaces support the common set of methods defined
+below.
+
+All the methods below return 0 to indicate success.
+
+=head2 $status = $db->db_get($key, $value [, $flags])
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. If it exists, the value read from the database is
+returned in the C<$value> parameter.
+
+The B<$flags> parameter is optional. If present, it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_GET_BOTH>
+
+When the B<DB_GET_BOTH> flag is specified, B<db_get> checks for the
+existence of B<both> the C<$key> B<and> C<$value> in the database.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO
+
+=back
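+
+For example, assuming an already open database C<$db>, a plain read and a
+B<DB_GET_BOTH> lookup might look like this (the key and value are
+hypothetical):
+
+    $status = $db->db_get("apple", $value) ;
+    print "apple -> $value\n" if $status == 0 ;
+
+    # only succeeds if the pair ("apple", "red") is in the database
+    $value = "red" ;
+    $status = $db->db_get("apple", $value, DB_GET_BOTH) ;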
+
+
+=head2 $status = $db->db_put($key, $value [, $flags])
+
+Stores a key/value pair in the database.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_APPEND>
+
+This flag is only applicable when accessing a B<BerkeleyDB::Recno>
+database.
+
+TODO.
+
+
+=item B<DB_NOOVERWRITE>
+
+If this flag is specified and C<$key> already exists in the database,
+the call to B<db_put> will return B<DB_KEYEXIST>.
+
+=back
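+
+For example, using B<DB_NOOVERWRITE> to insert a key only if it isn't
+already present (the key and value are hypothetical):
+
+    $status = $db->db_put("apple", "red", DB_NOOVERWRITE) ;
+    print "apple is already in the database\n"
+        if $status == DB_KEYEXIST ;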
+
+=head2 $status = $db->db_del($key [, $flags])
+
+Deletes a key/value pair in the database associated with C<$key>.
+If duplicate keys are enabled in the database, B<db_del> will delete
+B<all> key/value pairs with key C<$key>.
+
+The B<$flags> parameter is optional and is currently unused.
+
+=head2 $status = $db->db_sync()
+
+If any parts of the database are in memory, write them to the database.
+
+=head2 $cursor = $db->db_cursor([$flags])
+
+Creates a cursor object. This is used to access the contents of the
+database sequentially. See L<CURSORS> for details of the methods
+available when working with cursors.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+
+TODO
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+TODO
+
+=head2 $db->byteswapped()
+
+TODO
+
+=head2 $db->type()
+
+Returns the type of the database. The possible return codes are B<DB_HASH>
+for a B<BerkeleyDB::Hash> database, B<DB_BTREE> for a B<BerkeleyDB::Btree>
+database and B<DB_RECNO> for a B<BerkeleyDB::Recno> database. This method
+is typically used when a database has been opened with
+B<BerkeleyDB::Unknown>.
+
+=head2 $ref = $db->db_stat()
+
+Returns a reference to an associative array containing information about
+the database. The keys of the associative array correspond directly to the
+names of the fields defined in the Berkeley DB documentation. For example,
+in the DB documentation, the field B<bt_version> stores the version of the
+Btree database. Assuming you called B<db_stat> on a Btree database the
+equivalent field would be accessed as follows:
+
+ $version = $ref->{'bt_version'} ;
+
+If you are using Berkeley DB 3.x, this method will work with all database
+formats. When DB 2.x is used, it only works with B<BerkeleyDB::Btree>.
+
+=head2 $status = $db->status()
+
+Returns the status of the last C<$db> method called.
+
+=head1 CURSORS
+
+A cursor is used whenever you want to access the contents of a database
+in sequential order.
+A cursor object is created with the C<db_cursor> method.
+
+A cursor object has the following methods available:
+
+=head2 $newcursor = $cursor->c_dup($flags)
+
+Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
+
+The C<$flags> parameter is optional and can take the following value:
+
+=over 5
+
+=item DB_POSITION
+
+When present this flag will position the new cursor at the same place as the
+existing cursor.
+
+=back
+
+=head2 $status = $cursor->c_get($key, $value, $flags)
+
+Reads a key/value pair from the database, returning the data in C<$key>
+and C<$value>. The key/value pair actually read is controlled by the
+C<$flags> parameter, which can take B<one> of the following values:
+
+=over 5
+
+=item B<DB_FIRST>
+
+Set the cursor to point to the first key/value pair in the
+database. Return the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_LAST>
+
+Set the cursor to point to the last key/value pair in the database. Return
+the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_NEXT>
+
+If the cursor is already pointing to a key/value pair, it will be
+incremented to point to the next key/value pair and return its contents.
+
+If the cursor isn't initialised, B<DB_NEXT> works just like B<DB_FIRST>.
+
+If the cursor is already positioned at the last key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_NEXT_DUP>
+
+This flag is only valid when duplicate keys have been enabled in
+a database.
+If the cursor is already pointing to a key/value pair and the key of
+the next key/value pair is identical, the cursor will be incremented to
+point to it and their contents returned.
+
+=item B<DB_PREV>
+
+If the cursor is already pointing to a key/value pair, it will be
+decremented to point to the previous key/value pair and return its
+contents.
+
+If the cursor isn't initialised, B<DB_PREV> works just like B<DB_LAST>.
+
+If the cursor is already positioned at the first key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_CURRENT>
+
+If the cursor has been set to point to a key/value pair, return their
+contents.
+If the key/value pair referenced by the cursor has been deleted, B<c_get>
+will return B<DB_KEYEMPTY>.
+
+=item B<DB_SET>
+
+Set the cursor to point to the key/value pair referenced by B<$key>
+and return the value in B<$value>.
+
+=item B<DB_SET_RANGE>
+
+This flag is a variation on the B<DB_SET> flag. As well as returning
+the value, it also returns the key, via B<$key>.
+When used with a B<BerkeleyDB::Btree> database the key matched by B<c_get>
+will be the shortest key (in length) which is greater than or equal to
+the key supplied, via B<$key>. This allows partial key searches.
+See ??? for an example of how to use this flag.
+
+=item B<DB_GET_BOTH>
+
+Another variation on B<DB_SET>. This one returns both the key and
+the value.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=item B<DB_GET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 $status = $cursor->c_put($key, $value, $flags)
+
+Stores the key/value pair in the database. The position that the data is
+stored in the database is controlled by the C<$flags> parameter, which
+must take B<one> of the following values:
+
+=over 5
+
+=item B<DB_AFTER>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately after the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+
+=item B<DB_BEFORE>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately before the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+=item B<DB_CURRENT>
+
+If the cursor has been initialised, replace the value of the key/value
+pair stored in the database with the contents of B<$value>.
+
+=item B<DB_KEYFIRST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the first entry in
+the duplicates for the particular key.
+
+=item B<DB_KEYLAST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the last entry in
+the duplicates for the particular key.
+
+=back
+
+=head2 $status = $cursor->c_del([$flags])
+
+This method deletes the key/value pair associated with the current cursor
+position. The cursor position will not be changed by this operation, so
+any subsequent cursor operation must first initialise the cursor to
+point to a valid key/value pair.
+
+If the key/value pair associated with the cursor have already been
+deleted, B<c_del> will return B<DB_KEYEMPTY>.
+
+The B<$flags> parameter is not used at present.
+
+=head2 $status = $cursor->c_count($cnt [, $flags])
+
+Stores the number of duplicates at the current cursor position in B<$cnt>.
+
+The B<$flags> parameter is not used at present. This method needs
+Berkeley DB 3.1 or better.
+
+=head2 $status = $cursor->status()
+
+Returns the status of the last cursor method as a dual type.
+
+=head2 Cursor Examples
+
+TODO
+
+Iterating from first to last, then in reverse.
+
+examples of each of the flags.
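+
+In the meantime, here is a minimal sketch that walks an already open
+database C<$db> from the first key/value pair to the last using B<DB_NEXT>,
+closing the cursor with C<c_close> when it has finished.
+
+    my ($key, $value) = ("", "") ;
+    my $cursor = $db->db_cursor() ;
+    while ($cursor->c_get($key, $value, DB_NEXT) == 0) {
+        print "$key -> $value\n" ;
+    }
+    # the loop terminates when c_get returns DB_NOTFOUND
+    $cursor->c_close() ;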
+
+=head1 JOIN
+
+Join support for BerkeleyDB is in progress. Watch this space.
+
+TODO
+
+=head1 TRANSACTIONS
+
+TODO.
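+
+In the meantime, here is a minimal sketch. It assumes an environment opened
+with the transaction sub-system enabled (see the B<-Flags> option to
+B<BerkeleyDB::Env>) and that C<txn_commit>/C<txn_abort> on the transaction
+object behave as described in the Berkeley DB documentation.
+
+    $env = new BerkeleyDB::Env
+        -Home  => "/home/databases",
+        -Flags => DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
+                  DB_INIT_LOG | DB_INIT_TXN
+      or die "cannot create environment: $BerkeleyDB::Error\n" ;
+
+    $txn = $env->txn_begin() ;
+
+    $db = new BerkeleyDB::Hash
+        -Filename => "fred.db",
+        -Flags    => DB_CREATE,
+        -Env      => $env,
+        -Txn      => $txn
+      or die "cannot open database: $BerkeleyDB::Error\n" ;
+
+    $db->db_put("apple", "red") ;
+
+    # make the change permanent, or throw it away with $txn->txn_abort()
+    $txn->txn_commit() ;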
+
+=head1 DBM Filters
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a DBM
+database. All of the database classes (BerkeleyDB::Hash,
+BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
+
+There are four methods associated with DBM Filters. All work
+identically, and each is used to install (or uninstall) a single DBM
+Filter. Each expects a single parameter, namely a reference to a sub.
+The only difference between them is the place that the filter is
+installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
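+
+For example, here are two trivial filters that only illustrate the
+mechanism; the first reports every key as it is written, without changing
+it, and the second upper-cases every value as it is read back.
+
+    $db->filter_store_key( sub { print "storing key [$_]\n" } ) ;
+    $db->filter_fetch_value( sub { $_ = uc $_ } ) ;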
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database that you need
+to share with a third-party C application. The C application assumes
+that I<all> keys and values are NULL terminated. Unfortunately when
+Perl writes to DBM databases it doesn't use NULL termination, so your
+Perl application will have to manage NULL termination itself. When you
+write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL termination issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+## nullFilter
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+## intFilter
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
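+
+In outline, the two key filters simply pack the key on the way into the
+database and unpack it again on the way back out, using Perl's native
+integer format:
+
+    $db->filter_store_key( sub { $_ = pack  ("i", $_) } ) ;
+    $db->filter_fetch_key( sub { $_ = unpack("i", $_) } ) ;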
+
+=head1 Using BerkeleyDB with MLDBM
+
+Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
+module. The code fragment below shows how to associate MLDBM with
+BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
+BerkeleyDB::Btree with BerkeleyDB::Hash.
+
+ use strict ;
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = 'testmldbm' ;
+ my %o ;
+
+ unlink $filename ;
+ tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+                    or die "Cannot open database '$filename': $!\n";
+
+See the MLDBM documentation for information on how to use the module
+and for details of its limitations.
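+
+For example, once the tie in the fragment above has succeeded, nested Perl
+data structures can be stored and retrieved transparently (the keys and
+data shown are hypothetical):
+
+    $o{"fruit"}   = { apple => "red", banana => "yellow" } ;
+    $o{"numbers"} = [ 1, 2, 3 ] ;
+
+    my $fruit = $o{"fruit"} ;
+    print "apple is $fruit->{'apple'}\n" ;
+
+Remember that MLDBM serialises a whole value at a time, so to modify part
+of a nested structure you must fetch it, change it and store it back in
+full.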
+
+=head1 EXAMPLES
+
+TODO.
+
+=head1 HINTS & TIPS
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings
+are not. See L<An Example -- the NULL termination problem.> in the DBM
+FILTERS section for a generic way to work around this problem.
+
+
+=head2 The untie Gotcha
+
+TODO
+
+=head1 COMMON QUESTIONS
+
+This section attempts to answer some of the more common questions that
+I get asked.
+
+
+=head2 Relationship with DB_File
+
+Before Berkeley DB 2.x was written there was only one Perl module that
+interfaced to Berkeley DB. That module is called B<DB_File>. Although
+B<DB_File> can be built with Berkeley DB 1.x, 2.x or 3.x, it only provides
+an interface to the functionality available in Berkeley DB 1.x. That
+means that it doesn't support transactions, locking or any of the other
+new features available in DB 2.x or 3.x.
+
+=head2 How do I store Perl data structures with BerkeleyDB?
+
+See L<Using BerkeleyDB with MLDBM>.
+
+=head1 HISTORY
+
+See the Changes file.
+
+=head1 AVAILABILITY
+
+The most recent version of B<BerkeleyDB> can always be found
+on CPAN (see L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/BerkeleyDB>.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<BerkeleyDB> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of Berkeley
+ DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of BerkeleyDB.
+See L<"AUTHOR"> for details.
+
+
+=head1 AUTHOR
+
+Paul Marquess E<lt>Paul.Marquess@btinternet.comE<gt>.
+
+Questions about Berkeley DB may be addressed to E<lt>db@sleepycat.comE<gt>.
+
+=head1 SEE ALSO
+
+perl(1), DB_File, Berkeley DB.
+
+=cut
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB.xs b/bdb/perl.BerkeleyDB/BerkeleyDB.xs
new file mode 100644
index 00000000000..19126c98b53
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/BerkeleyDB.xs
@@ -0,0 +1,3927 @@
+/*
+
+ BerkeleyDB.xs -- Perl 5 interface to Berkeley DB version 2 & 3
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1997-2001 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Please refer to the COPYRIGHT section in
+
+ Changes:
+ 0.01 - First Alpha Release
+ 0.02 -
+
+*/
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+#define PERL_POLLUTE
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+/* Being the Berkeley DB we prefer the <sys/cdefs.h> (which will be
+ * shortly #included by the <db.h>) __attribute__ to the possibly
+ * already defined __attribute__, for example by GNUC or by Perl. */
+
+#undef __attribute__
+
+#ifndef PERL_VERSION
+# include "patchlevel.h"
+# define PERL_REVISION 5
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+#endif
+
+#if PERL_REVISION == 5 && (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION <= 75 ))
+
+# define PL_sv_undef sv_undef
+# define PL_na na
+# define PL_dirty dirty
+
+#endif
+
+#include <db.h>
+
+#if (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0)
+# define IS_DB_3_0
+#endif
+
+#if DB_VERSION_MAJOR >= 3
+# define AT_LEAST_DB_3
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_3_1
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
+# define AT_LEAST_DB_3_2
+#endif
+
+/* need to define DEFSV & SAVE_DEFSV for older version of Perl */
+#ifndef DEFSV
+# define DEFSV GvSV(defgv)
+#endif
+
+#ifndef SAVE_DEFSV
+# define SAVE_DEFSV SAVESPTR(GvSV(defgv))
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef dTHR
+# define dTHR
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(a,b) newSVpv(a,b)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#define DBM_FILTERING
+#define STRICT_CLOSE
+/* #define ALLOW_RECNO_OFFSET */
+/* #define TRACE */
+
+#if DB_VERSION_MAJOR == 2 && ! defined(DB_LOCK_DEADLOCK)
+# define DB_LOCK_DEADLOCK EAGAIN
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#if DB_VERSION_MAJOR == 2
+# define DB_QUEUE 4
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#ifdef AT_LEAST_DB_3_2
+# define DB_callback DB * db,
+#else
+# define DB_callback
+#endif
+
+#if DB_VERSION_MAJOR > 2
+typedef struct {
+ int db_lorder;
+ size_t db_cachesize;
+ size_t db_pagesize;
+
+
+ void *(*db_malloc) __P((size_t));
+ int (*dup_compare)
+ __P((DB_callback const DBT *, const DBT *));
+
+ u_int32_t bt_maxkey;
+ u_int32_t bt_minkey;
+ int (*bt_compare)
+ __P((DB_callback const DBT *, const DBT *));
+ size_t (*bt_prefix)
+ __P((DB_callback const DBT *, const DBT *));
+
+ u_int32_t h_ffactor;
+ u_int32_t h_nelem;
+ u_int32_t (*h_hash)
+ __P((DB_callback const void *, u_int32_t));
+
+ int re_pad;
+ int re_delim;
+ u_int32_t re_len;
+ char *re_source;
+
+#define DB_DELIMITER 0x0001
+#define DB_FIXEDLEN 0x0008
+#define DB_PAD 0x0010
+ u_int32_t flags;
+ u_int32_t q_extentsize;
+} DB_INFO ;
+
+#endif /* DB_VERSION_MAJOR > 2 */
+
+typedef struct {
+ int Status ;
+ /* char ErrBuff[1000] ; */
+ SV * ErrPrefix ;
+ SV * ErrHandle ;
+ DB_ENV * Env ;
+ int open_dbs ;
+ int TxnMgrStatus ;
+ int active ;
+ bool txn_enabled ;
+ } BerkeleyDB_ENV_type ;
+
+
+typedef struct {
+ DBTYPE type ;
+ bool recno_or_queue ;
+ char * filename ;
+ BerkeleyDB_ENV_type * parent_env ;
+ DB * dbp ;
+ SV * compare ;
+ SV * dup_compare ;
+ SV * prefix ;
+ SV * hash ;
+ int Status ;
+ DB_INFO * info ;
+ DBC * cursor ;
+ DB_TXN * txn ;
+ int open_cursors ;
+ u_int32_t partial ;
+ u_int32_t dlen ;
+ u_int32_t doff ;
+ int active ;
+#ifdef ALLOW_RECNO_OFFSET
+ int array_base ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif
+ } BerkeleyDB_type;
+
+
+typedef struct {
+ DBTYPE type ;
+ bool recno_or_queue ;
+ char * filename ;
+ DB * dbp ;
+ SV * compare ;
+ SV * dup_compare ;
+ SV * prefix ;
+ SV * hash ;
+ int Status ;
+ DB_INFO * info ;
+ DBC * cursor ;
+ DB_TXN * txn ;
+ BerkeleyDB_type * parent_db ;
+ u_int32_t partial ;
+ u_int32_t dlen ;
+ u_int32_t doff ;
+ int active ;
+#ifdef ALLOW_RECNO_OFFSET
+ int array_base ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif
+ } BerkeleyDB_Cursor_type;
+
+typedef struct {
+ BerkeleyDB_ENV_type * env ;
+ } BerkeleyDB_TxnMgr_type ;
+
+#if 1
+typedef struct {
+ int Status ;
+ DB_TXN * txn ;
+ int active ;
+ } BerkeleyDB_Txn_type ;
+#else
+typedef DB_TXN BerkeleyDB_Txn_type ;
+#endif
+
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env ;
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env__Raw ;
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env__Inner ;
+typedef BerkeleyDB_type * BerkeleyDB ;
+typedef void * BerkeleyDB__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Common ;
+typedef BerkeleyDB_type * BerkeleyDB__Common__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Common__Inner ;
+typedef BerkeleyDB_type * BerkeleyDB__Hash ;
+typedef BerkeleyDB_type * BerkeleyDB__Hash__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Btree ;
+typedef BerkeleyDB_type * BerkeleyDB__Btree__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Recno ;
+typedef BerkeleyDB_type * BerkeleyDB__Recno__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Queue ;
+typedef BerkeleyDB_type * BerkeleyDB__Queue__Raw ;
+typedef BerkeleyDB_Cursor_type BerkeleyDB__Cursor_type ;
+typedef BerkeleyDB_Cursor_type * BerkeleyDB__Cursor ;
+typedef BerkeleyDB_Cursor_type * BerkeleyDB__Cursor__Raw ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Raw ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Inner ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn__Raw ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn__Inner ;
+#if 0
+typedef DB_LOG * BerkeleyDB__Log ;
+typedef DB_LOCKTAB * BerkeleyDB__Lock ;
+#endif
+typedef DBT DBTKEY ;
+typedef DBT DBT_OPT ;
+typedef DBT DBT_B ;
+typedef DBT DBTKEY_B ;
+typedef DBT DBTVALUE ;
+typedef void * PV_or_NULL ;
+typedef PerlIO * IO_or_NULL ;
+typedef int DualType ;
+
+static void
+hash_delete(char * hash, IV key);
+
+#ifdef TRACE
+# define Trace(x) printf x
+#else
+# define Trace(x)
+#endif
+
+#ifdef ALLOW_RECNO_OFFSET
+# define RECNO_BASE db->array_base
+#else
+# define RECNO_BASE 1
+#endif
+
+#if DB_VERSION_MAJOR == 2
+# define flagSet_DB2(i, f) i |= f
+#else
+# define flagSet_DB2(i, f)
+#endif
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define flagSet(bitmask) (flags & (bitmask))
+#else
+# define flagSet(bitmask) ((flags & DB_OPFLAGS_MASK) == (bitmask))
+#endif
+
+#ifdef DBM_FILTERING
+#define ckFilter(arg,type,name) \
+ if (db->type) { \
+ SV * save_defsv ; \
+ /* printf("filtering %s\n", name) ;*/ \
+ if (db->filtering) \
+ softCrash("recursion detected in %s", name) ; \
+ db->filtering = TRUE ; \
+ save_defsv = newSVsv(DEFSV) ; \
+ sv_setsv(DEFSV, arg) ; \
+ PUSHMARK(sp) ; \
+ (void) perl_call_sv(db->type, G_DISCARD|G_NOARGS); \
+ sv_setsv(arg, DEFSV) ; \
+ sv_setsv(DEFSV, save_defsv) ; \
+ SvREFCNT_dec(save_defsv) ; \
+ db->filtering = FALSE ; \
+ /*printf("end of filtering %s\n", name) ;*/ \
+ }
+#else
+#define ckFilter(type, sv, name)
+#endif
+
+#define ERR_BUFF "BerkeleyDB::Error"
+
+#define ZMALLOC(to, typ) ((to = (typ *)safemalloc(sizeof(typ))), \
+ Zero(to,1,typ))
+
+#define DBT_clear(x) Zero(&x, 1, DBT) ;
+
+#if 1
+#define getInnerObject(x) SvIV(*av_fetch((AV*)SvRV(x), 0, FALSE))
+#else
+#define getInnerObject(x) SvIV((SV*)SvRV(sv))
+#endif
+
+#define my_sv_setpvn(sv, d, s) (s ? sv_setpvn(sv, d, s) : sv_setpv(sv, "") )
+
+#define SetValue_iv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = SvIV(sv)
+#define SetValue_io(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = IoOFP(sv_2io(sv))
+#define SetValue_sv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = sv
+#define SetValue_pv(i, k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = (t)SvPV(sv,PL_na)
+#define SetValue_pvx(i, k, t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = (t)SvPVX(sv)
+#define SetValue_ov(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ IV tmp = getInnerObject(sv) ; \
+ i = (t) tmp ; \
+ }
+
+#define SetValue_ovx(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ HV * hv = (HV *)GetInternalObject(sv); \
+ SV ** svp = hv_fetch(hv, "db", 2, FALSE);\
+ IV tmp = SvIV(*svp); \
+ i = (t) tmp ; \
+ }
+
+#define SetValue_ovX(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ IV tmp = SvIV(GetInternalObject(sv));\
+ i = (t) tmp ; \
+ }
+
+#define LastDBerror DB_RUNRECOVERY
+
+#define setDUALerrno(var, err) \
+ sv_setnv(var, (double)err) ; \
+ sv_setpv(var, ((err) ? db_strerror(err) : "")) ;\
+ SvNOK_on(var);
+
+#define OutputValue(arg, name) \
+ { if (RETVAL == 0) { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ } \
+ }
+
+#define OutputValue_B(arg, name) \
+ { if (RETVAL == 0) { \
+ if (db->type == DB_BTREE && \
+ flagSet(DB_GET_RECNO)){ \
+ sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
+ } \
+ else { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ } \
+ ckFilter(arg, filter_fetch_value, "filter_fetch_value"); \
+ } \
+ }
+
+#define OutputKey(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (!db->recno_or_queue) { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ else \
+ sv_setiv(arg, (I32)*(I32*)name.data - RECNO_BASE); \
+ ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+ } \
+ }
+
+#define OutputKey_B(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (db->recno_or_queue || \
+ (db->type == DB_BTREE && \
+ flagSet(DB_GET_RECNO))){ \
+ sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
+ } \
+ else { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+ } \
+ }
+
+#define SetPartial(data,db) \
+ data.flags = db->partial ; \
+ data.dlen = db->dlen ; \
+ data.doff = db->doff ;
+
+#define ckActive(active, type) \
+ { \
+ if (!active) \
+ softCrash("%s is already closed", type) ; \
+ }
+
+#define ckActive_Environment(a) ckActive(a, "Environment")
+#define ckActive_TxnMgr(a) ckActive(a, "Transaction Manager")
+#define ckActive_Transaction(a) ckActive(a, "Transaction")
+#define ckActive_Database(a) ckActive(a, "Database")
+#define ckActive_Cursor(a) ckActive(a, "Cursor")
+
+/* Internal Global Data */
+static db_recno_t Value ;
+static db_recno_t zero = 0 ;
+static BerkeleyDB CurrentDB ;
+static DBTKEY empty ;
+static char ErrBuff[1000] ;
+
+static char *
+my_strdup(const char *s)
+{
+ if (s == NULL)
+ return NULL ;
+
+ {
+	MEM_SIZE l = strlen(s) + 1;	/* include room for the trailing NUL */
+	char *s1 = (char *)safemalloc(l);
+
+ Copy(s, s1, (MEM_SIZE)l, char);
+ return s1;
+ }
+}
+
+#if DB_VERSION_MAJOR == 2
+static char *
+db_strerror(int err)
+{
+ if (err == 0)
+ return "" ;
+
+ if (err > 0)
+ return Strerror(err) ;
+
+ switch (err) {
+ case DB_INCOMPLETE:
+ return ("DB_INCOMPLETE: Sync was unable to complete");
+ case DB_KEYEMPTY:
+ return ("DB_KEYEMPTY: Non-existent key/data pair");
+ case DB_KEYEXIST:
+ return ("DB_KEYEXIST: Key/data pair already exists");
+ case DB_LOCK_DEADLOCK:
+ return (
+ "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock");
+ case DB_LOCK_NOTGRANTED:
+ return ("DB_LOCK_NOTGRANTED: Lock not granted");
+ case DB_LOCK_NOTHELD:
+ return ("DB_LOCK_NOTHELD: Lock not held by locker");
+ case DB_NOTFOUND:
+ return ("DB_NOTFOUND: No matching key/data pair found");
+ case DB_RUNRECOVERY:
+ return ("DB_RUNRECOVERY: Fatal error, run database recovery");
+ default:
+ return "Unknown Error" ;
+
+ }
+}
+#endif /* DB_VERSION_MAJOR == 2 */
+
+static char *
+my_db_strerror(int err)
+{
+ static char buffer[1000] ;
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ sprintf(buffer, "%d: %s", err, db_strerror(err)) ;
+ if (err && sv) {
+ strcat(buffer, ", ") ;
+ strcat(buffer, SvPVX(sv)) ;
+ }
+ return buffer;
+}
+
+static void
+close_everything(void)
+{
+ dTHR;
+ Trace(("close_everything\n")) ;
+ /* Abort All Transactions */
+ {
+ BerkeleyDB__Txn__Raw tid ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Txn", TRUE);
+ I32 ret = hv_iterinit(hv) ;
+ int all = 0 ;
+ int closed = 0 ;
+ Trace(("BerkeleyDB::Term::close_all_txns dirty=%d\n", PL_dirty)) ;
+ while ( he = hv_iternext(hv) ) {
+ tid = * (BerkeleyDB__Txn__Raw *) (IV) hv_iterkey(he, &len) ;
+ Trace((" Aborting Transaction [%d] in [%d] Active [%d]\n", tid->txn, tid, tid->active));
+ if (tid->active) {
+ txn_abort(tid->txn);
+ ++ closed ;
+ }
+ tid->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_txns aborted %d of %d transactios\n",closed, all)) ;
+ }
+
+ /* Close All Cursors */
+ {
+ BerkeleyDB__Cursor db ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Cursor", TRUE);
+ I32 ret = hv_iterinit(hv) ;
+ int all = 0 ;
+ int closed = 0 ;
+ Trace(("BerkeleyDB::Term::close_all_cursors \n")) ;
+ while ( he = hv_iternext(hv) ) {
+ db = * (BerkeleyDB__Cursor*) (IV) hv_iterkey(he, &len) ;
+ Trace((" Closing Cursor [%d] in [%d] Active [%d]\n", db->cursor, db, db->active));
+ if (db->active) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ ++ closed ;
+ }
+ db->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_cursors closed %d of %d cursors\n",closed, all)) ;
+ }
+
+ /* Close All Databases */
+ {
+ BerkeleyDB db ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Db", TRUE);
+ I32 ret = hv_iterinit(hv) ;
+ int all = 0 ;
+ int closed = 0 ;
+ Trace(("BerkeleyDB::Term::close_all_dbs\n" )) ;
+ while ( he = hv_iternext(hv) ) {
+ db = * (BerkeleyDB*) (IV) hv_iterkey(he, &len) ;
+ Trace((" Closing Database [%d] in [%d] Active [%d]\n", db->dbp, db, db->active));
+ if (db->active) {
+ (db->dbp->close)(db->dbp, 0) ;
+ ++ closed ;
+ }
+ db->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_dbs closed %d of %d dbs\n",closed, all)) ;
+ }
+
+ /* Close All Environments */
+ {
+ BerkeleyDB__Env env ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Env", TRUE);
+ I32 ret = hv_iterinit(hv) ;
+ int all = 0 ;
+ int closed = 0 ;
+ Trace(("BerkeleyDB::Term::close_all_envs\n")) ;
+ while ( he = hv_iternext(hv) ) {
+ env = * (BerkeleyDB__Env*) (IV) hv_iterkey(he, &len) ;
+ Trace((" Closing Environment [%d] in [%d] Active [%d]\n", env->Env, env, env->active));
+ if (env->active) {
+#if DB_VERSION_MAJOR == 2
+ db_appexit(env->Env) ;
+#else
+ (env->Env->close)(env->Env, 0) ;
+#endif
+ ++ closed ;
+ }
+ env->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_envs closed %d of %d dbs\n",closed, all)) ;
+ }
+
+ Trace(("end close_everything\n")) ;
+
+}
+
+static void
+destroyDB(BerkeleyDB db)
+{
+ dTHR;
+ if (! PL_dirty && db->active) {
+ -- db->open_cursors ;
+ ((db->dbp)->close)(db->dbp, 0) ;
+ }
+ if (db->hash)
+ SvREFCNT_dec(db->hash) ;
+ if (db->compare)
+ SvREFCNT_dec(db->compare) ;
+ if (db->dup_compare)
+ SvREFCNT_dec(db->dup_compare) ;
+ if (db->prefix)
+ SvREFCNT_dec(db->prefix) ;
+#ifdef DBM_FILTERING
+ if (db->filter_fetch_key)
+ SvREFCNT_dec(db->filter_fetch_key) ;
+ if (db->filter_store_key)
+ SvREFCNT_dec(db->filter_store_key) ;
+ if (db->filter_fetch_value)
+ SvREFCNT_dec(db->filter_fetch_value) ;
+ if (db->filter_store_value)
+ SvREFCNT_dec(db->filter_store_value) ;
+#endif
+ hash_delete("BerkeleyDB::Term::Db", (IV)db) ;
+ if (db->filename)
+ Safefree(db->filename) ;
+ Safefree(db) ;
+}
+
+static void
+softCrash(const char *pat, ...)
+{
+ char buffer1 [500] ;
+ char buffer2 [500] ;
+ va_list args;
+ va_start(args, pat);
+
+ Trace(("softCrash: %s\n", pat)) ;
+
+#define ABORT_PREFIX "BerkeleyDB Aborting: "
+
+ /* buffer = (char*) safemalloc(strlen(pat) + strlen(ABORT_PREFIX) + 1) ; */
+ strcpy(buffer1, ABORT_PREFIX) ;
+ strcat(buffer1, pat) ;
+
+ vsprintf(buffer2, buffer1, args) ;
+
+ croak(buffer2);
+
+ /* NOTREACHED */
+ va_end(args);
+}
+
+
+static I32
+GetArrayLength(BerkeleyDB db)
+{
+ DBT key ;
+ DBT value ;
+ int RETVAL = 0 ;
+ DBC * cursor ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor) == 0 )
+#else
+ if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor, 0) == 0 )
+#endif
+ {
+ RETVAL = cursor->c_get(cursor, &key, &value, DB_LAST) ;
+ if (RETVAL == 0)
+ RETVAL = *(I32 *)key.data ;
+ else /* No key means empty file */
+ RETVAL = 0 ;
+ cursor->c_close(cursor) ;
+ }
+
+ Trace(("GetArrayLength got %d\n", RETVAL)) ;
+ return ((I32)RETVAL) ;
+}
+
+#if 0
+
+#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
+
+static db_recno_t
+_GetRecnoKey(BerkeleyDB db, I32 value)
+{
+ Trace(("GetRecnoKey start value = %d\n", value)) ;
+ if (db->recno_or_queue && value < 0) {
+ /* Get the length of the array */
+ I32 length = GetArrayLength(db) ;
+
+ /* check for attempt to write before start of array */
+ if (length + value + RECNO_BASE <= 0)
+ softCrash("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+
+ value = length + value + RECNO_BASE ;
+ }
+ else
+ ++ value ;
+
+ Trace(("GetRecnoKey end value = %d\n", value)) ;
+
+ return value ;
+}
+
+#else /* ! 0 */
+
+#if 0
+#ifdef ALLOW_RECNO_OFFSET
+#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
+
+static db_recno_t
+_GetRecnoKey(BerkeleyDB db, I32 value)
+{
+ if (value + RECNO_BASE < 1)
+ softCrash("key value %d < base (%d)", (value), RECNO_BASE?0:1) ;
+ return value + RECNO_BASE ;
+}
+
+#else
+#endif /* ALLOW_RECNO_OFFSET */
+#endif /* 0 */
+
+#define GetRecnoKey(db, value) ((value) + RECNO_BASE )
+
+#endif /* 0 */
+
+static SV *
+GetInternalObject(SV * sv)
+{
+ SV * info = (SV*) NULL ;
+ SV * s ;
+ MAGIC * mg ;
+
+ Trace(("in GetInternalObject %d\n", sv)) ;
+ if (sv == NULL || !SvROK(sv))
+ return NULL ;
+
+ s = SvRV(sv) ;
+ if (SvMAGICAL(s))
+ {
+ if (SvTYPE(s) == SVt_PVHV || SvTYPE(s) == SVt_PVAV)
+ mg = mg_find(s, 'P') ;
+ else
+ mg = mg_find(s, 'q') ;
+
+ /* all this testing is probably overkill, but till I know more
+ about global destruction it stays.
+ */
+ /* if (mg && mg->mg_obj && SvRV(mg->mg_obj) && SvPVX(SvRV(mg->mg_obj))) */
+ if (mg && mg->mg_obj && SvRV(mg->mg_obj) )
+ info = SvRV(mg->mg_obj) ;
+ else
+ info = s ;
+ }
+
+ Trace(("end of GetInternalObject %d\n", info)) ;
+ return info ;
+}
+
+static int
+btree_compare(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ void * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ data1 = key1->data ;
+ data2 = key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("in btree_compare - expected 1 return value from compare sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ return (retval) ;
+
+}
+
+static int
+dup_compare(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ void * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ Trace(("In dup_compare \n")) ;
+ if (!CurrentDB)
+ softCrash("Internal Error - No CurrentDB in dup_compare") ;
+ if (CurrentDB->dup_compare == NULL)
+ softCrash("in dup_compare: no callback specified for database '%s'", CurrentDB->filename) ;
+
+ data1 = key1->data ;
+ data2 = key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->dup_compare, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("dup_compare: expected 1 return value from compare sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ return (retval) ;
+
+}
+
+static size_t
+btree_prefix(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ void * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ data1 = key1->data ;
+ data2 = key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("btree_prefix: expected 1 return value from prefix sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+static u_int32_t
+hash_cb(DB_callback const void * data, u_int32_t size)
+{
+ dSP ;
+ int retval ;
+ int count ;
+
+#ifndef newSVpvn
+ if (size == 0)
+ data = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+
+ XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("hash_cb: expected 1 return value from hash sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+static void
+db_errcall_cb(const char * db_errpfx, char * buffer)
+{
+#if 0
+
+ if (db_errpfx == NULL)
+ db_errpfx = "" ;
+ if (buffer == NULL )
+ buffer = "" ;
+ ErrBuff[0] = '\0';
+ if (strlen(db_errpfx) + strlen(buffer) + 3 <= 1000) {
+ if (*db_errpfx != '\0') {
+ strcat(ErrBuff, db_errpfx) ;
+ strcat(ErrBuff, ": ") ;
+ }
+ strcat(ErrBuff, buffer) ;
+ }
+
+#endif
+
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ if (sv) {
+ if (db_errpfx)
+ sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
+ else
+ sv_setpv(sv, buffer) ;
+ }
+}
+
+static SV *
+readHash(HV * hash, char * key)
+{
+ SV ** svp;
+ svp = hv_fetch(hash, key, strlen(key), FALSE);
+ if (svp && SvOK(*svp))
+ return *svp ;
+ return NULL ;
+}
+
+static void
+hash_delete(char * hash, IV key)
+{
+ HV * hv = perl_get_hv(hash, TRUE);
+ (void) hv_delete(hv, (char*)&key, sizeof(key), G_DISCARD);
+}
+
+static void
+hash_store_iv(char * hash, IV key, IV value)
+{
+ HV * hv = perl_get_hv(hash, TRUE);
+ SV ** ret = hv_store(hv, (char*)&key, sizeof(key), newSViv(value), 0);
+ /* printf("hv_store returned %d\n", ret) ; */
+}
+
+static void
+hv_store_iv(HV * hash, char * key, IV value)
+{
+ hv_store(hash, key, strlen(key), newSViv(value), 0);
+}
+
+static BerkeleyDB
+my_db_open(
+ BerkeleyDB db ,
+ SV * ref,
+ SV * ref_dbenv ,
+ BerkeleyDB__Env dbenv ,
+ const char * file,
+ const char * subname,
+ DBTYPE type,
+ int flags,
+ int mode,
+ DB_INFO * info
+ )
+{
+ DB_ENV * env = NULL ;
+ BerkeleyDB RETVAL = NULL ;
+ DB * dbp ;
+ int Status ;
+
+ Trace(("_db_open(dbenv[%lu] ref_dbenv [%lu] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
+ dbenv, ref_dbenv, file, subname, type, flags, mode)) ;
+
+ CurrentDB = db ;
+ if (dbenv)
+ env = dbenv->Env ;
+
+#if DB_VERSION_MAJOR == 2
+ if (subname)
+ softCrash("Subname needs Berkeley DB 3 or better") ;
+#endif
+
+#if DB_VERSION_MAJOR > 2
+ Status = db_create(&dbp, env, 0) ;
+ Trace(("db_create returned %s\n", my_db_strerror(Status))) ;
+ if (Status)
+ return RETVAL ;
+
+ if (info->re_source) {
+ Status = dbp->set_re_source(dbp, info->re_source) ;
+ Trace(("set_re_source [%s] returned %s\n",
+ info->re_source, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_cachesize) {
+ Status = dbp->set_cachesize(dbp, 0, info->db_cachesize, 0) ;
+ Trace(("set_cachesize [%d] returned %s\n",
+ info->db_cachesize, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_lorder) {
+ Status = dbp->set_lorder(dbp, info->db_lorder) ;
+ Trace(("set_lorder [%d] returned %s\n",
+ info->db_lorder, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_pagesize) {
+ Status = dbp->set_pagesize(dbp, info->db_pagesize) ;
+ Trace(("set_pagesize [%d] returned %s\n",
+ info->db_pagesize, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_ffactor) {
+ Status = dbp->set_h_ffactor(dbp, info->h_ffactor) ;
+ Trace(("set_h_ffactor [%d] returned %s\n",
+ info->h_ffactor, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_nelem) {
+ Status = dbp->set_h_nelem(dbp, info->h_nelem) ;
+ Trace(("set_h_nelem [%d] returned %s\n",
+ info->h_nelem, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_minkey) {
+ Status = dbp->set_bt_minkey(dbp, info->bt_minkey) ;
+ Trace(("set_bt_minkey [%d] returned %s\n",
+ info->bt_minkey, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_compare) {
+ Status = dbp->set_bt_compare(dbp, info->bt_compare) ;
+ Trace(("set_bt_compare [%d] returned %s\n",
+ info->bt_compare, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_hash) {
+ Status = dbp->set_h_hash(dbp, info->h_hash) ;
+ Trace(("set_h_hash [%d] returned %s\n",
+ info->h_hash, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->dup_compare) {
+ Status = dbp->set_dup_compare(dbp, info->dup_compare) ;
+ Trace(("set_dup_compare [%d] returned %s\n",
+ info->dup_compare, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_prefix) {
+ Status = dbp->set_bt_prefix(dbp, info->bt_prefix) ;
+ Trace(("set_bt_prefix [%d] returned %s\n",
+ info->bt_prefix, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_len) {
+ Status = dbp->set_re_len(dbp, info->re_len) ;
+ Trace(("set_re_len [%d] returned %s\n",
+ info->re_len, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_delim) {
+ Status = dbp->set_re_delim(dbp, info->re_delim) ;
+ Trace(("set_re_delim [%d] returned %s\n",
+ info->re_delim, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_pad) {
+ Status = dbp->set_re_pad(dbp, info->re_pad) ;
+ Trace(("set_re_pad [%d] returned %s\n",
+ info->re_pad, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->flags) {
+ Status = dbp->set_flags(dbp, info->flags) ;
+ Trace(("set_flags [%d] returned %s\n",
+ info->flags, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->q_extentsize) {
+#ifdef AT_LEAST_DB_3_2
+ Status = dbp->set_q_extentsize(dbp, info->q_extentsize) ;
+ Trace(("set_flags [%d] returned %s\n",
+ info->flags, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+#else
+ softCrash("-ExtentSize needs at least Berkeley DB 3.2.x") ;
+#endif
+ }
+
+ if ((Status = (dbp->open)(dbp, file, subname, type, flags, mode)) == 0) {
+#else /* DB_VERSION_MAJOR == 2 */
+ if ((Status = db_open(file, type, flags, mode, env, info, &dbp)) == 0) {
+#endif /* DB_VERSION_MAJOR == 2 */
+
+ Trace(("db_opened\n"));
+ RETVAL = db ;
+ RETVAL->dbp = dbp ;
+#if DB_VERSION_MAJOR == 2
+ RETVAL->type = dbp->type ;
+#else /* DB_VERSION_MAJOR > 2 */
+ RETVAL->type = dbp->get_type(dbp) ;
+#endif /* DB_VERSION_MAJOR > 2 */
+ RETVAL->recno_or_queue = (RETVAL->type == DB_RECNO ||
+ RETVAL->type == DB_QUEUE) ;
+ RETVAL->filename = my_strdup(file) ;
+ RETVAL->Status = Status ;
+ RETVAL->active = TRUE ;
+ hash_store_iv("BerkeleyDB::Term::Db", (IV)RETVAL, 1) ;
+ Trace((" storing %d %d in BerkeleyDB::Term::Db\n", RETVAL, dbp)) ;
+ if (dbenv) {
+ RETVAL->parent_env = dbenv ;
+ dbenv->Status = Status ;
+ ++ dbenv->open_dbs ;
+ }
+ }
+ else {
+#if DB_VERSION_MAJOR > 2
+ (dbp->close)(dbp, 0) ;
+#endif
+ destroyDB(db) ;
+ Trace(("db open returned %s\n", my_db_strerror(Status))) ;
+ }
+
+ return RETVAL ;
+}
+
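+/* constant maps a DB_* symbol name onto its numeric value for the
+ * Perl-level constant() routine.  Names are dispatched on their first
+ * character; symbols the installed Berkeley DB headers do not define
+ * jump to not_there (errno = ENOENT), and unrecognised names set
+ * errno = EINVAL.
+ */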
+static double
+constant(char * name, int arg)
+{
+ errno = 0;
+ switch (*name) {
+ case 'A':
+ break;
+ case 'B':
+ break;
+ case 'C':
+ break;
+ case 'D':
+ if (strEQ(name, "DB_AFTER"))
+#ifdef DB_AFTER
+ return DB_AFTER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_APPEND"))
+#ifdef DB_APPEND
+ return DB_APPEND;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ARCH_ABS"))
+#ifdef DB_ARCH_ABS
+ return DB_ARCH_ABS;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ARCH_DATA"))
+#ifdef DB_ARCH_DATA
+ return DB_ARCH_DATA;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ARCH_LOG"))
+#ifdef DB_ARCH_LOG
+ return DB_ARCH_LOG;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_BEFORE"))
+#ifdef DB_BEFORE
+ return DB_BEFORE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_BTREE"))
+ return DB_BTREE;
+ if (strEQ(name, "DB_BTREEMAGIC"))
+#ifdef DB_BTREEMAGIC
+ return DB_BTREEMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_BTREEOLDVER"))
+#ifdef DB_BTREEOLDVER
+ return DB_BTREEOLDVER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_BTREEVERSION"))
+#ifdef DB_BTREEVERSION
+ return DB_BTREEVERSION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CHECKPOINT"))
+#ifdef DB_CHECKPOINT
+ return DB_CHECKPOINT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CONSUME"))
+#ifdef DB_CONSUME
+ return DB_CONSUME;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CREATE"))
+#ifdef DB_CREATE
+ return DB_CREATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CURLSN"))
+#ifdef DB_CURLSN
+ return DB_CURLSN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CURRENT"))
+#ifdef DB_CURRENT
+ return DB_CURRENT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DBT_MALLOC"))
+#ifdef DB_DBT_MALLOC
+ return DB_DBT_MALLOC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DBT_PARTIAL"))
+#ifdef DB_DBT_PARTIAL
+ return DB_DBT_PARTIAL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DBT_USERMEM"))
+#ifdef DB_DBT_USERMEM
+ return DB_DBT_USERMEM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DELETED"))
+#ifdef DB_DELETED
+ return DB_DELETED;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DELIMITER"))
+#ifdef DB_DELIMITER
+ return DB_DELIMITER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DUP"))
+#ifdef DB_DUP
+ return DB_DUP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DUPSORT"))
+#ifdef DB_DUPSORT
+ return DB_DUPSORT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ENV_APPINIT"))
+#ifdef DB_ENV_APPINIT
+ return DB_ENV_APPINIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ENV_STANDALONE"))
+#ifdef DB_ENV_STANDALONE
+ return DB_ENV_STANDALONE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ENV_THREAD"))
+#ifdef DB_ENV_THREAD
+ return DB_ENV_THREAD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_EXCL"))
+#ifdef DB_EXCL
+ return DB_EXCL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FILE_ID_LEN"))
+#ifdef DB_FILE_ID_LEN
+ return DB_FILE_ID_LEN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FIRST"))
+#ifdef DB_FIRST
+ return DB_FIRST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FIXEDLEN"))
+#ifdef DB_FIXEDLEN
+ return DB_FIXEDLEN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FLUSH"))
+#ifdef DB_FLUSH
+ return DB_FLUSH;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FORCE"))
+#ifdef DB_FORCE
+ return DB_FORCE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_GET_BOTH"))
+#ifdef DB_GET_BOTH
+ return DB_GET_BOTH;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_GET_RECNO"))
+#ifdef DB_GET_RECNO
+ return DB_GET_RECNO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_HASH"))
+ return DB_HASH;
+ if (strEQ(name, "DB_HASHMAGIC"))
+#ifdef DB_HASHMAGIC
+ return DB_HASHMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_HASHOLDVER"))
+#ifdef DB_HASHOLDVER
+ return DB_HASHOLDVER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_HASHVERSION"))
+#ifdef DB_HASHVERSION
+ return DB_HASHVERSION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INCOMPLETE"))
+#ifdef DB_INCOMPLETE
+ return DB_INCOMPLETE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_CDB"))
+#ifdef DB_INIT_CDB
+ return DB_INIT_CDB;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_LOCK"))
+#ifdef DB_INIT_LOCK
+ return DB_INIT_LOCK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_LOG"))
+#ifdef DB_INIT_LOG
+ return DB_INIT_LOG;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_MPOOL"))
+#ifdef DB_INIT_MPOOL
+ return DB_INIT_MPOOL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_TXN"))
+#ifdef DB_INIT_TXN
+ return DB_INIT_TXN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_JOIN_ITEM"))
+#ifdef DB_JOIN_ITEM
+ return DB_JOIN_ITEM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_KEYEMPTY"))
+#ifdef DB_KEYEMPTY
+ return DB_KEYEMPTY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_KEYEXIST"))
+#ifdef DB_KEYEXIST
+ return DB_KEYEXIST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_KEYFIRST"))
+#ifdef DB_KEYFIRST
+ return DB_KEYFIRST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_KEYLAST"))
+#ifdef DB_KEYLAST
+ return DB_KEYLAST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LAST"))
+#ifdef DB_LAST
+ return DB_LAST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCKMAGIC"))
+#ifdef DB_LOCKMAGIC
+ return DB_LOCKMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCKVERSION"))
+#ifdef DB_LOCKVERSION
+ return DB_LOCKVERSION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_CONFLICT"))
+#ifdef DB_LOCK_CONFLICT
+ return DB_LOCK_CONFLICT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_DEADLOCK"))
+#ifdef DB_LOCK_DEADLOCK
+ return DB_LOCK_DEADLOCK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_DEFAULT"))
+#ifdef DB_LOCK_DEFAULT
+ return DB_LOCK_DEFAULT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_GET"))
+ return DB_LOCK_GET;
+ if (strEQ(name, "DB_LOCK_NORUN"))
+#ifdef DB_LOCK_NORUN
+ return DB_LOCK_NORUN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_NOTGRANTED"))
+#ifdef DB_LOCK_NOTGRANTED
+ return DB_LOCK_NOTGRANTED;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_NOTHELD"))
+#ifdef DB_LOCK_NOTHELD
+ return DB_LOCK_NOTHELD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_NOWAIT"))
+#ifdef DB_LOCK_NOWAIT
+ return DB_LOCK_NOWAIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_OLDEST"))
+#ifdef DB_LOCK_OLDEST
+ return DB_LOCK_OLDEST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_RANDOM"))
+#ifdef DB_LOCK_RANDOM
+ return DB_LOCK_RANDOM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_RIW_N"))
+#ifdef DB_LOCK_RIW_N
+ return DB_LOCK_RIW_N;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_RW_N"))
+#ifdef DB_LOCK_RW_N
+ return DB_LOCK_RW_N;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_YOUNGEST"))
+#ifdef DB_LOCK_YOUNGEST
+ return DB_LOCK_YOUNGEST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOGMAGIC"))
+#ifdef DB_LOGMAGIC
+ return DB_LOGMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOGOLDVER"))
+#ifdef DB_LOGOLDVER
+ return DB_LOGOLDVER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MAX_PAGES"))
+#ifdef DB_MAX_PAGES
+ return DB_MAX_PAGES;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MAX_RECORDS"))
+#ifdef DB_MAX_RECORDS
+ return DB_MAX_RECORDS;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_CLEAN"))
+#ifdef DB_MPOOL_CLEAN
+ return DB_MPOOL_CLEAN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_CREATE"))
+#ifdef DB_MPOOL_CREATE
+ return DB_MPOOL_CREATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_DIRTY"))
+#ifdef DB_MPOOL_DIRTY
+ return DB_MPOOL_DIRTY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_DISCARD"))
+#ifdef DB_MPOOL_DISCARD
+ return DB_MPOOL_DISCARD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_LAST"))
+#ifdef DB_MPOOL_LAST
+ return DB_MPOOL_LAST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_NEW"))
+#ifdef DB_MPOOL_NEW
+ return DB_MPOOL_NEW;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_PRIVATE"))
+#ifdef DB_MPOOL_PRIVATE
+ return DB_MPOOL_PRIVATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MUTEXDEBUG"))
+#ifdef DB_MUTEXDEBUG
+ return DB_MUTEXDEBUG;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MUTEXLOCKS"))
+#ifdef DB_MUTEXLOCKS
+ return DB_MUTEXLOCKS;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NEEDSPLIT"))
+#ifdef DB_NEEDSPLIT
+ return DB_NEEDSPLIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NEXT"))
+#ifdef DB_NEXT
+ return DB_NEXT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NEXT_DUP"))
+#ifdef DB_NEXT_DUP
+ return DB_NEXT_DUP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NOMMAP"))
+#ifdef DB_NOMMAP
+ return DB_NOMMAP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NOOVERWRITE"))
+#ifdef DB_NOOVERWRITE
+ return DB_NOOVERWRITE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NOSYNC"))
+#ifdef DB_NOSYNC
+ return DB_NOSYNC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NOTFOUND"))
+#ifdef DB_NOTFOUND
+ return DB_NOTFOUND;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_PAD"))
+#ifdef DB_PAD
+ return DB_PAD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_PAGEYIELD"))
+#ifdef DB_PAGEYIELD
+ return DB_PAGEYIELD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_POSITION"))
+#ifdef DB_POSITION
+ return DB_POSITION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_PREV"))
+#ifdef DB_PREV
+ return DB_PREV;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_PRIVATE"))
+#ifdef DB_PRIVATE
+ return DB_PRIVATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_QUEUE"))
+ return DB_QUEUE;
+ if (strEQ(name, "DB_RDONLY"))
+#ifdef DB_RDONLY
+ return DB_RDONLY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RECNO"))
+ return DB_RECNO;
+ if (strEQ(name, "DB_RECNUM"))
+#ifdef DB_RECNUM
+ return DB_RECNUM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RECORDCOUNT"))
+#ifdef DB_RECORDCOUNT
+ return DB_RECORDCOUNT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RECOVER"))
+#ifdef DB_RECOVER
+ return DB_RECOVER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RECOVER_FATAL"))
+#ifdef DB_RECOVER_FATAL
+ return DB_RECOVER_FATAL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_REGISTERED"))
+#ifdef DB_REGISTERED
+ return DB_REGISTERED;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RENUMBER"))
+#ifdef DB_RENUMBER
+ return DB_RENUMBER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RMW"))
+#ifdef DB_RMW
+ return DB_RMW;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RUNRECOVERY"))
+#ifdef DB_RUNRECOVERY
+ return DB_RUNRECOVERY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SEQUENTIAL"))
+#ifdef DB_SEQUENTIAL
+ return DB_SEQUENTIAL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SET"))
+#ifdef DB_SET
+ return DB_SET;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SET_RANGE"))
+#ifdef DB_SET_RANGE
+ return DB_SET_RANGE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SET_RECNO"))
+#ifdef DB_SET_RECNO
+ return DB_SET_RECNO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SNAPSHOT"))
+#ifdef DB_SNAPSHOT
+ return DB_SNAPSHOT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SWAPBYTES"))
+#ifdef DB_SWAPBYTES
+ return DB_SWAPBYTES;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TEMPORARY"))
+#ifdef DB_TEMPORARY
+ return DB_TEMPORARY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_THREAD"))
+#ifdef DB_THREAD
+ return DB_THREAD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TRUNCATE"))
+#ifdef DB_TRUNCATE
+ return DB_TRUNCATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXNMAGIC"))
+#ifdef DB_TXNMAGIC
+ return DB_TXNMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXNVERSION"))
+#ifdef DB_TXNVERSION
+ return DB_TXNVERSION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_BACKWARD_ROLL"))
+ return DB_TXN_BACKWARD_ROLL;
+ if (strEQ(name, "DB_TXN_CKP"))
+#ifdef DB_TXN_CKP
+ return DB_TXN_CKP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_FORWARD_ROLL"))
+ return DB_TXN_FORWARD_ROLL;
+ if (strEQ(name, "DB_TXN_LOCK_2PL"))
+#ifdef DB_TXN_LOCK_2PL
+ return DB_TXN_LOCK_2PL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOCK_MASK"))
+#ifdef DB_TXN_LOCK_MASK
+ return DB_TXN_LOCK_MASK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOCK_OPTIMIST"))
+#ifdef DB_TXN_LOCK_OPTIMIST
+ return DB_TXN_LOCK_OPTIMIST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOCK_OPTIMISTIC"))
+#ifdef DB_TXN_LOCK_OPTIMISTIC
+ return DB_TXN_LOCK_OPTIMISTIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOG_MASK"))
+#ifdef DB_TXN_LOG_MASK
+ return DB_TXN_LOG_MASK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOG_REDO"))
+#ifdef DB_TXN_LOG_REDO
+ return DB_TXN_LOG_REDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOG_UNDO"))
+#ifdef DB_TXN_LOG_UNDO
+ return DB_TXN_LOG_UNDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOG_UNDOREDO"))
+#ifdef DB_TXN_LOG_UNDOREDO
+ return DB_TXN_LOG_UNDOREDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_NOSYNC"))
+#ifdef DB_TXN_NOSYNC
+ return DB_TXN_NOSYNC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_NOWAIT"))
+#ifdef DB_TXN_NOWAIT
+ return DB_TXN_NOWAIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_OPENFILES"))
+ return DB_TXN_OPENFILES;
+ if (strEQ(name, "DB_TXN_REDO"))
+#ifdef DB_TXN_REDO
+ return DB_TXN_REDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_SYNC"))
+#ifdef DB_TXN_SYNC
+ return DB_TXN_SYNC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_UNDO"))
+#ifdef DB_TXN_UNDO
+ return DB_TXN_UNDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_UNKNOWN"))
+ return DB_UNKNOWN;
+ if (strEQ(name, "DB_USE_ENVIRON"))
+#ifdef DB_USE_ENVIRON
+ return DB_USE_ENVIRON;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_USE_ENVIRON_ROOT"))
+#ifdef DB_USE_ENVIRON_ROOT
+ return DB_USE_ENVIRON_ROOT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_VERSION_MAJOR"))
+#ifdef DB_VERSION_MAJOR
+ return DB_VERSION_MAJOR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_VERSION_MINOR"))
+#ifdef DB_VERSION_MINOR
+ return DB_VERSION_MINOR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_VERSION_PATCH"))
+#ifdef DB_VERSION_PATCH
+ return DB_VERSION_PATCH;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_WRITECURSOR"))
+#ifdef DB_WRITECURSOR
+ return DB_WRITECURSOR;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'E':
+ break;
+ case 'F':
+ break;
+ case 'G':
+ break;
+ case 'H':
+ break;
+ case 'I':
+ break;
+ case 'J':
+ break;
+ case 'K':
+ break;
+ case 'L':
+ break;
+ case 'M':
+ break;
+ case 'N':
+ break;
+ case 'O':
+ break;
+ case 'P':
+ break;
+ case 'Q':
+ break;
+ case 'R':
+ break;
+ case 'S':
+ break;
+ case 'T':
+ break;
+ case 'U':
+ break;
+ case 'V':
+ break;
+ case 'W':
+ break;
+ case 'X':
+ break;
+ case 'Y':
+ break;
+ case 'Z':
+ break;
+ case 'a':
+ break;
+ case 'b':
+ break;
+ case 'c':
+ break;
+ case 'd':
+ break;
+ case 'e':
+ break;
+ case 'f':
+ break;
+ case 'g':
+ break;
+ case 'h':
+ break;
+ case 'i':
+ break;
+ case 'j':
+ break;
+ case 'k':
+ break;
+ case 'l':
+ break;
+ case 'm':
+ break;
+ case 'n':
+ break;
+ case 'o':
+ break;
+ case 'p':
+ break;
+ case 'q':
+ break;
+ case 'r':
+ break;
+ case 's':
+ break;
+ case 't':
+ break;
+ case 'u':
+ break;
+ case 'v':
+ break;
+ case 'w':
+ break;
+ case 'x':
+ break;
+ case 'y':
+ break;
+ case 'z':
+ break;
+ }
+ errno = EINVAL;
+ return 0;
+
+not_there:
+ errno = ENOENT;
+ return 0;
+}
+
+
+MODULE = BerkeleyDB PACKAGE = BerkeleyDB PREFIX = env_
+
+char *
+DB_VERSION_STRING()
+ CODE:
+ RETVAL = DB_VERSION_STRING ;
+ OUTPUT:
+ RETVAL
+
+
+double
+constant(name,arg)
+ char * name
+ int arg
+
+#define env_db_version(maj, min, patch) db_version(&maj, &min, &patch)
+char *
+env_db_version(maj, min, patch)
+ int maj
+ int min
+ int patch
+ OUTPUT:
+ RETVAL
+ maj
+ min
+ patch
+
+int
+db_value_set(value, which)
+ int value
+ int which
+ NOT_IMPLEMENTED_YET
+
+
+DualType
+_db_remove(ref)
+ SV * ref
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("BerkeleyDB::db_remove needs Berkeley DB 3.x or better") ;
+#else
+ HV * hash ;
+ DB * dbp ;
+ SV * sv ;
+ const char * db ;
+ const char * subdb = NULL ;
+ BerkeleyDB__Env env = NULL ;
+ DB_ENV * dbenv = NULL ;
+ u_int32_t flags = 0 ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(db, "Filename", char *) ;
+ SetValue_pv(subdb, "Subname", char *) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+ if (env)
+ dbenv = env->Env ;
+ RETVAL = db_create(&dbp, dbenv, 0) ;
+ if (RETVAL == 0) {
+ RETVAL = dbp->remove(dbp, db, subdb, flags) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+MODULE = BerkeleyDB::Env PACKAGE = BerkeleyDB::Env PREFIX = env_
+
+
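+# _db_appinit creates a BerkeleyDB::Env.  It unpacks the option hash
+# (Home, Config, ErrPrefix, ErrFile, Flags, Server, Cachesize,
+# LockDetect, Mode), then either fills in a DB_ENV and calls
+# db_appinit() (DB 2.x) or calls db_env_create(), applies the cache,
+# lock-detect and server settings and opens the environment (DB 3.x).
+# On failure everything allocated so far is freed and a NULL handle is
+# returned.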
+BerkeleyDB::Env::Raw
+_db_appinit(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ char * home = NULL ;
+ char * server = NULL ;
+ char ** config = NULL ;
+ int flags = 0 ;
+ int cachesize = 0 ;
+ int lk_detect = 0 ;
+ int mode = 0 ;
+ SV * errprefix = NULL;
+ DB_ENV * env ;
+ int status ;
+
+ Trace(("in _db_appinit [%s] %d\n", self, ref)) ;
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(home, "Home", char *) ;
+ SetValue_pv(config, "Config", char **) ;
+ SetValue_sv(errprefix, "ErrPrefix") ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_pv(server, "Server", char *) ;
+ SetValue_iv(cachesize, "Cachesize") ;
+ SetValue_iv(lk_detect, "LockDetect") ;
+#ifndef AT_LEAST_DB_3_1
+ if (server)
+ softCrash("-Server needs Berkeley DB 3.1 or better") ;
+#endif /* ! AT_LEAST_DB_3_1 */
+ Trace(("_db_appinit(config=[%d], home=[%s],errprefix=[%s],flags=[%d]\n",
+ config, home, errprefix, flags)) ;
+#ifdef TRACE
+ if (config) {
+ int i ;
+ for (i = 0 ; i < 10 ; ++ i) {
+ if (config[i] == NULL) {
+ printf(" End\n") ;
+ break ;
+ }
+ printf(" config = [%s]\n", config[i]) ;
+ }
+ }
+#endif /* TRACE */
+ ZMALLOC(RETVAL, BerkeleyDB_ENV_type) ;
+ if (flags & DB_INIT_TXN)
+ RETVAL->txn_enabled = TRUE ;
+#if DB_VERSION_MAJOR == 2
+ ZMALLOC(RETVAL->Env, DB_ENV) ;
+ env = RETVAL->Env ;
+ {
+ /* Take a copy of the error prefix */
+ if (errprefix) {
+ Trace(("copying errprefix\n" )) ;
+ RETVAL->ErrPrefix = newSVsv(errprefix) ;
+ SvPOK_only(RETVAL->ErrPrefix) ;
+ }
+ if (RETVAL->ErrPrefix)
+ RETVAL->Env->db_errpfx = SvPVX(RETVAL->ErrPrefix) ;
+
+ if ((sv = readHash(hash, "ErrFile")) && sv != &PL_sv_undef) {
+ env->db_errfile = IoOFP(sv_2io(sv)) ;
+ RETVAL->ErrHandle = newRV(sv) ;
+ }
+ /* SetValue_io(RETVAL->Env.db_errfile, "ErrFile") ; */
+ SetValue_iv(env->db_verbose, "Verbose") ;
+ /* env->db_errbuf = RETVAL->ErrBuff ; */
+ env->db_errcall = db_errcall_cb ;
+ RETVAL->active = TRUE ;
+ status = db_appinit(home, config, env, flags) ;
+ Trace((" status = %d env %d Env %d\n", status, RETVAL, env)) ;
+ if (status == 0)
+ hash_store_iv("BerkeleyDB::Term::Env", (IV)RETVAL, 1) ;
+ else {
+ if (RETVAL->ErrHandle)
+ SvREFCNT_dec(RETVAL->ErrHandle) ;
+ if (RETVAL->ErrPrefix)
+ SvREFCNT_dec(RETVAL->ErrPrefix) ;
+ Safefree(RETVAL->Env) ;
+ Safefree(RETVAL) ;
+ RETVAL = NULL ;
+ }
+ }
+#else /* DB_VERSION_MAJOR > 2 */
+#ifndef AT_LEAST_DB_3_1
+# define DB_CLIENT 0
+#endif
+ status = db_env_create(&RETVAL->Env, server ? DB_CLIENT : 0) ;
+ Trace(("db_env_create flags = %d returned %s\n", flags,
+ my_db_strerror(status))) ;
+ env = RETVAL->Env ;
+ if (status == 0 && cachesize) {
+ status = env->set_cachesize(env, 0, cachesize, 0) ;
+ Trace(("set_cachesize [%d] returned %s\n",
+ cachesize, my_db_strerror(status)));
+ }
+
+ if (status == 0 && lk_detect) {
+ status = env->set_lk_detect(env, lk_detect) ;
+ Trace(("set_lk_detect [%d] returned %s\n",
+ lk_detect, my_db_strerror(status)));
+ }
+#ifdef AT_LEAST_DB_3_1
+ /* set the server */
+ if (server && status == 0)
+ {
+ status = env->set_server(env, server, 0, 0, 0);
+ Trace(("ENV->set_server server = %s returned %s\n", server,
+ my_db_strerror(status))) ;
+ }
+#endif
+ if (status == 0)
+ {
+ /* Take a copy of the error prefix */
+ if (errprefix) {
+ Trace(("copying errprefix\n" )) ;
+ RETVAL->ErrPrefix = newSVsv(errprefix) ;
+ SvPOK_only(RETVAL->ErrPrefix) ;
+ }
+ if (RETVAL->ErrPrefix)
+ env->set_errpfx(env, SvPVX(RETVAL->ErrPrefix)) ;
+
+ if ((sv = readHash(hash, "ErrFile")) && sv != &PL_sv_undef) {
+ env->set_errfile(env, IoOFP(sv_2io(sv))) ;
+ RETVAL->ErrHandle = newRV(sv) ;
+ }
+ /* SetValue_iv(RETVAL->Env.db_verbose, "Verbose") ; */ /* TODO */
+ SetValue_iv(mode, "Mode") ;
+ /* RETVAL->Env.db_errbuf = RETVAL->ErrBuff ; */
+ env->set_errcall(env, db_errcall_cb) ;
+ RETVAL->active = TRUE ;
+#ifdef IS_DB_3_0
+ status = (env->open)(env, home, config, flags, mode) ;
+#else /* > 3.0 */
+ status = (env->open)(env, home, flags, mode) ;
+#endif
+ Trace(("ENV->open returned %s\n", my_db_strerror(status))) ;
+ }
+
+ if (status == 0)
+ hash_store_iv("BerkeleyDB::Term::Env", (IV)RETVAL, 1) ;
+ else {
+ (env->close)(env, 0) ;
+ if (RETVAL->ErrHandle)
+ SvREFCNT_dec(RETVAL->ErrHandle) ;
+ if (RETVAL->ErrPrefix)
+ SvREFCNT_dec(RETVAL->ErrPrefix) ;
+ Safefree(RETVAL) ;
+ RETVAL = NULL ;
+ }
+#endif /* DB_VERSION_MAJOR > 2 */
+ }
+ OUTPUT:
+ RETVAL
+
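+# _txn_begin starts a transaction in this environment, optionally as a
+# child of an existing BerkeleyDB::Txn.  Transactions must have been
+# enabled when the environment was created; the new DB_TXN is wrapped
+# in a BerkeleyDB_Txn_type and registered in BerkeleyDB::Term::Txn.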
+BerkeleyDB::Txn::Raw
+_txn_begin(env, pid=NULL, flags=0)
+ BerkeleyDB::Env env
+ BerkeleyDB::Txn pid
+ u_int32_t flags
+ CODE:
+ {
+ DB_TXN *txn ;
+ DB_TXN *p_id = NULL ;
+ Trace(("txn_begin pid %d, flags %d\n", pid, flags)) ;
+#if DB_VERSION_MAJOR == 2
+ if (env->Env->tx_info == NULL)
+ softCrash("Transaction Manager not enabled") ;
+#endif
+ if (!env->txn_enabled)
+ softCrash("Transaction Manager not enabled") ;
+ if (pid)
+ p_id = pid->txn ;
+ env->TxnMgrStatus =
+#if DB_VERSION_MAJOR == 2
+ txn_begin(env->Env->tx_info, p_id, &txn) ;
+#else
+ txn_begin(env->Env, p_id, &txn, flags) ;
+#endif
+ if (env->TxnMgrStatus == 0) {
+ ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
+ RETVAL->txn = txn ;
+ RETVAL->active = TRUE ;
+ Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
+ hash_store_iv("BerkeleyDB::Term::Txn", (IV)RETVAL, 1) ;
+ }
+ else
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+#if DB_VERSION_MAJOR == 2
+# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env->tx_info, k, m)
+#else /* DB 3.0 or better */
+# ifdef AT_LEAST_DB_3_1
+# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env, k, m, 0)
+# else
+# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env, k, m)
+# endif
+#endif
+DualType
+env_txn_checkpoint(env, kbyte, min)
+ BerkeleyDB::Env env
+ long kbyte
+ long min
+
+HV *
+txn_stat(env)
+ BerkeleyDB::Env env
+ HV * RETVAL = NULL ;
+ CODE:
+ {
+ DB_TXN_STAT * stat ;
+#if DB_VERSION_MAJOR == 2
+ if(txn_stat(env->Env->tx_info, &stat, safemalloc) == 0) {
+#else
+ if(txn_stat(env->Env, &stat, safemalloc) == 0) {
+#endif
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
+ hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
+ hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
+ hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
+ hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
+ hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
+ hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
+ hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
+ hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
+ hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
+#endif
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+#define EnDis(x) ((x) ? "Enabled" : "Disabled")
+void
+printEnv(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+#if 0
+ printf("env [0x%X]\n", env) ;
+ printf(" ErrPrefix [%s]\n", env->ErrPrefix
+ ? SvPVX(env->ErrPrefix) : 0) ;
+ printf(" DB_ENV\n") ;
+ printf(" db_lorder [%d]\n", env->Env.db_lorder) ;
+ printf(" db_home [%s]\n", env->Env.db_home) ;
+ printf(" db_data_dir [%s]\n", env->Env.db_data_dir) ;
+ printf(" db_log_dir [%s]\n", env->Env.db_log_dir) ;
+ printf(" db_tmp_dir [%s]\n", env->Env.db_tmp_dir) ;
+ printf(" lk_info [%s]\n", EnDis(env->Env.lk_info)) ;
+ printf(" lk_max [%d]\n", env->Env.lk_max) ;
+ printf(" lg_info [%s]\n", EnDis(env->Env.lg_info)) ;
+ printf(" lg_max [%d]\n", env->Env.lg_max) ;
+ printf(" mp_info [%s]\n", EnDis(env->Env.mp_info)) ;
+ printf(" mp_size [%d]\n", env->Env.mp_size) ;
+ printf(" tx_info [%s]\n", EnDis(env->Env.tx_info)) ;
+ printf(" tx_max [%d]\n", env->Env.tx_max) ;
+ printf(" flags [%d]\n", env->Env.flags) ;
+ printf("\n") ;
+#endif
+
+SV *
+errPrefix(env, prefix)
+ BerkeleyDB::Env env
+ SV * prefix
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+ if (env->ErrPrefix) {
+ RETVAL = newSVsv(env->ErrPrefix) ;
+ SvPOK_only(RETVAL) ;
+ sv_setsv(env->ErrPrefix, prefix) ;
+ }
+ else {
+ RETVAL = NULL ;
+ env->ErrPrefix = newSVsv(prefix) ;
+ }
+ SvPOK_only(env->ErrPrefix) ;
+#if DB_VERSION_MAJOR == 2
+ env->Env->db_errpfx = SvPVX(env->ErrPrefix) ;
+#else
+ env->Env->set_errpfx(env->Env, SvPVX(env->ErrPrefix)) ;
+#endif
+ OUTPUT:
+ RETVAL
+
+DualType
+status(env)
+ BerkeleyDB::Env env
+ CODE:
+ RETVAL = env->Status ;
+ OUTPUT:
+ RETVAL
+
+DualType
+db_appexit(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+#ifdef STRICT_CLOSE
+ if (env->open_dbs)
+ softCrash("attempted to close an environment with %d open database(s)",
+ env->open_dbs) ;
+#endif /* STRICT_CLOSE */
+#if DB_VERSION_MAJOR == 2
+ RETVAL = db_appexit(env->Env) ;
+#else
+ RETVAL = (env->Env->close)(env->Env, 0) ;
+#endif
+ env->active = FALSE ;
+ hash_delete("BerkeleyDB::Term::Env", (IV)env) ;
+ OUTPUT:
+ RETVAL
+
+
+void
+_DESTROY(env)
+ BerkeleyDB::Env env
+ int RETVAL = 0 ;
+ CODE:
+ Trace(("In BerkeleyDB::Env::DESTROY\n"));
+ Trace((" env %ld Env %ld dirty %d\n", env, &env->Env, PL_dirty)) ;
+ if (env->active)
+#if DB_VERSION_MAJOR == 2
+ db_appexit(env->Env) ;
+#else
+ (env->Env->close)(env->Env, 0) ;
+#endif
+ if (env->ErrHandle)
+ SvREFCNT_dec(env->ErrHandle) ;
+ if (env->ErrPrefix)
+ SvREFCNT_dec(env->ErrPrefix) ;
+#if DB_VERSION_MAJOR == 2
+ Safefree(env->Env) ;
+#endif
+ Safefree(env) ;
+ hash_delete("BerkeleyDB::Term::Env", (IV)env) ;
+ Trace(("End of BerkeleyDB::Env::DESTROY %d\n", RETVAL)) ;
+
+BerkeleyDB::TxnMgr::Raw
+_TxnMgr(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ if (!env->txn_enabled)
+ softCrash("Transaction Manager not enabled") ;
+ CODE:
+ ZMALLOC(RETVAL, BerkeleyDB_TxnMgr_type) ;
+ RETVAL->env = env ;
+ /* hash_store_iv("BerkeleyDB::Term::TxnMgr", (IV)txn, 1) ; */
+ OUTPUT:
+ RETVAL
+
+int
+set_data_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_data_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_data_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_lg_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_lg_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_tmp_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_tmp_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_tmp_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_mutexlocks(env, do_lock)
+ BerkeleyDB::Env env
+ int do_lock
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_setmutexlocks needs Berkeley DB 3.0 or better") ;
+#else
+#if defined(IS_DB_3_0) || defined(AT_LEAST_DB_3_2)
+ RETVAL = env->Status = env->Env->set_mutexlocks(env->Env, do_lock);
+#else /* DB 3.1 */
+ RETVAL = env->Status = db_env_set_mutexlocks(do_lock);
+#endif
+#endif
+ OUTPUT:
+ RETVAL
+
+MODULE = BerkeleyDB::Term PACKAGE = BerkeleyDB::Term
+
+void
+close_everything()
+
+#define safeCroak(string) softCrash(string)
+void
+safeCroak(string)
+ char * string
+
+MODULE = BerkeleyDB::Hash PACKAGE = BerkeleyDB::Hash PREFIX = hash_
+
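+# _db_open_hash unpacks the option hash, installs hash_cb and
+# dup_compare as callbacks when Hash and DupCompare subs are supplied
+# (the latter also sets DB_DUP|DB_DUPSORT), and opens the database via
+# my_db_open with type DB_HASH.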
+BerkeleyDB::Hash::Raw
+_db_open_hash(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+
+ Trace(("_db_open_hash start\n")) ;
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char *) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.h_ffactor, "Ffactor") ;
+ SetValue_iv(info.h_nelem, "Nelem") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+ if ((sv = readHash(hash, "Hash")) && sv != &PL_sv_undef) {
+ info.h_hash = hash_cb ;
+ db->hash = newSVsv(sv) ;
+ }
+ /* DB_DUPSORT was introduced in DB 2.5.9 */
+ if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
+#ifdef DB_DUPSORT
+ info.dup_compare = dup_compare ;
+ db->dup_compare = newSVsv(sv) ;
+ info.flags |= DB_DUP|DB_DUPSORT ;
+#else
+ croak("DupCompare needs Berkeley DB 2.5.9 or later") ;
+#endif
+ }
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_HASH, flags, mode, &info) ;
+ Trace(("_db_open_hash end\n")) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+HV *
+db_stat(db, flags=0)
+ BerkeleyDB::Common db
+ int flags
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("$db->db_stat for a Hash needs Berkeley DB 3.x or better") ;
+#else
+ DB_HASH_STAT * stat ;
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "hash_magic", stat->hash_magic) ;
+ hv_store_iv(RETVAL, "hash_version", stat->hash_version);
+ hv_store_iv(RETVAL, "hash_pagesize", stat->hash_pagesize);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "hash_nkeys", stat->hash_nkeys);
+ hv_store_iv(RETVAL, "hash_ndata", stat->hash_ndata);
+#else
+ hv_store_iv(RETVAL, "hash_nrecs", stat->hash_nrecs);
+#endif
+ hv_store_iv(RETVAL, "hash_nelem", stat->hash_nelem);
+ hv_store_iv(RETVAL, "hash_ffactor", stat->hash_ffactor);
+ hv_store_iv(RETVAL, "hash_buckets", stat->hash_buckets);
+ hv_store_iv(RETVAL, "hash_free", stat->hash_free);
+ hv_store_iv(RETVAL, "hash_bfree", stat->hash_bfree);
+ hv_store_iv(RETVAL, "hash_bigpages", stat->hash_bigpages);
+ hv_store_iv(RETVAL, "hash_big_bfree", stat->hash_big_bfree);
+ hv_store_iv(RETVAL, "hash_overflows", stat->hash_overflows);
+ hv_store_iv(RETVAL, "hash_ovfl_free", stat->hash_ovfl_free);
+ hv_store_iv(RETVAL, "hash_dup", stat->hash_dup);
+ hv_store_iv(RETVAL, "hash_dup_free", stat->hash_dup_free);
+#if DB_VERSION_MAJOR >= 3
+ hv_store_iv(RETVAL, "hash_metaflags", stat->hash_metaflags);
+#endif
+ safefree(stat) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Unknown PACKAGE = BerkeleyDB::Unknown PREFIX = hash_
+
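+# _db_open_unknown opens an existing database with type DB_UNKNOWN and
+# returns two values on the Perl stack: the new handle and the name of
+# the type that was detected (looked up in Names[] from the handle's
+# type field).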
+void
+_db_open_unknown(ref)
+ SV * ref
+ PPCODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB RETVAL ;
+ static char * Names[] = {"", "Btree", "Hash", "Recno", "Queue"} ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char *) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.h_ffactor, "Ffactor") ;
+ SetValue_iv(info.h_nelem, "Nelem") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_UNKNOWN, flags, mode, &info) ;
+ XPUSHs(sv_2mortal(newSViv((IV)RETVAL)));
+ if (RETVAL)
+ XPUSHs(sv_2mortal(newSVpv(Names[RETVAL->type], 0))) ;
+ else
+ XPUSHs(sv_2mortal(newSViv((IV)NULL)));
+ }
+
+
+
+MODULE = BerkeleyDB::Btree PACKAGE = BerkeleyDB::Btree PREFIX = btree_
+
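+# _db_open_btree mirrors _db_open_hash: Compare, DupCompare and Prefix
+# subs are routed through btree_compare, dup_compare and btree_prefix
+# respectively before the database is opened via my_db_open with type
+# DB_BTREE.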
+BerkeleyDB::Btree::Raw
+_db_open_btree(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char*) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+ if ((sv = readHash(hash, "Compare")) && sv != &PL_sv_undef) {
+ info.bt_compare = btree_compare ;
+ db->compare = newSVsv(sv) ;
+ }
+ /* DB_DUPSORT was introduced in DB 2.5.9 */
+ if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
+#ifdef DB_DUPSORT
+ info.dup_compare = dup_compare ;
+ db->dup_compare = newSVsv(sv) ;
+ info.flags |= DB_DUP|DB_DUPSORT ;
+#else
+ softCrash("DupCompare needs Berkeley DB 2.5.9 or later") ;
+#endif
+ }
+ if ((sv = readHash(hash, "Prefix")) && sv != &PL_sv_undef) {
+ info.bt_prefix = btree_prefix ;
+ db->prefix = newSVsv(sv) ;
+ }
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_BTREE, flags, mode, &info) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+HV *
+db_stat(db, flags=0)
+ BerkeleyDB::Common db
+ int flags
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+ DB_BTREE_STAT * stat ;
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "bt_magic", stat->bt_magic);
+ hv_store_iv(RETVAL, "bt_version", stat->bt_version);
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "bt_metaflags", stat->bt_metaflags) ;
+ hv_store_iv(RETVAL, "bt_flags", stat->bt_metaflags) ;
+#else
+ hv_store_iv(RETVAL, "bt_flags", stat->bt_flags) ;
+#endif
+ hv_store_iv(RETVAL, "bt_maxkey", stat->bt_maxkey) ;
+ hv_store_iv(RETVAL, "bt_minkey", stat->bt_minkey);
+ hv_store_iv(RETVAL, "bt_re_len", stat->bt_re_len);
+ hv_store_iv(RETVAL, "bt_re_pad", stat->bt_re_pad);
+ hv_store_iv(RETVAL, "bt_pagesize", stat->bt_pagesize);
+ hv_store_iv(RETVAL, "bt_levels", stat->bt_levels);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "bt_nkeys", stat->bt_nkeys);
+ hv_store_iv(RETVAL, "bt_ndata", stat->bt_ndata);
+#else
+ hv_store_iv(RETVAL, "bt_nrecs", stat->bt_nrecs);
+#endif
+ hv_store_iv(RETVAL, "bt_int_pg", stat->bt_int_pg);
+ hv_store_iv(RETVAL, "bt_leaf_pg", stat->bt_leaf_pg);
+ hv_store_iv(RETVAL, "bt_dup_pg", stat->bt_dup_pg);
+ hv_store_iv(RETVAL, "bt_over_pg", stat->bt_over_pg);
+ hv_store_iv(RETVAL, "bt_free", stat->bt_free);
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+ hv_store_iv(RETVAL, "bt_freed", stat->bt_freed);
+ hv_store_iv(RETVAL, "bt_pfxsaved", stat->bt_pfxsaved);
+ hv_store_iv(RETVAL, "bt_split", stat->bt_split);
+ hv_store_iv(RETVAL, "bt_rootsplit", stat->bt_rootsplit);
+ hv_store_iv(RETVAL, "bt_fastsplit", stat->bt_fastsplit);
+ hv_store_iv(RETVAL, "bt_added", stat->bt_added);
+ hv_store_iv(RETVAL, "bt_deleted", stat->bt_deleted);
+ hv_store_iv(RETVAL, "bt_get", stat->bt_get);
+ hv_store_iv(RETVAL, "bt_cache_hit", stat->bt_cache_hit);
+ hv_store_iv(RETVAL, "bt_cache_miss", stat->bt_cache_miss);
+#endif
+ hv_store_iv(RETVAL, "bt_int_pgfree", stat->bt_int_pgfree);
+ hv_store_iv(RETVAL, "bt_leaf_pgfree", stat->bt_leaf_pgfree);
+ hv_store_iv(RETVAL, "bt_dup_pgfree", stat->bt_dup_pgfree);
+ hv_store_iv(RETVAL, "bt_over_pgfree", stat->bt_over_pgfree);
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Recno PACKAGE = BerkeleyDB::Recno PREFIX = recno_
+
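+# _db_open_recno takes its filename from "Fname" and maps the Source,
+# Len, Delim and Pad options onto re_source, re_len, re_delim and
+# re_pad (setting the matching DB 2.x flag bits) before opening with
+# type DB_RECNO.  ArrayBase is honoured when ALLOW_RECNO_OFFSET is
+# defined.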
+BerkeleyDB::Recno::Raw
+_db_open_recno(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Fname", char*) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+
+ SetValue_iv(info.flags, "Property") ;
+ SetValue_pv(info.re_source, "Source", char*) ;
+ if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
+ info.re_len = SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_FIXEDLEN) ;
+ }
+ if ((sv = readHash(hash, "Delim")) && sv != &PL_sv_undef) {
+ info.re_delim = SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_DELIMITER) ;
+ }
+ if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
+ info.re_pad = (u_int32_t)(SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv)) ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ ZMALLOC(db, BerkeleyDB_type) ;
+#ifdef ALLOW_RECNO_OFFSET
+ SetValue_iv(db->array_base, "ArrayBase") ;
+ db->array_base = (db->array_base == 0 ? 1 : 0) ;
+#endif /* ALLOW_RECNO_OFFSET */
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_RECNO, flags, mode, &info) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Queue PACKAGE = BerkeleyDB::Queue PREFIX = recno_
+
+BerkeleyDB::Queue::Raw
+_db_open_queue(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3
+ softCrash("BerkeleyDB::Queue needs Berkeley DB 3.0.x or better");
+#else
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Fname", char*) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+ SetValue_iv(info.q_extentsize, "ExtentSize") ;
+
+
+ SetValue_iv(info.flags, "Property") ;
+ if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
+ info.re_len = SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_FIXEDLEN) ;
+ }
+ if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
+ info.re_pad = (u_int32_t)(SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv)) ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ ZMALLOC(db, BerkeleyDB_type) ;
+#ifdef ALLOW_RECNO_OFFSET
+ SetValue_iv(db->array_base, "ArrayBase") ;
+ db->array_base = (db->array_base == 0 ? 1 : 0) ;
+#endif /* ALLOW_RECNO_OFFSET */
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_QUEUE, flags, mode, &info) ;
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+HV *
+db_stat(db, flags=0)
+ BerkeleyDB::Common db
+ int flags
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("$db->db_stat for a Queue needs Berkeley DB 3.x or better") ;
+#else /* Berkeley DB 3, or better */
+ DB_QUEUE_STAT * stat ;
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "qs_magic", stat->qs_magic) ;
+ hv_store_iv(RETVAL, "qs_version", stat->qs_version);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "qs_nkeys", stat->qs_nkeys);
+ hv_store_iv(RETVAL, "qs_ndata", stat->qs_ndata);
+#else
+ hv_store_iv(RETVAL, "qs_nrecs", stat->qs_nrecs);
+#endif
+ hv_store_iv(RETVAL, "qs_pages", stat->qs_pages);
+ hv_store_iv(RETVAL, "qs_pagesize", stat->qs_pagesize);
+ hv_store_iv(RETVAL, "qs_pgfree", stat->qs_pgfree);
+ hv_store_iv(RETVAL, "qs_re_len", stat->qs_re_len);
+ hv_store_iv(RETVAL, "qs_re_pad", stat->qs_re_pad);
+#ifdef AT_LEAST_DB_3_2
+#else
+ hv_store_iv(RETVAL, "qs_start", stat->qs_start);
+#endif
+ hv_store_iv(RETVAL, "qs_first_recno", stat->qs_first_recno);
+ hv_store_iv(RETVAL, "qs_cur_recno", stat->qs_cur_recno);
+#if DB_VERSION_MAJOR >= 3
+ hv_store_iv(RETVAL, "qs_metaflags", stat->qs_metaflags);
+#endif
+ safefree(stat) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Common PACKAGE = BerkeleyDB::Common PREFIX = dab_
+
+
+DualType
+db_close(db,flags=0)
+ BerkeleyDB::Common db
+ int flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ CODE:
+ Trace(("BerkeleyDB::Common::db_close %d\n", db));
+#ifdef STRICT_CLOSE
+ if (db->txn)
+ softCrash("attempted to close a database while a transaction was still open") ;
+ if (db->open_cursors)
+ softCrash("attempted to close a database with %d open cursor(s)",
+ db->open_cursors) ;
+#endif /* STRICT_CLOSE */
+ RETVAL = db->Status = ((db->dbp)->close)(db->dbp, flags) ;
+ if (db->parent_env && db->parent_env->open_dbs)
+ -- db->parent_env->open_dbs ;
+ db->active = FALSE ;
+ hash_delete("BerkeleyDB::Term::Db", (IV)db) ;
+ -- db->open_cursors ;
+ Trace(("end of BerkeleyDB::Common::db_close\n"));
+ OUTPUT:
+ RETVAL
+
+void
+dab__DESTROY(db)
+ BerkeleyDB::Common db
+ CODE:
+ CurrentDB = db ;
+ Trace(("In BerkeleyDB::Common::_DESTROY db %d dirty=%d\n", db, PL_dirty)) ;
+ destroyDB(db) ;
+ Trace(("End of BerkeleyDB::Common::DESTROY \n")) ;
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+#define db_cursor(db, txn, cur,flags) ((db->dbp)->cursor)(db->dbp, txn, cur)
+#else
+#define db_cursor(db, txn, cur,flags) ((db->dbp)->cursor)(db->dbp, txn, cur,flags)
+#endif
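+
+# _db_cursor wraps DB->cursor (the macro above hides the fact that the
+# flags argument only exists from DB 2.6 onwards).  The new cursor
+# inherits the parent database's callbacks, partial settings and
+# filters, bumps open_cursors and is registered in
+# BerkeleyDB::Term::Cursor.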
+BerkeleyDB::Cursor::Raw
+_db_cursor(db, flags=0)
+ BerkeleyDB::Common db
+ u_int32_t flags
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+ DBC * cursor ;
+ CurrentDB = db ;
+ if ((db->Status = db_cursor(db, db->txn, &cursor, flags)) == 0){
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->open_cursors ++ ;
+ RETVAL->parent_db = db ;
+ RETVAL->cursor = cursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->recno_or_queue = db->recno_or_queue ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (IV)RETVAL, 1) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
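+# _db_join builds a NULL-terminated DBC* array from the Perl array of
+# cursors and hands it to DB->join (the argument order differs between
+# DB 2.x and 3.x).  The resulting join cursor is wrapped and registered
+# exactly like an ordinary cursor.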
+BerkeleyDB::Cursor::Raw
+_db_join(db, cursors, flags=0)
+ BerkeleyDB::Common db
+ AV * cursors
+ u_int32_t flags
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2 && (DB_VERSION_MINOR < 5 || (DB_VERSION_MINOR == 5 && DB_VERSION_PATCH < 2))
+ softCrash("join needs Berkeley DB 2.5.2 or later") ;
+#else /* Berkeley DB >= 2.5.2 */
+ DBC * join_cursor ;
+ DBC ** cursor_list ;
+ I32 count = av_len(cursors) + 1 ;
+ int i ;
+ CurrentDB = db ;
+ if (count < 1 )
+ softCrash("db_join: No cursors in parameter list") ;
+ cursor_list = (DBC **)safemalloc(sizeof(DBC*) * (count + 1));
+ for (i = 0 ; i < count ; ++i) {
+ SV * obj = (SV*) * av_fetch(cursors, i, FALSE) ;
+ BerkeleyDB__Cursor cur = (BerkeleyDB__Cursor) getInnerObject(obj) ;
+ cursor_list[i] = cur->cursor ;
+ }
+ cursor_list[i] = NULL ;
+#if DB_VERSION_MAJOR == 2
+ if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, flags, &join_cursor)) == 0){
+#else
+ if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, &join_cursor, flags)) == 0){
+#endif
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->open_cursors ++ ;
+ RETVAL->parent_db = db ;
+ RETVAL->cursor = join_cursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (IV)RETVAL, 1) ;
+ }
+ safefree(cursor_list) ;
+#endif /* Berkeley DB >= 2.5.2 */
+ }
+ OUTPUT:
+ RETVAL
+
+int
+ArrayOffset(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL = db->array_base ? 0 : 1 ;
+#else
+ RETVAL = 0 ;
+#endif /* ALLOW_RECNO_OFFSET */
+ OUTPUT:
+ RETVAL
+
+int
+type(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ RETVAL = db->type ;
+ OUTPUT:
+ RETVAL
+
+int
+byteswapped(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+ softCrash("byteswapped needs Berkeley DB 2.5 or later") ;
+#else
+#if DB_VERSION_MAJOR == 2
+ RETVAL = db->dbp->byteswapped ;
+#else
+ RETVAL = db->dbp->get_byteswapped(db->dbp) ;
+#endif
+#endif
+ OUTPUT:
+ RETVAL
+
+DualType
+status(db)
+ BerkeleyDB::Common db
+ CODE:
+ RETVAL = db->Status ;
+ OUTPUT:
+ RETVAL
+
+#ifdef DBM_FILTERING
+
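+# setFilter implements the four filter_* accessors below: it returns
+# the current filter (as a mortal copy), removes the filter when the
+# caller passes undef, and otherwise stores the supplied code reference.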
+#define setFilter(ftype) \
+ { \
+ if (db->ftype) \
+ RETVAL = sv_mortalcopy(db->ftype) ; \
+ ST(0) = RETVAL ; \
+ if (db->ftype && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db->ftype) ; \
+ db->ftype = NULL ; \
+ } \
+ else if (code) { \
+ if (db->ftype) \
+ sv_setsv(db->ftype, code) ; \
+ else \
+ db->ftype = newSVsv(code) ; \
+ } \
+ }
+
+
+SV *
+filter_fetch_key(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_fetch_key) ;
+
+SV *
+filter_store_key(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_store_key) ;
+
+SV *
+filter_fetch_value(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_fetch_value) ;
+
+SV *
+filter_store_value(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_store_value) ;
+
+#endif /* DBM_FILTERING */
+
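+# partial_set turns on DB_DBT_PARTIAL for this handle, so subsequent
+# get/put calls operate on "length" bytes starting at "offset" of each
+# record's data; partial_clear switches it off again.  In list context
+# both return the previous (active, doff, dlen) settings.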
+void
+partial_set(db, offset, length)
+ BerkeleyDB::Common db
+ u_int32_t offset
+ u_int32_t length
+ INIT:
+ ckActive_Database(db->active) ;
+ PPCODE:
+ if (GIMME == G_ARRAY) {
+ XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
+ XPUSHs(sv_2mortal(newSViv(db->doff))) ;
+ XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
+ }
+ db->partial = DB_DBT_PARTIAL ;
+ db->doff = offset ;
+ db->dlen = length ;
+
+
+void
+partial_clear(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ PPCODE:
+ if (GIMME == G_ARRAY) {
+ XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
+ XPUSHs(sv_2mortal(newSViv(db->doff))) ;
+ XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
+ }
+ db->partial =
+ db->doff =
+ db->dlen = 0 ;
+
+
+#define db_del(db, key, flags) \
+ (db->Status = ((db->dbp)->del)(db->dbp, db->txn, &key, flags))
+DualType
+db_del(db, key, flags=0)
+ BerkeleyDB::Common db
+ DBTKEY key
+ u_int flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+
+
+#define db_get(db, key, data, flags) \
+ (db->Status = ((db->dbp)->get)(db->dbp, db->txn, &key, &data, flags))
+DualType
+db_get(db, key, data, flags=0)
+ BerkeleyDB::Common db
+ u_int flags
+ DBTKEY_B key
+ DBT_OPT data
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ SetPartial(data,db) ;
+ OUTPUT:
+ key if (flagSet(DB_SET_RECNO)) OutputValue(ST(1), key) ;
+ data
+
+#define db_put(db,key,data,flag) \
+ (db->Status = (db->dbp->put)(db->dbp,db->txn,&key,&data,flag))
+DualType
+db_put(db, key, data, flags=0)
+ BerkeleyDB::Common db
+ DBTKEY key
+ DBT data
+ u_int flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ /* SetPartial(data,db) ; */
+ OUTPUT:
+ key if (flagSet(DB_APPEND)) OutputKey(ST(1), key) ;
+
+#define db_key_range(db, key, range, flags) \
+ (db->Status = ((db->dbp)->key_range)(db->dbp, db->txn, &key, &range, flags))
+DualType
+db_key_range(db, key, less, equal, greater, flags=0)
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ double less = NO_INIT
+ double equal = NO_INIT
+ double greater = NO_INIT
+ u_int32_t flags
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3_1
+ softCrash("key_range needs Berkeley DB 3.1.x or later") ;
+#else
+ DB_KEY_RANGE range ;
+ range.less = range.equal = range.greater = 0.0 ;
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ RETVAL = db_key_range(db, key, range, flags);
+ if (RETVAL == 0) {
+ less = range.less ;
+ equal = range.equal;
+ greater = range.greater;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+ less
+ equal
+ greater
+
+
+#define db_fd(db, x) (db->Status = (db->dbp->fd)(db->dbp, &x))
+DualType
+db_fd(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ CurrentDB = db ;
+ db_fd(db, RETVAL) ;
+ OUTPUT:
+ RETVAL
+
+
+#define db_sync(db, fl) (db->Status = (db->dbp->sync)(db->dbp, fl))
+DualType
+db_sync(db, flags=0)
+ BerkeleyDB::Common db
+ u_int flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+
+void
+_Txn(db, txn=NULL)
+ BerkeleyDB::Common db
+ BerkeleyDB::Txn txn
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ if (txn) {
+ Trace(("_Txn(%d in %d) active [%d]\n", txn->txn, txn, txn->active));
+ ckActive_Transaction(txn->active) ;
+ db->txn = txn->txn ;
+ }
+ else {
+ Trace(("_Txn(undef) \n"));
+ db->txn = NULL ;
+ }
+
+
+
+
+MODULE = BerkeleyDB::Cursor PACKAGE = BerkeleyDB::Cursor PREFIX = cu_
+
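+# _c_dup duplicates an open cursor via DBC->c_dup (DB 3.x or better).
+# The copy inherits the original cursor's settings and is registered in
+# BerkeleyDB::Term::Cursor just like a cursor created with _db_cursor.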
+BerkeleyDB::Cursor::Raw
+_c_dup(db, flags=0)
+ BerkeleyDB::Cursor db
+ u_int32_t flags
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3
+ softCrash("c_dup needs at least Berkeley DB 3.0.x");
+#else
+ DBC * newcursor ;
+ db->Status = ((db->cursor)->c_dup)(db->cursor, &newcursor, flags) ;
+ if (db->Status == 0){
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->parent_db->open_cursors ++ ;
+ RETVAL->parent_db = db->parent_db ;
+ RETVAL->cursor = newcursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->recno_or_queue = db->recno_or_queue ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif /* DBM_FILTERING */
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (IV)RETVAL, 1) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+DualType
+_c_close(db)
+ BerkeleyDB::Cursor db
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ hash_delete("BerkeleyDB::Term::Cursor", (IV)db) ;
+ CODE:
+ RETVAL = db->Status =
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->active = FALSE ;
+ if (db->parent_db->open_cursors)
+ -- db->parent_db->open_cursors ;
+ OUTPUT:
+ RETVAL
+
+void
+_DESTROY(db)
+ BerkeleyDB::Cursor db
+ CODE:
+ CurrentDB = db->parent_db ;
+ Trace(("In BerkeleyDB::Cursor::_DESTROY db %d dirty=%d active=%d\n", db, PL_dirty, db->active));
+ hash_delete("BerkeleyDB::Term::Cursor", (IV)db) ;
+ if (db->active)
+ ((db->cursor)->c_close)(db->cursor) ;
+ if (db->parent_db->open_cursors)
+ -- db->parent_db->open_cursors ;
+ Safefree(db->filename) ;
+ Safefree(db) ;
+ Trace(("End of BerkeleyDB::Cursor::_DESTROY\n")) ;
+
+DualType
+status(db)
+ BerkeleyDB::Cursor db
+ CODE:
+ RETVAL = db->Status ;
+ OUTPUT:
+ RETVAL
+
+
+#define cu_c_del(c,f) (c->Status = ((c->cursor)->c_del)(c->cursor,f))
+DualType
+cu_c_del(db, flags=0)
+ BerkeleyDB::Cursor db
+ int flags
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ OUTPUT:
+ RETVAL
+
+
+#define cu_c_get(c,k,d,f) (c->Status = (c->cursor->c_get)(c->cursor,&k,&d,f))
+DualType
+cu_c_get(db, key, data, flags=0)
+ BerkeleyDB::Cursor db
+ int flags
+ DBTKEY_B key
+ DBT_B data
+ INIT:
+ Trace(("c_get db [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ SetPartial(data,db) ;
+ Trace(("c_get end\n")) ;
+ OUTPUT:
+ RETVAL
+ key
+ data if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
+
+
+#define cu_c_put(c,k,d,f) (c->Status = (c->cursor->c_put)(c->cursor,&k,&d,f))
+DualType
+cu_c_put(db, key, data, flags=0)
+ BerkeleyDB::Cursor db
+ DBTKEY key
+ DBT data
+ int flags
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ /* SetPartial(data,db) ; */
+ OUTPUT:
+ RETVAL
+
+#define cu_c_count(c,p,f) (c->Status = (c->cursor->c_count)(c->cursor,&p,f))
+DualType
+cu_c_count(db, count, flags=0)
+ BerkeleyDB::Cursor db
+ u_int32_t count = NO_INIT
+ int flags
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("c_count needs at least Berkeley DB 3.1.x");
+#else
+	    Trace(("c_count db [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ RETVAL = cu_c_count(db, count, flags) ;
+ Trace((" c_count got %d duplicates\n", count)) ;
+#endif
+ OUTPUT:
+ RETVAL
+ count
+
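
For reference, the cursor glue above (_c_dup, _c_close, cu_c_get, cu_c_put, cu_c_del, cu_c_count) sits behind the Perl-level cursor interface. A minimal sketch of walking a database with a cursor, assuming a hypothetical file name:

    use BerkeleyDB ;

    my $db = BerkeleyDB::Btree->new( -Filename => "walk.db",    # hypothetical file
                                     -Flags    => DB_CREATE )
        or die "cannot open: $BerkeleyDB::Error\n" ;

    my $cursor = $db->db_cursor()
        or die "db_cursor failed: $BerkeleyDB::Error\n" ;

    my ($k, $v) = ("", "") ;
    while ($cursor->c_get($k, $v, DB_NEXT) == 0)    # 0 means the get succeeded
      { print "$k -> $v\n" }

    $cursor->c_close() ;
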
+MODULE = BerkeleyDB::TxnMgr PACKAGE = BerkeleyDB::TxnMgr PREFIX = xx_
+
+BerkeleyDB::Txn::Raw
+_txn_begin(txnmgr, pid=NULL, flags=0)
+ BerkeleyDB::TxnMgr txnmgr
+ BerkeleyDB::Txn pid
+ u_int32_t flags
+ CODE:
+ {
+ DB_TXN *txn ;
+ DB_TXN *p_id = NULL ;
+#if DB_VERSION_MAJOR == 2
+ if (txnmgr->env->Env->tx_info == NULL)
+ softCrash("Transaction Manager not enabled") ;
+#endif
+ if (pid)
+ p_id = pid->txn ;
+ txnmgr->env->TxnMgrStatus =
+#if DB_VERSION_MAJOR == 2
+ txn_begin(txnmgr->env->Env->tx_info, p_id, &txn) ;
+#else
+ txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
+#endif
+ if (txnmgr->env->TxnMgrStatus == 0) {
+ ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
+ RETVAL->txn = txn ;
+ RETVAL->active = TRUE ;
+ Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
+ hash_store_iv("BerkeleyDB::Term::Txn", (IV)RETVAL, 1) ;
+ }
+ else
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+DualType
+status(mgr)
+ BerkeleyDB::TxnMgr mgr
+ CODE:
+ RETVAL = mgr->env->TxnMgrStatus ;
+ OUTPUT:
+ RETVAL
+
+
+void
+_DESTROY(mgr)
+ BerkeleyDB::TxnMgr mgr
+ CODE:
+ Trace(("In BerkeleyDB::TxnMgr::DESTROY dirty=%d\n", PL_dirty)) ;
+ Safefree(mgr) ;
+ Trace(("End of BerkeleyDB::TxnMgr::DESTROY\n")) ;
+
+DualType
+txn_close(txnp)
+ BerkeleyDB::TxnMgr txnp
+ NOT_IMPLEMENTED_YET
+
+
+#if DB_VERSION_MAJOR == 2
+# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env->tx_info, k, m)
+#else
+# ifdef AT_LEAST_DB_3_1
+# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env, k, m, 0)
+# else
+# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env, k, m)
+# endif
+#endif
+DualType
+xx_txn_checkpoint(txnp, kbyte, min)
+ BerkeleyDB::TxnMgr txnp
+ long kbyte
+ long min
+
+HV *
+txn_stat(txnp)
+ BerkeleyDB::TxnMgr txnp
+ HV * RETVAL = NULL ;
+ CODE:
+ {
+ DB_TXN_STAT * stat ;
+#if DB_VERSION_MAJOR == 2
+ if(txn_stat(txnp->env->Env->tx_info, &stat, safemalloc) == 0) {
+#else
+ if(txn_stat(txnp->env->Env, &stat, safemalloc) == 0) {
+#endif
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
+ hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
+ hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
+ hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
+ hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
+ hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
+ hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
+ hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
+ hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
+ hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
+#endif
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+
+BerkeleyDB::TxnMgr
+txn_open(dir, flags, mode, dbenv)
+ const char * dir
+ int flags
+ int mode
+ BerkeleyDB::Env dbenv
+ NOT_IMPLEMENTED_YET
+
+
+MODULE = BerkeleyDB::Txn PACKAGE = BerkeleyDB::Txn PREFIX = xx_
+
+DualType
+status(tid)
+ BerkeleyDB::Txn tid
+ CODE:
+ RETVAL = tid->Status ;
+ OUTPUT:
+ RETVAL
+
+int
+_DESTROY(tid)
+ BerkeleyDB::Txn tid
+ CODE:
+ Trace(("In BerkeleyDB::Txn::_DESTROY txn [%d] active [%d] dirty=%d\n", tid->txn, tid->active, PL_dirty)) ;
+ if (tid->active)
+ txn_abort(tid->txn) ;
+ RETVAL = (int)tid ;
+ hash_delete("BerkeleyDB::Term::Txn", (IV)tid) ;
+ Safefree(tid) ;
+ Trace(("End of BerkeleyDB::Txn::DESTROY\n")) ;
+ OUTPUT:
+ RETVAL
+
+#define xx_txn_unlink(d,f,e) txn_unlink(d,f,&(e->Env))
+DualType
+xx_txn_unlink(dir, force, dbenv)
+ const char * dir
+ int force
+ BerkeleyDB::Env dbenv
+ NOT_IMPLEMENTED_YET
+
+#define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn))
+DualType
+xx_txn_prepare(tid)
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+
+#if DB_VERSION_MAJOR == 2
+# define _txn_commit(t,flags) (t->Status = txn_commit(t->txn))
+#else
+# define _txn_commit(t, flags) (t->Status = txn_commit(t->txn, flags))
+#endif
+DualType
+_txn_commit(tid, flags=0)
+ BerkeleyDB::Txn tid
+ u_int32_t flags
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (IV)tid) ;
+ tid->active = FALSE ;
+
+#define _txn_abort(t) (t->Status = txn_abort(t->txn))
+DualType
+_txn_abort(tid)
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (IV)tid) ;
+ tid->active = FALSE ;
+
+#define xx_txn_id(t) txn_id(t->txn)
+u_int32_t
+xx_txn_id(tid)
+ BerkeleyDB::Txn tid
+
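
The transaction glue above maps onto $env->txn_begin and the txn_commit/txn_abort methods of the returned BerkeleyDB::Txn object. A minimal sketch, assuming a hypothetical (and already existing) environment directory:

    use BerkeleyDB ;

    my $env = BerkeleyDB::Env->new(
            -Home  => "./txn-home",          # hypothetical directory
            -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_MPOOL|DB_INIT_LOCK|DB_INIT_LOG )
        or die "cannot open environment: $BerkeleyDB::Error\n" ;

    my $txn = $env->txn_begin()              # ends up in _txn_begin above
        or die "txn_begin failed: $BerkeleyDB::Error\n" ;

    # ... operations on databases opened with -Env => $env and associated
    #     with $txn go here ...

    $txn->txn_commit() == 0                  # ends up in _txn_commit above
        or die "txn_commit failed\n" ;
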
+MODULE = BerkeleyDB::_tiedHash PACKAGE = BerkeleyDB::_tiedHash
+
+int
+FIRSTKEY(db)
+ BerkeleyDB::Common db
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ DBC * cursor ;
+
+	    /*
+		TODO!
+		Set the partial value to 0 to eliminate the retrieval of
+		the value. Any existing partial settings need to be stored
+		and restored at the end.
+	    */
+ CurrentDB = db ;
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ /* If necessary create a cursor for FIRSTKEY/NEXTKEY use */
+ if (!db->cursor &&
+ (db->Status = db_cursor(db, db->txn, &cursor, 0)) == 0 )
+ db->cursor = cursor ;
+
+ if (db->cursor)
+ RETVAL = (db->Status) =
+ ((db->cursor)->c_get)(db->cursor, &key, &value, DB_FIRST);
+ else
+ RETVAL = db->Status ;
+ /* check for end of cursor */
+ if (RETVAL == DB_NOTFOUND) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->cursor = NULL ;
+ }
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key)
+ }
+
+
+
+int
+NEXTKEY(db, key)
+ BerkeleyDB::Common db
+ DBTKEY key
+ CODE:
+ {
+ DBT value ;
+
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ key.flags = 0 ;
+ RETVAL = (db->Status) =
+ ((db->cursor)->c_get)(db->cursor, &key, &value, DB_NEXT);
+
+ /* check for end of cursor */
+ if (RETVAL == DB_NOTFOUND) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->cursor = NULL ;
+ }
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key)
+ }
+
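
FIRSTKEY and NEXTKEY above are what Perl's keys/each call when a database is accessed through a tied hash. A minimal sketch, assuming a hypothetical file name:

    use BerkeleyDB ;

    my %h ;
    tie %h, "BerkeleyDB::Hash", -Filename => "tied.db",    # hypothetical file
                                -Flags    => DB_CREATE
        or die "tie failed: $BerkeleyDB::Error\n" ;

    $h{"fruit"} = "apple" ;
    while (my ($k, $v) = each %h)      # each() drives FIRSTKEY then NEXTKEY
      { print "$k => $v\n" }

    untie %h ;
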
+MODULE = BerkeleyDB::_tiedArray PACKAGE = BerkeleyDB::_tiedArray
+
+I32
+FETCHSIZE(db)
+ BerkeleyDB::Common db
+ CODE:
+ CurrentDB = db ;
+ RETVAL = GetArrayLength(db) ;
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB PACKAGE = BerkeleyDB
+
+BOOT:
+ {
+ SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;
+ SV * version_sv = perl_get_sv("BerkeleyDB::db_version", GV_ADD|GV_ADDMULTI) ;
+ SV * ver_sv = perl_get_sv("BerkeleyDB::db_ver", GV_ADD|GV_ADDMULTI) ;
+ int Major, Minor, Patch ;
+ (void)db_version(&Major, &Minor, &Patch) ;
+ /* Check that the versions of db.h and libdb.a are the same */
+ if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
+ || Patch != DB_VERSION_PATCH)
+ croak("\nBerkeleyDB needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ Major, Minor, Patch) ;
+
+ if (Major < 2 || (Major == 2 && Minor < 6))
+ {
+ croak("BerkeleyDB needs Berkeley DB 2.6 or greater. This is %d.%d.%d\n",
+ Major, Minor, Patch) ;
+ }
+ sv_setpvf(version_sv, "%d.%d", Major, Minor) ;
+ sv_setpvf(ver_sv, "%d.%03d%03d", Major, Minor, Patch) ;
+ sv_setpv(sv_err, "");
+
+ DBT_clear(empty) ;
+ empty.data = &zero ;
+ empty.size = sizeof(db_recno_t) ;
+ empty.flags = 0 ;
+
+ }
+
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm b/bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm
new file mode 100644
index 00000000000..ba9a9c0085d
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm
@@ -0,0 +1,8 @@
+
+package BerkeleyDB::Btree ;
+
+# This file is only used for MLDBM
+
+use BerkeleyDB ;
+
+1 ;
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm b/bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm
new file mode 100644
index 00000000000..8e7bc7e78c7
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm
@@ -0,0 +1,8 @@
+
+package BerkeleyDB::Hash ;
+
+# This file is only used for MLDBM
+
+use BerkeleyDB ;
+
+1 ;
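
BerkeleyDB::Btree and BerkeleyDB::Hash above exist only so that MLDBM can load a backing package by name. A minimal sketch of storing a nested structure through MLDBM, assuming a hypothetical file name (MLDBM and Data::Dumper are installed separately):

    use strict ;
    use BerkeleyDB ;
    use MLDBM qw(BerkeleyDB::Btree Data::Dumper) ;

    my %h ;
    tie %h, "MLDBM", -Filename => "mldbm.db",    # hypothetical file
                     -Flags    => DB_CREATE
        or die "cannot open database: $BerkeleyDB::Error\n" ;

    $h{"key"} = { list => [1, 2, 3] } ;   # MLDBM serialises the structure
    untie %h ;
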
diff --git a/bdb/perl.BerkeleyDB/Changes b/bdb/perl.BerkeleyDB/Changes
new file mode 100644
index 00000000000..dcaccd4d0c7
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/Changes
@@ -0,0 +1,112 @@
+Revision history for Perl extension BerkeleyDB.
+
+0.01 23 October 1997
+ * first alpha release as BerkDB.
+
+0.02 30 October 1997
+ * renamed module to BerkeleyDB
+ * fixed a few bugs & added more tests
+
+0.03 5 May 1998
+ * fixed db_get with DB_SET_RECNO
+ * fixed c_get with DB_SET_RECNO and DB_GET_RECNO
+ * implemented BerkeleyDB::Unknown
+ * implemented BerkeleyDB::Recno, including push, pop etc
+ modified the txn support.
+
+0.04 19 May 1998
+ * Define DEFSV & SAVE_DEFSV if not already defined. This allows
+ the module to be built with Perl 5.004_04.
+
+0.05 9 November 1998
+ * Added a note to README about how to build Berkeley DB 2.x
+ when using HP-UX.
+ * Minor modifications to get the module to build with DB 2.5.x
+
+0.06 19 December 1998
+ * Minor modifications to get the module to build with DB 2.6.x
+ * Added support for DB 2.6.x's Concurrent Access Method, DB_INIT_CDB.
+
+0.07 21st September 1999
+ * Numerous small bug fixes.
+ * Added support for sorting duplicate values DB_DUPSORT.
+ * Added support for DB_GET_BOTH & DB_NEXT_DUP.
+ * Added get_dup (from DB_File).
+ * beefed up the documentation.
+ * Forgot to add the DB_INIT_CDB in BerkeleyDB.pm in previous release.
+ * Merged the DBM Filter code from DB_File into BerkeleyDB.
+	* Fixed a nasty bug where a closed transaction was still used
+	  with db_put, db_get etc.
+ * Added logic to gracefully close everything whenever a fatal error
+ happens. Previously the plug was just pulled.
+ * It is now a fatal error to explicitly close an environment if there
+ is still an open database; a database when there are open cursors or
+ an open transaction; and a cursor if there is an open transaction.
+ Using object destruction doesn't have this issue, as object
+ references will ensure everything gets closed in the correct order.
+ * The BOOT code now checks that the version of db.h & libdb are the
+ same - this seems to be a common problem on Linux.
+ * MLDBM support added.
+ * Support for the new join cursor added.
+ * Builds with Berkeley DB 3.x
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+ * Deprecated the TxnMgr class. As with Berkeley DB version 3,
+ txn_begin etc are now accessed via the environment object.
+
+0.08 28th November 1999
+ * More documentation updates
+ * Changed reference to files in /tmp in examples.t
+ * Fixed a typo in softCrash that caused problems when building
+ with a thread-enabled Perl.
+ * BerkeleyDB::Error wasn't initialised properly.
+ * ANSI-ified all the static C functions in BerkeleyDB.xs
+ * Added support for the following DB 3.x features:
+ + The Queue database type
+ + db_remove
+ + subdatabases
+ + db_stat for Hash & Queue
+
+0.09 29th November 1999
+ * the queue.t & subdb.t test harnesses were outputting a few
+ spurious warnings. This has been fixed.
+
+0.10 8th December 1999
+ * The DESTROY method was missing for BerkeleyDB::Env. This resulted in
+ a memory leak. Fixed.
+ * If opening an environment or database failed, there was a small
+ memory leak. This has been fixed.
+	* A thread-enabled Perl could core dump when a database was closed.
+ Problem traced to the strdup function.
+
+0.11 4th June 2000
+	* When built with Berkeley DB 3.x there can be a clash with the close
+ macro.
+ * Typo in the definition of DB_WRITECURSOR
+ * The flags parameter wasn't getting sent to db_cursor
+ * Plugged small memory leak in db_cursor (DESTROY wasn't freeing
+ memory)
+ * Can be built with Berkeley DB 3.1
+
+
+0.12 2nd August 2000
+ * Serious bug with get fixed. Spotted by Sleepycat.
+ * Added hints file for Solaris & Irix (courtesy of Albert Chin-A-Young)
+
+0.13 15th January 2001
+ * Added support to allow this module to build with Berkeley DB 3.2
+ * Updated dbinfo to support Berkeley DB 3.1 & 3.2 file format
+ changes.
+ * Documented the Solaris 2.7 core dump problem in README.
+ * Tidied up the test harness to fix a problem on Solaris where the
+ "fred" directory wasn't being deleted when it should have been.
+ * two calls to "open" clashed with a win32 macro.
+ * size argument for hash_cb is different for Berkeley DB 3.x
+ * Documented the issue of building on Linux.
+ * Added -Server, -CacheSize & -LockDetect options
+ [original patch supplied by Graham Barr]
+ * Added support for set_mutexlocks, c_count, set_q_extentsize,
+ key_range, c_dup
+ * Dropped the "attempted to close a Cursor with an open transaction"
+ error in c_close. The correct behaviour is that the cursor
+ should be closed before committing/aborting the transaction.
+
diff --git a/bdb/perl.BerkeleyDB/MANIFEST b/bdb/perl.BerkeleyDB/MANIFEST
new file mode 100644
index 00000000000..3b8a820d56e
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/MANIFEST
@@ -0,0 +1,49 @@
+BerkeleyDB.pm
+BerkeleyDB.pod
+BerkeleyDB.pod.P
+BerkeleyDB.xs
+BerkeleyDB/Btree.pm
+BerkeleyDB/Hash.pm
+Changes
+config.in
+dbinfo
+hints/solaris.pl
+hints/irix_6_5.pl
+Makefile.PL
+MANIFEST
+mkconsts
+mkpod
+README
+t/btree.t
+t/db-3.0.t
+t/db-3.1.t
+t/db-3.2.t
+t/destroy.t
+t/env.t
+t/examples.t
+t/examples.t.T
+t/examples3.t
+t/examples3.t.T
+t/filter.t
+t/hash.t
+t/join.t
+t/mldbm.t
+t/queue.t
+t/recno.t
+t/strict.t
+t/subdb.t
+t/txn.t
+t/unknown.t
+Todo
+typemap
+patches/5.004
+patches/5.004_01
+patches/5.004_02
+patches/5.004_03
+patches/5.004_04
+patches/5.004_05
+patches/5.005
+patches/5.005_01
+patches/5.005_02
+patches/5.005_03
+patches/5.6.0
diff --git a/bdb/perl.BerkeleyDB/Makefile.PL b/bdb/perl.BerkeleyDB/Makefile.PL
new file mode 100644
index 00000000000..399a6761886
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/Makefile.PL
@@ -0,0 +1,112 @@
+#! perl -w
+
+# It should not be necessary to edit this file. The configuration for
+# BerkeleyDB is controlled from the file config.in
+
+
+BEGIN { die "BerkeleyDB needs Perl 5.004_04 or greater" if $] < 5.004_04 ; }
+
+use strict ;
+use ExtUtils::MakeMaker ;
+
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+
+ParseCONFIG() ;
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $^O eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'BerkeleyDB',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ MAN3PODS => ' ', # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'BerkeleyDB.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "$OS2",
+ #'macro' => { INSTALLDIRS => 'perl' },
+ 'dist' => {COMPRESS=>'gzip', SUFFIX=>'gz'},
+ ($] >= 5.005
+ ? (ABSTRACT_FROM => 'BerkeleyDB.pod',
+ AUTHOR => 'Paul Marquess <Paul.Marquess@btinternet.com>')
+ : ()
+ ),
+ );
+
+
+sub MY::postamble {
+ '
+$(NAME).pod: $(NAME).pod.P t/examples.t.T t/examples3.t.T mkpod
+ perl ./mkpod
+
+$(NAME).xs: typemap
+ @$(TOUCH) $(NAME).xs
+
+Makefile: config.in
+
+
+' ;
+}
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB DBNAME ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME is optional, so pretend it has been parsed.
+ delete $Parsed{'DBNAME'} ;
+
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'BERKELEYDB_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'BERKELEYDB_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ print "Looks Good.\n" ;
+
+}
+
+# end of file Makefile.PL
diff --git a/bdb/perl.BerkeleyDB/README b/bdb/perl.BerkeleyDB/README
new file mode 100644
index 00000000000..aa905fa8011
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/README
@@ -0,0 +1,464 @@
+ BerkeleyDB
+
+ Version 0.13
+
+ 15th Jan 2001
+
+ Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This
+ program is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself.
+
+
+DESCRIPTION
+-----------
+
+BerkeleyDB is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 2 or 3. (Note: if you want
+to use version 1 of Berkeley DB with Perl you need the DB_File module).
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. BerkeleyDB provides an interface to all
+four of the database types (hash, btree, queue and recno) currently
+supported by Berkeley DB.
+
+For further details see the documentation in the file BerkeleyDB.pod.
+
+PREREQUISITES
+-------------
+
+Before you can build BerkeleyDB you need to have the following
+installed on your system:
+
+ * Perl 5.004_04 or greater.
+
+ * Berkeley DB Version 2.6.4 or greater
+
+ The official web site for Berkeley DB is http://www.sleepycat.com
+
+ The latest version of Berkeley DB is always available there. It
+ is recommended that you use the most recent version available at
+ the Sleepycat site.
+
+ The one exception to this advice is where you want to use BerkeleyDB
+ to access database files created by a third-party application,
+ like Sendmail. In these cases you must build BerkeleyDB with a
+ compatible version of Berkeley DB.
+
+
+BUILDING THE MODULE
+-------------------
+
+Assuming you have met all the prerequisites, building the module should
+be relatively straightforward.
+
+Step 1 : If you are running Solaris 2.5, 2.7 or HP-UX 10 read either
+ the Solaris Notes or HP-UX Notes sections below.
+ If you are running Linux please read the Linux Notes section
+ before proceeding.
+
+
+Step 2 : Edit the file config.in to suit your local installation.
+ Instructions are given in the file.
+
+Step 3 : Build and test the module using this sequence of commands:
+
+ perl Makefile.PL
+ make
+ make test
+
+INSTALLATION
+------------
+
+ make install
+
+TROUBLESHOOTING
+===============
+
+Here are some of the problems that people encounter when building BerkeleyDB.
+
+Missing db.h or libdb.a
+-----------------------
+
+If you get an error like this:
+
+ cc -c -I./libraries/ -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ BerkeleyDB.xs:52: db.h: No such file or directory
+
+or this:
+
+ cc -c -I./libraries/2.7.5 -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ LD_RUN_PATH="/lib" cc -o blib/arch/auto/BerkeleyDB/BerkeleyDB.so -shared
+ -L/usr/local/lib BerkeleyDB.o
+ -L/home/paul/perl/ext/BerkDB/BerkeleyDB/libraries -ldb
+ ld: cannot open -ldb: No such file or directory
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB installed, but it isn't in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables to point
+ to the directories where libdb.a and db.h are installed.
+
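
For example, if Berkeley DB were installed under /usr/local/BerkeleyDB (one of the sample locations listed in config.in), the two lines in config.in would be changed to read:

    INCLUDE = /usr/local/BerkeleyDB/include
    LIB     = /usr/local/BerkeleyDB/lib
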
+Wrong db.h
+----------
+
+If you get an error like this when building this module:
+
+ cc -c -I./libraries/ -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ BerkeleyDB.xs:93: parse error before `DB_INFO'
+ BerkeleyDB.xs:93: warning: no semicolon at end of struct or union
+ BerkeleyDB.xs:94: warning: data definition has no type or storage class
+ BerkeleyDB.xs:95: parse error before `0x80000000'
+ BerkeleyDB.xs:110: parse error before `}'
+ BerkeleyDB.xs:110: warning: data definition has no type or storage class
+ BerkeleyDB.xs:117: parse error before `DB_ENV'
+ ...
+
+This error usually happens if you only have Berkeley DB version 1
+on your system or you have both version 1 and version 2 (or 3) of Berkeley
+DB installed on your system. When building BerkeleyDB it attempts
+to use the db.h for Berkeley DB version 1. This perl module can only
+be built with Berkeley DB version 2 or 3.
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB version 2 or 3 installed on your system
+ at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB 2 or 3 installed, but it isn't in a standard
+ place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables
+ to point to the directories where libdb.a and db.h are
+ installed.
+
+Undefined Symbol: txn_stat
+--------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
+ -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
+ -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
+ $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: txn_stat
+ at /usr/local/lib/perl5/5.00503/i586-linux/DynaLoader.pm line 169.
+ ...
+
+This error usually happens when you have both version 1 and version
+2 (or 3) of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2/3 and the version 1
+library. Unfortunately the two versions aren't compatible with each
+other. BerkeleyDB can only be built with Berkeley DB version 2 or 3.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+Undefined Symbol: db_appinit
+----------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux
+ -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: db_appinit
+ at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm
+ ...
+
+
+This error usually happens when you have both version 2 and version
+3 of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2 and the version 3
+library. Unfortunately the two versions aren't compatible with each
+other.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+Undefined Symbol: db_create
+---------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux
+ -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: db_create
+ at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm
+ ...
+
+This error usually happens when you have both version 2 and version
+3 of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 3 and the version 2
+library. Unfortunately the two versions aren't compatible with each
+other.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+
+Incompatible versions of db.h and libdb
+---------------------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
+ -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
+ -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
+ $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............
+ BerkeleyDB needs compatible versions of libdb & db.h
+ you have db.h version 2.6.4 and libdb version 2.7.5
+ BEGIN failed--compilation aborted at t/btree.t line 25.
+ dubious
+ Test returned status 255 (wstat 65280, 0xff00)
+ ...
+
+Another variation on the theme of having two versions of Berkeley DB on
+your system.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+ If you are running Linux, please read the Linux Notes section below.
+
+
+Linux Notes
+-----------
+
+Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
+that has version 2.x of Berkeley DB linked into it. This makes it
+difficult to build this module with anything other than the version of
+Berkeley DB that shipped with your Linux release. If you do try to use
+a different version of Berkeley DB you will most likely get the error
+described in the "Incompatible versions of db.h and libdb" section of
+this file.
+
+To make matters worse, prior to Perl 5.6.1, the perl binary itself
+*always* included the Berkeley DB library.
+
+If you want to use a newer version of Berkeley DB with this module, the
+easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
+(or better).
+
+There are two approaches you can use to get older versions of Perl to
+work with specific versions of Berkeley DB. Both have their advantages
+and disadvantages.
+
+The first approach will only work when you want to build a version of
+Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
+Berkeley DB 2.x, you must use the next approach. This approach involves
+rebuilding your existing version of Perl after applying an unofficial
+patch. The "patches" directory in this module's source distribution
+contains a number of patch files. There is one patch file for every
+stable version of Perl since 5.004. Apply the appropriate patch to your
+Perl source tree before re-building and installing Perl from scratch.
+For example, assuming you are in the top-level source directory for
+Perl 5.6.0, the command below will apply the necessary patch. Remember
+to replace the path shown below with one that points to this module's
+patches directory.
+
+ patch -p1 -N </path/to/BerkeleyDB/patches/5.6.0
+
+Now rebuild & install perl. You should now have a perl binary that can
+be used to build this module. Follow the instructions in "BUILDING THE
+MODULE", remembering to set the INCLUDE and LIB variables in config.in.
+
+
+The second approach will work with both Berkeley DB 2.x and 3.x.
+Start by building Berkeley DB as a shared library. This is from
+the Berkeley DB build instructions:
+
+ Building Shared Libraries for the GNU GCC compiler
+
+ If you're using gcc and there's no better shared library example for
+ your architecture, the following shared library build procedure will
+ probably work.
+
+ Add the -fpic option to the CFLAGS value in the Makefile.
+
+ Rebuild all of your .o files. This will create a Berkeley DB library
+ that contains .o files with PIC code. To build the shared library,
+ then take the following steps in the library build directory:
+
+ % mkdir tmp
+ % cd tmp
+ % ar xv ../libdb.a
+ % gcc -shared -o libdb.so *.o
+ % mv libdb.so ..
+ % cd ..
+ % rm -rf tmp
+
+ Note, you may have to change the gcc line depending on the
+ requirements of your system.
+
+ The file libdb.so is your shared library
+
+Once you have built libdb.so, you will need to store it somewhere safe.
+
+ cp libdb.so /usr/local/BerkeleyDB/lib
+
+If you now set the LD_PRELOAD environment variable to point to this
+shared library, Perl will use it instead of the version of Berkeley DB
+that shipped with your Linux distribution.
+
+ export LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so
+
+Finally follow the instructions in "BUILDING THE MODULE" to build,
+test and install this module. Don't forget to set the INCLUDE and LIB
+variables in config.in.
+
+Remember, you will need to have the LD_PRELOAD variable set anytime you
+want to use Perl with Berkeley DB. Also note that if you have LD_PRELOAD
+permanently set it will affect ALL commands you execute. This may be a
+problem if you run any commands that access a database created by the
+version of Berkeley DB that shipped with your Linux distribution.
+
+
+
+Solaris 2.5 Notes
+-----------------
+
+If you are running Solaris 2.5, and you get this error when you run the
+BerkeleyDB test harness:
+
+ libc internal error: _rmutex_unlock: rmutex not held.
+
+you probably need to install a Sun patch. It has been reported that
+Sun patch 103187-25 (or later revisions) fixes this problem.
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+Solaris 2.7 Notes
+-----------------
+
+If you are running Solaris 2.7 and all the tests in the test harness
+generate a core dump, try applying Sun patch 106980-09 (or better).
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+
+HP-UX Notes
+-----------
+
+Some people running HP-UX 10 have reported getting an error like this
+when building this module with the native HP-UX compiler.
+
+ ld: (Warning) At least one PA 2.0 object file (BerkeleyDB.o) was detected.
+ The linked output may not run on a PA 1.x system.
+ ld: Invalid loader fixup for symbol "$000000A5".
+
+If this is the case for you, Berkeley DB needs to be recompiled with
+the +z or +Z option and the resulting library placed in a .sl file. The
+following steps should do the trick:
+
+ 1: Configure the Berkeley DB distribution with the +z or +Z C compiler
+ flag:
+
+ env "CFLAGS=+z" ../dist/configure ...
+
+ 2: Edit the Berkeley DB Makefile and change:
+
+ "libdb= libdb.a" to "libdb= libdb.sl".
+
+ 3: Build and install the Berkeley DB distribution as usual.
+
+
+
+FEEDBACK
+--------
+
+How to report a problem with BerkeleyDB.
+
+To help me help you, I need the following information:
+
+ 1. The version of Perl and the operating system name and version you
+ are running. The complete output from running "perl -V" will tell
+ me all I need to know.
+     If your perl does not understand the "-V" option, it is too old.
+ BerkeleyDB needs Perl version 5.004_04 or better.
+
+ 2. The version of BerkeleyDB you have. If you have successfully
+ installed BerkeleyDB, this one-liner will tell you:
+
+ perl -MBerkeleyDB -e 'print "BerkeleyDB ver $BerkeleyDB::VERSION\n"'
+
+ If you haven't installed BerkeleyDB then search BerkeleyDB.pm for a
+ line like this:
+
+ $VERSION = "1.20" ;
+
+ 3. The version of Berkeley DB you have installed. If you have
+ successfully installed BerkeleyDB, this one-liner will tell you:
+
+ perl -MBerkeleyDB -e 'print BerkeleyDB::DB_VERSION_STRING."\n"'
+
+ If you haven't installed BerkeleyDB then search db.h for a line
+ like this:
+
+ #define DB_VERSION_STRING
+
+ 4. If you are having problems building BerkeleyDB, send me a complete
+ log of what happened.
+
+ 5. Now the difficult one. If you think you have found a bug in
+ BerkeleyDB and you want me to fix it, you will *greatly* enhance
+ the chances of me being able to track it down by sending me a small
+ self-contained Perl script that illustrates the problem you are
+ encountering. Include a summary of what you think the problem is
+ and a log of what happens when you run the script, in case I can't
+ reproduce your problem on my system. If possible, don't have the
+ script dependent on an existing 20Meg database. If the script you
+ send me can create the database itself then that is preferred.
+
+ I realise that in some cases this is easier said than done, so if
+ you can only reproduce the problem in your existing script, then
+ you can post me that if you want. Just don't expect me to find your
+ problem in a hurry, or at all. :-)
+
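
As a rough template for the kind of script point 5 asks for (the file name is arbitrary), something along these lines is usually enough:

    use strict ;
    use BerkeleyDB ;

    my $filename = "bugcase.db" ;          # any scratch file will do
    unlink $filename ;

    my $db = BerkeleyDB::Hash->new( -Filename => $filename,
                                    -Flags    => DB_CREATE )
        or die "cannot open $filename: $! $BerkeleyDB::Error\n" ;

    # the smallest sequence of calls that shows the problem
    $db->db_put("key", "value") == 0 or warn "db_put: $BerkeleyDB::Error\n" ;
    my $value ;
    $db->db_get("key", $value)  == 0 or warn "db_get: $BerkeleyDB::Error\n" ;
    print "got [$value]\n" ;

    undef $db ;
    unlink $filename ;
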
+
+CHANGES
+-------
+
+See the Changes file.
+
+Paul Marquess <Paul.Marquess@btinternet.com>
+
diff --git a/bdb/perl.BerkeleyDB/Todo b/bdb/perl.BerkeleyDB/Todo
new file mode 100644
index 00000000000..12d53bcf91c
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/Todo
@@ -0,0 +1,57 @@
+
+ * Proper documentation.
+
+ * address or document the "close all cursors if you encounter an error"
+   requirement
+
+ * Change the $BerkeleyDB::Error to store the info in the db object,
+ if possible.
+
+ * $BerkeleyDB::db_version is documented. &db_version isn't.
+
+ * migrate perl code into the .xs file where necessary
+
+ * convert as many of the DB examples files to BerkeleyDB format.
+
+ * add a method to the DB object to allow access to the environment (if there
+ actually is one).
+
+
+Possibles
+
+ * use '~' magic to store the inner data.
+
+ * for the get stuff zap the value to undef if it doesn't find the
+   key. This may be more intuitive for those folks who are used to
+ the $hash{key} interface.
+
+ * Text interface? This can be done via Recno
+
+ * allow recno to allow base offset for arrays to be either 0 or 1.
+
+ * when duplicate keys are enabled, allow db_put($key, [$val1, $val2,...])
+
+
+2.x -> 3.x Upgrade
+==================
+
+Environment Verbose
+Env->open mode
+DB cache size extra parameter
+DB->open subdatabases Done
+An empty environment causes DB->open to fail
+where is __db.001 coming from? db_remove seems to create it. Bug in 3.0.55
+Change db_strerror for 0 to ""? Done
+Queue Done
+db_stat for Hash & Queue Done
+No TxnMgr
+DB->remove
+ENV->remove
+ENV->set_verbose
+upgrade
+
+ $env = BerkeleyDB::Env::Create
+ $env = create BerkeleyDB::Env
+ $status = $env->open()
+
+ $db = BerkeleyDB::Hash::Create
+ $status = $db->open()
diff --git a/bdb/perl.BerkeleyDB/config.in b/bdb/perl.BerkeleyDB/config.in
new file mode 100644
index 00000000000..c23e6689cb3
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/config.in
@@ -0,0 +1,51 @@
+# Filename: config.in
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+
+# 1. Where is the file db.h?
+#
+# Change the path below to point to the directory where db.h is
+# installed on your system.
+
+#INCLUDE = /usr/local/include
+#INCLUDE = /usr/local/BerkeleyDB/include
+#INCLUDE = ./libraries/2.7.5
+#INCLUDE = ./libraries/3.0.55
+#INCLUDE = ./libraries/3.1.17
+INCLUDE = ./libraries/3.2.7
+
+# 2. Where is libdb?
+#
+# Change the path below to point to the directory where libdb is
+# installed on your system.
+
+#LIB = /usr/local/lib
+#LIB = /usr/local/BerkeleyDB/lib
+#LIB = ./libraries/2.7.5
+#LIB = ./libraries/3.0.55
+#LIB = ./libraries/3.1.17
+LIB = ./libraries/3.2.7
+
+# 3. Is the library called libdb?
+#
+# If you have copies of both 1.x and 2.x Berkeley DB installed on
+# your system it can sometimes be tricky to make sure you are using
+# the correct one. Renaming one (or creating a symbolic link) to
+# include the version number of the library can help.
+#
+# For example, if you have Berkeley DB 2.6.4 you could rename the
+# Berkeley DB library from libdb.a to libdb-2.6.4.a and change the
+# DBNAME line below to look like this:
+#
+# DBNAME = -ldb-2.6.4
+#
+# Note: If you are building this module with Win32, -llibdb will be
+# used by default.
+#
+# If you have changed the name of the library, uncomment the line
+# below (by removing the leading #) and edit the line to use the name
+# you have picked.
+
+#DBNAME = -ldb-3.0
+
+# end of file config.in
diff --git a/bdb/perl.BerkeleyDB/dbinfo b/bdb/perl.BerkeleyDB/dbinfo
new file mode 100755
index 00000000000..415411aff8e
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/dbinfo
@@ -0,0 +1,109 @@
+#!/usr/local/bin/perl
+
+# Name:		dbinfo -- identify Berkeley DB version used to create
+# a database file
+#
+# Author: Paul Marquess <Paul.Marquess@btinternet.com>
+# Version: 1.03
+# Date 17th September 2000
+#
+# Copyright (c) 1998-2001 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+# Todo: Print more stats on a db file, e.g. no of records
+# add log/txn/lock files
+
+use strict ;
+
+my %Data =
+ (
+ 0x053162 => {
+ Type => "Btree",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "Unknown (older than 1.71)",
+ 3 => "1.71 -> 1.85, 1.86",
+ 4 => "Unknown",
+ 5 => "2.0.0 -> 2.3.0",
+ 6 => "2.3.1 -> 2.7.7",
+ 7 => "3.0.x",
+ 8 => "3.1.x or greater",
+ }
+ },
+ 0x061561 => {
+ Type => "Hash",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "1.71 -> 1.85",
+ 3 => "1.86",
+ 4 => "2.0.0 -> 2.1.0",
+ 5 => "2.2.6 -> 2.7.7",
+ 6 => "3.0.x",
+ 7 => "3.1.x or greater",
+ }
+ },
+ 0x042253 => {
+ Type => "Queue",
+ Versions =>
+ {
+ 1 => "3.0.x",
+ 2 => "3.1.x",
+ 3 => "3.2.x or greater",
+ }
+ },
+ ) ;
+
+die "Usage: dbinfo file\n" unless @ARGV == 1 ;
+
+print "testing file $ARGV[0]...\n\n" ;
+open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
+
+my $buff ;
+read F, $buff, 20 ;
+
+my (@info) = unpack("NNNNN", $buff) ;
+my (@info1) = unpack("VVVVV", $buff) ;
+my ($magic, $version, $endian) ;
+
+if ($Data{$info[0]}) # first try DB 1.x format
+{
+ $magic = $info[0] ;
+ $version = $info[1] ;
+ $endian = "Unknown" ;
+}
+elsif ($Data{$info[3]}) # next DB 2.x big endian
+{
+ $magic = $info[3] ;
+ $version = $info[4] ;
+ $endian = "Big Endian" ;
+}
+elsif ($Data{$info1[3]}) # next DB 2.x little endian
+{
+ $magic = $info1[3] ;
+ $version = $info1[4] ;
+ $endian = "Little Endian" ;
+}
+else
+ { die "not a Berkeley DB database file.\n" }
+
+my $type = $Data{$magic} ;
+$magic = sprintf "%06X", $magic ;
+
+my $ver_string = "Unknown" ;
+$ver_string = $type->{Versions}{$version}
+ if defined $type->{Versions}{$version} ;
+
+print <<EOM ;
+File Type: Berkeley DB $type->{Type} file.
+File Version ID: $version
+Built with Berkeley DB: $ver_string
+Byte Order: $endian
+Magic: $magic
+EOM
+
+close F ;
+
+exit ;
diff --git a/bdb/perl.BerkeleyDB/hints/irix_6_5.pl b/bdb/perl.BerkeleyDB/hints/irix_6_5.pl
new file mode 100644
index 00000000000..b531673e6e0
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/hints/irix_6_5.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lthread" ];
diff --git a/bdb/perl.BerkeleyDB/hints/solaris.pl b/bdb/perl.BerkeleyDB/hints/solaris.pl
new file mode 100644
index 00000000000..ddd941d634a
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/hints/solaris.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lmt" ];
diff --git a/bdb/perl.BerkeleyDB/mkconsts b/bdb/perl.BerkeleyDB/mkconsts
new file mode 100644
index 00000000000..24ef4fca7b2
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/mkconsts
@@ -0,0 +1,211 @@
+#!/usr/bin/perl
+
+%constants = (
+ # Symbol 0 = define, 1 = enum
+ DB_AFTER => 0,
+ DB_APPEND => 0,
+ DB_ARCH_ABS => 0,
+ DB_ARCH_DATA => 0,
+ DB_ARCH_LOG => 0,
+ DB_BEFORE => 0,
+ DB_BTREE => 1,
+ DB_BTREEMAGIC => 0,
+ DB_BTREEOLDVER => 0,
+ DB_BTREEVERSION => 0,
+ DB_CHECKPOINT => 0,
+ DB_CONSUME => 0,
+ DB_CREATE => 0,
+ DB_CURLSN => 0,
+ DB_CURRENT => 0,
+ DB_DBT_MALLOC => 0,
+ DB_DBT_PARTIAL => 0,
+ DB_DBT_USERMEM => 0,
+ DB_DELETED => 0,
+ DB_DELIMITER => 0,
+ DB_DUP => 0,
+ DB_DUPSORT => 0,
+ DB_ENV_APPINIT => 0,
+ DB_ENV_STANDALONE => 0,
+ DB_ENV_THREAD => 0,
+ DB_EXCL => 0,
+ DB_FILE_ID_LEN => 0,
+ DB_FIRST => 0,
+ DB_FIXEDLEN => 0,
+ DB_FLUSH => 0,
+ DB_FORCE => 0,
+ DB_GET_BOTH => 0,
+ DB_GET_RECNO => 0,
+ DB_HASH => 1,
+ DB_HASHMAGIC => 0,
+ DB_HASHOLDVER => 0,
+ DB_HASHVERSION => 0,
+ DB_INCOMPLETE => 0,
+ DB_INIT_CDB => 0,
+ DB_INIT_LOCK => 0,
+ DB_INIT_LOG => 0,
+ DB_INIT_MPOOL => 0,
+ DB_INIT_TXN => 0,
+ DB_JOIN_ITEM => 0,
+ DB_KEYEMPTY => 0,
+ DB_KEYEXIST => 0,
+ DB_KEYFIRST => 0,
+ DB_KEYLAST => 0,
+ DB_LAST => 0,
+ DB_LOCK_CONFLICT => 0,
+ DB_LOCK_DEADLOCK => 0,
+ DB_LOCK_DEFAULT => 0,
+ DB_LOCK_GET => 1,
+ DB_LOCK_NORUN => 0,
+ DB_LOCK_NOTGRANTED => 0,
+ DB_LOCK_NOTHELD => 0,
+ DB_LOCK_NOWAIT => 0,
+ DB_LOCK_OLDEST => 0,
+ DB_LOCK_RANDOM => 0,
+ DB_LOCK_RIW_N => 0,
+ DB_LOCK_RW_N => 0,
+ DB_LOCK_YOUNGEST => 0,
+ DB_LOCKMAGIC => 0,
+ DB_LOCKVERSION => 0,
+ DB_LOGMAGIC => 0,
+ DB_LOGOLDVER => 0,
+ DB_MAX_PAGES => 0,
+ DB_MAX_RECORDS => 0,
+ DB_MPOOL_CLEAN => 0,
+ DB_MPOOL_CREATE => 0,
+ DB_MPOOL_DIRTY => 0,
+ DB_MPOOL_DISCARD => 0,
+ DB_MPOOL_LAST => 0,
+ DB_MPOOL_NEW => 0,
+ DB_MPOOL_PRIVATE => 0,
+ DB_MUTEXDEBUG => 0,
+ DB_MUTEXLOCKS => 0,
+ DB_NEEDSPLIT => 0,
+ DB_NEXT => 0,
+ DB_NEXT_DUP => 0,
+ DB_NOMMAP => 0,
+ DB_NOOVERWRITE => 0,
+ DB_NOSYNC => 0,
+ DB_NOTFOUND => 0,
+ DB_PAD => 0,
+ DB_PAGEYIELD => 0,
+ DB_POSITION => 0,
+ DB_PREV => 0,
+ DB_PRIVATE => 0,
+ DB_QUEUE => 1,
+ DB_RDONLY => 0,
+ DB_RECNO => 1,
+ DB_RECNUM => 0,
+ DB_RECORDCOUNT => 0,
+ DB_RECOVER => 0,
+ DB_RECOVER_FATAL => 0,
+ DB_REGISTERED => 0,
+ DB_RENUMBER => 0,
+ DB_RMW => 0,
+ DB_RUNRECOVERY => 0,
+ DB_SEQUENTIAL => 0,
+ DB_SET => 0,
+ DB_SET_RANGE => 0,
+ DB_SET_RECNO => 0,
+ DB_SNAPSHOT => 0,
+ DB_SWAPBYTES => 0,
+ DB_TEMPORARY => 0,
+ DB_THREAD => 0,
+ DB_TRUNCATE => 0,
+ DB_TXN_ABORT => 1,
+ DB_TXN_BACKWARD_ROLL => 1,
+ DB_TXN_CKP => 0,
+ DB_TXN_FORWARD_ROLL => 1,
+ DB_TXN_LOCK_2PL => 0,
+ DB_TXN_LOCK_MASK => 0,
+ DB_TXN_LOCK_OPTIMISTIC => 0,
+ DB_TXN_LOG_MASK => 0,
+ DB_TXN_LOG_REDO => 0,
+ DB_TXN_LOG_UNDO => 0,
+ DB_TXN_LOG_UNDOREDO => 0,
+ DB_TXN_NOSYNC => 0,
+ DB_TXN_NOWAIT => 0,
+ DB_TXN_SYNC => 0,
+ DB_TXN_OPENFILES => 1,
+ DB_TXN_REDO => 0,
+ DB_TXN_UNDO => 0,
+ DB_TXNMAGIC => 0,
+ DB_TXNVERSION => 0,
+ DB_TXN_LOCK_OPTIMIST => 0,
+ DB_UNKNOWN => 1,
+ DB_USE_ENVIRON => 0,
+ DB_USE_ENVIRON_ROOT => 0,
+ DB_VERSION_MAJOR => 0,
+ DB_VERSION_MINOR => 0,
+ DB_VERSION_PATCH => 0,
+ DB_WRITECURSOR => 0,
+ ) ;
+
+sub OutputXS
+{
+ # skip to the marker
+ if (0) {
+ while (<>)
+ {
+ last if /^MARKER/ ;
+ print ;
+ }
+ }
+
+ foreach my $key (sort keys %constants)
+ {
+ my $isEnum = $constants{$key} ;
+
+ if ($isEnum) {
+ print <<EOM
+ if (strEQ(name, "$key"))
+ return $key;
+EOM
+ }
+ else
+ {
+ print <<EOM
+ if (strEQ(name, "$key"))
+#ifdef $key
+ return $key;
+#else
+ goto not_there;
+#endif
+EOM
+ }
+
+ }
+
+ if (0) {
+ while (<>)
+ {
+ print ;
+ }
+ }
+}
+
+sub OutputPM
+{
+ # skip to the marker
+ if (0) {
+ while (<>)
+ {
+ last if /^MARKER/ ;
+ print ;
+ }
+ }
+
+ foreach my $key (sort keys %constants)
+ {
+ print "\t$key\n";
+ }
+
+ if (0) {
+ while (<>)
+ {
+ print ;
+ }
+ }
+}
+
+OutputXS() if $ARGV[0] =~ /xs/i ;
+OutputPM() if $ARGV[0] =~ /pm/i ;
diff --git a/bdb/perl.BerkeleyDB/mkpod b/bdb/perl.BerkeleyDB/mkpod
new file mode 100755
index 00000000000..44bbf3fbf4f
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/mkpod
@@ -0,0 +1,146 @@
+#!/usr/local/bin/perl5
+
+# Filename: mkpod
+#
+# Author: Paul Marquess
+
+# File types
+#
+# Macro files end with .M
+# Tagged source files end with .T
+# Output from the code ends with .O
+# Pre-Pod file ends with .P
+#
+# Tags
+#
+# ## BEGIN tagname
+# ...
+# ## END tagname
+#
+# ## 0
+# ## 1
+#
+
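
For example, a tagged test file such as t/example.t.T (a hypothetical name) might wrap a snippet like this, and a line reading "## simple_bt" in a .P file would then be replaced by the lines between the markers:

    ## BEGIN simple_bt
    my $db = BerkeleyDB::Btree->new( -Filename => $filename,
                                     -Flags    => DB_CREATE )
        or die "cannot open $filename: $!\n" ;
    ## END simple_bt
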
+# Constants
+
+$TOKEN = '##' ;
+$Verbose = 1 if $ARGV[0] =~ /^-v/i ;
+
+# Macros files first
+foreach $file (glob("*.M"))
+{
+ open (F, "<$file") or die "Cannot open '$file':$!\n" ;
+ print " Processing Macro file $file\n" ;
+ while (<F>)
+ {
+ # Skip blank & comment lines
+ next if /^\s*$/ || /^\s*#/ ;
+
+ #
+ ($name, $expand) = split (/\t+/, $_, 2) ;
+
+ $expand =~ s/^\s*// ;
+ $expand =~ s/\s*$// ;
+
+ if ($expand =~ /\[#/ )
+ {
+ }
+
+ $Macros{$name} = $expand ;
+ }
+ close F ;
+}
+
+# Suck up all the code files
+foreach $file (glob("t/*.T"))
+{
+ ($newfile = $file) =~ s/\.T$// ;
+ open (F, "<$file") or die "Cannot open '$file':$!\n" ;
+ open (N, ">$newfile") or die "Cannot open '$newfile':$!\n" ;
+
+ print " Processing $file -> $newfile\n" ;
+
+ while ($line = <F>)
+ {
+ if ($line =~ /^$TOKEN\s*BEGIN\s+(\w+)\s*$/ or
+ $line =~ m[\s*/\*$TOKEN\s*BEGIN\s+(\w+)\s*$] )
+ {
+ print " Section $1 begins\n" if $Verbose ;
+ $InSection{$1} ++ ;
+ $Section{$1} = '' unless $Section{$1} ;
+ }
+ elsif ($line =~ /^$TOKEN\s*END\s+(\w+)\s*$/ or
+ $line =~ m[^\s*/\*$TOKEN\s*END\s+(\w+)\s*$] )
+ {
+ warn "Encountered END without a begin [$line]\n"
+ unless $InSection{$1} ;
+
+ delete $InSection{$1} ;
+ print " Section $1 ends\n" if $Verbose ;
+ }
+ else
+ {
+ print N $line ;
+ chop $line ;
+ $line =~ s/\s*$// ;
+
+ # Save the current line in each of the sections
+ foreach( keys %InSection)
+ {
+ if ($line !~ /^\s*$/ )
+ #{ $Section{$_} .= " $line" }
+ { $Section{$_} .= $line }
+ $Section{$_} .= "\n" ;
+ }
+ }
+
+ }
+
+ if (%InSection)
+ {
+ # Check for unclosed sections
+ print "The following Sections are not terminated\n" ;
+ foreach (sort keys %InSection)
+ { print "\t$_\n" }
+ exit 1 ;
+ }
+
+ close F ;
+ close N ;
+}
+
+print "\n\nCreating pod file(s)\n\n" if $Verbose ;
+
+@ppods = glob('*.P') ;
+#$ppod = $ARGV[0] ;
+#$pod = $ARGV[1] ;
+
+# Now process the pre-pod file
+foreach $ppod (@ppods)
+{
+ ($pod = $ppod) =~ s/\.P$// ;
+ open (PPOD, "<$ppod") or die "Cannot open file '$ppod': $!\n" ;
+ open (POD, ">$pod") or die "Cannot open file '$pod': $!\n" ;
+
+ print " $ppod -> $pod\n" ;
+
+ while ($line = <PPOD>)
+ {
+ if ( $line =~ /^\s*$TOKEN\s*(\w+)\s*$/)
+ {
+ warn "No code insert '$1' available\n"
+ unless $Section{$1} ;
+
+ print "Expanding section $1\n" if $Verbose ;
+ print POD $Section{$1} ;
+ }
+ else
+ {
+# $line =~ s/\[#([^\]])]/$Macros{$1}/ge ;
+ print POD $line ;
+ }
+ }
+
+ close PPOD ;
+ close POD ;
+}
diff --git a/bdb/perl.BerkeleyDB/patches/5.004 b/bdb/perl.BerkeleyDB/patches/5.004
new file mode 100644
index 00000000000..143ec95afbc
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.004
@@ -0,0 +1,44 @@
+diff perl5.004.orig/Configure perl5.004/Configure
+190a191
+> perllibs=''
+9904a9906,9913
+> : Remove libraries needed only for extensions
+> : The appropriate ext/Foo/Makefile.PL will add them back in, if
+> : necessary.
+> set X `echo " $libs " |
+> sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
+> shift
+> perllibs="$*"
+>
+10372a10382
+> perllibs='$perllibs'
+diff perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
+122c122
+< libs = $libs $cryptlib
+---
+> libs = $perllibs $cryptlib
+Common subdirectories: perl5.004.orig/Porting and perl5.004/Porting
+Common subdirectories: perl5.004.orig/cygwin32 and perl5.004/cygwin32
+Common subdirectories: perl5.004.orig/eg and perl5.004/eg
+Common subdirectories: perl5.004.orig/emacs and perl5.004/emacs
+Common subdirectories: perl5.004.orig/ext and perl5.004/ext
+Common subdirectories: perl5.004.orig/h2pl and perl5.004/h2pl
+Common subdirectories: perl5.004.orig/hints and perl5.004/hints
+Common subdirectories: perl5.004.orig/lib and perl5.004/lib
+diff perl5.004.orig/myconfig perl5.004/myconfig
+38c38
+< libs=$libs
+---
+> libs=$perllibs
+Common subdirectories: perl5.004.orig/os2 and perl5.004/os2
+diff perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
+40a41
+> ,"NODB-1.0 - remove -ldb from core perl binary."
+Common subdirectories: perl5.004.orig/plan9 and perl5.004/plan9
+Common subdirectories: perl5.004.orig/pod and perl5.004/pod
+Common subdirectories: perl5.004.orig/qnx and perl5.004/qnx
+Common subdirectories: perl5.004.orig/t and perl5.004/t
+Common subdirectories: perl5.004.orig/utils and perl5.004/utils
+Common subdirectories: perl5.004.orig/vms and perl5.004/vms
+Common subdirectories: perl5.004.orig/win32 and perl5.004/win32
+Common subdirectories: perl5.004.orig/x2p and perl5.004/x2p
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_01 b/bdb/perl.BerkeleyDB/patches/5.004_01
new file mode 100644
index 00000000000..1b05eb4e02b
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.004_01
@@ -0,0 +1,217 @@
+diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
+*** perl5.004_01.orig/Configure Wed Jun 11 00:28:03 1997
+--- perl5.004_01/Configure Sun Nov 12 22:12:35 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9907,9912 ****
+--- 9908,9921 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10375,10380 ****
+--- 10384,10390 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
+*** perl5.004_01.orig/Makefile.SH Thu Jun 12 23:27:56 1997
+--- perl5.004_01/Makefile.SH Sun Nov 12 22:12:35 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
+*** perl5.004_01.orig/lib/ExtUtils/Embed.pm Wed Apr 2 22:12:04 1997
+--- perl5.004_01/lib/ExtUtils/Embed.pm Sun Nov 12 22:12:35 2000
+***************
+*** 170,176 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 170,176 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
+*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm Sat Jun 7 01:19:44 1997
+--- perl5.004_01/lib/ExtUtils/Liblist.pm Sun Nov 12 22:13:27 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm Thu Jun 12 22:06:18 1997
+--- perl5.004_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:12:35 2000
+***************
+*** 2137,2143 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2137,2143 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
+*** perl5.004_01.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_01/myconfig Sun Nov 12 22:12:35 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
+*** perl5.004_01.orig/patchlevel.h Wed Jun 11 03:06:10 1997
+--- perl5.004_01/patchlevel.h Sun Nov 12 22:12:35 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_02 b/bdb/perl.BerkeleyDB/patches/5.004_02
new file mode 100644
index 00000000000..238f8737941
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.004_02
@@ -0,0 +1,217 @@
+diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
+*** perl5.004_02.orig/Configure Thu Aug 7 15:08:44 1997
+--- perl5.004_02/Configure Sun Nov 12 22:06:24 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
+*** perl5.004_02.orig/Makefile.SH Thu Aug 7 13:10:53 1997
+--- perl5.004_02/Makefile.SH Sun Nov 12 22:06:24 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
+*** perl5.004_02.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_02/lib/ExtUtils/Embed.pm Sun Nov 12 22:06:24 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
+*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_02/lib/ExtUtils/Liblist.pm Sun Nov 12 22:06:24 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm Tue Aug 5 14:28:08 1997
+--- perl5.004_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:06:25 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
+*** perl5.004_02.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_02/myconfig Sun Nov 12 22:06:25 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
+*** perl5.004_02.orig/patchlevel.h Fri Aug 1 15:07:34 1997
+--- perl5.004_02/patchlevel.h Sun Nov 12 22:06:25 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_03 b/bdb/perl.BerkeleyDB/patches/5.004_03
new file mode 100644
index 00000000000..06331eac922
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.004_03
@@ -0,0 +1,223 @@
+diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
+*** perl5.004_03.orig/Configure Wed Aug 13 16:09:46 1997
+--- perl5.004_03/Configure Sun Nov 12 21:56:18 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.004_03: Configure.orig
+diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
+*** perl5.004_03.orig/Makefile.SH Mon Aug 18 19:24:29 1997
+--- perl5.004_03/Makefile.SH Sun Nov 12 21:56:18 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.004_03: Makefile.SH.orig
+diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
+*** perl5.004_03.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_03/lib/ExtUtils/Embed.pm Sun Nov 12 21:56:18 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
+*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_03/lib/ExtUtils/Liblist.pm Sun Nov 12 21:57:17 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
+diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm Mon Aug 18 19:16:12 1997
+--- perl5.004_03/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:56:19 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
+*** perl5.004_03.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_03/myconfig Sun Nov 12 21:56:19 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
+*** perl5.004_03.orig/patchlevel.h Wed Aug 13 11:42:01 1997
+--- perl5.004_03/patchlevel.h Sun Nov 12 21:56:19 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
+Only in perl5.004_03: patchlevel.h.orig
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_04 b/bdb/perl.BerkeleyDB/patches/5.004_04
new file mode 100644
index 00000000000..a227dc700d9
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.004_04
@@ -0,0 +1,209 @@
+diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
+*** perl5.004_04.orig/Configure Fri Oct 3 18:57:39 1997
+--- perl5.004_04/Configure Sun Nov 12 21:50:51 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9910,9915 ****
+--- 9911,9924 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10378,10383 ****
+--- 10387,10393 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
+*** perl5.004_04.orig/Makefile.SH Wed Oct 15 10:33:16 1997
+--- perl5.004_04/Makefile.SH Sun Nov 12 21:50:51 2000
+***************
+*** 129,135 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 129,135 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
+*** perl5.004_04.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_04/lib/ExtUtils/Embed.pm Sun Nov 12 21:50:51 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
+*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm Tue Sep 9 17:41:32 1997
+--- perl5.004_04/lib/ExtUtils/Liblist.pm Sun Nov 12 21:51:33 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 189,195 ****
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 189,195 ----
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 539,545 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 539,545 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm Wed Oct 8 14:13:51 1997
+--- perl5.004_04/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:50:51 2000
+***************
+*** 2229,2235 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2229,2235 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
+*** perl5.004_04.orig/myconfig Mon Oct 6 18:26:49 1997
+--- perl5.004_04/myconfig Sun Nov 12 21:50:51 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
+*** perl5.004_04.orig/patchlevel.h Wed Oct 15 10:55:19 1997
+--- perl5.004_04/patchlevel.h Sun Nov 12 21:50:51 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_05 b/bdb/perl.BerkeleyDB/patches/5.004_05
new file mode 100644
index 00000000000..51c8bf35009
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.004_05
@@ -0,0 +1,209 @@
+diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
+*** perl5.004_05.orig/Configure Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Configure Sun Nov 12 21:36:25 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 10164,10169 ****
+--- 10165,10178 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10648,10653 ****
+--- 10657,10663 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
+*** perl5.004_05.orig/Makefile.SH Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Makefile.SH Sun Nov 12 21:36:25 2000
+***************
+*** 151,157 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 151,157 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
+*** perl5.004_05.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_05/lib/ExtUtils/Embed.pm Sun Nov 12 21:36:25 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
+*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/Liblist.pm Sun Nov 12 21:45:31 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 590,596 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 590,596 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:36:25 2000
+***************
+*** 2246,2252 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2246,2252 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
+*** perl5.004_05.orig/myconfig Thu Jan 6 22:05:55 2000
+--- perl5.004_05/myconfig Sun Nov 12 21:43:54 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
+*** perl5.004_05.orig/patchlevel.h Thu Jan 6 22:05:48 2000
+--- perl5.004_05/patchlevel.h Sun Nov 12 21:36:25 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.BerkeleyDB/patches/5.005 b/bdb/perl.BerkeleyDB/patches/5.005
new file mode 100644
index 00000000000..effee3e8275
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.005
@@ -0,0 +1,209 @@
+diff -rc perl5.005.orig/Configure perl5.005/Configure
+*** perl5.005.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005/Configure Sun Nov 12 21:30:40 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
+*** perl5.005.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005/Makefile.SH Sun Nov 12 21:30:40 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
+*** perl5.005.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005/lib/ExtUtils/Embed.pm Sun Nov 12 21:30:40 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
+*** perl5.005.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005/lib/ExtUtils/Liblist.pm Sun Nov 12 21:30:40 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
+*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:30:41 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005.orig/myconfig perl5.005/myconfig
+*** perl5.005.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005/myconfig Sun Nov 12 21:30:41 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
+*** perl5.005.orig/patchlevel.h Wed Jul 22 19:22:01 1998
+--- perl5.005/patchlevel.h Sun Nov 12 21:30:41 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.BerkeleyDB/patches/5.005_01 b/bdb/perl.BerkeleyDB/patches/5.005_01
new file mode 100644
index 00000000000..2a05dd545f6
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.005_01
@@ -0,0 +1,209 @@
+diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
+*** perl5.005_01.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005_01/Configure Sun Nov 12 20:55:58 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
+*** perl5.005_01.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_01/Makefile.SH Sun Nov 12 20:55:58 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
+*** perl5.005_01.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_01/lib/ExtUtils/Embed.pm Sun Nov 12 20:55:58 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
+*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005_01/lib/ExtUtils/Liblist.pm Sun Nov 12 20:55:58 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:55:58 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
+*** perl5.005_01.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_01/myconfig Sun Nov 12 20:55:58 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
+*** perl5.005_01.orig/patchlevel.h Mon Jan 3 11:07:45 2000
+--- perl5.005_01/patchlevel.h Sun Nov 12 20:55:58 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.BerkeleyDB/patches/5.005_02 b/bdb/perl.BerkeleyDB/patches/5.005_02
new file mode 100644
index 00000000000..5dd57ddc03f
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.005_02
@@ -0,0 +1,264 @@
+diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
+*** perl5.005_02.orig/Configure Mon Jan 3 11:12:20 2000
+--- perl5.005_02/Configure Sun Nov 12 20:50:51 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11334,11339 ****
+--- 11335,11348 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11859,11864 ****
+--- 11868,11874 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.005_02: Configure.orig
+diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
+*** perl5.005_02.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_02/Makefile.SH Sun Nov 12 20:50:51 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.005_02: Makefile.SH.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
+*** perl5.005_02.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_02/lib/ExtUtils/Embed.pm Sun Nov 12 20:50:51 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
+*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm Mon Jan 3 11:12:21 2000
+--- perl5.005_02/lib/ExtUtils/Liblist.pm Sun Nov 12 20:50:51 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 333,339 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 333,339 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 623,629 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 623,629 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+***************
+*** 666,672 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 666,672 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 676,682 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 676,682 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:50:51 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
+*** perl5.005_02.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_02/myconfig Sun Nov 12 20:50:51 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
+*** perl5.005_02.orig/patchlevel.h Mon Jan 3 11:12:19 2000
+--- perl5.005_02/patchlevel.h Sun Nov 12 20:50:51 2000
+***************
+*** 40,45 ****
+--- 40,46 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.BerkeleyDB/patches/5.005_03 b/bdb/perl.BerkeleyDB/patches/5.005_03
new file mode 100644
index 00000000000..115f9f5b909
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.005_03
@@ -0,0 +1,250 @@
+diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
+*** perl5.005_03.orig/Configure Sun Mar 28 17:12:57 1999
+--- perl5.005_03/Configure Sun Sep 17 22:19:16 2000
+***************
+*** 208,213 ****
+--- 208,214 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11642,11647 ****
+--- 11643,11656 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 12183,12188 ****
+--- 12192,12198 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
+*** perl5.005_03.orig/Makefile.SH Thu Mar 4 02:35:25 1999
+--- perl5.005_03/Makefile.SH Sun Sep 17 22:21:01 2000
+***************
+*** 58,67 ****
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+--- 58,67 ----
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+***************
+*** 155,161 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 155,161 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
+*** perl5.005_03.orig/lib/ExtUtils/Embed.pm Wed Jan 6 02:17:50 1999
+--- perl5.005_03/lib/ExtUtils/Embed.pm Sun Sep 17 22:19:16 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
+*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm Wed Jan 6 02:17:47 1999
+--- perl5.005_03/lib/ExtUtils/Liblist.pm Sun Sep 17 22:19:16 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 336,342 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 336,342 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 626,632 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 626,632 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 670,676 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 670,676 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 680,686 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 680,686 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm Fri Mar 5 00:34:20 1999
+--- perl5.005_03/lib/ExtUtils/MM_Unix.pm Sun Sep 17 22:19:16 2000
+***************
+*** 2284,2290 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2284,2290 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
diff --git a/bdb/perl.BerkeleyDB/patches/5.6.0 b/bdb/perl.BerkeleyDB/patches/5.6.0
new file mode 100644
index 00000000000..1f9b3b620de
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/patches/5.6.0
@@ -0,0 +1,294 @@
+diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
+*** perl-5.6.0.orig/Configure Wed Mar 22 20:36:37 2000
+--- perl-5.6.0/Configure Sun Sep 17 23:40:15 2000
+***************
+*** 217,222 ****
+--- 217,223 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 14971,14976 ****
+--- 14972,14985 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 15640,15645 ****
+--- 15649,15655 ----
+ path_sep='$path_sep'
+ perl5='$perl5'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
+*** perl-5.6.0.orig/Makefile.SH Sat Mar 11 16:05:24 2000
+--- perl-5.6.0/Makefile.SH Sun Sep 17 23:40:15 2000
+***************
+*** 70,76 ****
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+--- 70,76 ----
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+***************
+*** 176,182 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 176,182 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+***************
+*** 333,339 ****
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $libs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+--- 333,339 ----
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $perllibs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Embed.pm perl-5.6.0/lib/ExtUtils/Embed.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Embed.pm Sun Jan 23 12:08:32 2000
+--- perl-5.6.0/lib/ExtUtils/Embed.pm Sun Sep 17 23:40:15 2000
+***************
+*** 193,199 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 193,199 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Liblist.pm perl-5.6.0/lib/ExtUtils/Liblist.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Liblist.pm Wed Mar 22 16:16:31 2000
+--- perl-5.6.0/lib/ExtUtils/Liblist.pm Sun Sep 17 23:40:15 2000
+***************
+*** 17,34 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 17,34 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 198,204 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 198,204 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 338,344 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 338,344 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 624,630 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 624,630 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 668,674 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 668,674 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 678,684 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 678,684 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
+*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm Thu Mar 2 17:52:52 2000
+--- perl-5.6.0/lib/ExtUtils/MM_Unix.pm Sun Sep 17 23:40:15 2000
+***************
+*** 2450,2456 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2450,2456 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
+*** perl-5.6.0.orig/myconfig.SH Sat Feb 26 06:34:49 2000
+--- perl-5.6.0/myconfig.SH Sun Sep 17 23:41:17 2000
+***************
+*** 48,54 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 48,54 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
+*** perl-5.6.0.orig/patchlevel.h Wed Mar 22 20:23:11 2000
+--- perl-5.6.0/patchlevel.h Sun Sep 17 23:40:15 2000
+***************
+*** 70,75 ****
+--- 70,76 ----
+ #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.BerkeleyDB/t/btree.t b/bdb/perl.BerkeleyDB/t/btree.t
new file mode 100644
index 00000000000..97bb3257c97
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/btree.t
@@ -0,0 +1,976 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+#use Config;
+#
+#BEGIN {
+# if(-d "lib" && -f "TEST") {
+# if ($Config{'extensions'} !~ /\bBerkeleyDB\b/ ) {
+# print "1..74\n";
+# exit 0;
+# }
+# }
+#}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..243\n";
+
+my %DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Btree -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Btree -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Btree
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put("some key", "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get("some key", $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+ ok 12, $db->db_get("key", $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del("some key") == 0 ;
+ ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
+ ok 16, $db->status() == DB_NOTFOUND ;
+ ok 17, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 18, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 19, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 20, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 21, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 22, $db->db_get("key", $value) == 0 ;
+ ok 23, $value eq "value" ;
+
+ # test DB_GET_BOTH
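+    # DB_GET_BOTH only succeeds when both the key and the value match an
+    # existing key/value pair.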
+ my ($k, $v) = ("key", "value") ;
+ ok 24, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("key", "fred") ;
+ ok 25, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("another", "value") ;
+ ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+
+}
+
+{
+ # Check simple env works with a hash.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 27, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 29, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put("some key", "some value") == 0 ;
+ ok 31, $db->db_get("some key", $value) == 0 ;
+ ok 32, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+ rmtree $home ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 33, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = $v = "" ;
+ my %copy = %data ;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'};
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'};
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+
+ ($k, $v) = ("green", "house") ;
+ ok 46, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("green", "door") ;
+ ok 47, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("black", "house") ;
+ ok 48, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+}
+
+{
+ # Tied Hash interface
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 49, tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # check "each" with an empty database
+ my $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ ++ $count ;
+ }
+ ok 50, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 51, $count == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $hash{"some key"} = "some value";
+ ok 52, (tied %hash)->status() == 0 ;
+ ok 53, $hash{"some key"} eq "some value";
+ ok 54, defined $hash{"some key"} ;
+ ok 55, (tied %hash)->status() == 0 ;
+ ok 56, exists $hash{"some key"} ;
+ ok 57, !defined $hash{"jimmy"} ;
+ ok 58, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 59, !exists $hash{"jimmy"} ;
+ ok 60, (tied %hash)->status() == DB_NOTFOUND ;
+
+ delete $hash{"some key"} ;
+ ok 61, (tied %hash)->status() == 0 ;
+ ok 62, ! defined $hash{"some key"} ;
+ ok 63, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 64, ! exists $hash{"some key"} ;
+ ok 65, (tied %hash)->status() == DB_NOTFOUND ;
+
+ $hash{1} = 2 ;
+ $hash{10} = 20 ;
+ $hash{1000} = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 66, $count == 3 ;
+ ok 67, $keys == 1011 ;
+ ok 68, $values == 2022 ;
+
+ # now clear the hash
+ %hash = () ;
+ ok 69, keys %hash == 0 ;
+
+ untie %hash ;
+}
+
+{
+ # override default compare
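+    # -Compare installs a user-supplied key ordering; the three databases
+    # below sort keys numerically, lexically and by key length respectively.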
+ my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
+ my $value ;
+ my (%h, %g, %k) ;
+ my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
+ ok 70, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -Flags => DB_CREATE ;
+
+ ok 71, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -Flags => DB_CREATE ;
+
+ ok 72, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3,
+ -Compare => sub { length $_[0] <=> length $_[1] },
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+ my @srt_3 = sort { length $a <=> length $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ $h{$_} = 1 ;
+ $g{$_} = 1 ;
+ $k{$_} = 1 ;
+ }
+
+ sub ArrayCompare
+ {
+ my($a, $b) = @_ ;
+
+ return 0 if @$a != @$b ;
+
+ foreach (1 .. length @$a)
+ {
+ return 0 unless $$a[$_] eq $$b[$_] ;
+ }
+
+ 1 ;
+ }
+
+ ok 73, ArrayCompare (\@srt_1, [keys %h]);
+ ok 74, ArrayCompare (\@srt_2, [keys %g]);
+ ok 75, ArrayCompare (\@srt_3, [keys %k]);
+
+}
+
+{
+ # override default compare, with duplicates, don't sort values
+ my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
+ my $value ;
+ my (%h, %g, %k) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 def ) ;
+ my @Values = qw( 1 0 3 dd x abc 0 ) ;
+ ok 76, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 77, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 78, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3,
+ -Compare => sub { length $_[0] <=> length $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+ my @srt_3 = sort { length $a <=> length $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ $k{$_} = $value ;
+ }
+
+ sub getValues
+ {
+ my $hash = shift ;
+ my $db = tied %$hash ;
+ my $cursor = $db->db_cursor() ;
+ my @values = () ;
+ my ($k, $v) = (0,0) ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ push @values, $v ;
+ }
+ return @values ;
+ }
+
+ ok 79, ArrayCompare (\@srt_1, [keys %h]);
+ ok 80, ArrayCompare (\@srt_2, [keys %g]);
+ ok 81, ArrayCompare (\@srt_3, [keys %k]);
+ ok 82, ArrayCompare ([qw(dd 0 0 x 3 1 abc)], [getValues \%h]);
+ ok 83, ArrayCompare ([qw(dd 1 0 3 x abc 0)], [getValues \%g]);
+ ok 84, ArrayCompare ([qw(0 x 3 0 1 dd abc)], [getValues \%k]);
+
+    # test DB_NEXT_DUP
+ ok 85, my $cur = (tied %g)->db_cursor() ;
+ my ($k, $v) = (9, "") ;
+ ok 86, $cur->c_get($k, $v, DB_SET) == 0 ;
+ ok 87, $k == 9 && $v == 0 ;
+ ok 88, $cur->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 89, $k == 9 && $v eq "x" ;
+ ok 90, $cur->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+}
+
+{
+ # override default compare, with duplicates, sort values
+ my $lex = new LexFile $Dfile, $Dfile2;
+ my $value ;
+ my (%h, %g) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
+ my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
+ ok 91, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 92, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -DupCompare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ }
+
+ ok 93, ArrayCompare (\@srt_1, [keys %h]);
+ ok 94, ArrayCompare (\@srt_2, [keys %g]);
+ ok 95, ArrayCompare ([qw(dd 1 3 x 2 11 abc 0)], [getValues \%g]);
+ ok 96, ArrayCompare ([qw(dd 0 11 2 x 3 1 abc)], [getValues \%h]);
+
+}
+
+{
+ # get_dup etc
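+    # get_dup returns the duplicate values stored under a key: a count in
+    # scalar context, the values in list context, and a value => count hash
+    # when the optional second argument is true.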
+ my $lex = new LexFile $Dfile;
+ my %hh ;
+
+ ok 97, my $YY = tie %hh, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hh{'Wall'} = 'Larry' ;
+ $hh{'Wall'} = 'Stone' ; # Note the duplicate key
+ $hh{'Wall'} = 'Brick' ; # Note the duplicate key
+ $hh{'Smith'} = 'John' ;
+ $hh{'mouse'} = 'mickey' ;
+
+ # first work in scalar context
+ ok 98, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 99, scalar $YY->get_dup('Smith') == 1 ;
+ ok 100, scalar $YY->get_dup('Wall') == 3 ;
+
+ # now in list context
+ my @unknown = $YY->get_dup('Unknown') ;
+ ok 101, "@unknown" eq "" ;
+
+ my @smith = $YY->get_dup('Smith') ;
+ ok 102, "@smith" eq "John" ;
+
+ {
+ my @wall = $YY->get_dup('Wall') ;
+ my %wall ;
+ @wall{@wall} = @wall ;
+ ok 103, (@wall == 3 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'});
+ }
+
+ # hash
+ my %unknown = $YY->get_dup('Unknown', 1) ;
+ ok 104, keys %unknown == 0 ;
+
+ my %smith = $YY->get_dup('Smith', 1) ;
+ ok 105, keys %smith == 1 && $smith{'John'} ;
+
+ my %wall = $YY->get_dup('Wall', 1) ;
+ ok 106, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 1 ;
+
+ undef $YY ;
+ untie %hh ;
+
+}
+
+{
+ # in-memory file
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $fd ;
+ my $value ;
+ ok 107, my $db = tie %hash, 'BerkeleyDB::Btree' ;
+
+ ok 108, $db->db_put("some key", "some value") == 0 ;
+ ok 109, $db->db_get("some key", $value) == 0 ;
+ ok 110, $value eq "some value" ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+    ok 111, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 112, $ret == 0 ;
+
+
+ # do a partial get
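+    # partial_set(offset, length) restricts subsequent db_get/db_put calls to
+    # that byte range of the value and returns the previous partial settings.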
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 113, ! $pon && $off == 0 && $len == 0 ;
+ ok 114, $db->db_get("red", $value) == 0 && $value eq "bo" ;
+ ok 115, $db->db_get("green", $value) == 0 && $value eq "ho" ;
+ ok 116, $db->db_get("blue", $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 117, $pon ;
+ ok 118, $off == 0 ;
+ ok 119, $len == 2 ;
+ ok 120, $db->db_get("red", $value) == 0 && $value eq "t" ;
+ ok 121, $db->db_get("green", $value) == 0 && $value eq "se" ;
+ ok 122, $db->db_get("blue", $value) == 0 && $value eq "" ;
+
+    # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 123, $pon ;
+ ok 124, $off == 3 ;
+ ok 125, $len == 2 ;
+ ok 126, $db->db_get("red", $value) == 0 && $value eq "boat" ;
+ ok 127, $db->db_get("green", $value) == 0 && $value eq "house" ;
+ ok 128, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
+
+ # now partial put
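+    # with a partial range in effect, db_put only replaces the selected bytes
+    # of any existing value.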
+ $db->partial_set(0,2) ;
+ ok 129, $db->db_put("red", "") == 0 ;
+ ok 130, $db->db_put("green", "AB") == 0 ;
+ ok 131, $db->db_put("blue", "XYZ") == 0 ;
+ ok 132, $db->db_put("new", "KLM") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 133, $pon ;
+ ok 134, $off == 0 ;
+ ok 135, $len == 2 ;
+ ok 136, $db->db_get("red", $value) == 0 && $value eq "at" ;
+ ok 137, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
+ ok 138, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
+ ok 139, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 140, ! $pon ;
+ ok 141, $off == 0 ;
+ ok 142, $len == 0 ;
+ ok 143, $db->db_put("red", "PPP") == 0 ;
+ ok 144, $db->db_put("green", "Q") == 0 ;
+ ok 145, $db->db_put("blue", "XYZ") == 0 ;
+ ok 146, $db->db_put("new", "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 147, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
+ ok 148, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
+ ok 149, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
+ ok 150, $db->db_get("new", $value) == 0 && $value eq "KLMTU" ;
+}
+
+{
+ # partial
+ # check works via tied hash
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 151, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ while (my ($k, $v) = each %data) {
+ $hash{$k} = $v ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 152, $hash{"red"} eq "bo" ;
+ ok 153, $hash{"green"} eq "ho" ;
+ ok 154, $hash{"blue"} eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 155, $hash{"red"} eq "t" ;
+ ok 156, $hash{"green"} eq "se" ;
+ ok 157, $hash{"blue"} eq "" ;
+
+    # switch off partial mode
+ $db->partial_clear() ;
+ ok 158, $hash{"red"} eq "boat" ;
+ ok 159, $hash{"green"} eq "house" ;
+ ok 160, $hash{"blue"} eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 161, $hash{"red"} = "" ;
+ ok 162, $hash{"green"} = "AB" ;
+ ok 163, $hash{"blue"} = "XYZ" ;
+ ok 164, $hash{"new"} = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 165, $hash{"red"} eq "at" ;
+ ok 166, $hash{"green"} eq "ABuse" ;
+ ok 167, $hash{"blue"} eq "XYZa" ;
+ ok 168, $hash{"new"} eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 169, $hash{"red"} = "PPP" ;
+ ok 170, $hash{"green"} = "Q" ;
+ ok 171, $hash{"blue"} = "XYZ" ;
+ ok 172, $hash{"new"} = "TU" ;
+
+ $db->partial_clear() ;
+ ok 173, $hash{"red"} eq "at\0PPP" ;
+ ok 174, $hash{"green"} eq "ABuQ" ;
+ ok 175, $hash{"blue"} eq "XYZXYZ" ;
+ ok 176, $hash{"new"} eq "KLMTU" ;
+}
+
+{
+ # transaction
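+    # updates made inside the transaction are discarded by txn_abort; the
+    # record counts before and after the abort check this.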
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 177, mkdir($home, 0777) ;
+ ok 178, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 179, my $txn = $env->txn_begin() ;
+ ok 180, my $db1 = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 181, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 182, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 183, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ #ok 151, $txn->txn_abort() == 0 ;
+ ok 184, (my $Z = $txn->txn_abort()) == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 185, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 186, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+ rmtree $home ;
+}
+
+{
+ # DB_DUP
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 187, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 188, keys %hash == 6 ;
+
+ # create a cursor
+ ok 189, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 190, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 191, $key eq "Wall" && $value eq "Larry" ;
+ ok 192, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 193, $key eq "Wall" && $value eq "Stone" ;
+ ok 194, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 195, $key eq "Wall" && $value eq "Brick" ;
+ ok 196, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 197, $key eq "Wall" && $value eq "Brick" ;
+
+ my $ref = $db->db_stat() ;
+ ok 198, ($ref->{bt_flags} | DB_DUP) == DB_DUP ;
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 199, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Minkey =>3 ,
+ -Pagesize => 2 **12
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 200, $ref->{$recs} == 0;
+ ok 201, $ref->{'bt_minkey'} == 3;
+ ok 202, $ref->{'bt_pagesize'} == 2 ** 12;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 203, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 204, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Btree);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 205, $@ eq "" ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB", -Filename => "dbbtree.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 206, $@ eq "" ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok 207, $@ eq "" ;
+ main::ok 208, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
+ main::ok 209, $@ eq "" ;
+ main::ok 210, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 211, $@ eq "" ;
+ main::ok 212, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok 213, $@ eq "" ;
+ main::ok 214, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DB_RECNUM, DB_SET_RECNO & DB_GET_RECNO
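+    # DB_RECNUM lets a btree be addressed by record number: DB_SET_RECNO
+    # retrieves the pair at a given record number, DB_GET_RECNO reports the
+    # record number at the current cursor position.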
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) = ("", "");
+ ok 215, my $db = new BerkeleyDB::Btree
+ -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Property => DB_RECNUM ;
+
+
+ # create some data
+ my @data = (
+ "A zero",
+ "B one",
+ "C two",
+ "D three",
+ "E four"
+ ) ;
+
+ my $ix = 0 ;
+ my $ret = 0 ;
+ foreach (@data) {
+ $ret += $db->db_put($_, $ix) ;
+ ++ $ix ;
+ }
+ ok 216, $ret == 0 ;
+
+ # db_get & DB_SET_RECNO
+ $k = 1 ;
+ ok 217, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 218, $k eq "B one" && $v == 1 ;
+
+ $k = 3 ;
+ ok 219, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 220, $k eq "D three" && $v == 3 ;
+
+ $k = 4 ;
+ ok 221, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 222, $k eq "E four" && $v == 4 ;
+
+ $k = 0 ;
+ ok 223, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 224, $k eq "A zero" && $v == 0 ;
+
+ # cursor & DB_SET_RECNO
+
+ # create the cursor
+ ok 225, my $cursor = $db->db_cursor() ;
+
+ $k = 2 ;
+ ok 226, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 227, $k eq "C two" && $v == 2 ;
+
+ $k = 0 ;
+ ok 228, $cursor->c_get($k, $v, DB_SET_RECNO) == 0;
+ ok 229, $k eq "A zero" && $v == 0 ;
+
+ $k = 3 ;
+ ok 230, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 231, $k eq "D three" && $v == 3 ;
+
+ # cursor & DB_GET_RECNO
+ ok 232, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 233, $k eq "A zero" && $v == 0 ;
+ ok 234, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 235, $v == 0 ;
+
+ ok 236, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 237, $k eq "B one" && $v == 1 ;
+ ok 238, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 239, $v == 1 ;
+
+ ok 240, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 241, $k eq "E four" && $v == 4 ;
+ ok 242, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 243, $v == 4 ;
+
+}
+
diff --git a/bdb/perl.BerkeleyDB/t/db-3.0.t b/bdb/perl.BerkeleyDB/t/db-3.0.t
new file mode 100644
index 00000000000..9c324dc7bab
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/db-3.0.t
@@ -0,0 +1,128 @@
+#!./perl -w
+
+# ID: 1.2, 7/17/97
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipped - this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..14\n";
+
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # set_mutexlocks
+
+ my $home = "./fred" ;
+ ok 1, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ mkdir "./fred", 0777 ;
+ chdir "./fred" ;
+ ok 2, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
+ ok 3, $env->set_mutexlocks(0) == 0 ;
+ chdir ".." ;
+ undef $env ;
+ rmtree $home ;
+}
+
+{
+ # c_dup
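+    # c_dup duplicates a cursor; with DB_POSITION the new cursor starts at
+    # the same key/value pair as the original.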
+
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 4, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 5, $ret == 0 ;
+
+ # create a cursor
+ ok 6, my $cursor = $db->db_cursor() ;
+
+ # point to a specific k/v pair
+ $k = "green" ;
+ ok 7, $cursor->c_get($k, $v, DB_SET) == 0 ;
+ ok 8, $v eq "house" ;
+
+ # duplicate the cursor
+ my $dup_cursor = $cursor->c_dup(DB_POSITION);
+ ok 9, $dup_cursor ;
+
+ # move original cursor off green/house
+ $cursor->c_get($k, $v, DB_NEXT) ;
+ ok 10, $k ne "green" ;
+ ok 11, $v ne "house" ;
+
+ # duplicate cursor should still be on green/house
+ ok 12, $dup_cursor->c_get($k, $v, DB_CURRENT) == 0;
+ ok 13, $k eq "green" ;
+ ok 14, $v eq "house" ;
+
+}
diff --git a/bdb/perl.BerkeleyDB/t/db-3.1.t b/bdb/perl.BerkeleyDB/t/db-3.1.t
new file mode 100644
index 00000000000..35076b6cd49
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/db-3.1.t
@@ -0,0 +1,172 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+#use Config;
+#
+#BEGIN {
+# if(-d "lib" && -f "TEST") {
+# if ($Config{'extensions'} !~ /\bBerkeleyDB\b/ ) {
+# print "1..74\n";
+# exit 0;
+# }
+# }
+#}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.1) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.1.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..25\n";
+
+my %DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+
+{
+ # c_count
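+    # c_count returns (via its argument) the number of duplicate data items
+    # stored for the key the cursor currently references.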
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 1, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 2, keys %hash == 6 ;
+
+ # create a cursor
+ ok 3, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 4, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 5, $key eq "Wall" && $value eq "Larry" ;
+
+ my $count ;
+ ok 6, $cursor->c_count($count) == 0 ;
+ ok 7, $count == 4 ;
+
+ $key = "Smith" ;
+ ok 8, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 9, $key eq "Smith" && $value eq "John" ;
+
+ ok 10, $cursor->c_count($count) == 0 ;
+ ok 11, $count == 1 ;
+
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # db_key_range
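+    # db_key_range fills $less, $equal and $greater with the estimated
+    # proportion of keys that sort before, equal to and after the given key.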
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 12, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 13, keys %hash == 6 ;
+
+ my $key = "Wall" ;
+ my ($less, $equal, $greater) ;
+ ok 14, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 15, $less != 0 ;
+ ok 16, $equal != 0 ;
+ ok 17, $greater != 0 ;
+
+ $key = "Smith" ;
+ ok 18, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 19, $less == 0 ;
+ ok 20, $equal != 0 ;
+ ok 21, $greater != 0 ;
+
+ $key = "NotThere" ;
+ ok 22, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 23, $less == 0 ;
+ ok 24, $equal == 0 ;
+ ok 25, $greater == 1 ;
+
+ undef $db ;
+ untie %hash ;
+
+}
diff --git a/bdb/perl.BerkeleyDB/t/db-3.2.t b/bdb/perl.BerkeleyDB/t/db-3.2.t
new file mode 100644
index 00000000000..0cff248733c
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/db-3.2.t
@@ -0,0 +1,90 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+#use Config;
+#
+#BEGIN {
+# if(-d "lib" && -f "TEST") {
+# if ($Config{'extensions'} !~ /\bBerkeleyDB\b/ ) {
+# print "1..74\n";
+# exit 0;
+# }
+# }
+#}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.2) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.2.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..1\n";
+
+my %DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+
+{
+ # set_q_extentsize
+
+ ok 1, 1 ;
+}
+
diff --git a/bdb/perl.BerkeleyDB/t/destroy.t b/bdb/perl.BerkeleyDB/t/destroy.t
new file mode 100644
index 00000000000..e3a1e2a97c6
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/destroy.t
@@ -0,0 +1,141 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..13\n";
+
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+
+my $Dfile = "dbhash.tmp";
+my $home = "./fred" ;
+
+umask(0);
+
+{
+    # let object destruction kill everything
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ rmtree $home if -e $home ;
+ ok 1, mkdir($home, 0777) ;
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 3, my $txn = $env->txn_begin() ;
+ ok 4, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 5, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 6, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 7, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 8, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 9, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 10, $count == 0 ;
+
+ #undef $txn ;
+ #undef $cursor ;
+ #undef $db1 ;
+ #undef $env ;
+ #untie %hash ;
+
+}
+{
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $cursor ;
+ my ($k, $v) = ("", "") ;
+ ok 11, my $db1 = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ my $count = 0 ;
+ # sequence forwards
+ ok 12, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 13, $count == 0 ;
+}
+
+rmtree $home ;
+
diff --git a/bdb/perl.BerkeleyDB/t/env.t b/bdb/perl.BerkeleyDB/t/env.t
new file mode 100644
index 00000000000..5d0197f85c0
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/env.t
@@ -0,0 +1,279 @@
+#!./perl -w
+
+# ID: 1.2, 7/17/97
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..52\n";
+
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # db version stuff
+ my ($major, $minor, $patch) = (0, 0, 0) ;
+
+ ok 1, my $VER = BerkeleyDB::DB_VERSION_STRING ;
+ ok 2, my $ver = BerkeleyDB::db_version($major, $minor, $patch) ;
+ ok 3, $VER eq $ver ;
+ ok 4, $major > 1 ;
+ ok 5, defined $minor ;
+ ok 6, defined $patch ;
+}
+
+{
+ # Check for invalid parameters
+ my $env ;
+ eval ' $env = new BerkeleyDB::Env( -Stupid => 3) ; ' ;
+ ok 7, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $env = new BerkeleyDB::Env( -Bad => 2, -Home => "/tmp", -Stupid => 3) ; ' ;
+ ok 8, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $env = new BerkeleyDB::Env (-Config => {"fred" => " "} ) ; ' ;
+ ok 9, !$env ;
+ ok 10, $BerkeleyDB::Error =~ /^illegal name-value pair/ ;
+}
+
+{
+ # create a very simple environment
+ my $home = "./fred" ;
+ ok 11, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ mkdir "./fred", 0777 ;
+ chdir "./fred" ;
+ ok 12, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
+ chdir ".." ;
+ undef $env ;
+ rmtree $home ;
+}
+
+{
+ # create an environment with a Home
+ my $home = "./fred" ;
+ ok 13, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 14, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE ;
+
+ undef $env ;
+ rmtree $home ;
+}
+
+{
+ # make new fail.
+ my $home = "./not_there" ;
+ rmtree $home ;
+ ok 15, ! -d $home ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_INIT_LOCK ;
+ ok 16, ! $env ;
+ ok 17, $! != 0 ;
+
+ rmtree $home ;
+}
+
+{
+ # Config
+ use Cwd ;
+ my $cwd = cwd() ;
+ my $home = "$cwd/fred" ;
+ my $data_dir = "$home/data_dir" ;
+ my $log_dir = "$home/log_dir" ;
+ my $data_file = "data.db" ;
+ ok 18, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 19, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 20, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Config => { DB_DATA_DIR => $data_dir,
+ DB_LOG_DIR => $log_dir
+ },
+ -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 21, $env ;
+
+ ok 22, my $txn = $env->txn_begin() ;
+
+ my %hash ;
+ ok 23, tie %hash, 'BerkeleyDB::Hash', -Filename => $data_file,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ $hash{"abc"} = 123 ;
+ $hash{"def"} = 456 ;
+
+ $txn->txn_commit() ;
+
+ untie %hash ;
+
+ undef $txn ;
+ undef $env ;
+ rmtree $home ;
+}
+
+{
+ # -ErrFile with a filename
+ my $errfile = "./errfile" ;
+ my $home = "./fred" ;
+ ok 24, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ my $lex = new LexFile $errfile ;
+ ok 25, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 26, !$db ;
+
+ ok 27, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
+ ok 28, -e $errfile ;
+ my $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 29, $BerkeleyDB::Error eq $contents ;
+
+ undef $env ;
+ rmtree $home ;
+}
+
+{
+ # -ErrFile with a filehandle
+ use IO ;
+ my $home = "./fred" ;
+ ok 30, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ my $errfile = "./errfile" ;
+ my $lex = new LexFile $errfile ;
+ ok 31, my $ef = new IO::File ">$errfile" ;
+ ok 32, my $env = new BerkeleyDB::Env( -ErrFile => $ef ,
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 33, !$db ;
+
+ ok 34, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
+ $ef->close() ;
+ ok 35, -e $errfile ;
+ my $contents = "" ;
+ $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 36, $BerkeleyDB::Error eq $contents ;
+ undef $env ;
+ rmtree $home ;
+}
+
+{
+ # -ErrPrefix
+ use IO ;
+ my $home = "./fred" ;
+ ok 37, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ my $errfile = "./errfile" ;
+ my $lex = new LexFile $errfile ;
+ ok 38, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ -ErrPrefix => "PREFIX",
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 39, !$db ;
+
+ ok 40, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/;
+ ok 41, -e $errfile ;
+ my $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 42, $BerkeleyDB::Error eq $contents ;
+
+ # change the prefix on the fly
+ my $old = $env->errPrefix("NEW ONE") ;
+ ok 43, $old eq "PREFIX" ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 44, !$db ;
+ ok 45, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/;
+ $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 46, $contents =~ /$BerkeleyDB::Error$/ ;
+ undef $env ;
+ rmtree $home ;
+}
+
+{
+ # test db_appexit
+ use Cwd ;
+ my $cwd = cwd() ;
+ my $home = "$cwd/fred" ;
+ my $data_dir = "$home/data_dir" ;
+ my $log_dir = "$home/log_dir" ;
+ my $data_file = "data.db" ;
+ ok 47, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 48, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 49, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Config => { DB_DATA_DIR => $data_dir,
+ DB_LOG_DIR => $log_dir
+ },
+ -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 50, $env ;
+
+ ok 51, my $txn_mgr = $env->TxnMgr() ;
+
+ ok 52, $env->db_appexit() == 0 ;
+
+ #rmtree $home ;
+}
+
+# test -Verbose
+# test -Flags
+# db_value_set
diff --git a/bdb/perl.BerkeleyDB/t/examples.t b/bdb/perl.BerkeleyDB/t/examples.t
new file mode 100644
index 00000000000..4b6702d540a
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/examples.t
@@ -0,0 +1,482 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..7\n";
+
+my $FA = 0 ;
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $x = $BerkeleyDB::Error;
+my $redirect = "xyzt" ;
+ {
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(3, docat_del($redirect) eq <<'EOM') ;
+Smith
+Wall
+mouse
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(4, docat_del($redirect) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+ $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(5, docat_del($redirect) eq <<"EOM") ;
+abc\x00 -> def\x00
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+ $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+	or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ my $val = pack("i", 123) ;
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(6, docat_del($redirect) eq <<"EOM") ;
+$val -> def
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ if ($FA) {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+ unlink $filename ;
+ } else {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $db = tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $db->push("green", "black") ;
+
+ my $elements = $db->length() ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $db->pop ;
+ print "popped $last\n" ;
+
+ $db->unshift("white") ;
+ my $first = $db->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ undef $db ;
+ untie @h ;
+ unlink $filename ;
+ }
+
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(7, docat_del($redirect) eq <<"EOM") ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+EOM
+
+}
+
diff --git a/bdb/perl.BerkeleyDB/t/examples.t.T b/bdb/perl.BerkeleyDB/t/examples.t.T
new file mode 100644
index 00000000000..fe0922318ca
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/examples.t.T
@@ -0,0 +1,496 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..7\n";
+
+my $FA = 0 ;
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $x = $BerkeleyDB::Error;
+my $redirect = "xyzt" ;
+ {
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN simpleHash
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+## END simpleHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN simpleHash2
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END simpleHash2
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN btreeSimple
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+## END btreeSimple
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(3, docat_del($redirect) eq <<'EOM') ;
+Smith
+Wall
+mouse
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN btreeSortOrder
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+## END btreeSortOrder
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(4, docat_del($redirect) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN nullFilter
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+## END nullFilter
+ $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(5, docat_del($redirect) eq <<"EOM") ;
+abc\x00 -> def\x00
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN intFilter
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+## END intFilter
+ $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+	or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ my $val = pack("i", 123) ;
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(6, docat_del($redirect) eq <<"EOM") ;
+$val -> def
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ if ($FA) {
+## BEGIN simpleRecno
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+## END simpleRecno
+ unlink $filename ;
+ } else {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $db = tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $db->push("green", "black") ;
+
+ my $elements = $db->length() ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $db->pop ;
+ print "popped $last\n" ;
+
+ $db->unshift("white") ;
+ my $first = $db->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ undef $db ;
+ untie @h ;
+ unlink $filename ;
+ }
+
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(7, docat_del($redirect) eq <<"EOM") ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+EOM
+
+}
+
diff --git a/bdb/perl.BerkeleyDB/t/examples3.t b/bdb/perl.BerkeleyDB/t/examples3.t
new file mode 100644
index 00000000000..9cc1fa72c29
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/examples3.t
@@ -0,0 +1,213 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+
+print "1..2\n";
+
+my $FA = 0 ;
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> banana
+green -> apple
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> apple
+green -> banana
+EOM
+
+}
+
+
diff --git a/bdb/perl.BerkeleyDB/t/examples3.t.T b/bdb/perl.BerkeleyDB/t/examples3.t.T
new file mode 100644
index 00000000000..573c04903e3
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/examples3.t.T
@@ -0,0 +1,217 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+
+print "1..2\n";
+
+my $FA = 0 ;
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN dupHash
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END dupHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> banana
+green -> apple
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN dupSortHash
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END dupSortHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> apple
+green -> banana
+EOM
+
+}
+
+
diff --git a/bdb/perl.BerkeleyDB/t/filter.t b/bdb/perl.BerkeleyDB/t/filter.t
new file mode 100644
index 00000000000..8bcdc7f3f90
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/filter.t
@@ -0,0 +1,244 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..46\n";
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+my $Dfile = "dbhash.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+{
+ # DBM Filter tests
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok 1, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok 2, checkOutput( "", "fred", "", "joe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 3, $h{"fred"} eq "joe";
+ # fk sk fv sv
+ ok 4, checkOutput( "", "fred", "joe", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 5, $db->FIRSTKEY() eq "fred" ;
+ # fk sk fv sv
+ ok 6, checkOutput( "fred", "", "", "") ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok 7, checkOutput( "", "fred", "", "Jxe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 8, $h{"Fred"} eq "[Jxe]";
+ # fk sk fv sv
+ ok 9, checkOutput( "", "fred", "[Jxe]", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 10, $db->FIRSTKEY() eq "FRED" ;
+ # fk sk fv sv
+ ok 11, checkOutput( "FRED", "", "", "") ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok 12, checkOutput( "", "fred", "", "joe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 13, $h{"fred"} eq "joe";
+ ok 14, checkOutput( "", "fred", "joe", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 15, $db->FIRSTKEY() eq "fred" ;
+ ok 16, checkOutput( "fred", "", "", "") ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok 17, checkOutput( "", "", "", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 18, $h{"fred"} eq "joe";
+ ok 19, checkOutput( "", "", "", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 20, $db->FIRSTKEY() eq "fred" ;
+ ok 21, checkOutput( "", "", "", "") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok 22, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok 23, $result{"store key"} eq "store key - 1: [fred]" ;
+ ok 24, $result{"store value"} eq "store value - 1: [joe]" ;
+ ok 25, ! defined $result{"fetch key"} ;
+ ok 26, ! defined $result{"fetch value"} ;
+ ok 27, $_ eq "original" ;
+
+ ok 28, $db->FIRSTKEY() eq "fred" ;
+ ok 29, $result{"store key"} eq "store key - 1: [fred]" ;
+ ok 30, $result{"store value"} eq "store value - 1: [joe]" ;
+ ok 31, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 32, ! defined $result{"fetch value"} ;
+ ok 33, $_ eq "original" ;
+
+ $h{"jim"} = "john" ;
+ ok 34, $result{"store key"} eq "store key - 2: [fred jim]" ;
+ ok 35, $result{"store value"} eq "store value - 2: [joe john]" ;
+ ok 36, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 37, ! defined $result{"fetch value"} ;
+ ok 38, $_ eq "original" ;
+
+ ok 39, $h{"fred"} eq "joe" ;
+ ok 40, $result{"store key"} eq "store key - 3: [fred jim fred]" ;
+ ok 41, $result{"store value"} eq "store value - 2: [joe john]" ;
+ ok 42, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 43, $result{"fetch value"} eq "fetch value - 1: [joe]" ;
+ ok 44, $_ eq "original" ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok 45, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok 46, $@ =~ /^BerkeleyDB Aborting: recursion detected in filter_store_key at/ ;
+ #print "[$@]\n" ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
diff --git a/bdb/perl.BerkeleyDB/t/hash.t b/bdb/perl.BerkeleyDB/t/hash.t
new file mode 100644
index 00000000000..1a42c60acb2
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/hash.t
@@ -0,0 +1,777 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+#use Config;
+#
+#BEGIN {
+# if(-d "lib" && -f "TEST") {
+# if ($Config{'extensions'} !~ /\bBerkeleyDB\b/ ) {
+# print "1..74\n";
+# exit 0;
+# }
+# }
+#}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..210\n";
+
+my %DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Hash -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Txn => "fred" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Hash -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to HASH
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put("some key", "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get("some key", $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+ ok 12, $db->db_get("key", $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del("some key") == 0 ;
+ ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
+ ok 16, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 17, $db->status() == DB_NOTFOUND ;
+ ok 18, $db->status() eq $DB_errors{'DB_NOTFOUND'};
+
+ ok 19, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 20, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 21, $db->status() eq $DB_errors{'DB_KEYEXIST'};
+ ok 22, $db->status() == DB_KEYEXIST ;
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 23, $db->db_get("key", $value) == 0 ;
+ ok 24, $value eq "value" ;
+
+ # test DB_GET_BOTH
+ my ($k, $v) = ("key", "value") ;
+ ok 25, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("key", "fred") ;
+ ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("another", "value") ;
+ ok 27, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+
+}
+
+{
+ # Check simple env works with a hash.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 28, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+
+ ok 29, my $env = new BerkeleyDB::Env -Flags => DB_CREATE| DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 30, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 31, $db->db_put("some key", "some value") == 0 ;
+ ok 32, $db->db_get("some key", $value) == 0 ;
+ ok 33, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+ rmtree $home ;
+}
+
+{
+ # override default hash
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ $::count = 0 ;
+ ok 34, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Hash => sub { ++$::count ; length $_[0] },
+ -Flags => DB_CREATE ;
+
+ ok 35, $db->db_put("some key", "some value") == 0 ;
+ ok 36, $db->db_get("some key", $value) == 0 ;
+ ok 37, $value eq "some value" ;
+ ok 38, $::count > 0 ;
+
+}
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 39, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 40, $ret == 0 ;
+
+ # create the cursor
+ ok 41, my $cursor = $db->db_cursor() ;
+
+ $k = $v = "" ;
+ my %copy = %data ;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 42, $cursor->status() == DB_NOTFOUND ;
+ ok 43, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 46, $status == DB_NOTFOUND ;
+ ok 47, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 48, $cursor->status() == $status ;
+ ok 49, $cursor->status() eq $status ;
+ ok 50, keys %copy == 0 ;
+ ok 51, $extras == 0 ;
+
+ ($k, $v) = ("green", "house") ;
+ ok 52, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("green", "door") ;
+ ok 53, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("black", "house") ;
+ ok 54, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+}
+
+{
+ # Tied Hash interface
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 55, tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # check "each" with an empty database
+ my $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ ++ $count ;
+ }
+ ok 56, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 57, $count == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $hash{"some key"} = "some value";
+ ok 58, (tied %hash)->status() == 0 ;
+ ok 59, $hash{"some key"} eq "some value";
+ ok 60, defined $hash{"some key"} ;
+ ok 61, (tied %hash)->status() == 0 ;
+ ok 62, exists $hash{"some key"} ;
+ ok 63, !defined $hash{"jimmy"} ;
+ ok 64, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 65, !exists $hash{"jimmy"} ;
+ ok 66, (tied %hash)->status() == DB_NOTFOUND ;
+
+ delete $hash{"some key"} ;
+ ok 67, (tied %hash)->status() == 0 ;
+ ok 68, ! defined $hash{"some key"} ;
+ ok 69, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 70, ! exists $hash{"some key"} ;
+ ok 71, (tied %hash)->status() == DB_NOTFOUND ;
+
+ $hash{1} = 2 ;
+ $hash{10} = 20 ;
+ $hash{1000} = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 72, $count == 3 ;
+ ok 73, $keys == 1011 ;
+ ok 74, $values == 2022 ;
+
+ # now clear the hash
+ %hash = () ;
+ ok 75, keys %hash == 0 ;
+
+ untie %hash ;
+}
+
+{
+ # in-memory file
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $fd ;
+ my $value ;
+ ok 76, my $db = tie %hash, 'BerkeleyDB::Hash' ;
+
+ ok 77, $db->db_put("some key", "some value") == 0 ;
+ ok 78, $db->db_get("some key", $value) == 0 ;
+ ok 79, $value eq "some value" ;
+
+ undef $db ;
+ untie %hash ;
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 80, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 81, $ret == 0 ;
+
+
+ # do a partial get
+ my($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 82, $pon == 0 && $off == 0 && $len == 0 ;
+ ok 83, ( $db->db_get("red", $value) == 0) && $value eq "bo" ;
+ ok 84, ( $db->db_get("green", $value) == 0) && $value eq "ho" ;
+ ok 85, ( $db->db_get("blue", $value) == 0) && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 86, $pon ;
+ ok 87, $off == 0 ;
+ ok 88, $len == 2 ;
+ ok 89, $db->db_get("red", $value) == 0 && $value eq "t" ;
+ ok 90, $db->db_get("green", $value) == 0 && $value eq "se" ;
+ ok 91, $db->db_get("blue", $value) == 0 && $value eq "" ;
+
+    # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 92, $pon ;
+ ok 93, $off == 3 ;
+ ok 94, $len == 2 ;
+ ok 95, $db->db_get("red", $value) == 0 && $value eq "boat" ;
+ ok 96, $db->db_get("green", $value) == 0 && $value eq "house" ;
+ ok 97, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 98, ! $pon ;
+ ok 99, $off == 0 ;
+ ok 100, $len == 0 ;
+ ok 101, $db->db_put("red", "") == 0 ;
+ ok 102, $db->db_put("green", "AB") == 0 ;
+ ok 103, $db->db_put("blue", "XYZ") == 0 ;
+ ok 104, $db->db_put("new", "KLM") == 0 ;
+
+ $db->partial_clear() ;
+ ok 105, $db->db_get("red", $value) == 0 && $value eq "at" ;
+ ok 106, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
+ ok 107, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
+ ok 108, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 109, $db->db_put("red", "PPP") == 0 ;
+ ok 110, $db->db_put("green", "Q") == 0 ;
+ ok 111, $db->db_put("blue", "XYZ") == 0 ;
+ ok 112, $db->db_put("new", "--") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
+ ok 117, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
+ ok 118, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
+ ok 119, $db->db_get("new", $value) == 0 && $value eq "KLM--" ;
+}
+
+{
+ # partial
+ # check works via tied hash
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 120, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ while (my ($k, $v) = each %data) {
+ $hash{$k} = $v ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 121, $hash{"red"} eq "bo" ;
+ ok 122, $hash{"green"} eq "ho" ;
+ ok 123, $hash{"blue"} eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 124, $hash{"red"} eq "t" ;
+ ok 125, $hash{"green"} eq "se" ;
+ ok 126, $hash{"blue"} eq "" ;
+
+    # switch off partial mode
+ $db->partial_clear() ;
+ ok 127, $hash{"red"} eq "boat" ;
+ ok 128, $hash{"green"} eq "house" ;
+ ok 129, $hash{"blue"} eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 130, $hash{"red"} = "" ;
+ ok 131, $hash{"green"} = "AB" ;
+ ok 132, $hash{"blue"} = "XYZ" ;
+ ok 133, $hash{"new"} = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 134, $hash{"red"} eq "at" ;
+ ok 135, $hash{"green"} eq "ABuse" ;
+ ok 136, $hash{"blue"} eq "XYZa" ;
+ ok 137, $hash{"new"} eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 138, $hash{"red"} = "PPP" ;
+ ok 139, $hash{"green"} = "Q" ;
+ ok 140, $hash{"blue"} = "XYZ" ;
+ ok 141, $hash{"new"} = "TU" ;
+
+ $db->partial_clear() ;
+ ok 142, $hash{"red"} eq "at\0PPP" ;
+ ok 143, $hash{"green"} eq "ABuQ" ;
+ ok 144, $hash{"blue"} eq "XYZXYZ" ;
+ ok 145, $hash{"new"} eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 146, mkdir($home, 0777) ;
+ ok 147, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 148, my $txn = $env->txn_begin() ;
+ ok 149, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 150, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 151, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 152, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 153, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 154, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 155, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+ rmtree $home ;
+}
+
+
+{
+ # DB_DUP
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 156, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 157, keys %hash == 6 ;
+
+ # create a cursor
+ ok 158, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 159, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 160, $key eq "Wall" && $value eq "Larry" ;
+ ok 161, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 162, $key eq "Wall" && $value eq "Stone" ;
+ ok 163, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 164, $key eq "Wall" && $value eq "Brick" ;
+ ok 165, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 166, $key eq "Wall" && $value eq "Brick" ;
+
+ #my $ref = $db->db_stat() ;
+ #ok 143, $ref->{bt_flags} | DB_DUP ;
+
+ # test DB_DUP_NEXT
+ my ($k, $v) = ("Wall", "") ;
+ ok 167, $cursor->c_get($k, $v, DB_SET) == 0 ;
+ ok 168, $k eq "Wall" && $v eq "Larry" ;
+ ok 169, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 170, $k eq "Wall" && $v eq "Stone" ;
+ ok 171, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 172, $k eq "Wall" && $v eq "Brick" ;
+ ok 173, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 174, $k eq "Wall" && $v eq "Brick" ;
+ ok 175, $cursor->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # DB_DUP & DupCompare
+ my $lex = new LexFile $Dfile, $Dfile2;
+ my ($key, $value) ;
+ my (%h, %g) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
+ my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
+
+ ok 176, tie %h, "BerkeleyDB::Hash", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Flags => DB_CREATE ;
+
+ ok 177, tie %g, 'BerkeleyDB::Hash', -Filename => $Dfile2,
+ -DupCompare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Flags => DB_CREATE ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ }
+
+ ok 178, my $cursor = (tied %h)->db_cursor() ;
+ $key = 9 ; $value = "";
+ ok 179, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 180, $key == 9 && $value eq 11 ;
+ ok 181, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 182, $key == 9 && $value == 2 ;
+ ok 183, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 184, $key == 9 && $value eq "x" ;
+
+ $cursor = (tied %g)->db_cursor() ;
+ $key = 9 ;
+ ok 185, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 186, $key == 9 && $value eq "x" ;
+ ok 187, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 188, $key == 9 && $value == 2 ;
+ ok 189, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 190, $key == 9 && $value == 11 ;
+
+
+}
+
+{
+ # get_dup etc
+ my $lex = new LexFile $Dfile;
+ my %hh ;
+
+ ok 191, my $YY = tie %hh, "BerkeleyDB::Hash", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hh{'Wall'} = 'Larry' ;
+ $hh{'Wall'} = 'Stone' ; # Note the duplicate key
+ $hh{'Wall'} = 'Brick' ; # Note the duplicate key
+ $hh{'Smith'} = 'John' ;
+ $hh{'mouse'} = 'mickey' ;
+
+ # first work in scalar context
+ ok 192, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 193, scalar $YY->get_dup('Smith') == 1 ;
+ ok 194, scalar $YY->get_dup('Wall') == 3 ;
+
+ # now in list context
+ my @unknown = $YY->get_dup('Unknown') ;
+ ok 195, "@unknown" eq "" ;
+
+ my @smith = $YY->get_dup('Smith') ;
+ ok 196, "@smith" eq "John" ;
+
+ {
+ my @wall = $YY->get_dup('Wall') ;
+ my %wall ;
+ @wall{@wall} = @wall ;
+ ok 197, (@wall == 3 && $wall{'Larry'}
+ && $wall{'Stone'} && $wall{'Brick'});
+ }
+
+ # hash
+ my %unknown = $YY->get_dup('Unknown', 1) ;
+ ok 198, keys %unknown == 0 ;
+
+ my %smith = $YY->get_dup('Smith', 1) ;
+ ok 199, keys %smith == 1 && $smith{'John'} ;
+
+ my %wall = $YY->get_dup('Wall', 1) ;
+ ok 200, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 1 ;
+
+ undef $YY ;
+ untie %hh ;
+
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Hash);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 201, $@ eq "" ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB", -Filename => "dbhash.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 202, $@ eq "" ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok 203, $@ eq "" ;
+ main::ok 204, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
+ main::ok 205, $@ eq "" ;
+ main::ok 206, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 207, $@ eq "" ;
+ main::ok 208, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok 209, $@ eq "" ;
+ main::ok 210, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbhash.tmp" ;
+
+}
diff --git a/bdb/perl.BerkeleyDB/t/join.t b/bdb/perl.BerkeleyDB/t/join.t
new file mode 100644
index 00000000000..f986d76f734
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/join.t
@@ -0,0 +1,270 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+if ($BerkeleyDB::db_ver < 2.005002)
+{
+ print "1..0 # Skip: join needs Berkeley DB 2.5.2 or later\n" ;
+ exit 0 ;
+}
+
+
+print "1..37\n";
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+my $Dfile1 = "dbhash1.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile1, $Dfile2, $Dfile3 ;
+
+umask(0) ;
+
+sub addData
+{
+ my $db = shift ;
+ my @data = @_ ;
+    die "addData odd data\n" if @data % 2 != 0 ;
+ my ($k, $v) ;
+ my $ret = 0 ;
+ while (@data) {
+ $k = shift @data ;
+ $v = shift @data ;
+ $ret += $db->db_put($k, $v) ;
+ }
+
+ return ($ret == 0) ;
+}
+
+{
+ # error cases
+ my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
+ my %hash1 ;
+ my $value ;
+ my $status ;
+ my $cursor ;
+
+ ok 1, my $db1 = tie %hash1, 'BerkeleyDB::Hash',
+ -Filename => $Dfile1,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] lt $_[1] },
+ -Property => DB_DUP|DB_DUPSORT ;
+
+ # no cursors supplied
+ eval '$cursor = $db1->db_join() ;' ;
+ ok 2, $@ =~ /Usage: \$db->BerkeleyDB::Common::db_join\Q([cursors], flags=0)/;
+
+ # empty list
+ eval '$cursor = $db1->db_join([]) ;' ;
+ ok 3, $@ =~ /db_join: No cursors in parameter list/;
+
+ # cursor list, isn't a []
+ eval '$cursor = $db1->db_join({}) ;' ;
+ ok 4, $@ =~ /cursors is not an array reference at/ ;
+
+ eval '$cursor = $db1->db_join(\1) ;' ;
+ ok 5, $@ =~ /cursors is not an array reference at/ ;
+
+}
+
+{
+ # test a 2-way & 3-way join
+
+ my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
+ my %hash1 ;
+ my %hash2 ;
+ my %hash3 ;
+ my $value ;
+ my $status ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 6, mkdir($home, 0777) ;
+ ok 7, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN
+ |DB_INIT_MPOOL;
+ #|DB_INIT_MPOOL| DB_INIT_LOCK;
+ ok 8, my $txn = $env->txn_begin() ;
+ ok 9, my $db1 = tie %hash1, 'BerkeleyDB::Hash',
+ -Filename => $Dfile1,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+ ;
+
+ ok 10, my $db2 = tie %hash2, 'BerkeleyDB::Hash',
+ -Filename => $Dfile2,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 11, my $db3 = tie %hash3, 'BerkeleyDB::Btree',
+ -Filename => $Dfile3,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 12, addData($db1, qw( apple Convenience
+ peach Shopway
+ pear Farmer
+ raspberry Shopway
+ strawberry Shopway
+ gooseberry Farmer
+ blueberry Farmer
+ ));
+
+ ok 13, addData($db2, qw( red apple
+ red raspberry
+ red strawberry
+ yellow peach
+ yellow pear
+ green gooseberry
+ blue blueberry)) ;
+
+ ok 14, addData($db3, qw( expensive apple
+ reasonable raspberry
+ expensive strawberry
+ reasonable peach
+ reasonable pear
+ expensive gooseberry
+ reasonable blueberry)) ;
+
+ ok 15, my $cursor2 = $db2->db_cursor() ;
+ my $k = "red" ;
+ my $v = "" ;
+ ok 16, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ # Two way Join
+ ok 17, my $cursor1 = $db1->db_join([$cursor2]) ;
+
+ my %expected = qw( apple Convenience
+ raspberry Shopway
+ strawberry Shopway
+ ) ;
+
+ # sequence forwards
+ while ($cursor1->c_get($k, $v) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} && $expected{$k} eq $v ;
+ #print "[$k] [$v]\n" ;
+ }
+ ok 18, keys %expected == 0 ;
+ ok 19, $cursor1->status() == DB_NOTFOUND ;
+
+ # Three way Join
+ ok 20, $cursor2 = $db2->db_cursor() ;
+ $k = "red" ;
+ $v = "" ;
+ ok 21, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ ok 22, my $cursor3 = $db3->db_cursor() ;
+ $k = "expensive" ;
+ $v = "" ;
+ ok 23, $cursor3->c_get($k, $v, DB_SET) == 0 ;
+ ok 24, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
+
+ %expected = qw( apple Convenience
+ strawberry Shopway
+ ) ;
+
+ # sequence forwards
+ while ($cursor1->c_get($k, $v) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} && $expected{$k} eq $v ;
+ #print "[$k] [$v]\n" ;
+ }
+ ok 25, keys %expected == 0 ;
+ ok 26, $cursor1->status() == DB_NOTFOUND ;
+
+ # test DB_JOIN_ITEM
+ # #################
+ ok 27, $cursor2 = $db2->db_cursor() ;
+ $k = "red" ;
+ $v = "" ;
+ ok 28, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ ok 29, $cursor3 = $db3->db_cursor() ;
+ $k = "expensive" ;
+ $v = "" ;
+ ok 30, $cursor3->c_get($k, $v, DB_SET) == 0 ;
+ ok 31, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
+
+ %expected = qw( apple 1
+ strawberry 1
+ ) ;
+
+ # sequence forwards
+ $k = "" ;
+ $v = "" ;
+ while ($cursor1->c_get($k, $v, DB_JOIN_ITEM) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} ;
+ #print "[$k]\n" ;
+ }
+ ok 32, keys %expected == 0 ;
+ ok 33, $cursor1->status() == DB_NOTFOUND ;
+
+ ok 34, $cursor1->c_close() == 0 ;
+ ok 35, $cursor2->c_close() == 0 ;
+ ok 36, $cursor3->c_close() == 0 ;
+
+ ok 37, ($status = $txn->txn_commit) == 0;
+
+ undef $txn ;
+ #undef $cursor1;
+ #undef $cursor2;
+ #undef $cursor3;
+ undef $db1 ;
+ undef $db2 ;
+ undef $db3 ;
+ undef $env ;
+ untie %hash1 ;
+ untie %hash2 ;
+ untie %hash3 ;
+ rmtree $home ;
+}
+
diff --git a/bdb/perl.BerkeleyDB/t/mldbm.t b/bdb/perl.BerkeleyDB/t/mldbm.t
new file mode 100644
index 00000000000..eb6673b35f5
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/mldbm.t
@@ -0,0 +1,166 @@
+#!/usr/bin/perl -w
+
+BEGIN
+{
+ if ($] < 5.005) {
+ print "1..0 # This is Perl $], skipping test\n" ;
+ exit 0 ;
+ }
+
+ eval { require Data::Dumper ; };
+ if ($@) {
+ print "1..0 # Data::Dumper is not installed on this system.\n";
+ exit 0 ;
+ }
+ if ($Data::Dumper::VERSION < 2.08) {
+ print "1..0 # Data::Dumper 2.08 or better required (found $Data::Dumper::VERSION).\n";
+ exit 0 ;
+ }
+ eval { require MLDBM ; };
+ if ($@) {
+ print "1..0 # MLDBM is not installed on this system.\n";
+ exit 0 ;
+ }
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+print "1..12\n";
+
+{
+package BTREE ;
+
+use BerkeleyDB ;
+use MLDBM qw(BerkeleyDB::Btree) ;
+use Data::Dumper;
+
+$filename = 'testmldbm' ;
+
+unlink $filename ;
+$MLDBM::UseDB = "BerkeleyDB::Btree" ;
+$db = tie %o, MLDBM, -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+::ok 1, $db ;
+::ok 2, $db->type() == DB_BTREE ;
+
+$c = [\'c'];
+$b = {};
+$a = [1, $b, $c];
+$b->{a} = $a;
+$b->{b} = $a->[1];
+$b->{c} = $a->[2];
+@o{qw(a b c)} = ($a, $b, $c);
+$o{d} = "{once upon a time}";
+$o{e} = 1024;
+$o{f} = 1024.1024;
+$first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+$second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+::ok 3, $first eq $second ;
+::ok 4, $o{d} eq "{once upon a time}" ;
+::ok 5, $o{e} == 1024 ;
+::ok 6, $o{f} eq 1024.1024 ;
+
+unlink $filename ;
+}
+
+{
+
+package HASH ;
+
+use BerkeleyDB ;
+use MLDBM qw(BerkeleyDB::Hash) ;
+use Data::Dumper;
+
+$filename = 'testmldbm' ;
+
+unlink $filename ;
+$MLDBM::UseDB = "BerkeleyDB::Hash" ;
+$db = tie %o, MLDBM, -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+::ok 7, $db ;
+::ok 8, $db->type() == DB_HASH ;
+
+
+$c = [\'c'];
+$b = {};
+$a = [1, $b, $c];
+$b->{a} = $a;
+$b->{b} = $a->[1];
+$b->{c} = $a->[2];
+@o{qw(a b c)} = ($a, $b, $c);
+$o{d} = "{once upon a time}";
+$o{e} = 1024;
+$o{f} = 1024.1024;
+$first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+$second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+::ok 9, $first eq $second ;
+::ok 10, $o{d} eq "{once upon a time}" ;
+::ok 11, $o{e} == 1024 ;
+::ok 12, $o{f} eq 1024.1024 ;
+
+unlink $filename ;
+
+}
diff --git a/bdb/perl.BerkeleyDB/t/queue.t b/bdb/perl.BerkeleyDB/t/queue.t
new file mode 100644
index 00000000000..0f459a43a69
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/queue.t
@@ -0,0 +1,837 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, Queue needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..197\n";
+
+my %DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION'=> "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY'=> "DB_RUNRECOVERY: Fatal error, run database recovery",
+ ) ;
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+sub touch
+{
+ my $file = shift ;
+ open(CAT,">$file") || die "Cannot open $file:$!";
+ close(CAT);
+}
+
+sub joiner
+{
+ my $db = shift ;
+ my $sep = shift ;
+ my ($k, $v) = (0, "") ;
+ my @data = () ;
+
+ my $cursor = $db->db_cursor() or return () ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ push @data, $v ;
+ }
+
+ (scalar(@data), join($sep, @data)) ;
+}
+
+sub countRecords
+{
+ my $db = shift ;
+ my ($k, $v) = (0,0) ;
+ my ($count) = 0 ;
+ my ($cursor) = $db->db_cursor() ;
+ #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
+# $status == 0 ;
+# $status = $cursor->c_get($k, $v, DB_NEXT) )
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { ++ $count }
+
+ return $count ;
+}
+
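+# fillout() pads $var out to $length with $pad (default space), matching the
+# padded form in which Berkeley DB returns fixed-length Queue records.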
+sub fillout
+{
+ my $var = shift ;
+ my $length = shift ;
+ my $pad = shift || " " ;
+ my $template = $pad x $length ;
+ substr($template, 0, length($var)) = $var ;
+ return $template ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Queue -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Queue -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) / ;
+
+ eval ' $db = new BerkeleyDB::Queue -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Queue -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Queue -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Queue
+
+{
+ my $lex = new LexFile $Dfile ;
+ my $rec_len = 10 ;
+ my $pad = "x" ;
+
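+ # Queue databases hold fixed-length records: -Len sets the record size
+ # and -Pad the character used to pad shorter values.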
+ ok 6, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Len => $rec_len,
+ -Pad => $pad;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put(1, "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get(1, $value) == 0 ;
+ ok 10, $value eq fillout("some value", $rec_len, $pad) ;
+ ok 11, $db->db_put(2, "value") == 0 ;
+ ok 12, $db->db_get(2, $value) == 0 ;
+ ok 13, $value eq fillout("value", $rec_len, $pad) ;
+ ok 14, $db->db_del(1) == 0 ;
+ ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
+ ok 16, $db->status() == DB_KEYEMPTY ;
+ ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
+
+ ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
+ ok 19, $db->status() == DB_NOTFOUND ;
+ ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 21, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 24, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 25, $db->db_get(2, $value) == 0 ;
+ ok 26, $value eq fillout("value", $rec_len, $pad) ;
+
+
+}
+
+
+{
+ # Check a simple env works with an array,
+ # and that the pad defaults to a space
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ my $rec_len = 11 ;
+ ok 27, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+
+ ok 29, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE,
+ -Len => $rec_len;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put(1, "some value") == 0 ;
+ ok 31, $db->db_get(1, $value) == 0 ;
+ ok 32, $value eq fillout("some value", $rec_len) ;
+ undef $db ;
+ undef $env ;
+ rmtree $home ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my ($k, $v) ;
+ my $rec_len = 5 ;
+ ok 33, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len;
+
+ # create some data
+ my @data = (
+ "red" ,
+ "green" ,
+ "blue" ,
+ ) ;
+
+ my $i ;
+ my %data ;
+ my $ret = 0 ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ $data{$i} = $data[$i] ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = 0 ; $v = "" ;
+ my %copy = %data;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ {
+ if ( fillout($copy{$k}, $rec_len) eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( fillout($copy{$k}, $rec_len) eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+}
+
+{
+ # Tied Array interface
+
+ # full tied array support started in Perl 5.004_57
+ # just double check.
+ my $FA = 0 ;
+ {
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+ }
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ my $rec_len = 10 ;
+ ok 46, $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len;
+
+ ok 47, my $cursor = (tied @array)->db_cursor() ;
+ # check the database is empty
+ my $count = 0 ;
+ my ($k, $v) = (0,"") ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 48, $cursor->status() == DB_NOTFOUND ;
+ ok 49, $count == 0 ;
+
+ ok 50, @array == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $array[1] = "some value";
+ ok 51, (tied @array)->status() == 0 ;
+ ok 52, $array[1] eq fillout("some value", $rec_len);
+ ok 53, defined $array[1];
+ ok 54, (tied @array)->status() == 0 ;
+ ok 55, !defined $array[3];
+ ok 56, (tied @array)->status() == DB_NOTFOUND ;
+
+ ok 57, (tied @array)->db_del(1) == 0 ;
+ ok 58, (tied @array)->status() == 0 ;
+ ok 59, ! defined $array[1];
+ ok 60, (tied @array)->status() == DB_KEYEMPTY ;
+
+ $array[1] = 2 ;
+ $array[10] = 20 ;
+ $array[1000] = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 61, $count == 3 ;
+ ok 62, $keys == 1011 ;
+ ok 63, $values == 2022 ;
+
+ # unshift isn't allowed
+# eval {
+# $FA ? unshift @array, "red", "green", "blue"
+# : $db->unshift("red", "green", "blue" ) ;
+# } ;
+# ok 64, $@ =~ /^unshift is unsupported with Queue databases/ ;
+ $array[0] = "red" ;
+ $array[1] = "green" ;
+ $array[2] = "blue" ;
+ $array[4] = 2 ;
+ ok 64, $array[0] eq fillout("red", $rec_len) ;
+ ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 66, $k == 0 ;
+ ok 67, $v eq fillout("red", $rec_len) ;
+ ok 68, $array[1] eq fillout("green", $rec_len) ;
+ ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 70, $k == 1 ;
+ ok 71, $v eq fillout("green", $rec_len) ;
+ ok 72, $array[2] eq fillout("blue", $rec_len) ;
+ ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 74, $k == 2 ;
+ ok 75, $v eq fillout("blue", $rec_len) ;
+ ok 76, $array[4] == 2 ;
+ ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 78, $k == 4 ;
+ ok 79, $v == 2 ;
+
+ # shift
+ ok 80, ($FA ? shift @array : $db->shift()) eq fillout("red", $rec_len) ;
+ ok 81, ($FA ? shift @array : $db->shift()) eq fillout("green", $rec_len) ;
+ ok 82, ($FA ? shift @array : $db->shift()) eq fillout("blue", $rec_len) ;
+ ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
+
+ # push
+ $FA ? push @array, "the", "end"
+ : $db->push("the", "end") ;
+ ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 85, $k == 1002 ;
+ ok 86, $v eq fillout("end", $rec_len) ;
+ ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 88, $k == 1001 ;
+ ok 89, $v eq fillout("the", $rec_len) ;
+ ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 91, $k == 1000 ;
+ ok 92, $v == 2000 ;
+
+ # pop
+ ok 93, ( $FA ? pop @array : $db->pop ) eq fillout("end", $rec_len) ;
+ ok 94, ( $FA ? pop @array : $db->pop ) eq fillout("the", $rec_len) ;
+ ok 95, ( $FA ? pop @array : $db->pop ) == 2000 ;
+
+ # now clear the array
+ $FA ? @array = ()
+ : $db->clear() ;
+ ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
+
+ undef $cursor ;
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory file
+
+ my @array ;
+ my $fd ;
+ my $value ;
+ my $rec_len = 15 ;
+ ok 97, my $db = tie @array, 'BerkeleyDB::Queue',
+ -Len => $rec_len;
+
+ ok 98, $db->db_put(1, "some value") == 0 ;
+ ok 99, $db->db_get(1, $value) == 0 ;
+ ok 100, $value eq fillout("some value", $rec_len) ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ my $rec_len = 8 ;
+ ok 101, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ my $r = $db->db_put($i, $data[$i]) ;
+ $ret += $r ;
+ }
+ ok 102, $ret == 0 ;
+
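+ # partial_set(offset, length) limits subsequent gets and puts to that
+ # byte range of each record, and returns the previous partial settings.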
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 103, ! $pon && $off == 0 && $len == 0 ;
+ ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
+ ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
+ ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 107, $pon ;
+ ok 108, $off == 0 ;
+ ok 109, $len == 2 ;
+ ok 110, $db->db_get(1, $value) == 0 && $value eq fillout("t", 2) ;
+ ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
+ ok 112, $db->db_get(3, $value) == 0 && $value eq " " ;
+
+ # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
+ ok 117, $db->db_get(2, $value) == 0 && $value eq fillout("house", $rec_len) ;
+ ok 118, $db->db_get(3, $value) == 0 && $value eq fillout("sea", $rec_len) ;
+
+ # now partial put
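+ # with fixed-length records a partial put must supply exactly the number
+ # of bytes in the window, so the empty and three-byte writes below fail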
+ $db->partial_set(0,2) ;
+ ok 119, $db->db_put(1, "") != 0 ;
+ ok 120, $db->db_put(2, "AB") == 0 ;
+ ok 121, $db->db_put(3, "XY") == 0 ;
+ ok 122, $db->db_put(4, "KLM") != 0 ;
+ ok 123, $db->db_put(4, "KL") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 124, $pon ;
+ ok 125, $off == 0 ;
+ ok 126, $len == 2 ;
+ ok 127, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
+ ok 128, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse", $rec_len) ;
+ ok 129, $db->db_get(3, $value) == 0 && $value eq fillout("XYa", $rec_len) ;
+ ok 130, $db->db_get(4, $value) == 0 && $value eq fillout("KL", $rec_len) ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 131, ! $pon ;
+ ok 132, $off == 0 ;
+ ok 133, $len == 0 ;
+ ok 134, $db->db_put(1, "PP") == 0 ;
+ ok 135, $db->db_put(2, "Q") != 0 ;
+ ok 136, $db->db_put(3, "XY") == 0 ;
+ ok 137, $db->db_put(4, "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 138, $db->db_get(1, $value) == 0 && $value eq fillout("boaPP", $rec_len) ;
+ ok 139, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse",$rec_len) ;
+ ok 140, $db->db_get(3, $value) == 0 && $value eq fillout("XYaXY", $rec_len) ;
+ ok 141, $db->db_get(4, $value) == 0 && $value eq fillout("KL TU", $rec_len) ;
+}
+
+{
+ # partial
+ # check works via tied array
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+ my $rec_len = 8 ;
+ ok 142, my $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $i ;
+ my $status = 0 ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $array[$i] = $data[$i] ;
+ $status += $db->status() ;
+ }
+
+ ok 143, $status == 0 ;
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 144, $array[1] eq fillout("bo", 2) ;
+ ok 145, $array[2] eq fillout("ho", 2) ;
+ ok 146, $array[3] eq fillout("se", 2) ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 147, $array[1] eq fillout("t", 2) ;
+ ok 148, $array[2] eq fillout("se", 2) ;
+ ok 149, $array[3] eq fillout("", 2) ;
+
+ # switch off partial mode
+ $db->partial_clear() ;
+ ok 150, $array[1] eq fillout("boat", $rec_len) ;
+ ok 151, $array[2] eq fillout("house", $rec_len) ;
+ ok 152, $array[3] eq fillout("sea", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ $array[1] = "" ;
+ ok 153, $db->status() != 0 ;
+ $array[2] = "AB" ;
+ ok 154, $db->status() == 0 ;
+ $array[3] = "XY" ;
+ ok 155, $db->status() == 0 ;
+ $array[4] = "KL" ;
+ ok 156, $db->status() == 0 ;
+
+ $db->partial_clear() ;
+ ok 157, $array[1] eq fillout("boat", $rec_len) ;
+ ok 158, $array[2] eq fillout("ABuse", $rec_len) ;
+ ok 159, $array[3] eq fillout("XYa", $rec_len) ;
+ ok 160, $array[4] eq fillout("KL", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ $array[1] = "PP" ;
+ ok 161, $db->status() == 0 ;
+ $array[2] = "Q" ;
+ ok 162, $db->status() != 0 ;
+ $array[3] = "XY" ;
+ ok 163, $db->status() == 0 ;
+ $array[4] = "TU" ;
+ ok 164, $db->status() == 0 ;
+
+ $db->partial_clear() ;
+ ok 165, $array[1] eq fillout("boaPP", $rec_len) ;
+ ok 166, $array[2] eq fillout("ABuse", $rec_len) ;
+ ok 167, $array[3] eq fillout("XYaXY", $rec_len) ;
+ ok 168, $array[4] eq fillout("KL TU", $rec_len) ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 169, mkdir($home, 0777) ;
+ my $rec_len = 9 ;
+ ok 170, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 171, my $txn = $env->txn_begin() ;
+ ok 172, my $db1 = tie @array, 'BerkeleyDB::Queue',
+ -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+
+ # create some data
+ my @data = (
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db1->db_put($i, $data[$i]) ;
+ }
+ ok 173, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 174, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = (0, "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 175, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 176, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 177, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 178, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie @array ;
+ rmtree $home ;
+}
+
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
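+ # from Berkeley DB 3.1 the record count is reported in qs_ndata rather
+ # than qs_nrecs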
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "qs_ndata" : "qs_nrecs") ;
+ my @array ;
+ my ($k, $v) ;
+ my $rec_len = 7 ;
+ ok 179, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Pagesize => 4 * 1024,
+ -Len => $rec_len,
+ -Pad => " "
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 180, $ref->{$recs} == 0;
+ ok 181, $ref->{'qs_pagesize'} == 4 * 1024;
+
+ # create some data
+ my @data = (
+ 2,
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = $db->ArrayOffset ; @data ; ++$i) {
+ $ret += $db->db_put($i, shift @data) ;
+ }
+ ok 182, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 183, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Queue);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 184, $@ eq "" ;
+ my @h ;
+ my $X ;
+ my $rec_len = 34 ;
+ eval '
+ $X = tie(@h, "SubDB", -Filename => "dbbtree.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 ,
+ -Len => $rec_len,
+ -Pad => " "
+ );
+ ' ;
+
+ main::ok 185, $@ eq "" ;
+
+ my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
+ main::ok 186, $@ eq "" ;
+ main::ok 187, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok 192, $@ eq "" ;
+ main::ok 193, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DB_APPEND
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ my $rec_len = 21 ;
+ ok 194, my $db = tie @array, 'BerkeleyDB::Queue',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create a few records
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
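+ # DB_APPEND adds the data as a new record and writes the allocated
+ # record number back into $k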
+ my $k = 0 ;
+ ok 195, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 196, $k == 4 ;
+ ok 197, $array[4] eq fillout("fred", $rec_len) ;
+
+ undef $db ;
+ untie @array ;
+}
+
+__END__
+
+
+# TODO
+#
+# DB_DELIMITER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/bdb/perl.BerkeleyDB/t/recno.t b/bdb/perl.BerkeleyDB/t/recno.t
new file mode 100644
index 00000000000..0f210f540c3
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/recno.t
@@ -0,0 +1,967 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..218\n";
+
+my %DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+sub touch
+{
+ my $file = shift ;
+ open(CAT,">$file") || die "Cannot open $file:$!";
+ close(CAT);
+}
+
+sub joiner
+{
+ my $db = shift ;
+ my $sep = shift ;
+ my ($k, $v) = (0, "") ;
+ my @data = () ;
+
+ my $cursor = $db->db_cursor() or return () ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ push @data, $v ;
+ }
+
+ (scalar(@data), join($sep, @data)) ;
+}
+
+sub countRecords
+{
+ my $db = shift ;
+ my ($k, $v) = (0,0) ;
+ my ($count) = 0 ;
+ my ($cursor) = $db->db_cursor() ;
+ #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
+# $status == 0 ;
+# $status = $cursor->c_get($k, $v, DB_NEXT) )
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { ++ $count }
+
+ return $count ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Recno -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Recno -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) / ;
+
+ eval ' $db = new BerkeleyDB::Recno -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Recno -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Recno -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Recno
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put(1, "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get(1, $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put(2, "value") == 0 ;
+ ok 12, $db->db_get(2, $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del(1) == 0 ;
+ ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
+ ok 16, $db->status() == DB_KEYEMPTY ;
+ ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
+
+ ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
+ ok 19, $db->status() == DB_NOTFOUND ;
+ ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 21, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 24, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 25, $db->db_get(2, $value) == 0 ;
+ ok 26, $value eq "value" ;
+
+
+}
+
+
+{
+ # Check a simple env works with an array.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 27, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+
+ ok 29, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put(1, "some value") == 0 ;
+ ok 31, $db->db_get(1, $value) == 0 ;
+ ok 32, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+ rmtree $home ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my ($k, $v) ;
+ ok 33, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "red" ,
+ "green" ,
+ "blue" ,
+ ) ;
+
+ my $i ;
+ my %data ;
+ my $ret = 0 ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ $data{$i} = $data[$i] ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = 0 ; $v = "" ;
+ my %copy = %data;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+}
+
+{
+ # Tied Array interface
+
+ # full tied array support started in Perl 5.004_57
+ # just double check.
+ my $FA = 0 ;
+ {
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+ }
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 46, $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Property => DB_RENUMBER,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+
+ ok 47, my $cursor = (tied @array)->db_cursor() ;
+ # check the database is empty
+ my $count = 0 ;
+ my ($k, $v) = (0,"") ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 48, $cursor->status() == DB_NOTFOUND ;
+ ok 49, $count == 0 ;
+
+ ok 50, @array == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $array[1] = "some value";
+ ok 51, (tied @array)->status() == 0 ;
+ ok 52, $array[1] eq "some value";
+ ok 53, defined $array[1];
+ ok 54, (tied @array)->status() == 0 ;
+ ok 55, !defined $array[3];
+ ok 56, (tied @array)->status() == DB_NOTFOUND ;
+
+ ok 57, (tied @array)->db_del(1) == 0 ;
+ ok 58, (tied @array)->status() == 0 ;
+ ok 59, ! defined $array[1];
+ ok 60, (tied @array)->status() == DB_NOTFOUND ;
+
+ $array[1] = 2 ;
+ $array[10] = 20 ;
+ $array[1000] = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 61, $count == 3 ;
+ ok 62, $keys == 1011 ;
+ ok 63, $values == 2022 ;
+
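+ # DB_RENUMBER (set when the array was tied) lets unshift insert at the
+ # front and renumber the records that follow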
+ # unshift
+ $FA ? unshift @array, "red", "green", "blue"
+ : $db->unshift("red", "green", "blue" ) ;
+ ok 64, $array[1] eq "red" ;
+ ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 66, $k == 1 ;
+ ok 67, $v eq "red" ;
+ ok 68, $array[2] eq "green" ;
+ ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 70, $k == 2 ;
+ ok 71, $v eq "green" ;
+ ok 72, $array[3] eq "blue" ;
+ ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 74, $k == 3 ;
+ ok 75, $v eq "blue" ;
+ ok 76, $array[4] == 2 ;
+ ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 78, $k == 4 ;
+ ok 79, $v == 2 ;
+
+ # shift
+ ok 80, ($FA ? shift @array : $db->shift()) eq "red" ;
+ ok 81, ($FA ? shift @array : $db->shift()) eq "green" ;
+ ok 82, ($FA ? shift @array : $db->shift()) eq "blue" ;
+ ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
+
+ # push
+ $FA ? push @array, "the", "end"
+ : $db->push("the", "end") ;
+ ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 85, $k == 1001 ;
+ ok 86, $v eq "end" ;
+ ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 88, $k == 1000 ;
+ ok 89, $v eq "the" ;
+ ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 91, $k == 999 ;
+ ok 92, $v == 2000 ;
+
+ # pop
+ ok 93, ( $FA ? pop @array : $db->pop ) eq "end" ;
+ ok 94, ( $FA ? pop @array : $db->pop ) eq "the" ;
+ ok 95, ( $FA ? pop @array : $db->pop ) == 2000 ;
+
+ # now clear the array
+ $FA ? @array = ()
+ : $db->clear() ;
+ ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
+
+ undef $cursor ;
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory file
+
+ my @array ;
+ my $fd ;
+ my $value ;
+ ok 97, my $db = tie @array, 'BerkeleyDB::Recno' ;
+
+ ok 98, $db->db_put(1, "some value") == 0 ;
+ ok 99, $db->db_get(1, $value) == 0 ;
+ ok 100, $value eq "some value" ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ ok 101, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ }
+ ok 102, $ret == 0 ;
+
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 103, ! $pon && $off == 0 && $len == 0 ;
+ ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
+ ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
+ ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 107, $pon ;
+ ok 108, $off == 0 ;
+ ok 109, $len == 2 ;
+ ok 110, $db->db_get(1, $value) == 0 && $value eq "t" ;
+ ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
+ ok 112, $db->db_get(3, $value) == 0 && $value eq "" ;
+
+ # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get(1, $value) == 0 && $value eq "boat" ;
+ ok 117, $db->db_get(2, $value) == 0 && $value eq "house" ;
+ ok 118, $db->db_get(3, $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 119, $db->db_put(1, "") == 0 ;
+ ok 120, $db->db_put(2, "AB") == 0 ;
+ ok 121, $db->db_put(3, "XYZ") == 0 ;
+ ok 122, $db->db_put(4, "KLM") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 123, $pon ;
+ ok 124, $off == 0 ;
+ ok 125, $len == 2 ;
+ ok 126, $db->db_get(1, $value) == 0 && $value eq "at" ;
+ ok 127, $db->db_get(2, $value) == 0 && $value eq "ABuse" ;
+ ok 128, $db->db_get(3, $value) == 0 && $value eq "XYZa" ;
+ ok 129, $db->db_get(4, $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 130, ! $pon ;
+ ok 131, $off == 0 ;
+ ok 132, $len == 0 ;
+ ok 133, $db->db_put(1, "PPP") == 0 ;
+ ok 134, $db->db_put(2, "Q") == 0 ;
+ ok 135, $db->db_put(3, "XYZ") == 0 ;
+ ok 136, $db->db_put(4, "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 137, $db->db_get(1, $value) == 0 && $value eq "at\0PPP" ;
+ ok 138, $db->db_get(2, $value) == 0 && $value eq "ABuQ" ;
+ ok 139, $db->db_get(3, $value) == 0 && $value eq "XYZXYZ" ;
+ ok 140, $db->db_get(4, $value) == 0 && $value eq "KLMTU" ;
+}
+
+{
+ # partial
+ # check works via tied array
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+ ok 141, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $i ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $array[$i] = $data[$i] ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 142, $array[1] eq "bo" ;
+ ok 143, $array[2] eq "ho" ;
+ ok 144, $array[3] eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 145, $array[1] eq "t" ;
+ ok 146, $array[2] eq "se" ;
+ ok 147, $array[3] eq "" ;
+
+ # switch off partial mode
+ $db->partial_clear() ;
+ ok 148, $array[1] eq "boat" ;
+ ok 149, $array[2] eq "house" ;
+ ok 150, $array[3] eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 151, $array[1] = "" ;
+ ok 152, $array[2] = "AB" ;
+ ok 153, $array[3] = "XYZ" ;
+ ok 154, $array[4] = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 155, $array[1] eq "at" ;
+ ok 156, $array[2] eq "ABuse" ;
+ ok 157, $array[3] eq "XYZa" ;
+ ok 158, $array[4] eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 159, $array[1] = "PPP" ;
+ ok 160, $array[2] = "Q" ;
+ ok 161, $array[3] = "XYZ" ;
+ ok 162, $array[4] = "TU" ;
+
+ $db->partial_clear() ;
+ ok 163, $array[1] eq "at\0PPP" ;
+ ok 164, $array[2] eq "ABuQ" ;
+ ok 165, $array[3] eq "XYZXYZ" ;
+ ok 166, $array[4] eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 167, mkdir($home, 0777) ;
+ ok 168, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 169, my $txn = $env->txn_begin() ;
+ ok 170, my $db1 = tie @array, 'BerkeleyDB::Recno',
+ -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my @data = (
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db1->db_put($i, $data[$i]) ;
+ }
+ ok 171, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 172, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = (0, "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 173, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 174, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 175, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 176, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie @array ;
+ rmtree $home ;
+}
+
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
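+ # from Berkeley DB 3.1 the record count is reported in bt_ndata rather
+ # than bt_nrecs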
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
+ my @array ;
+ my ($k, $v) ;
+ ok 177, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Pagesize => 4 * 1024,
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 178, $ref->{$recs} == 0;
+ ok 179, $ref->{'bt_pagesize'} == 4 * 1024;
+
+ # create some data
+ my @data = (
+ 2,
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = $db->ArrayOffset ; @data ; ++$i) {
+ $ret += $db->db_put($i, shift @data) ;
+ }
+ ok 180, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 181, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Recno);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 182, $@ eq "" ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB", -Filename => "dbbtree.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 183, $@ eq "" ;
+
+ my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
+ main::ok 184, $@ eq "" ;
+ main::ok 185, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
+ main::ok 186, $@ eq "" ;
+ main::ok 187, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # variable length records, DB_DELIMITER -- defaults to \n
+
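+ # -Source backs the recno database with a flat text file; the records are
+ # written out to it, separated by the delimiter, when the database is closed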
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 192, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 193, $x eq "abc\ndef\n\nghi\n" ;
+}
+
+{
+ # variable length records, change DB_DELIMITER
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 194, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ,
+ -Delim => "-";
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 195, $x eq "abc-def--ghi-";
+}
+
+{
+ # fixed length records, default DB_PAD
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 196, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => 5,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 197, $x eq "abc def ghi " ;
+}
+
+{
+ # fixed length records, change Pad
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 198, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => 5,
+ -Pad => "-",
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 199, $x eq "abc--def-------ghi--" ;
+}
+
+{
+ # DB_RENUMBER
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ ok 200, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Property => DB_RENUMBER,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+ # create a few records
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ ok 201, my ($length, $joined) = joiner($db, "|") ;
+ ok 202, $length == 3 ;
+ ok 203, $joined eq "abc|def|ghi";
+
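+ # with DB_RENUMBER the delete closes the gap, renumbering the records
+ # that follow, so only "abc" and "ghi" remain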
+ ok 204, $db->db_del(1) == 0 ;
+ ok 205, ($length, $joined) = joiner($db, "|") ;
+ ok 206, $length == 2 ;
+ ok 207, $joined eq "abc|ghi";
+
+ undef $db ;
+ untie @array ;
+
+}
+
+{
+ # DB_APPEND
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ ok 208, my $db = tie @array, 'BerkeleyDB::Recno',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create a few records
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ my $k = 0 ;
+ ok 209, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 210, $k == 4 ;
+
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory Recno with an associated text file
+
+ my $lex = new LexFile $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 211, tie @array, 'BerkeleyDB::Recno', -Source => $Dfile2 ,
+ -ArrayBase => 0,
+ -Property => DB_RENUMBER,
+ -Flags => DB_CREATE ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 212, $x eq "abc\ndef\n\nghi\n" ;
+}
+
+{
+ # in-memory, variable length records, change DB_DELIMITER
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 213, tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ,
+ -Property => DB_RENUMBER,
+ -Delim => "-";
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 214, $x eq "abc-def--ghi-";
+}
+
+{
+ # in-memory, fixed length records, default DB_PAD
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 215, tie @array, 'BerkeleyDB::Recno', -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Len => 5,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 216, $x eq "abc def ghi " ;
+}
+
+{
+ # in-memory, fixed length records, change Pad
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 217, tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Len => 5,
+ -Pad => "-",
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 218, $x eq "abc--def-------ghi--" ;
+}
+
+__END__
+
+
+# TODO
+#
+# DB_DELIMITER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/bdb/perl.BerkeleyDB/t/strict.t b/bdb/perl.BerkeleyDB/t/strict.t
new file mode 100644
index 00000000000..0a856bbb1c6
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/strict.t
@@ -0,0 +1,220 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..44\n";
+
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+
+my $Dfile = "dbhash.tmp";
+my $home = "./fred" ;
+
+umask(0);
+
+{
+ # closing a database & an environment in the correct order.
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ rmtree $home if -e $home ;
+ ok 1, mkdir($home, 0777) ;
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 3, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env;
+
+ ok 4, $db1->db_close() == 0 ;
+
+ eval { $status = $env->db_appexit() ; } ;
+ ok 5, $status == 0 ;
+ ok 6, $@ eq "" ;
+ #print "[$@]\n" ;
+
+ rmtree $home if -e $home ;
+}
+
+{
+ # closing an environment with an open database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+
+ rmtree $home if -e $home ;
+ ok 7, mkdir($home, 0777) ;
+ ok 8, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 9, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env;
+
+ eval { $env->db_appexit() ; } ;
+ ok 10, $@ =~ /BerkeleyDB Aborting: attempted to close an environment with 1 open database/ ;
+ #print "[$@]\n" ;
+
+ undef $db1 ;
+ untie %hash ;
+ undef $env ;
+ rmtree $home if -e $home ;
+}
+
+{
+ # closing a transaction & a database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ rmtree $home if -e $home ;
+ ok 11, mkdir($home, 0777) ;
+ ok 12, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 13, my $txn = $env->txn_begin() ;
+ ok 14, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 15, $txn->txn_commit() == 0 ;
+ eval { $status = $db->db_close() ; } ;
+ ok 16, $status == 0 ;
+ ok 17, $@ eq "" ;
+ eval { $status = $env->db_appexit() ; } ;
+ ok 18, $status == 0 ;
+ ok 19, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a database with an open transaction
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+
+ rmtree $home if -e $home ;
+ ok 20, mkdir($home, 0777) ;
+ ok 21, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 22, my $txn = $env->txn_begin() ;
+ ok 23, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ eval { $db->db_close() ; } ;
+ ok 24, $@ =~ /BerkeleyDB Aborting: attempted to close a database while a transaction was still open at/ ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a cursor & a database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+ ok 25, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ ok 26, my $cursor = $db->db_cursor() ;
+ ok 27, $cursor->c_close() == 0 ;
+ eval { $status = $db->db_close() ; } ;
+ ok 28, $status == 0 ;
+ ok 29, $@ eq "" ;
+ #print "[$@]\n" ;
+ rmtree $home if -e $home ;
+}
+
+{
+ # closing a database with an open cursor
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 30, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ ok 31, my $cursor = $db->db_cursor() ;
+ eval { $db->db_close() ; } ;
+ ok 32, $@ =~ /\QBerkeleyDB Aborting: attempted to close a database with 1 open cursor(s) at/;
+ #print "[$@]\n" ;
+ rmtree $home if -e $home ;
+}
+
+{
+ # closing a transaction & a cursor
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ rmtree $home if -e $home ;
+ ok 33, mkdir($home, 0777) ;
+ ok 34, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 35, my $txn = $env->txn_begin() ;
+ ok 36, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+ ok 37, my $cursor = $db->db_cursor() ;
+ eval { $status = $cursor->c_close() ; } ;
+ ok 38, $status == 0 ;
+ ok 39, ($status = $txn->txn_commit()) == 0 ;
+ ok 40, $@ eq "" ;
+ eval { $status = $db->db_close() ; } ;
+ ok 41, $status == 0 ;
+ ok 42, $@ eq "" ;
+ eval { $status = $env->db_appexit() ; } ;
+ ok 43, $status == 0 ;
+ ok 44, $@ eq "" ;
+ #print "[$@]\n" ;
+ rmtree $home if -e $home ;
+}
+
diff --git a/bdb/perl.BerkeleyDB/t/subdb.t b/bdb/perl.BerkeleyDB/t/subdb.t
new file mode 100644
index 00000000000..290e5d691e4
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/subdb.t
@@ -0,0 +1,296 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..43\n";
+
+my %DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+ ) ;
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub addData
+{
+ my $db = shift ;
+ my @data = @_ ;
+ die "addData odd data\n" unless @data % 2 == 0 ;
+ my ($k, $v) ;
+ my $ret = 0 ;
+ while (@data) {
+ $k = shift @data ;
+ $v = shift @data ;
+ $ret += $db->db_put($k, $v) ;
+ }
+
+ return ($ret == 0) ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+# Berkeley DB 3.x specific functionality
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' BerkeleyDB::db_remove -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' BerkeleyDB::db_remove -Bad => 2, -Filename => "fred", -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' BerkeleyDB::db_remove -Filename => "a", -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' BerkeleyDB::db_remove -Subname => "a"' ;
+ ok 4, $@ =~ /^Must specify a filename/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' BerkeleyDB::db_remove -Filename => "x", -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+{
+ # subdatabases
+
+ # opening a subdatabase in an existing database that doesn't have
+ # subdatabases at all should fail
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 7, addData($db, %data) ;
+
+ undef $db ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ;
+ ok 8, ! $db ;
+
+ ok 9, -e $Dfile ;
+ ok 10, ! BerkeleyDB::db_remove(-Filename => $Dfile) ;
+}
+
+{
+ # subdatabases
+
+ # opening a subdatabase in an existing database that does have
+ # subdatabases, but not this one, should also fail
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 11, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 12, addData($db, %data) ;
+
+ undef $db ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "joe" ;
+
+ ok 13, !$db ;
+
+}
+
+{
+ # subdatabases
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 14, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 15, addData($db, %data) ;
+
+ undef $db ;
+
+ ok 16, $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ;
+
+ ok 17, my $cursor = $db->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $status ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ if ($data{$k} eq $v) {
+ delete $data{$k} ;
+ }
+ }
+ ok 18, $status == DB_NOTFOUND ;
+ ok 19, keys %data == 0 ;
+}
+
+{
+ # subdatabases
+
+ # opening a database with multiple subdatabases - handle should be a list
+ # of the subdatabase names
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 20, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ ok 21, my $db2 = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Subname => "joe" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 22, addData($db1, %data) ;
+ ok 23, addData($db2, %data) ;
+
+ undef $db1 ;
+ undef $db2 ;
+
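+ # opening the file with BerkeleyDB::Unknown and no -Subname gives a
+ # handle whose keys are the names of the subdatabases it contains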
+ ok 24, my $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ #my $type = $db->type() ; print "type $type\n" ;
+ ok 25, my $cursor = $db->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $status ;
+ my @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 26, $status == DB_NOTFOUND ;
+ ok 27, join(",", sort @dbnames) eq "fred,joe" ;
+ undef $db ;
+
+ ok 28, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "harry") != 0;
+ ok 29, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") == 0 ;
+
+ # should only be one subdatabase
+ ok 30, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ ok 31, $cursor = $db->db_cursor() ;
+ @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 32, $status == DB_NOTFOUND ;
+ ok 33, join(",", sort @dbnames) eq "joe" ;
+ undef $db ;
+
+ # can't delete an already deleted subdatabase
+ ok 34, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") != 0;
+
+ ok 35, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "joe") == 0 ;
+
+ # should only be one subdatabase
+ ok 36, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ ok 37, $cursor = $db->db_cursor() ;
+ @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 38, $status == DB_NOTFOUND ;
+ ok 39, @dbnames == 0 ;
+ undef $db ;
+
+ ok 40, -e $Dfile ;
+ ok 41, BerkeleyDB::db_remove(-Filename => $Dfile) == 0 ;
+ ok 42, ! -e $Dfile ;
+ ok 43, BerkeleyDB::db_remove(-Filename => $Dfile) != 0 ;
+}
+
+# db_remove with env
diff --git a/bdb/perl.BerkeleyDB/t/txn.t b/bdb/perl.BerkeleyDB/t/txn.t
new file mode 100644
index 00000000000..6bef1887ea3
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/txn.t
@@ -0,0 +1,354 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..50\n";
+
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # error cases
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 1, mkdir($home, 0777) ;
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE| DB_INIT_MPOOL;
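+ # the environment was opened without DB_INIT_TXN, so both transaction
+ # calls below should croak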
+ eval { $env->txn_begin() ; } ;
+ ok 3, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
+
+ eval { my $txn_mgr = $env->TxnMgr() ; } ;
+ ok 4, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
+ undef $env ;
+ rmtree $home ;
+
+}
+
+{
+ # transaction - abort works
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 5, mkdir($home, 0777) ;
+ ok 6, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 7, my $txn = $env->txn_begin() ;
+ ok 8, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 9, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 10, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 11, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 12, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 13, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 14, $count == 0 ;
+
+ my $stat = $env->txn_stat() ;
+ ok 15, $stat->{'st_naborts'} == 1 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+ rmtree $home ;
+}
+
+{
+ # transaction - abort works via txnmgr
+
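+ # TxnMgr() returns a transaction-manager handle whose txn_begin and
+ # txn_stat mirror the corresponding environment methods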
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 16, mkdir($home, 0777) ;
+ ok 17, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 18, my $txn_mgr = $env->TxnMgr() ;
+ ok 19, my $txn = $txn_mgr->txn_begin() ;
+ ok 20, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 21, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 22, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 23, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 24, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 25, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 26, $count == 0 ;
+
+ my $stat = $txn_mgr->txn_stat() ;
+ ok 27, $stat->{'st_naborts'} == 1 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $txn_mgr ;
+ undef $env ;
+ untie %hash ;
+ rmtree $home ;
+}
+
+{
+ # transaction - commit works
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 28, mkdir($home, 0777) ;
+ ok 29, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 30, my $txn = $env->txn_begin() ;
+ ok 31, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 32, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 33, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 34, $count == 3 ;
+ undef $cursor ;
+
+ # now commit the transaction
+ ok 35, $txn->txn_commit() == 0 ;
+
+ $count = 0 ;
+ # sequence forwards
+ ok 36, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 37, $count == 3 ;
+
+ my $stat = $env->txn_stat() ;
+ ok 38, $stat->{'st_naborts'} == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+ rmtree $home ;
+}
+
+{
+ # transaction - commit works via txnmgr
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ rmtree $home if -e $home ;
+ ok 39, mkdir($home, 0777) ;
+ ok 40, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 41, my $txn_mgr = $env->TxnMgr() ;
+ ok 42, my $txn = $txn_mgr->txn_begin() ;
+ ok 43, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 44, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 45, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 46, $count == 3 ;
+ undef $cursor ;
+
+ # now commit the transaction
+ ok 47, $txn->txn_commit() == 0 ;
+
+ $count = 0 ;
+ # sequence forwards
+ ok 48, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 49, $count == 3 ;
+
+ my $stat = $txn_mgr->txn_stat() ;
+ ok 50, $stat->{'st_naborts'} == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $txn_mgr ;
+ undef $env ;
+ untie %hash ;
+ rmtree $home ;
+}
+
diff --git a/bdb/perl.BerkeleyDB/t/unknown.t b/bdb/perl.BerkeleyDB/t/unknown.t
new file mode 100644
index 00000000000..e72021f0b18
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/t/unknown.t
@@ -0,0 +1,212 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..41\n";
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub writeFile
+{
+ my $name = shift ;
+ open(FH, ">$name") or return 0 ;
+ print FH @_ ;
+ close FH ;
+ return 1 ;
+}
+
+my $Dfile = "dbhash.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Unknown -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Txn => "fred" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Unknown -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# check the interface to a rubbish database
+{
+ # first an empty file
+ my $lex = new LexFile $Dfile ;
+ ok 6, writeFile($Dfile, "") ;
+
+ ok 7, ! (new BerkeleyDB::Unknown -Filename => $Dfile);
+
+ # now a non-database file
+ writeFile($Dfile, "\x2af6") ;
+ ok 8, ! (new BerkeleyDB::Unknown -Filename => $Dfile);
+}
+
+# check the interface to a Hash database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a hash database
+ ok 9, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 10, $db->db_put("some key", "some value") == 0 ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ ok 12, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 13, $db->type() == DB_HASH ;
+ ok 14, $db->db_get("some key", $value) == 0 ;
+ ok 15, $value eq "some value" ;
+ ok 16, $db->db_get("key", $value) == 0 ;
+ ok 17, $value eq "value" ;
+
+ my @array ;
+ eval { $db->Tie(\@array)} ;
+ ok 18, $@ =~ /^Tie needs a reference to a hash/ ;
+
+ my %hash ;
+ $db->Tie(\%hash) ;
+ ok 19, $hash{"some key"} eq "some value" ;
+
+}
+
+# check the interface to a Btree database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+    # create a btree database
+ ok 20, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 21, $db->db_put("some key", "some value") == 0 ;
+ ok 22, $db->db_put("key", "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ ok 23, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 24, $db->type() == DB_BTREE ;
+ ok 25, $db->db_get("some key", $value) == 0 ;
+ ok 26, $value eq "some value" ;
+ ok 27, $db->db_get("key", $value) == 0 ;
+ ok 28, $value eq "value" ;
+
+
+ my @array ;
+ eval { $db->Tie(\@array)} ;
+ ok 29, $@ =~ /^Tie needs a reference to a hash/ ;
+
+ my %hash ;
+ $db->Tie(\%hash) ;
+ ok 30, $hash{"some key"} eq "some value" ;
+
+
+}
+
+# check the interface to a Recno database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a recno database
+ ok 31, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 32, $db->db_put(0, "some value") == 0 ;
+ ok 33, $db->db_put(1, "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ ok 34, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 35, $db->type() == DB_RECNO ;
+ ok 36, $db->db_get(0, $value) == 0 ;
+ ok 37, $value eq "some value" ;
+ ok 38, $db->db_get(1, $value) == 0 ;
+ ok 39, $value eq "value" ;
+
+
+ my %hash ;
+ eval { $db->Tie(\%hash)} ;
+ ok 40, $@ =~ /^Tie needs a reference to an array/ ;
+
+ my @array ;
+ $db->Tie(\@array) ;
+ ok 41, $array[1] eq "value" ;
+
+
+}
+
+# check i/f to text
diff --git a/bdb/perl.BerkeleyDB/typemap b/bdb/perl.BerkeleyDB/typemap
new file mode 100644
index 00000000000..d6c4c7647ce
--- /dev/null
+++ b/bdb/perl.BerkeleyDB/typemap
@@ -0,0 +1,275 @@
+# typemap for Perl 5 interface to Berkeley DB version 2 & 3
+#
+# SCCS: %I%, %G%
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+#
+#################################### DB SECTION
+#
+#
+
+void * T_PV
+u_int T_U_INT
+u_int32_t T_U_INT
+const char * T_PV_NULL
+PV_or_NULL T_PV_NULL
+IO_or_NULL T_IO_NULL
+
+AV * T_AV
+
+BerkeleyDB T_PTROBJ
+BerkeleyDB::Common T_PTROBJ_AV
+BerkeleyDB::Hash T_PTROBJ_AV
+BerkeleyDB::Btree T_PTROBJ_AV
+BerkeleyDB::Recno T_PTROBJ_AV
+BerkeleyDB::Queue T_PTROBJ_AV
+BerkeleyDB::Cursor T_PTROBJ_AV
+BerkeleyDB::TxnMgr T_PTROBJ_AV
+BerkeleyDB::Txn T_PTROBJ_AV
+BerkeleyDB::Log T_PTROBJ_AV
+BerkeleyDB::Lock T_PTROBJ_AV
+BerkeleyDB::Env T_PTROBJ_AV
+
+BerkeleyDB::Raw T_RAW
+BerkeleyDB::Common::Raw T_RAW
+BerkeleyDB::Hash::Raw T_RAW
+BerkeleyDB::Btree::Raw T_RAW
+BerkeleyDB::Recno::Raw T_RAW
+BerkeleyDB::Queue::Raw T_RAW
+BerkeleyDB::Cursor::Raw T_RAW
+BerkeleyDB::TxnMgr::Raw T_RAW
+BerkeleyDB::Txn::Raw T_RAW
+BerkeleyDB::Log::Raw T_RAW
+BerkeleyDB::Lock::Raw T_RAW
+BerkeleyDB::Env::Raw T_RAW
+
+BerkeleyDB::Env::Inner T_INNER
+BerkeleyDB::Common::Inner T_INNER
+BerkeleyDB::Txn::Inner T_INNER
+BerkeleyDB::TxnMgr::Inner T_INNER
+# BerkeleyDB__Env T_PTR
+DBT T_dbtdatum
+DBT_OPT T_dbtdatum_opt
+DBT_B T_dbtdatum_btree
+DBTKEY T_dbtkeydatum
+DBTKEY_B T_dbtkeydatum_btree
+DBTYPE T_U_INT
+DualType T_DUAL
+BerkeleyDB_type * T_IV
+BerkeleyDB_ENV_type * T_IV
+BerkeleyDB_TxnMgr_type * T_IV
+BerkeleyDB_Txn_type * T_IV
+BerkeleyDB__Cursor_type * T_IV
+DB * T_IV
+
+INPUT
+
+T_AV
+ if (SvROK($arg) && SvTYPE(SvRV($arg)) == SVt_PVAV)
+ /* if (sv_isa($arg, \"${ntype}\")) */
+ $var = (AV*)SvRV($arg);
+ else
+ croak(\"$var is not an array reference\")
+
+T_RAW
+ $var = ($type)SvIV($arg)
+
+T_U_INT
+ $var = SvUV($arg)
+
+T_SV_REF_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV *)GetInternalObject($arg));
+ $var = ($type) tmp;
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_HV_REF_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ HV * hv = (HV *)GetInternalObject($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = ($type) tmp;
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_HV_REF
+ if (sv_derived_from($arg, \"${ntype}\")) {
+ HV * hv = (HV *)GetInternalObject($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = ($type) tmp;
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+
+T_P_REF
+ if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = ($type) tmp;
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+
+T_INNER
+ {
+ HV * hv = (HV *)SvRV($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = ($type) tmp;
+ }
+
+T_PV_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else {
+ $var = ($type)SvPV($arg,PL_na) ;
+ if (PL_na == 0)
+ $var = NULL ;
+ }
+
+T_IO_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else
+ $var = IoOFP(sv_2io($arg))
+
+T_PTROBJ_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = ($type) tmp;
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_PTROBJ_SELF
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = ($type) tmp;
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_PTROBJ_AV
+ if ($arg == &PL_sv_undef || $arg == NULL)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = getInnerObject($arg) ;
+ $var = ($type) tmp;
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_dbtkeydatum
+ ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->recno_or_queue) {
+ Value = GetRecnoKey(db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(db_recno_t);
+ }
+ else {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+T_dbtkeydatum_btree
+ ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->recno_or_queue ||
+ (db->type == DB_BTREE && flagSet(DB_SET_RECNO))) {
+ Value = GetRecnoKey(db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(db_recno_t);
+ }
+ else {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+T_dbtdatum
+ ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBT_clear($var) ;
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+
+T_dbtdatum_opt
+ DBT_clear($var) ;
+ if (flagSet(DB_GET_BOTH)) {
+ ckFilter($arg, filter_store_value, \"filter_store_value\");
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+ }
+
+T_dbtdatum_btree
+ DBT_clear($var) ;
+ if (flagSet(DB_GET_BOTH)) {
+ ckFilter($arg, filter_store_value, \"filter_store_value\");
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+ }
+
+
+OUTPUT
+
+T_RAW
+ sv_setiv($arg, (IV)$var);
+
+T_SV_REF_NULL
+ sv_setiv($arg, (IV)$var);
+
+T_HV_REF_NULL
+ sv_setiv($arg, (IV)$var);
+
+T_HV_REF
+ sv_setiv($arg, (IV)$var);
+
+T_P_REF
+ sv_setiv($arg, (IV)$var);
+
+T_DUAL
+ setDUALerrno($arg, $var) ;
+
+T_U_INT
+ sv_setuv($arg, (UV)$var);
+
+T_PV_NULL
+ sv_setpv((SV*)$arg, $var);
+
+T_dbtkeydatum_btree
+ OutputKey_B($arg, $var)
+T_dbtkeydatum
+ OutputKey($arg, $var)
+T_dbtdatum
+ OutputValue($arg, $var)
+T_dbtdatum_opt
+ OutputValue($arg, $var)
+T_dbtdatum_btree
+ OutputValue_B($arg, $var)
+
+T_PTROBJ_NULL
+ sv_setref_pv($arg, \"${ntype}\", (void*)$var);
+
+T_PTROBJ_SELF
+ sv_setref_pv($arg, self, (void*)$var);
diff --git a/bdb/perl.DB_File/Changes b/bdb/perl.DB_File/Changes
new file mode 100644
index 00000000000..b8684cac3de
--- /dev/null
+++ b/bdb/perl.DB_File/Changes
@@ -0,0 +1,343 @@
+
+0.1
+
+ First Release.
+
+0.2
+
+ When DB_File is opening a database file it no longer terminates the
+ process if dbopen returned an error. This allows file protection
+ errors to be caught at run time. Thanks to Judith Grass
+ <grass@cybercash.com> for spotting the bug.
+
+0.3
+
+ Added prototype support for multiple btree compare callbacks.
+
+1.0
+
+ DB_File has been in use for over a year. To reflect that, the
+ version number has been incremented to 1.0.
+
+ Added complete support for multiple concurrent callbacks.
+
+ Using the push method on an empty list didn't work properly. This
+ has been fixed.
+
+1.01
+
+ Fixed a core dump problem with SunOS.
+
+ The return value from TIEHASH wasn't set to NULL when dbopen
+ returned an error.
+
+1.02
+
+ Merged OS/2 specific code into DB_File.xs
+
+ Removed some redundant code in DB_File.xs.
+
+ Documentation update.
+
+ Allow negative subscripts with RECNO interface.
+
+ Changed the default flags from O_RDWR to O_CREAT|O_RDWR.
+
+ The example code which showed how to lock a database needed a call
+ to sync added. Without it the resultant database file was empty.
+
+ Added get_dup method.
+
+1.03
+
+ Documentation update.
+
+ DB_File now imports the constants (O_RDWR, O_CREAT etc.) from Fcntl
+ automatically.
+
+    The standard hash function exists() is now supported.
+
+ Modified the behavior of get_dup. When it returns an associative
+ array, the value is the count of the number of matching BTREE
+ values.
+
+1.04
+
+ Minor documentation changes.
+
+ Fixed a bug in hash_cb. Patches supplied by Dave Hammen,
+ <hammen@gothamcity.jsc.nasa.govt>.
+
+ Fixed a bug with the constructors for DB_File::HASHINFO,
+ DB_File::BTREEINFO and DB_File::RECNOINFO. Also tidied up the
+ constructors to make them -w clean.
+
+ Reworked part of the test harness to be more locale friendly.
+
+1.05
+
+ Made all scripts in the documentation strict and -w clean.
+
+ Added logic to DB_File.xs to allow the module to be built after
+ Perl is installed.
+
+1.06
+
+ Minor namespace cleanup: Localized PrintBtree.
+
+1.07
+
+ Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+
+1.08
+
+ Documented operation of bval.
+
+1.09
+
+ Minor bug fix in DB_File::HASHINFO, DB_File::RECNOINFO and
+ DB_File::BTREEINFO.
+
+ Changed default mode to 0666.
+
+1.10
+
+ Fixed fd method so that it still returns -1 for in-memory files
+ when db 1.86 is used.
+
+1.11
+
+ Documented the untie gotcha.
+
+1.12
+
+ Documented the incompatibility with version 2 of Berkeley DB.
+
+1.13
+
+    Minor changes to DB_File.xs and DB_File.pm.
+
+1.14
+
+ Made it illegal to tie an associative array to a RECNO database and
+ an ordinary array to a HASH or BTREE database.
+
+1.15
+
+ Patch from Gisle Aas <gisle@aas.no> to suppress "use of undefined
+ value" warning with db_get and db_seq.
+
+ Patch from Gisle Aas <gisle@aas.no> to make DB_File export only the
+ O_* constants from Fcntl.
+
+ Removed the DESTROY method from the DB_File::HASHINFO module.
+
+ Previously DB_File hard-wired the class name of any object that it
+ created to "DB_File". This makes sub-classing difficult. Now
+    DB_File creates objects in the namespace of the package it has been
+ inherited into.
+
+
+1.16
+
+    A harmless-looking tab was causing Makefile.PL to fail on AIX 3.2.5.
+
+ Small fix for the AIX strict C compiler XLC which doesn't like
+ __attribute__ being defined via proto.h and redefined via db.h. Fix
+ courtesy of Jarkko Hietaniemi.
+
+1.50
+
+ DB_File can now build with either DB 1.x or 2.x, but not both at
+ the same time.
+
+1.51
+
+ Fixed the test harness so that it doesn't expect DB_File to have
+ been installed by the main Perl build.
+
+
+    Fixed a bug in mapping the 1.x O_RDONLY flag to its 2.x DB_RDONLY equivalent.
+
+1.52
+
+ Patch from Nick Ing-Simmons now allows DB_File to build on NT.
+ Merged 1.15 patch.
+
+1.53
+
+ Added DB_RENUMBER to flags for recno.
+
+1.54
+
+    Fixed a small bug in the test harness when run under win32.
+    The emulation of fd when using DB 2.x was busted.
+
+1.55
+ Merged 1.16 changes.
+
+1.56
+ Documented the Solaris 2.5 mutex bug
+
+1.57
+    If Perl has been compiled with Threads support, the symbol op will be
+ defined. This clashes with a field name in db.h, so it needs to be
+ #undef'ed before db.h is included.
+
+1.58
+ Tied Array support was enhanced in Perl 5.004_57. DB_File now
+    supports PUSH, POP, SHIFT, UNSHIFT & STORESIZE.
+
+ Fixed a problem with the use of sv_setpvn. When the size is
+ specified as 0, it does a strlen on the data. This was ok for DB
+ 1.x, but isn't for DB 2.x.
+
+1.59
+ Updated the license section.
+
+ Berkeley DB 2.4.10 disallows zero length keys. Tests 32 & 42 in
+ db-btree.t and test 27 in db-hash.t failed because of this change.
+ Those tests have been zapped.
+
+ Added dbinfo to the distribution.
+
+1.60
+ Changed the test to check for full tied array support
+
+1.61 19th November 1998
+
+ Added a note to README about how to build Berkeley DB 2.x when
+ using HP-UX.
+ Minor modifications to get the module to build with DB 2.5.x
+ Fixed a typo in the definition of O_RDONLY, courtesy of Mark Kettenis.
+
+1.62 30th November 1998
+
+ Added hints/dynixptx.pl.
+ Fixed typemap -- 1.61 used PL_na instead of na
+
+1.63 19th December 1998
+
+ * Fix to allow DB 2.6.x to build with DB_File
+ * Documentation updated to use push,pop etc in the RECNO example &
+ to include the find_dup & del_dup methods.
+
+1.64 21st February 1999
+
+ * Tidied the 1.x to 2.x flag mapping code.
+ * Added a patch from Mark Kettenis <kettenis@wins.uva.nl> to fix a flag
+ mapping problem with O_RDONLY on the Hurd
+ * Updated the message that db-recno.t prints when tests 51, 53 or 55 fail.
+
+1.65 6th March 1999
+
+ * Fixed a bug in the recno PUSH logic.
+ * The BOOT version check now needs 2.3.4 when using Berkeley DB version 2
+
+1.66 15th March 1999
+
+ * Added DBM Filter code
+
+1.67 6th June 1999
+
+ * Added DBM Filter documentation to DB_File.pm
+
+ * Fixed DBM Filter code to work with 5.004
+
+ * A few instances of newSVpvn were used in 1.66. This isn't available in
+ Perl 5.004_04 or earlier. Replaced with newSVpv.
+
+1.68 22nd July 1999
+
+ * Merged changes from 5.005_58
+
+    * Fixed a bug in R_IBEFORE & R_IAFTER processing in Berkeley DB
+ 2 databases.
+
+ * Added some of the examples in the POD into the test harness.
+
+1.69 3rd August 1999
+
+ * fixed a bug in push -- DB_APPEND wasn't working properly.
+
+ * Fixed the R_SETCURSOR bug introduced in 1.68
+
+ * Added a new Perl variable $DB_File::db_ver
+
+1.70 4th August 1999
+
+ * Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
+
+ * Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
+
+1.71 7th September 1999
+
+ * Fixed a bug that prevented 1.70 from compiling under win32
+
+ * Updated to support Berkeley DB 3.x
+
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+
+1.72 16th January 2000
+
+ * Added hints/sco.pl
+
+ * The module will now use XSLoader when it is available. When it
+ isn't it will use DynaLoader.
+
+ * The locking section in DB_File.pm has been discredited. Many thanks
+ to David Harris for spotting the underlying problem, contributing
+ the updates to the documentation and writing DB_File::Lock (available
+ on CPAN).
+
+1.73 31st May 2000
+
+ * Added support in version.c for building with threaded Perl.
+
+ * Berkeley DB 3.1 has reenabled support for null keys. The test
+ harness has been updated to reflect this.
+
+1.74 10th December 2000
+
+    * A "close" call in DB_File.xs needed to be parenthesised to stop win32 from
+ thinking it was one of its macros.
+
+ * Updated dbinfo to support Berkeley DB 3.1 file format changes.
+
+    * DB_File.pm & the test harness now use the warnings pragma (when
+ available).
+
+ * Included Perl core patch 7703 -- size argument for hash_cb is different
+ for Berkeley DB 3.x
+
+ * Included Perl core patch 7801 -- Give __getBerkeleyDBInfo the ANSI C
+ treatment.
+
+ * @a = () produced the warning 'Argument "" isn't numeric in entersub'
+ This has been fixed. Thanks to Edward Avis for spotting this bug.
+
+ * Added note about building under Linux. Included patches.
+
+ * Included Perl core patch 8068 -- fix for bug 20001013.009
+ When run with warnings enabled "$hash{XX} = undef " produced an
+ "Uninitialized value" warning. This has been fixed.
+
+1.75 17th December 2000
+
+ * Fixed perl core patch 7703
+
+    * Added support to allow DB_File to be built with Berkeley DB 3.2 --
+ btree_compare, btree_prefix and hash_cb needed to be changed.
+
+ * Updated dbinfo to support Berkeley DB 3.2 file format changes.
+
+
+1.76 15th January 2001
+
+ * Added instructions for using LD_PRELOAD to get Berkeley DB 2.x to work
+ with DB_File on Linux. Thanks to Norbert Bollow for sending details of
+ this approach.
+
+
diff --git a/bdb/perl.DB_File/DB_File.pm b/bdb/perl.DB_File/DB_File.pm
new file mode 100644
index 00000000000..e9b6a40d7e3
--- /dev/null
+++ b/bdb/perl.DB_File/DB_File.pm
@@ -0,0 +1,2072 @@
+# DB_File.pm -- Perl 5 interface to Berkeley DB
+#
+# written by Paul Marquess (Paul.Marquess@btinternet.com)
+# last modified 15th January 2001
+# version 1.76
+#
+# Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+
+package DB_File::HASHINFO ;
+
+require 5.003 ;
+
+use warnings;
+use strict;
+use Carp;
+require Tie::Hash;
+@DB_File::HASHINFO::ISA = qw(Tie::Hash);
+
+sub new
+{
+ my $pkg = shift ;
+ my %x ;
+ tie %x, $pkg ;
+ bless \%x, $pkg ;
+}
+
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => { map {$_, 1}
+ qw( bsize ffactor nelem cachesize hash lorder)
+ },
+ GOT => {}
+ }, $pkg ;
+}
+
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ return $self->{GOT}{$key} if exists $self->{VALID}{$key} ;
+
+ my $pkg = ref $self ;
+ croak "${pkg}::FETCH - Unknown element '$key'" ;
+}
+
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ if ( exists $self->{VALID}{$key} )
+ {
+ $self->{GOT}{$key} = $value ;
+ return ;
+ }
+
+ my $pkg = ref $self ;
+ croak "${pkg}::STORE - Unknown element '$key'" ;
+}
+
+sub DELETE
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ if ( exists $self->{VALID}{$key} )
+ {
+ delete $self->{GOT}{$key} ;
+ return ;
+ }
+
+ my $pkg = ref $self ;
+ croak "DB_File::HASHINFO::DELETE - Unknown element '$key'" ;
+}
+
+sub EXISTS
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ exists $self->{VALID}{$key} ;
+}
+
+sub NotHere
+{
+ my $self = shift ;
+ my $method = shift ;
+
+ croak ref($self) . " does not define the method ${method}" ;
+}
+
+sub FIRSTKEY { my $self = shift ; $self->NotHere("FIRSTKEY") }
+sub NEXTKEY { my $self = shift ; $self->NotHere("NEXTKEY") }
+sub CLEAR { my $self = shift ; $self->NotHere("CLEAR") }
+
+package DB_File::RECNOINFO ;
+
+use warnings;
+use strict ;
+
+@DB_File::RECNOINFO::ISA = qw(DB_File::HASHINFO) ;
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => { map {$_, 1}
+ qw( bval cachesize psize flags lorder reclen bfname )
+ },
+ GOT => {},
+ }, $pkg ;
+}
+
+package DB_File::BTREEINFO ;
+
+use warnings;
+use strict ;
+
+@DB_File::BTREEINFO::ISA = qw(DB_File::HASHINFO) ;
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => { map {$_, 1}
+ qw( flags cachesize maxkeypage minkeypage psize
+ compare prefix lorder )
+ },
+ GOT => {},
+ }, $pkg ;
+}
+
+
+package DB_File ;
+
+use warnings;
+use strict;
+use vars qw($VERSION @ISA @EXPORT $AUTOLOAD $DB_BTREE $DB_HASH $DB_RECNO
+ $db_version $use_XSLoader
+ ) ;
+use Carp;
+
+
+$VERSION = "1.76" ;
+
+#typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;
+$DB_BTREE = new DB_File::BTREEINFO ;
+$DB_HASH = new DB_File::HASHINFO ;
+$DB_RECNO = new DB_File::RECNOINFO ;
+
+require Tie::Hash;
+require Exporter;
+use AutoLoader;
+BEGIN {
+ $use_XSLoader = 1 ;
+ eval { require XSLoader } ;
+
+ if ($@) {
+ $use_XSLoader = 0 ;
+ require DynaLoader;
+ @ISA = qw(DynaLoader);
+ }
+}
+
+push @ISA, qw(Tie::Hash Exporter);
+@EXPORT = qw(
+ $DB_BTREE $DB_HASH $DB_RECNO
+
+ BTREEMAGIC
+ BTREEVERSION
+ DB_LOCK
+ DB_SHMEM
+ DB_TXN
+ HASHMAGIC
+ HASHVERSION
+ MAX_PAGE_NUMBER
+ MAX_PAGE_OFFSET
+ MAX_REC_NUMBER
+ RET_ERROR
+ RET_SPECIAL
+ RET_SUCCESS
+ R_CURSOR
+ R_DUP
+ R_FIRST
+ R_FIXEDLEN
+ R_IAFTER
+ R_IBEFORE
+ R_LAST
+ R_NEXT
+ R_NOKEY
+ R_NOOVERWRITE
+ R_PREV
+ R_RECNOSYNC
+ R_SETCURSOR
+ R_SNAPSHOT
+ __R_UNUSED
+
+);
+
+sub AUTOLOAD {
+ my($constname);
+ ($constname = $AUTOLOAD) =~ s/.*:://;
+ my $val = constant($constname, @_ ? $_[0] : 0);
+ if ($! != 0) {
+ if ($! =~ /Invalid/ || $!{EINVAL}) {
+ $AutoLoader::AUTOLOAD = $AUTOLOAD;
+ goto &AutoLoader::AUTOLOAD;
+ }
+ else {
+ my($pack,$file,$line) = caller;
+ croak "Your vendor has not defined DB macro $constname, used at $file line $line.
+";
+ }
+ }
+ eval "sub $AUTOLOAD { $val }";
+ goto &$AUTOLOAD;
+}
+
+
+eval {
+ # Make all Fcntl O_XXX constants available for importing
+ require Fcntl;
+ my @O = grep /^O_/, @Fcntl::EXPORT;
+ Fcntl->import(@O); # first we import what we want to export
+ push(@EXPORT, @O);
+};
+
+if ($use_XSLoader)
+ { XSLoader::load("DB_File", $VERSION)}
+else
+ { bootstrap DB_File $VERSION }
+
+# Preloaded methods go here. Autoload methods go after __END__, and are
+# processed by the autosplit program.
+
+sub tie_hash_or_array
+{
+ my (@arg) = @_ ;
+ my $tieHASH = ( (caller(1))[3] =~ /TIEHASH/ ) ;
+
+ $arg[4] = tied %{ $arg[4] }
+ if @arg >= 5 && ref $arg[4] && $arg[4] =~ /=HASH/ && tied %{ $arg[4] } ;
+
+ # make recno in Berkeley DB version 2 work like recno in version 1.
+ if ($db_version > 1 and defined $arg[4] and $arg[4] =~ /RECNO/ and
+ $arg[1] and ! -e $arg[1]) {
+ open(FH, ">$arg[1]") or return undef ;
+ close FH ;
+ chmod $arg[3] ? $arg[3] : 0666 , $arg[1] ;
+ }
+
+ DoTie_($tieHASH, @arg) ;
+}
+
+sub TIEHASH
+{
+ tie_hash_or_array(@_) ;
+}
+
+sub TIEARRAY
+{
+ tie_hash_or_array(@_) ;
+}
+
+sub CLEAR
+{
+ my $self = shift;
+ my $key = 0 ;
+ my $value = "" ;
+ my $status = $self->seq($key, $value, R_FIRST());
+ my @keys;
+
+ while ($status == 0) {
+ push @keys, $key;
+ $status = $self->seq($key, $value, R_NEXT());
+ }
+ foreach $key (reverse @keys) {
+ my $s = $self->del($key);
+ }
+}
+
+sub EXTEND { }
+
+sub STORESIZE
+{
+ my $self = shift;
+ my $length = shift ;
+ my $current_length = $self->length() ;
+
+ if ($length < $current_length) {
+ my $key ;
+ for ($key = $current_length - 1 ; $key >= $length ; -- $key)
+ { $self->del($key) }
+ }
+ elsif ($length > $current_length) {
+ $self->put($length-1, "") ;
+ }
+}
+
+sub find_dup
+{
+ croak "Usage: \$db->find_dup(key,value)\n"
+ unless @_ == 3 ;
+
+ my $db = shift ;
+ my ($origkey, $value_wanted) = @_ ;
+ my ($key, $value) = ($origkey, 0);
+ my ($status) = 0 ;
+
+ for ($status = $db->seq($key, $value, R_CURSOR() ) ;
+ $status == 0 ;
+ $status = $db->seq($key, $value, R_NEXT() ) ) {
+
+ return 0 if $key eq $origkey and $value eq $value_wanted ;
+ }
+
+ return $status ;
+}
+
+sub del_dup
+{
+ croak "Usage: \$db->del_dup(key,value)\n"
+ unless @_ == 3 ;
+
+ my $db = shift ;
+ my ($key, $value) = @_ ;
+ my ($status) = $db->find_dup($key, $value) ;
+ return $status if $status != 0 ;
+
+ $status = $db->del($key, R_CURSOR() ) ;
+ return $status ;
+}
+
+sub get_dup
+{
+ croak "Usage: \$db->get_dup(key [,flag])\n"
+ unless @_ == 2 or @_ == 3 ;
+
+ my $db = shift ;
+ my $key = shift ;
+ my $flag = shift ;
+ my $value = 0 ;
+ my $origkey = $key ;
+ my $wantarray = wantarray ;
+ my %values = () ;
+ my @values = () ;
+ my $counter = 0 ;
+ my $status = 0 ;
+
+ # iterate through the database until either EOF ($status == 0)
+ # or a different key is encountered ($key ne $origkey).
+ for ($status = $db->seq($key, $value, R_CURSOR()) ;
+ $status == 0 and $key eq $origkey ;
+ $status = $db->seq($key, $value, R_NEXT()) ) {
+
+ # save the value or count number of matches
+ if ($wantarray) {
+ if ($flag)
+ { ++ $values{$value} }
+ else
+ { push (@values, $value) }
+ }
+ else
+ { ++ $counter }
+
+ }
+
+ return ($wantarray ? ($flag ? %values : @values) : $counter) ;
+}
+
+
+1;
+__END__
+
+=head1 NAME
+
+DB_File - Perl5 access to Berkeley DB version 1.x
+
+=head1 SYNOPSIS
+
+ use DB_File ;
+
+ [$X =] tie %hash, 'DB_File', [$filename, $flags, $mode, $DB_HASH] ;
+ [$X =] tie %hash, 'DB_File', $filename, $flags, $mode, $DB_BTREE ;
+ [$X =] tie @array, 'DB_File', $filename, $flags, $mode, $DB_RECNO ;
+
+ $status = $X->del($key [, $flags]) ;
+ $status = $X->put($key, $value [, $flags]) ;
+ $status = $X->get($key, $value [, $flags]) ;
+ $status = $X->seq($key, $value, $flags) ;
+ $status = $X->sync([$flags]) ;
+ $status = $X->fd ;
+
+ # BTREE only
+ $count = $X->get_dup($key) ;
+ @list = $X->get_dup($key) ;
+ %list = $X->get_dup($key, 1) ;
+ $status = $X->find_dup($key, $value) ;
+ $status = $X->del_dup($key, $value) ;
+
+ # RECNO only
+ $a = $X->length;
+ $a = $X->pop ;
+ $X->push(list);
+ $a = $X->shift;
+ $X->unshift(list);
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ untie %hash ;
+ untie @array ;
+
+=head1 DESCRIPTION
+
+B<DB_File> is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 1.x (if you have a newer
+version of DB, see L<Using DB_File with Berkeley DB version 2 or 3>).
+It is assumed that you have a copy of the Berkeley DB manual pages at
+hand when reading this documentation. The interface defined here
+mirrors the Berkeley DB interface closely.
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. B<DB_File> provides an interface to all
+three of the database types currently supported by Berkeley DB.
+
+The file types are:
+
+=over 5
+
+=item B<DB_HASH>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using DB_HASH are not compatible with any of the
+other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most
+applications, is built into Berkeley DB. If you do need to use your own
+hashing algorithm it is possible to write your own in Perl and have
+B<DB_File> use it instead.
+
+=item B<DB_BTREE>
+
+The btree format allows arbitrary key/value pairs to be stored in a
+sorted, balanced tree structure.
+
+As with the DB_HASH format, it is possible to provide a user defined
+Perl routine to perform the comparison of keys. By default, though, the
+keys are stored in lexical order.
+
+=item B<DB_RECNO>
+
+DB_RECNO allows both fixed-length and variable-length flat text files
+to be manipulated using the same key/value pair interface as in DB_HASH
+and DB_BTREE. In this case the key will consist of a record (line)
+number.
+
+=back
+
+=head2 Using DB_File with Berkeley DB version 2 or 3
+
+Although B<DB_File> is intended to be used with Berkeley DB version 1,
+it can also be used with version 2 or 3. In this case the interface is
+limited to the functionality provided by Berkeley DB 1.x. Anywhere the
+version 2 or 3 interface differs, B<DB_File> arranges for it to work
+like version 1. This feature allows B<DB_File> scripts that were built
+with version 1 to be migrated to version 2 or 3 without any changes.
+
+If you want to make use of the new features available in Berkeley DB
+2.x or greater, use the Perl module B<BerkeleyDB> instead.
+
+B<Note:> The database file format has changed in both Berkeley DB
+version 2 and 3. If you cannot recreate your databases, you must dump
+any existing databases with the C<db_dump185> utility that comes with
+Berkeley DB.
+Once you have rebuilt DB_File to use Berkeley DB version 2 or 3, your
+databases can be recreated using C<db_load>. Refer to the Berkeley DB
+documentation for further details.
+
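+One way the dump and reload could look, sketched here as a small Perl
+wrapper around the two utilities (the file names C<old.db>, C<old.dump>
+and C<old.db.new> are purely illustrative, and the exact invocations
+may vary; check the Berkeley DB documentation for your release):
+
+    # illustrative file names; dump the 1.85-format file, reload it
+    # into a new-format file, then move the new file into place
+    system("db_dump185 old.db > old.dump") == 0
+        or die "db_dump185 failed: $?\n" ;
+    system("db_load old.db.new < old.dump") == 0
+        or die "db_load failed: $?\n" ;
+    rename "old.db.new", "old.db"
+        or die "Cannot rename old.db.new: $!\n" ;
+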
+Please read L<"COPYRIGHT"> before using version 2.x or 3.x of Berkeley
+DB with DB_File.
+
+=head2 Interface to Berkeley DB
+
+B<DB_File> allows access to Berkeley DB files using the tie() mechanism
+in Perl 5 (for full details, see L<perlfunc/tie()>). This facility
+allows B<DB_File> to access Berkeley DB files using either an
+associative array (for DB_HASH & DB_BTREE file types) or an ordinary
+array (for the DB_RECNO file type).
+
+In addition to the tie() interface, it is also possible to access most
+of the functions provided in the Berkeley DB API directly.
+See L<THE API INTERFACE>.
+
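+As a minimal sketch of the two styles side by side (the file name
+C<mixed.db> is purely illustrative), the same database can be updated
+both through the tied hash and through the object returned by C<tie>:
+
+    use warnings ;
+    use strict ;
+    use DB_File ;
+
+    my %h ;
+    # "mixed.db" is an illustrative file name
+    my $db = tie %h, "DB_File", "mixed.db", O_RDWR|O_CREAT, 0666, $DB_HASH
+        or die "Cannot open mixed.db: $!\n";
+
+    $h{"apple"} = "red" ;                           # tied interface
+    $db->put("banana", "yellow", R_NOOVERWRITE) ;   # API interface
+
+    undef $db ;
+    untie %h ;
+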
+=head2 Opening a Berkeley DB Database File
+
+Berkeley DB uses the function dbopen() to open or create a database.
+Here is the C prototype for dbopen():
+
+ DB*
+ dbopen (const char * file, int flags, int mode,
+ DBTYPE type, const void * openinfo)
+
+The parameter C<type> is an enumeration which specifies which of the 3
+interface methods (DB_HASH, DB_BTREE or DB_RECNO) is to be used.
+Depending on which of these is actually chosen, the final parameter,
+I<openinfo> points to a data structure which allows tailoring of the
+specific interface method.
+
+This interface is handled slightly differently in B<DB_File>. Here is
+an equivalent call using B<DB_File>:
+
+ tie %array, 'DB_File', $filename, $flags, $mode, $DB_HASH ;
+
+The C<filename>, C<flags> and C<mode> parameters are the direct
+equivalent of their dbopen() counterparts. The final parameter $DB_HASH
+performs the function of both the C<type> and C<openinfo> parameters in
+dbopen().
+
+In the example above $DB_HASH is actually a pre-defined reference to a
+hash object. B<DB_File> has three of these pre-defined references.
+Apart from $DB_HASH, there is also $DB_BTREE and $DB_RECNO.
+
+The keys allowed in each of these pre-defined references are limited to
+the names used in the equivalent C structure. So, for example, the
+$DB_HASH reference will only allow keys called C<bsize>, C<cachesize>,
+C<ffactor>, C<hash>, C<lorder> and C<nelem>.
+
+To change one of these elements, just assign to it like this:
+
+ $DB_HASH->{'cachesize'} = 10000 ;
+
+The three predefined variables $DB_HASH, $DB_BTREE and $DB_RECNO are
+usually adequate for most applications. If you do need to create extra
+instances of these objects, constructors are available for each file
+type.
+
+Here are examples of the constructors and the valid options available
+for DB_HASH, DB_BTREE and DB_RECNO respectively.
+
+ $a = new DB_File::HASHINFO ;
+ $a->{'bsize'} ;
+ $a->{'cachesize'} ;
+ $a->{'ffactor'};
+ $a->{'hash'} ;
+ $a->{'lorder'} ;
+ $a->{'nelem'} ;
+
+ $b = new DB_File::BTREEINFO ;
+ $b->{'flags'} ;
+ $b->{'cachesize'} ;
+ $b->{'maxkeypage'} ;
+ $b->{'minkeypage'} ;
+ $b->{'psize'} ;
+ $b->{'compare'} ;
+ $b->{'prefix'} ;
+ $b->{'lorder'} ;
+
+ $c = new DB_File::RECNOINFO ;
+ $c->{'bval'} ;
+ $c->{'cachesize'} ;
+ $c->{'psize'} ;
+ $c->{'flags'} ;
+ $c->{'lorder'} ;
+ $c->{'reclen'} ;
+ $c->{'bfname'} ;
+
+The values stored in the hashes above are mostly the direct equivalent
+of their C counterparts. Like their C counterparts, all are set to
+default values - that means you don't have to set I<all> of the
+values when you only want to change one. Here is an example:
+
+ $a = new DB_File::HASHINFO ;
+ $a->{'cachesize'} = 12345 ;
+ tie %y, 'DB_File', "filename", $flags, 0777, $a ;
+
+A few of the options need extra discussion here. When used, the C
+equivalent of the keys C<hash>, C<compare> and C<prefix> store pointers
+to C functions. In B<DB_File> these keys are used to store references
+to Perl subs. Below are templates for each of the subs:
+
+ sub hash
+ {
+ my ($data) = @_ ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ sub compare
+ {
+    my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ sub prefix
+ {
+    my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+See L<Changing the BTREE sort order> for an example of using the
+C<compare> template.
+
+If you are using the DB_RECNO interface and you intend making use of
+C<bval>, you should check out L<The 'bval' Option>.
+
+=head2 Default Parameters
+
+It is possible to omit some or all of the final 4 parameters in the
+call to C<tie> and let them take default values. As DB_HASH is the most
+common file format used, the call:
+
+ tie %A, "DB_File", "filename" ;
+
+is equivalent to:
+
+ tie %A, "DB_File", "filename", O_CREAT|O_RDWR, 0666, $DB_HASH ;
+
+It is also possible to omit the filename parameter as well, so the
+call:
+
+ tie %A, "DB_File" ;
+
+is equivalent to:
+
+ tie %A, "DB_File", undef, O_CREAT|O_RDWR, 0666, $DB_HASH ;
+
+See L<In Memory Databases> for a discussion on the use of C<undef>
+in place of a filename.
+
+=head2 In Memory Databases
+
+Berkeley DB allows the creation of in-memory databases by using NULL
+(that is, a C<(char *)0> in C) in place of the filename. B<DB_File>
+uses C<undef> instead of NULL to provide this functionality.
+
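+For example, this minimal sketch creates a temporary database that is
+held in memory and disappears once the hash is untied:
+
+    use warnings ;
+    use strict ;
+    use DB_File ;
+
+    my %h ;
+    # undef in place of a filename gives an in-memory database
+    tie %h, "DB_File", undef, O_RDWR|O_CREAT, 0666, $DB_HASH
+        or die "Cannot create in-memory database: $!\n";
+
+    $h{"scratch"} = "temporary value" ;
+    print "$h{'scratch'}\n" ;
+
+    untie %h ;
+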
+=head1 DB_HASH
+
+The DB_HASH file format is probably the most commonly used of the three
+file formats that B<DB_File> supports. It is also very straightforward
+to use.
+
+=head2 A Simple Example
+
+This example shows how to create a database, add key/value pairs to the
+database, delete keys/value pairs and finally how to enumerate the
+contents of the database.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use vars qw( %h $k $v ) ;
+
+ unlink "fruit" ;
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0640, $DB_HASH
+ or die "Cannot open file 'fruit': $!\n";
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+Here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys are retrieved in
+an apparently random order.
+
+=head1 DB_BTREE
+
+The DB_BTREE format is useful when you want to store data in a given
+order. By default the keys will be stored in lexical order, but as you
+will see from the example shown in the next section, it is very easy to
+define your own sorting function.
+
+=head2 Changing the BTREE sort order
+
+This script shows how to override the default sorting algorithm that
+BTREE uses. Instead of using the normal lexical ordering, a case
+insensitive compare function will be used.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my %h ;
+
+ sub Compare
+ {
+ my ($key1, $key2) = @_ ;
+ "\L$key1" cmp "\L$key2" ;
+ }
+
+ # specify the Perl sub that will do the comparison
+ $DB_BTREE->{'compare'} = \&Compare ;
+
+ unlink "tree" ;
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open file 'tree': $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Handling Duplicate Keys
+
+The BTREE file type optionally allows a single key to be associated
+with an arbitrary number of values. This option is enabled by setting
+the flags element of C<$DB_BTREE> to R_DUP when creating the database.
+
+There are some difficulties in using the tied hash interface if you
+want to manipulate a BTREE database with duplicate keys. Consider this
+code:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename %h ) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the associative array
+ # and print each key/value pair.
+ foreach (sort keys %h)
+ { print "$_ -> $h{$_}\n" }
+
+ untie %h ;
+
+Here is the output:
+
+ Smith -> John
+ Wall -> Larry
+ Wall -> Larry
+ Wall -> Larry
+ mouse -> mickey
+
+As you can see 3 records have been successfully created with key C<Wall>
+- the only thing is, when they are retrieved from the database they
+I<seem> to have the same value, namely C<Larry>. The problem is caused
+by the way that the associative array interface works. Basically, when
+the associative array interface is used to fetch the value associated
+with a given key, it will only ever retrieve the first value.
+
+Although it may not be immediately obvious from the code above, the
+associative array interface can be used to write values with duplicate
+keys, but it cannot be used to read them back from the database.
+
+The way to get around this problem is to use the Berkeley DB API method
+called C<seq>. This method allows sequential access to key/value
+pairs. See L<THE API INTERFACE> for details of both the C<seq> method
+and the API in general.
+
+Here is the script above rewritten using the C<seq> API method.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $status $key $value) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the btree using seq
+ # and print each key/value pair.
+ $key = $value = 0 ;
+ for ($status = $x->seq($key, $value, R_FIRST) ;
+ $status == 0 ;
+ $status = $x->seq($key, $value, R_NEXT) )
+ { print "$key -> $value\n" }
+
+ undef $x ;
+ untie %h ;
+
+that prints:
+
+ Smith -> John
+ Wall -> Brick
+ Wall -> Brick
+ Wall -> Larry
+ mouse -> mickey
+
+This time we have got all the key/value pairs, including the multiple
+values associated with the key C<Wall>.
+
+To make life easier when dealing with duplicate keys, B<DB_File> comes with
+a few utility methods.
+
+=head2 The get_dup() Method
+
+The C<get_dup> method assists in
+reading duplicate values from BTREE databases. The method can take the
+following forms:
+
+ $count = $x->get_dup($key) ;
+ @list = $x->get_dup($key) ;
+ %list = $x->get_dup($key, 1) ;
+
+In a scalar context the method returns the number of values associated
+with the key, C<$key>.
+
+In list context, it returns all the values which match C<$key>. Note
+that the values will be returned in an apparently random order.
+
+In list context, if the second parameter is present and evaluates
+TRUE, the method returns an associative array. The keys of the
+associative array correspond to the values that matched in the BTREE
+and the values of the array are a count of the number of times that
+particular value occurred in the BTREE.
+
+So assuming the database created above, we can use C<get_dup> like
+this:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h ) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ my $cnt = $x->get_dup("Wall") ;
+ print "Wall occurred $cnt times\n" ;
+
+ my %hash = $x->get_dup("Wall", 1) ;
+ print "Larry is there\n" if $hash{'Larry'} ;
+ print "There are $hash{'Brick'} Brick Walls\n" ;
+
+ my @list = sort $x->get_dup("Wall") ;
+ print "Wall => [@list]\n" ;
+
+ @list = $x->get_dup("Smith") ;
+ print "Smith => [@list]\n" ;
+
+ @list = $x->get_dup("Dog") ;
+ print "Dog => [@list]\n" ;
+
+
+and it will print:
+
+ Wall occurred 3 times
+ Larry is there
+ There are 2 Brick Walls
+ Wall => [Brick Brick Larry]
+ Smith => [John]
+ Dog => []
+
+=head2 The find_dup() Method
+
+ $status = $X->find_dup($key, $value) ;
+
+This method checks for the existence of a specific key/value pair. If the
+pair exists, the cursor is left pointing to the pair and the method
+returns 0. Otherwise the method returns a non-zero value.
+
+Assuming the database from the previous example:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $found) ;
+
+ my $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
+ print "Harry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+prints this
+
+ Larry Wall is there
+ Harry Wall is not there
+
+
+=head2 The del_dup() Method
+
+ $status = $X->del_dup($key, $value) ;
+
+This method deletes a specific key/value pair. It returns
+0 if they exist and have been deleted successfully.
+Otherwise the method returns a non-zero value.
+
+Again assuming the existence of the C<tree> database
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $found) ;
+
+ my $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $x->del_dup("Wall", "Larry") ;
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+prints this
+
+ Larry Wall is not there
+
+=head2 Matching Partial Keys
+
+The BTREE interface has a feature which allows partial keys to be
+matched. This functionality is I<only> available when the C<seq> method
+is used along with the R_CURSOR flag.
+
+ $x->seq($key, $value, R_CURSOR) ;
+
+Here is the relevant quote from the dbopen man page where it defines
+the use of the R_CURSOR flag with seq:
+
+ Note, for the DB_BTREE access method, the returned key is not
+ necessarily an exact match for the specified key. The returned key
+ is the smallest key greater than or equal to the specified key,
+ permitting partial key matches and range searches.
+
+In the example script below, the C<match> sub uses this feature to find
+and print the first matching key/value pair given a partial key.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ use vars qw($filename $x %h $st $key $value) ;
+
+ sub match
+ {
+ my $key = shift ;
+ my $value = 0;
+ my $orig_key = $key ;
+ $x->seq($key, $value, R_CURSOR) ;
+ print "$orig_key\t-> $key\t-> $value\n" ;
+ }
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'mouse'} = 'mickey' ;
+ $h{'Wall'} = 'Larry' ;
+ $h{'Walls'} = 'Brick' ;
+ $h{'Smith'} = 'John' ;
+
+
+ $key = $value = 0 ;
+ print "IN ORDER\n" ;
+ for ($st = $x->seq($key, $value, R_FIRST) ;
+ $st == 0 ;
+ $st = $x->seq($key, $value, R_NEXT) )
+
+ { print "$key -> $value\n" }
+
+ print "\nPARTIAL MATCH\n" ;
+
+ match "Wa" ;
+ match "A" ;
+ match "a" ;
+
+ undef $x ;
+ untie %h ;
+
+Here is the output:
+
+ IN ORDER
+ Smith -> John
+ Wall -> Larry
+ Walls -> Brick
+ mouse -> mickey
+
+ PARTIAL MATCH
+ Wa -> Wall -> Larry
+ A -> Smith -> John
+ a -> mouse -> mickey
+
+=head1 DB_RECNO
+
+DB_RECNO provides an interface to flat text files. Both variable and
+fixed length records are supported.
+
+In order to make RECNO more compatible with Perl, the array offset for
+all RECNO arrays begins at 0 rather than 1 as in Berkeley DB.
+
+As with normal Perl arrays, a RECNO array can be accessed using
+negative indexes. The index -1 refers to the last element of the array,
+-2 the second last, and so on. Attempting to access an element before
+the start of the array will raise a fatal run-time error.
+
+=head2 The 'bval' Option
+
+The operation of the bval option warrants some discussion. Here is the
+definition of bval from the Berkeley DB 1.85 recno manual page:
+
+ The delimiting byte to be used to mark the end of a
+ record for variable-length records, and the pad charac-
+ ter for fixed-length records. If no value is speci-
+ fied, newlines (``\n'') are used to mark the end of
+ variable-length records and fixed-length records are
+ padded with spaces.
+
+The second sentence is wrong. In actual fact bval will only default to
+C<"\n"> when the openinfo parameter in dbopen is NULL. If a non-NULL
+openinfo parameter is used at all, the value that happens to be in bval
+will be used. That means you always have to specify bval when making
+use of any of the options in the openinfo parameter. This documentation
+error will be fixed in the next release of Berkeley DB.
+
+That clarifies the situation with regard to Berkeley DB itself. What
+about B<DB_File>? Well, the behavior defined in the quote above is
+quite useful, so B<DB_File> conforms to it.
+
+That means that you can specify other options (e.g. cachesize) and
+still have bval default to C<"\n"> for variable length records, and
+space for fixed length records.
+
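+As a minimal sketch (the file name C<text> is illustrative), the
+following sets only C<cachesize> in a C<DB_File::RECNOINFO> object and
+relies on B<DB_File> supplying the default bval of C<"\n"> for
+variable-length records:
+
+    use warnings ;
+    use strict ;
+    use DB_File ;
+
+    my @lines ;
+    my $recno = new DB_File::RECNOINFO ;
+    $recno->{'cachesize'} = 8192 ;    # bval is deliberately left unset
+
+    # "text" is an illustrative file name
+    tie @lines, "DB_File", "text", O_RDWR|O_CREAT, 0640, $recno
+        or die "Cannot open file 'text': $!\n" ;
+
+    $lines[0] = "first line" ;        # written with a trailing "\n"
+
+    untie @lines ;
+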
+=head2 A Simple Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ untie @h ;
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head2 Extra RECNO Methods
+
+If you are using a version of Perl earlier than 5.004_57, the tied
+array interface is quite limited. In the example script above,
+C<push>, C<pop>, C<shift>, C<unshift> and determining the array
+length will not work with a tied array.
+
+To make the interface more useful for older versions of Perl, a number
+of methods are supplied with B<DB_File> to simulate the missing array
+operations. All these methods are accessed via the object returned from
+the tie call.
+
+Here are the methods:
+
+=over 5
+
+=item B<$X-E<gt>push(list) ;>
+
+Pushes the elements of C<list> to the end of the array.
+
+=item B<$value = $X-E<gt>pop ;>
+
+Removes and returns the last element of the array.
+
+=item B<$X-E<gt>shift>
+
+Removes and returns the first element of the array.
+
+=item B<$X-E<gt>unshift(list) ;>
+
+Pushes the elements of C<list> to the start of the array.
+
+=item B<$X-E<gt>length>
+
+Returns the number of elements in the array.
+
+=back
+
+=head2 Another Example
+
+Here is a more complete example that makes use of some of the methods
+described above. It also makes use of the API interface directly (see
+L<THE API INTERFACE>).
+
+ use warnings ;
+ use strict ;
+ use vars qw(@h $H $file $i) ;
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+and this is what it outputs:
+
+ ORIGINAL
+ 0: zero
+ 1: one
+ 2: two
+ 3: three
+ 4: four
+
+ The last record was [four]
+ The first record was [zero]
+
+ REVERSE
+ 5: last
+ 4: three
+ 3: Newbie
+ 2: one
+ 1: New One
+ 0: first
+
+ REVERSE again
+ 5: last
+ 4: three
+ 3: Newbie
+ 2: one
+ 1: New One
+ 0: first
+
+Notes:
+
+=over 5
+
+=item 1.
+
+Rather than iterating through the array C<@h> like this:
+
+ foreach $i (@h)
+
+it is necessary to use either this:
+
+ foreach $i (0 .. $H->length - 1)
+
+or this:
+
+ for ($a = $H->get($k, $v, R_FIRST) ;
+ $a == 0 ;
+ $a = $H->get($k, $v, R_NEXT) )
+
+=item 2.
+
+Notice that both times the C<put> method was used the record index was
+specified using a variable, C<$i>, rather than the literal value
+itself. This is because C<put> will return the record number of the
+inserted line via that parameter. A short sketch of this behavior
+follows this list.
+
+=back
+
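+Here is a minimal sketch of that behavior (the file name C<text> is
+illustrative). After the C<put> call, C<$i> holds the record number
+that was assigned to the inserted line:
+
+    use warnings ;
+    use strict ;
+    use DB_File ;
+
+    my @h ;
+    # "text" is an illustrative file name
+    my $H = tie @h, "DB_File", "text", O_RDWR|O_CREAT, 0640, $DB_RECNO
+        or die "Cannot open file 'text': $!\n" ;
+
+    $h[0] = "zero" ;
+    $h[1] = "one" ;
+
+    my $i = 0 ;
+    $H->put($i, "a new record", R_IAFTER) ;
+    print "the new record was stored at index $i\n" ;
+
+    undef $H ;
+    untie @h ;
+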
+=head1 THE API INTERFACE
+
+As well as accessing Berkeley DB using a tied hash or array, it is also
+possible to make direct use of most of the API functions defined in the
+Berkeley DB documentation.
+
+To do this you need to store a copy of the object returned from the tie.
+
+ $db = tie %hash, "DB_File", "filename" ;
+
+Once you have done that, you can access the Berkeley DB API functions
+as B<DB_File> methods directly like this:
+
+ $db->put($key, $value, R_NOOVERWRITE) ;
+
+B<Important:> If you have saved a copy of the object returned from
+C<tie>, the underlying database file will I<not> be closed until both
+the tied variable is untied and all copies of the saved object are
+destroyed.
+
+ use DB_File ;
+ $db = tie %hash, "DB_File", "filename"
+ or die "Cannot tie filename: $!" ;
+ ...
+ undef $db ;
+ untie %hash ;
+
+See L<The untie() Gotcha> for more details.
+
+All the functions defined in L<dbopen> are available except for
+close() and dbopen() itself. The B<DB_File> method interface to the
+supported functions has been implemented to mirror the way Berkeley DB
+works whenever possible. In particular note that:
+
+=over 5
+
+=item *
+
+The methods return a status value. All return 0 on success.
+All return -1 to signify an error and set C<$!> to the exact
+error code. The return code 1 generally (but not always) means that the
+key specified did not exist in the database.
+
+Other return codes are defined. See below and in the Berkeley DB
+documentation for details. The Berkeley DB documentation should be used
+as the definitive source.
+
+=item *
+
+Whenever a Berkeley DB function returns data via one of its parameters,
+the equivalent B<DB_File> method does exactly the same.
+
+=item *
+
+If you are careful, it is possible to mix API calls with the tied
+hash/array interface in the same piece of code. Although only a few of
+the methods used to implement the tied interface currently make use of
+the cursor, you should always assume that the cursor has been changed
+any time the tied hash/array interface is used. As an example, this
+code will probably not do what you expect:
+
+ $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
+ or die "Cannot tie $filename: $!" ;
+
+ # Get the first key/value pair and set the cursor
+ $X->seq($key, $value, R_FIRST) ;
+
+ # this line will modify the cursor
+ $count = scalar keys %x ;
+
+ # Get the second key/value pair.
+ # oops, it didn't, it got the last key/value pair!
+ $X->seq($key, $value, R_NEXT) ;
+
+The code above can be rearranged to get around the problem, like this:
+
+ $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
+ or die "Cannot tie $filename: $!" ;
+
+ # this line will modify the cursor
+ $count = scalar keys %x ;
+
+ # Get the first key/value pair and set the cursor
+ $X->seq($key, $value, R_FIRST) ;
+
+ # Get the second key/value pair.
+ # worked this time.
+ $X->seq($key, $value, R_NEXT) ;
+
+=back
+
+All the constants defined in L<dbopen> for use in the flags parameters
+in the methods defined below are also available. Refer to the Berkeley
+DB documentation for the precise meaning of the flags values.
+
+Below is a list of the methods available.
+
+=over 5
+
+=item B<$status = $X-E<gt>get($key, $value [, $flags]) ;>
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. The value read from the database is returned in the
+C<$value> parameter.
+
+If the key does not exist the method returns 1.
+
+No flags are currently defined for this method.
+
+=item B<$status = $X-E<gt>put($key, $value [, $flags]) ;>
+
+Stores the key/value pair in the database.
+
+If you use either the R_IAFTER or R_IBEFORE flags, the C<$key> parameter
+will be set to the record number of the inserted key/value pair.
+
+Valid flags are R_CURSOR, R_IAFTER, R_IBEFORE, R_NOOVERWRITE and
+R_SETCURSOR.
+
+=item B<$status = $X-E<gt>del($key [, $flags]) ;>
+
+Removes all key/value pairs with key C<$key> from the database.
+
+A return code of 1 means that the requested key was not in the
+database.
+
+R_CURSOR is the only valid flag at present.
+
+=item B<$status = $X-E<gt>fd ;>
+
+Returns the file descriptor for the underlying database.
+
+See L<Locking: The Trouble with fd> for an explanation of why you should
+not use C<fd> to lock your database.
+
+=item B<$status = $X-E<gt>seq($key, $value, $flags) ;>
+
+This interface allows sequential retrieval from the database. See
+L<dbopen> for full details.
+
+Both the C<$key> and C<$value> parameters will be set to the key/value
+pair read from the database.
+
+The flags parameter is mandatory. The valid flag values are R_CURSOR,
+R_FIRST, R_LAST, R_NEXT and R_PREV.
+
+=item B<$status = $X-E<gt>sync([$flags]) ;>
+
+Flushes any cached buffers to disk.
+
+R_RECNOSYNC is the only valid flag at present.
+
+=back
+
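+As a quick illustration of the status conventions described above (0 on
+success, 1 when the key is not present, -1 with C<$!> set on error), here
+is a minimal sketch. The filename F</tmp/status_demo> is just an example.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my %h ;
+ my $db = tie %h, 'DB_File', '/tmp/status_demo', O_RDWR|O_CREAT, 0666, $DB_HASH
+     or die "Cannot tie /tmp/status_demo: $!\n" ;
+
+ $db->put("fred", "bloggs") ;
+
+ my $value ;
+ my $status = $db->get("joe", $value) ;
+ if ($status == 0)    { print "found [$value]\n" }
+ elsif ($status == 1) { print "key not in the database\n" }
+ else                 { die "get failed: $!\n" }
+
+ undef $db ;
+ untie %h ;
+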
+=head1 DBM FILTERS
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a
+DBM database.
+
+There are four methods associated with DBM Filters. All work identically,
+and each is used to install (or uninstall) a single DBM Filter. Each
+expects a single parameter, namely a reference to a sub. The only
+difference between them is the place that the filter is installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
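+For instance, here is a minimal sketch that installs a key filter,
+replaces it (picking up the old filter as the return value) and finally
+removes it again. It assumes C<%hash> and C<$filename> have been set up
+as in the earlier examples.
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+     or die "Cannot open $filename: $!\n" ;
+
+ # install a key filter
+ $db->filter_store_key( sub { $_ = lc $_ } ) ;
+
+ # installing a new filter returns the previous one (a code reference)
+ my $old_filter = $db->filter_store_key( sub { $_ = uc $_ } ) ;
+
+ # passing undef removes the filter altogether
+ $db->filter_store_key( undef ) ;
+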
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database
+that you need to share with a third-party C application. The C application
+assumes that I<all> keys and values are NULL terminated. Unfortunately
+when Perl writes to DBM databases it doesn't use NULL termination, so
+your Perl application will have to manage NULL termination itself. When
+you write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL termination issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to every key and value written to the
+database, and removed it again whenever you read from the database. As
+I'm sure you have already guessed, this is a problem that DBM Filters
+can fix very easily.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my %hash ;
+ my $filename = "/tmp/filt" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+The contents of each filter should be self-explanatory. Both "fetch"
+filters remove the terminating NULL, and both "store" filters add a
+terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my %hash ;
+ my $filename = "/tmp/filt" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 HINTS AND TIPS
+
+
+=head2 Locking: The Trouble with fd
+
+Until version 1.72 of this module, the recommended technique for locking
+B<DB_File> databases was to flock the filehandle returned from the "fd"
+function. Unfortunately this technique has been shown to be fundamentally
+flawed (Kudos to David Harris for tracking this down). Use it at your own
+peril!
+
+The locking technique went like this.
+
+ $db = tie(%db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0644)
+ || die "dbcreat /tmp/foo.db $!";
+ $fd = $db->fd;
+ open(DB_FH, "+<&=$fd") || die "dup $!";
+ flock (DB_FH, LOCK_EX) || die "flock: $!";
+ ...
+ $db{"Tom"} = "Jerry" ;
+ ...
+ flock(DB_FH, LOCK_UN);
+ undef $db;
+ untie %db;
+ close(DB_FH);
+
+In simple terms, this is what happens:
+
+=over 5
+
+=item 1.
+
+Use "tie" to open the database.
+
+=item 2.
+
+Lock the database with fd & flock.
+
+=item 3.
+
+Read & Write to the database.
+
+=item 4.
+
+Unlock and close the database.
+
+=back
+
+Here is the crux of the problem. A side-effect of opening the B<DB_File>
+database in step 2 is that an initial block from the database will get
+read from disk and cached in memory.
+
+To see why this is a problem, consider what can happen when two processes,
+say "A" and "B", both want to update the same B<DB_File> database
+using the locking steps outlined above. Assume process "A" has already
+opened the database and has a write lock, but it hasn't actually updated
+the database yet (it has finished step 2, but not started step 3 yet). Now
+process "B" tries to open the same database - step 1 will succeed,
+but it will block on step 2 until process "A" releases the lock. The
+important thing to notice here is that at this point in time both
+processes will have cached identical initial blocks from the database.
+
+Now process "A" updates the database and happens to change some of the
+data held in the initial buffer. Process "A" terminates, flushing
+all cached data to disk and releasing the database lock. At this point
+the database on disk will correctly reflect the changes made by process
+"A".
+
+With the lock released, process "B" can now continue. It also updates the
+database and unfortunately it too modifies the data that was in its
+initial buffer. Once that data gets flushed to disk it will overwrite
+some/all of the changes process "A" made to the database.
+
+The result of this scenario is at best a database that doesn't contain
+what you expect. At worst the database will be corrupted.
+
+The above won't happen every time competing processes update the same
+B<DB_File> database, but it does illustrate why the technique should
+not be used.
+
+=head2 Safe ways to lock a database
+
+Starting with version 2.x, Berkeley DB has internal support for locking.
+The companion module to this one, B<BerkeleyDB>, provides an interface
+to this locking functionality. If you are serious about locking
+Berkeley DB databases, I strongly recommend using B<BerkeleyDB>.
+
+If using B<BerkeleyDB> isn't an option, there are a number of modules
+available on CPAN that can be used to implement locking. Each one
+implements locking differently and has different goals in mind. It is
+therefore worth knowing the difference, so that you can pick the right
+one for your application. Here are the three locking wrappers:
+
+=over 5
+
+=item B<Tie::DB_Lock>
+
+A B<DB_File> wrapper which creates copies of the database file for
+read access, so that you have a kind of multiversioning concurrent read
+system. However, updates are still serial. Use for databases where reads
+may be lengthy and consistency problems may occur.
+
+=item B<Tie::DB_LockFile>
+
+A B<DB_File> wrapper that has the ability to lock and unlock the database
+while it is being used. Avoids the tie-before-flock problem by simply
+re-tie-ing the database when you get or drop a lock. Because of the
+flexibility in dropping and re-acquiring the lock in the middle of a
+session, this can be massaged into a system that will work with long
+updates and/or reads if the application follows the hints in the POD
+documentation.
+
+=item B<DB_File::Lock>
+
+An extremely lightweight B<DB_File> wrapper that simply flocks a lockfile
+before tie-ing the database and drops the lock after the untie. Allows
+one to use the same lockfile for multiple databases to avoid deadlock
+problems, if desired. Use for databases where updates and reads are
+quick and simple flock locking semantics are enough.
+
+=back
+
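+If you prefer to roll the locking yourself rather than use one of the
+wrappers above, the essential point is to take the lock I<before> the
+C<tie> and to release it only after the C<untie>, so that no stale
+cached blocks are involved. Below is a minimal sketch of that idea using
+a separate lockfile (the hypothetical F</tmp/foo.lock>); it is an
+illustration, not a drop-in module.
+
+ use DB_File ;
+ use Fcntl qw(:DEFAULT :flock) ;
+
+ # take the lock first, using a file that is *not* the database
+ open(LOCKFH, ">/tmp/foo.lock") || die "lockfile: $!";
+ flock(LOCKFH, LOCK_EX)         || die "flock: $!";
+
+ # now it is safe to tie -- nothing has been cached yet
+ tie(%db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0644, $DB_HASH)
+     || die "Cannot tie /tmp/foo.db: $!";
+
+ $db{"Tom"} = "Jerry" ;
+
+ # untie (flushing the database) before dropping the lock
+ untie %db ;
+ flock(LOCKFH, LOCK_UN);
+ close(LOCKFH);
+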
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings are
+not. See L<DBM FILTERS> for a generic way to work around this problem.
+
+Here is a real example. Netscape 2.0 keeps a record of the locations you
+visit along with the time you last visited them in a DB_HASH database.
+This is usually stored in the file F<~/.netscape/history.db>. The key
+field in the database is the location string and the value field is the
+time the location was last visited, stored as a 4 byte binary value.
+
+If you haven't already guessed, the location string is stored with a
+terminating NULL. This means you need to be careful when accessing the
+database.
+
+Here is a snippet of code that is loosely based on Tom Christiansen's
+I<ggh> script (available from your nearest CPAN archive in
+F<authors/id/TOMC/scripts/nshist.gz>).
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ use vars qw( $dotdir $HISTORY %hist_db $href $binary_time $date ) ;
+ $dotdir = $ENV{HOME} || $ENV{LOGNAME};
+
+ $HISTORY = "$dotdir/.netscape/history.db";
+
+ tie %hist_db, 'DB_File', $HISTORY
+ or die "Cannot open $HISTORY: $!\n" ;
+
+ # Dump the complete database
+ while ( ($href, $binary_time) = each %hist_db ) {
+
+ # remove the terminating NULL
+ $href =~ s/\x00$// ;
+
+ # convert the binary time into a user friendly string
+ $date = localtime unpack("V", $binary_time);
+ print "$date $href\n" ;
+ }
+
+ # check for the existence of a specific key
+ # remember to add the NULL
+ if ( $binary_time = $hist_db{"http://mox.perl.com/\x00"} ) {
+ $date = localtime unpack("V", $binary_time) ;
+ print "Last visited mox.perl.com on $date\n" ;
+ }
+ else {
+ print "Never visited mox.perl.com\n"
+ }
+
+ untie %hist_db ;
+
+=head2 The untie() Gotcha
+
+If you make use of the Berkeley DB API, it is I<very> strongly
+recommended that you read L<perltie/The untie Gotcha>.
+
+Even if you don't currently make use of the API interface, it is still
+worth reading it.
+
+Here is an example which illustrates the problem from a B<DB_File>
+perspective:
+
+ use DB_File ;
+ use Fcntl ;
+
+ my %x ;
+ my $X ;
+
+ $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_TRUNC
+ or die "Cannot tie first time: $!" ;
+
+ $x{123} = 456 ;
+
+ untie %x ;
+
+ tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
+ or die "Cannot tie second time: $!" ;
+
+ untie %x ;
+
+When run, the script will produce this error message:
+
+ Cannot tie second time: Invalid argument at bad.file line 14.
+
+Although the error message above refers to the second tie() statement
+in the script, the source of the problem is really with the untie()
+statement that precedes it.
+
+Having read L<perltie> you will probably have already guessed that the
+error is caused by the extra copy of the tied object stored in C<$X>.
+If you haven't, then the problem boils down to the fact that the
+B<DB_File> destructor, DESTROY, will not be called until I<all>
+references to the tied object are destroyed. Both the tied variable,
+C<%x>, and C<$X> above hold a reference to the object. The call to
+untie() will destroy the first, but C<$X> still holds a valid
+reference, so the destructor will not get called and the database file
+F<tst.fil> will remain open. The fact that Berkeley DB then reports the
+attempt to open a database that is already open via the catch-all
+"Invalid argument" doesn't help.
+
+If you run the script with the C<-w> flag the error message becomes:
+
+ untie attempted while 1 inner references still exist at bad.file line 12.
+ Cannot tie second time: Invalid argument at bad.file line 14.
+
+which pinpoints the real problem. Finally the script can now be
+modified to fix the original problem by destroying the API object
+before the untie:
+
+ ...
+ $x{123} = 456 ;
+
+ undef $X ;
+ untie %x ;
+
+ $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
+ ...
+
+
+=head1 COMMON QUESTIONS
+
+=head2 Why is there Perl source in my database?
+
+If you look at the contents of a database file created by DB_File,
+you may sometimes find part of a Perl script embedded in it.
+
+This happens because Berkeley DB uses dynamic memory to allocate
+buffers which will subsequently be written to the database file. Being
+dynamic, the memory could have been used for anything before DB
+malloced it. As Berkeley DB doesn't clear the memory once it has been
+allocated, the unused portions will contain random junk. In the case
+where a Perl script gets written to the database, the random junk will
+correspond to an area of dynamic memory that happened to be used during
+the compilation of the script.
+
+Unless you object to fragments of your Perl scripts being embedded in
+a database file, this is nothing to worry about.
+
+=head2 How do I store complex data structures with DB_File?
+
+Although B<DB_File> cannot do this directly, there is a module which
+can layer transparently over B<DB_File> to accomplish this feat.
+
+Check out the MLDBM module, available on CPAN in the directory
+F<modules/by-module/MLDBM>.
+
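+For illustration only, a typical MLDBM setup might look like the sketch
+below. The filename, the choice of Storable as the serialiser and the
+data are just examples; see the MLDBM documentation for the details.
+
+ use MLDBM qw(DB_File Storable) ;   # DB_File underneath, Storable to serialise
+ use Fcntl ;
+
+ my %h ;
+ tie %h, 'MLDBM', '/tmp/complex.db', O_CREAT|O_RDWR, 0666
+     or die "Cannot open /tmp/complex.db: $!\n" ;
+
+ # store a nested data structure in a single assignment
+ $h{fred} = { age => 42, pets => [ 'cat', 'dog' ] } ;
+
+ untie %h ;
+
+Note that MLDBM requires a complete value to be assigned to a key;
+modifying an element nested inside an already-stored structure will not
+automatically be written back to the database.
+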
+=head2 What does "Invalid Argument" mean?
+
+You will get this error message when one of the parameters in the
+C<tie> call is wrong. Unfortunately there are quite a few parameters to
+get wrong, so it can be difficult to figure out which one it is.
+
+Here are a couple of possibilities:
+
+=over 5
+
+=item 1.
+
+Attempting to reopen a database without closing it.
+
+=item 2.
+
+Using the O_WRONLY flag.
+
+=back
+
+=head2 What does "Bareword 'DB_File' not allowed" mean?
+
+You will encounter this particular error message when you have the
+C<strict 'subs'> pragma (or the full strict pragma) in your script.
+Consider this script:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use vars qw(%x) ;
+ tie %x, DB_File, "filename" ;
+
+Running it produces the error in question:
+
+ Bareword "DB_File" not allowed while "strict subs" in use
+
+To get around the error, place the word C<DB_File> in either single or
+double quotes, like this:
+
+ tie %x, "DB_File", "filename" ;
+
+Although it might seem like a real pain, it is really worth the effort
+of having a C<use strict> in all your scripts.
+
+=head1 REFERENCES
+
+Articles that are either about B<DB_File> or make use of it.
+
+=over 5
+
+=item 1.
+
+I<Full-Text Searching in Perl>, Tim Kientzle (tkientzle@ddj.com),
+Dr. Dobb's Journal, Issue 295, January 1999, pp 34-41
+
+=back
+
+=head1 HISTORY
+
+Moved to the Changes file.
+
+=head1 BUGS
+
+Some older versions of Berkeley DB had problems with fixed length
+records using the RECNO file format. This problem has been fixed since
+version 1.85 of Berkeley DB.
+
+I am sure there are bugs in the code. If you do find any, or can
+suggest any enhancements, I would welcome your comments.
+
+=head1 AVAILABILITY
+
+B<DB_File> comes with the standard Perl source distribution. Look in
+the directory F<ext/DB_File>. Given the amount of time between releases
+of Perl, the version that ships with Perl is quite likely to be out of
+date, so the most recent version can always be found on CPAN (see
+L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/DB_File>.
+
+This version of B<DB_File> will work with version 1.x, 2.x or 3.x of
+Berkeley DB, but is limited to the functionality provided by
+version 1.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+All versions of Berkeley DB are available there.
+
+Alternatively, Berkeley DB version 1 is available at your nearest CPAN
+archive in F<src/misc/db.1.85.tar.gz>.
+
+If you are running IRIX, then get Berkeley DB version 1 from
+F<http://reality.sgi.com/ariel>. It has the patches necessary to
+compile properly on IRIX 5.3.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1995-1999 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<DB_File> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of
+ Berkeley DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of DB_File. See L<"AUTHOR"> for details.
+
+
+=head1 SEE ALSO
+
+L<perl(1)>, L<dbopen(3)>, L<hash(3)>, L<recno(3)>, L<btree(3)>,
+L<dbmfilter>
+
+=head1 AUTHOR
+
+The DB_File interface was written by Paul Marquess
+E<lt>Paul.Marquess@btinternet.comE<gt>.
+Questions about the DB system itself may be addressed to
+E<lt>db@sleepycat.comE<gt>.
+
+=cut
diff --git a/bdb/perl.DB_File/DB_File.xs b/bdb/perl.DB_File/DB_File.xs
new file mode 100644
index 00000000000..6811342066d
--- /dev/null
+++ b/bdb/perl.DB_File/DB_File.xs
@@ -0,0 +1,2072 @@
+/*
+
+ DB_File.xs -- Perl 5 interface to Berkeley DB
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+ last modified 15th January 2001
+ version 1.76
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Changes:
+ 0.1 - Initial Release
+ 0.2 - No longer bombs out if dbopen returns an error.
+ 0.3 - Added some support for multiple btree compares
+ 1.0 - Complete support for multiple callbacks added.
+ Fixed a problem with pushing a value onto an empty list.
+ 1.01 - Fixed a SunOS core dump problem.
+ The return value from TIEHASH wasn't set to NULL when
+ dbopen returned an error.
+ 1.02 - Use ALIAS to define TIEARRAY.
+ Removed some redundant commented code.
+ Merged OS2 code into the main distribution.
+ Allow negative subscripts with RECNO interface.
+ Changed the default flags to O_CREAT|O_RDWR
+ 1.03 - Added EXISTS
+ 1.04 - fixed a couple of bugs in hash_cb. Patches supplied by
+ Dave Hammen, hammen@gothamcity.jsc.nasa.gov
+ 1.05 - Added logic to allow prefix & hash types to be specified via
+ Makefile.PL
+ 1.06 - Minor namespace cleanup: Localized PrintBtree.
+ 1.07 - Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+ 1.08 - No change to DB_File.xs
+ 1.09 - Default mode for dbopen changed to 0666
+ 1.10 - Fixed fd method so that it still returns -1 for
+ in-memory files when db 1.86 is used.
+ 1.11 - No change to DB_File.xs
+ 1.12 - No change to DB_File.xs
+ 1.13 - Tidied up a few casts.
+ 1.14 - Made it illegal to tie an associative array to a RECNO
+ database and an ordinary array to a HASH or BTREE database.
+ 1.50 - Make work with both DB 1.x or DB 2.x
+ 1.51 - Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
+ 1.52 - Patch from Gisle Aas <gisle@aas.no> to suppress "use of
+ undefined value" warning with db_get and db_seq.
+ 1.53 - Added DB_RENUMBER to flags for recno.
+ 1.54 - Fixed bug in the fd method
+ 1.55 - Fix for AIX from Jarkko Hietaniemi
+ 1.56 - No change to DB_File.xs
+ 1.57 - added the #undef op to allow building with Threads support.
+ 1.58 - Fixed a problem with the use of sv_setpvn. When the
+ size is specified as 0, it does a strlen on the data.
+ This was ok for DB 1.x, but isn't for DB 2.x.
+ 1.59 - No change to DB_File.xs
+ 1.60 - Some code tidy up
+ 1.61 - added flagSet macro for DB 2.5.x
+ fixed typo in O_RDONLY test.
+ 1.62 - No change to DB_File.xs
+ 1.63 - Fix to allow DB 2.6.x to build.
+ 1.64 - Tidied up the 1.x to 2.x flags mapping code.
+ Added a patch from Mark Kettenis <kettenis@wins.uva.nl>
+ to fix a flag mapping problem with O_RDONLY on the Hurd
+ 1.65 - Fixed a bug in the PUSH logic.
+ Added BOOT check that using 2.3.4 or greater
+ 1.66 - Added DBM filter code
+ 1.67 - Backed off the use of newSVpvn.
+ Fixed DBM Filter code for Perl 5.004.
+ Fixed a small memory leak in the filter code.
+ 1.68 - fixed backward compatibility bug with R_IAFTER & R_IBEFORE
+ merged in the 5.005_58 changes
+ 1.69 - fixed a bug in push -- DB_APPEND wasn't working properly.
+ Fixed the R_SETCURSOR bug introduced in 1.68
+ Added a new Perl variable $DB_File::db_ver
+ 1.70 - Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
+ Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
+ 1.71 - Support for Berkeley DB version 3.
+ Support for Berkeley DB 2/3's backward compatibility mode.
+ Rewrote push
+ 1.72 - No change to DB_File.xs
+ 1.73 - No change to DB_File.xs
+ 1.74 - A call to open needed to be parenthesised to stop it clashing
+ with a win32 macro.
+ Added Perl core patches 7703 & 7801.
+ 1.75 - Fixed Perl core patch 7703.
+ Added support to allow DB_File to be built with
+ Berkeley DB 3.2 -- btree_compare, btree_prefix and hash_cb
+ needed to be changed.
+ 1.76 - No change to DB_File.xs
+
+*/
+
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#ifndef PERL_VERSION
+# include "patchlevel.h"
+# define PERL_REVISION 5
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+#endif
+
+#if PERL_REVISION == 5 && (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION <= 75 ))
+
+# define PL_sv_undef sv_undef
+# define PL_na na
+
+#endif
+
+/* DEFSV appears first in 5.004_56 */
+#ifndef DEFSV
+# define DEFSV GvSV(defgv)
+#endif
+
+/* Since this is Berkeley DB, we prefer the __attribute__ from
+ * <sys/cdefs.h> (which will shortly be #included by <db.h>) to any
+ * __attribute__ that may already be defined, for example by GNUC or by Perl. */
+
+#undef __attribute__
+
+/* If Perl has been compiled with Threads support, the symbol op will
+ be defined here. This clashes with a field name in db.h, so get rid of it.
+ */
+#ifdef op
+# undef op
+#endif
+
+#ifdef COMPAT185
+# include <db_185.h>
+#else
+# include <db.h>
+#endif
+
+#ifdef CAN_PROTOTYPE
+extern void __getBerkeleyDBInfo(void);
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(a,b) newSVpv(a,b)
+#endif
+
+#include <fcntl.h>
+
+/* #define TRACE */
+#define DBM_FILTERING
+
+#ifdef TRACE
+# define Trace(x) printf x
+#else
+# define Trace(x)
+#endif
+
+
+#define DBT_clear(x) Zero(&x, 1, DBT) ;
+
+#ifdef DB_VERSION_MAJOR
+
+#if DB_VERSION_MAJOR == 2
+# define BERKELEY_DB_1_OR_2
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
+# define AT_LEAST_DB_3_2
+#endif
+
+/* map version 2 features & constants onto their version 1 equivalent */
+
+#ifdef DB_Prefix_t
+# undef DB_Prefix_t
+#endif
+#define DB_Prefix_t size_t
+
+#ifdef DB_Hash_t
+# undef DB_Hash_t
+#endif
+#define DB_Hash_t u_int32_t
+
+/* DBTYPE stays the same */
+/* HASHINFO, RECNOINFO and BTREEINFO map to DB_INFO */
+#if DB_VERSION_MAJOR == 2
+ typedef DB_INFO INFO ;
+#else /* DB_VERSION_MAJOR > 2 */
+# define DB_FIXEDLEN (0x8000)
+#endif /* DB_VERSION_MAJOR == 2 */
+
+/* version 2 has db_recno_t in place of recno_t */
+typedef db_recno_t recno_t;
+
+
+#define R_CURSOR DB_SET_RANGE
+#define R_FIRST DB_FIRST
+#define R_IAFTER DB_AFTER
+#define R_IBEFORE DB_BEFORE
+#define R_LAST DB_LAST
+#define R_NEXT DB_NEXT
+#define R_NOOVERWRITE DB_NOOVERWRITE
+#define R_PREV DB_PREV
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define R_SETCURSOR 0x800000
+#else
+# define R_SETCURSOR (-100)
+#endif
+
+#define R_RECNOSYNC 0
+#define R_FIXEDLEN DB_FIXEDLEN
+#define R_DUP DB_DUP
+
+
+#define db_HA_hash h_hash
+#define db_HA_ffactor h_ffactor
+#define db_HA_nelem h_nelem
+#define db_HA_bsize db_pagesize
+#define db_HA_cachesize db_cachesize
+#define db_HA_lorder db_lorder
+
+#define db_BT_compare bt_compare
+#define db_BT_prefix bt_prefix
+#define db_BT_flags flags
+#define db_BT_psize db_pagesize
+#define db_BT_cachesize db_cachesize
+#define db_BT_lorder db_lorder
+#define db_BT_maxkeypage
+#define db_BT_minkeypage
+
+
+#define db_RE_reclen re_len
+#define db_RE_flags flags
+#define db_RE_bval re_pad
+#define db_RE_bfname re_source
+#define db_RE_psize db_pagesize
+#define db_RE_cachesize db_cachesize
+#define db_RE_lorder db_lorder
+
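+/* In the 2.x/3.x API the get/put/del calls take an extra DB_TXN* argument.
+   TXN supplies a NULL transaction here; in the 1.x branch below it expands
+   to nothing. */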
+#define TXN NULL,
+
+#define do_SEQ(db, key, value, flag) (db->cursor->c_get)(db->cursor, &key, &value, flag)
+
+
+#define DBT_flags(x) x.flags = 0
+#define DB_flags(x, v) x |= v
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define flagSet(flags, bitmask) ((flags) & (bitmask))
+#else
+# define flagSet(flags, bitmask) (((flags) & DB_OPFLAGS_MASK) == (bitmask))
+#endif
+
+#else /* db version 1.x */
+
+#define BERKELEY_DB_1
+#define BERKELEY_DB_1_OR_2
+
+typedef union INFO {
+ HASHINFO hash ;
+ RECNOINFO recno ;
+ BTREEINFO btree ;
+ } INFO ;
+
+
+#ifdef mDB_Prefix_t
+# ifdef DB_Prefix_t
+# undef DB_Prefix_t
+# endif
+# define DB_Prefix_t mDB_Prefix_t
+#endif
+
+#ifdef mDB_Hash_t
+# ifdef DB_Hash_t
+# undef DB_Hash_t
+# endif
+# define DB_Hash_t mDB_Hash_t
+#endif
+
+#define db_HA_hash hash.hash
+#define db_HA_ffactor hash.ffactor
+#define db_HA_nelem hash.nelem
+#define db_HA_bsize hash.bsize
+#define db_HA_cachesize hash.cachesize
+#define db_HA_lorder hash.lorder
+
+#define db_BT_compare btree.compare
+#define db_BT_prefix btree.prefix
+#define db_BT_flags btree.flags
+#define db_BT_psize btree.psize
+#define db_BT_cachesize btree.cachesize
+#define db_BT_lorder btree.lorder
+#define db_BT_maxkeypage btree.maxkeypage
+#define db_BT_minkeypage btree.minkeypage
+
+#define db_RE_reclen recno.reclen
+#define db_RE_flags recno.flags
+#define db_RE_bval recno.bval
+#define db_RE_bfname recno.bfname
+#define db_RE_psize recno.psize
+#define db_RE_cachesize recno.cachesize
+#define db_RE_lorder recno.lorder
+
+#define TXN
+
+#define do_SEQ(db, key, value, flag) (db->dbp->seq)(db->dbp, &key, &value, flag)
+#define DBT_flags(x)
+#define DB_flags(x, v)
+#define flagSet(flags, bitmask) ((flags) & (bitmask))
+
+#endif /* db version 1 */
+
+
+
+#define db_DELETE(db, key, flags) ((db->dbp)->del)(db->dbp, TXN &key, flags)
+#define db_STORE(db, key, value, flags) ((db->dbp)->put)(db->dbp, TXN &key, &value, flags)
+#define db_FETCH(db, key, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
+
+#define db_sync(db, flags) ((db->dbp)->sync)(db->dbp, flags)
+#define db_get(db, key, value, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
+
+#ifdef DB_VERSION_MAJOR
+#define db_DESTROY(db) ( db->cursor->c_close(db->cursor),\
+ (db->dbp->close)(db->dbp, 0) )
+#define db_close(db) ((db->dbp)->close)(db->dbp, 0)
+#define db_del(db, key, flags) (flagSet(flags, R_CURSOR) \
+ ? ((db->cursor)->c_del)(db->cursor, 0) \
+ : ((db->dbp)->del)(db->dbp, NULL, &key, flags) )
+
+#else /* ! DB_VERSION_MAJOR */
+
+#define db_DESTROY(db) ((db->dbp)->close)(db->dbp)
+#define db_close(db) ((db->dbp)->close)(db->dbp)
+#define db_del(db, key, flags) ((db->dbp)->del)(db->dbp, &key, flags)
+#define db_put(db, key, value, flags) ((db->dbp)->put)(db->dbp, &key, &value, flags)
+
+#endif /* ! DB_VERSION_MAJOR */
+
+
+#define db_seq(db, key, value, flags) do_SEQ(db, key, value, flags)
+
+typedef struct {
+ DBTYPE type ;
+ DB * dbp ;
+ SV * compare ;
+ SV * prefix ;
+ SV * hash ;
+ int in_memory ;
+#ifdef BERKELEY_DB_1_OR_2
+ INFO info ;
+#endif
+#ifdef DB_VERSION_MAJOR
+ DBC * cursor ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif /* DBM_FILTERING */
+
+ } DB_File_type;
+
+typedef DB_File_type * DB_File ;
+typedef DBT DBTKEY ;
+
+#ifdef DBM_FILTERING
+
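+/* Run a DBM filter: save $_ (DEFSV), alias $_ to the SV being read or
+   written, call the filter sub for its side effect on $_, copy the result
+   back into the SV and restore the original $_. The "filtering" flag traps
+   recursive filter invocation. */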
+#define ckFilter(arg,type,name) \
+ if (db->type) { \
+ SV * save_defsv ; \
+ /* printf("filtering %s\n", name) ;*/ \
+ if (db->filtering) \
+ croak("recursion detected in %s", name) ; \
+ db->filtering = TRUE ; \
+ save_defsv = newSVsv(DEFSV) ; \
+ sv_setsv(DEFSV, arg) ; \
+ PUSHMARK(sp) ; \
+ (void) perl_call_sv(db->type, G_DISCARD|G_NOARGS); \
+ sv_setsv(arg, DEFSV) ; \
+ sv_setsv(DEFSV, save_defsv) ; \
+ SvREFCNT_dec(save_defsv) ; \
+ db->filtering = FALSE ; \
+ /*printf("end of filtering %s\n", name) ;*/ \
+ }
+
+#else
+
+#define ckFilter(arg,type, name)
+
+#endif /* DBM_FILTERING */
+
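+/* Never hand sv_setpvn a NULL data pointer: when the DBT size is zero,
+   substitute an empty string instead. */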
+#define my_sv_setpvn(sv, d, s) sv_setpvn(sv, (s ? d : (void*)""), s)
+
+#define OutputValue(arg, name) \
+ { if (RETVAL == 0) { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ } \
+ }
+
+#define OutputKey(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (db->type != DB_RECNO) { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ else \
+ sv_setiv(arg, (I32)*(I32*)name.data - 1); \
+ ckFilter(arg, filter_fetch_key,"filter_fetch_key") ; \
+ } \
+ }
+
+
+/* Internal Global Data */
+static recno_t Value ;
+static recno_t zero = 0 ;
+static DB_File CurrentDB ;
+static DBTKEY empty ;
+
+#ifdef DB_VERSION_MAJOR
+
+static int
+#ifdef CAN_PROTOTYPE
+db_put(DB_File db, DBTKEY key, DBT value, u_int flags)
+#else
+db_put(db, key, value, flags)
+DB_File db ;
+DBTKEY key ;
+DBT value ;
+u_int flags ;
+#endif
+{
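+    /* Emulate the 1.x R_IAFTER/R_IBEFORE, R_CURSOR and R_SETCURSOR put
+       flags on top of the 2.x/3.x cursor interface; anything else falls
+       through to a plain put. */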
+ int status ;
+
+ if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) {
+ DBC * temp_cursor ;
+ DBT l_key, l_value;
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor) != 0)
+#else
+ if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor, 0) != 0)
+#endif
+ return (-1) ;
+
+ memset(&l_key, 0, sizeof(l_key));
+ l_key.data = key.data;
+ l_key.size = key.size;
+ memset(&l_value, 0, sizeof(l_value));
+ l_value.data = value.data;
+ l_value.size = value.size;
+
+ if ( temp_cursor->c_get(temp_cursor, &l_key, &l_value, DB_SET) != 0) {
+ (void)temp_cursor->c_close(temp_cursor);
+ return (-1);
+ }
+
+ status = temp_cursor->c_put(temp_cursor, &key, &value, flags);
+ (void)temp_cursor->c_close(temp_cursor);
+
+ return (status) ;
+ }
+
+
+ if (flagSet(flags, R_CURSOR)) {
+ return ((db->cursor)->c_put)(db->cursor, &key, &value, DB_CURRENT);
+ }
+
+ if (flagSet(flags, R_SETCURSOR)) {
+ if ((db->dbp)->put(db->dbp, NULL, &key, &value, 0) != 0)
+ return -1 ;
+ return ((db->cursor)->c_get)(db->cursor, &key, &value, DB_SET_RANGE);
+
+ }
+
+ return ((db->dbp)->put)(db->dbp, NULL, &key, &value, flags) ;
+
+}
+
+#endif /* DB_VERSION_MAJOR */
+
+
+static int
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+btree_compare(DB * db, const DBT *key1, const DBT *key2)
+#else
+btree_compare(db, key1, key2)
+DB * db ;
+const DBT * key1 ;
+const DBT * key2 ;
+#endif /* CAN_PROTOTYPE */
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+btree_compare(const DBT *key1, const DBT *key2)
+#else
+btree_compare(key1, key2)
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#endif
+
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ void * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ data1 = key1->data ;
+ data2 = key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ croak ("DB_File btree_compare: expected 1 return value from compare sub, got %d\n", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ return (retval) ;
+
+}
+
+static DB_Prefix_t
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+btree_prefix(DB * db, const DBT *key1, const DBT *key2)
+#else
+btree_prefix(db, key1, key2)
+DB * db ;
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+btree_prefix(const DBT *key1, const DBT *key2)
+#else
+btree_prefix(key1, key2)
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ void * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ data1 = key1->data ;
+ data2 = key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ croak ("DB_File btree_prefix: expected 1 return value from prefix sub, got %d\n", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+
+#ifdef BERKELEY_DB_1
+# define HASH_CB_SIZE_TYPE size_t
+#else
+# define HASH_CB_SIZE_TYPE u_int32_t
+#endif
+
+static DB_Hash_t
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+hash_cb(DB * db, const void *data, u_int32_t size)
+#else
+hash_cb(db, data, size)
+DB * db ;
+const void * data ;
+HASH_CB_SIZE_TYPE size ;
+#endif
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+hash_cb(const void *data, HASH_CB_SIZE_TYPE size)
+#else
+hash_cb(data, size)
+const void * data ;
+HASH_CB_SIZE_TYPE size ;
+#endif
+
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ int retval ;
+ int count ;
+
+#ifndef newSVpvn
+ if (size == 0)
+ data = "" ;
+#endif
+
+ /* DGH - Next two lines added to fix corrupted stack problem */
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+
+ XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ croak ("DB_File hash_cb: expected 1 return value from hash sub, got %d\n", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+
+#if defined(TRACE) && defined(BERKELEY_DB_1_OR_2)
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintHash(INFO *hash)
+#else
+PrintHash(hash)
+INFO * hash ;
+#endif
+{
+ printf ("HASH Info\n") ;
+ printf (" hash = %s\n",
+ (hash->db_HA_hash != NULL ? "redefined" : "default")) ;
+ printf (" bsize = %d\n", hash->db_HA_bsize) ;
+ printf (" ffactor = %d\n", hash->db_HA_ffactor) ;
+ printf (" nelem = %d\n", hash->db_HA_nelem) ;
+ printf (" cachesize = %d\n", hash->db_HA_cachesize) ;
+ printf (" lorder = %d\n", hash->db_HA_lorder) ;
+
+}
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintRecno(INFO *recno)
+#else
+PrintRecno(recno)
+INFO * recno ;
+#endif
+{
+ printf ("RECNO Info\n") ;
+ printf (" flags = %d\n", recno->db_RE_flags) ;
+ printf (" cachesize = %d\n", recno->db_RE_cachesize) ;
+ printf (" psize = %d\n", recno->db_RE_psize) ;
+ printf (" lorder = %d\n", recno->db_RE_lorder) ;
+ printf (" reclen = %lu\n", (unsigned long)recno->db_RE_reclen) ;
+ printf (" bval = %d 0x%x\n", recno->db_RE_bval, recno->db_RE_bval) ;
+ printf (" bfname = %d [%s]\n", recno->db_RE_bfname, recno->db_RE_bfname) ;
+}
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintBtree(INFO *btree)
+#else
+PrintBtree(btree)
+INFO * btree ;
+#endif
+{
+ printf ("BTREE Info\n") ;
+ printf (" compare = %s\n",
+ (btree->db_BT_compare ? "redefined" : "default")) ;
+ printf (" prefix = %s\n",
+ (btree->db_BT_prefix ? "redefined" : "default")) ;
+ printf (" flags = %d\n", btree->db_BT_flags) ;
+ printf (" cachesize = %d\n", btree->db_BT_cachesize) ;
+ printf (" psize = %d\n", btree->db_BT_psize) ;
+#ifndef DB_VERSION_MAJOR
+ printf (" maxkeypage = %d\n", btree->db_BT_maxkeypage) ;
+ printf (" minkeypage = %d\n", btree->db_BT_minkeypage) ;
+#endif
+ printf (" lorder = %d\n", btree->db_BT_lorder) ;
+}
+
+#else
+
+#define PrintRecno(recno)
+#define PrintHash(hash)
+#define PrintBtree(btree)
+
+#endif /* TRACE */
+
+
+static I32
+#ifdef CAN_PROTOTYPE
+GetArrayLength(pTHX_ DB_File db)
+#else
+GetArrayLength(db)
+DB_File db ;
+#endif
+{
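+    /* The array length is the key of the last record (RECNO keys count
+       from 1); an empty database therefore has length 0. */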
+ DBT key ;
+ DBT value ;
+ int RETVAL ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+ if (RETVAL == 0)
+ RETVAL = *(I32 *)key.data ;
+ else /* No key means empty file */
+ RETVAL = 0 ;
+
+ return ((I32)RETVAL) ;
+}
+
+static recno_t
+#ifdef CAN_PROTOTYPE
+GetRecnoKey(pTHX_ DB_File db, I32 value)
+#else
+GetRecnoKey(db, value)
+DB_File db ;
+I32 value ;
+#endif
+{
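+    /* Convert a Perl array subscript (0-based, possibly negative) into the
+       corresponding 1-based RECNO key. */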
+ if (value < 0) {
+ /* Get the length of the array */
+ I32 length = GetArrayLength(aTHX_ db) ;
+
+ /* check for attempt to write before start of array */
+ if (length + value + 1 <= 0)
+ croak("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+
+ value = length + value + 1 ;
+ }
+ else
+ ++ value ;
+
+ return value ;
+}
+
+
+static DB_File
+#ifdef CAN_PROTOTYPE
+ParseOpenInfo(pTHX_ int isHASH, char *name, int flags, int mode, SV *sv)
+#else
+ParseOpenInfo(isHASH, name, flags, mode, sv)
+int isHASH ;
+char * name ;
+int flags ;
+int mode ;
+SV * sv ;
+#endif
+{
+
+#ifdef BERKELEY_DB_1_OR_2 /* Berkeley DB Version 1 or 2 */
+
+ SV ** svp;
+ HV * action ;
+ DB_File RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
+ void * openinfo = NULL ;
+ INFO * info = &RETVAL->info ;
+ STRLEN n_a;
+
+/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
+ Zero(RETVAL, 1, DB_File_type) ;
+
+ /* Default to HASH */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = 0 ;
+ RETVAL->filter_fetch_key = RETVAL->filter_store_key =
+ RETVAL->filter_fetch_value = RETVAL->filter_store_value =
+#endif /* DBM_FILTERING */
+ RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
+ RETVAL->type = DB_HASH ;
+
+ /* DGH - Next line added to avoid SEGV on existing hash DB */
+ CurrentDB = RETVAL;
+
+ /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
+ RETVAL->in_memory = (name == NULL) ;
+
+ if (sv)
+ {
+ if (! SvROK(sv) )
+ croak ("type parameter is not a reference") ;
+
+ svp = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
+ if (svp && SvOK(*svp))
+ action = (HV*) SvRV(*svp) ;
+ else
+ croak("internal error") ;
+
+ if (sv_isa(sv, "DB_File::HASHINFO"))
+ {
+
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_HASH database") ;
+
+ RETVAL->type = DB_HASH ;
+ openinfo = (void*)info ;
+
+ svp = hv_fetch(action, "hash", 4, FALSE);
+
+ if (svp && SvOK(*svp))
+ {
+ info->db_HA_hash = hash_cb ;
+ RETVAL->hash = newSVsv(*svp) ;
+ }
+ else
+ info->db_HA_hash = NULL ;
+
+ svp = hv_fetch(action, "ffactor", 7, FALSE);
+ info->db_HA_ffactor = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "nelem", 5, FALSE);
+ info->db_HA_nelem = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "bsize", 5, FALSE);
+ info->db_HA_bsize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_HA_cachesize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_HA_lorder = svp ? SvIV(*svp) : 0;
+
+ PrintHash(info) ;
+ }
+ else if (sv_isa(sv, "DB_File::BTREEINFO"))
+ {
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_BTREE database");
+
+ RETVAL->type = DB_BTREE ;
+ openinfo = (void*)info ;
+
+ svp = hv_fetch(action, "compare", 7, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ info->db_BT_compare = btree_compare ;
+ RETVAL->compare = newSVsv(*svp) ;
+ }
+ else
+ info->db_BT_compare = NULL ;
+
+ svp = hv_fetch(action, "prefix", 6, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ info->db_BT_prefix = btree_prefix ;
+ RETVAL->prefix = newSVsv(*svp) ;
+ }
+ else
+ info->db_BT_prefix = NULL ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ info->db_BT_flags = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_BT_cachesize = svp ? SvIV(*svp) : 0;
+
+#ifndef DB_VERSION_MAJOR
+ svp = hv_fetch(action, "minkeypage", 10, FALSE);
+ info->btree.minkeypage = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "maxkeypage", 10, FALSE);
+ info->btree.maxkeypage = svp ? SvIV(*svp) : 0;
+#endif
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ info->db_BT_psize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_BT_lorder = svp ? SvIV(*svp) : 0;
+
+ PrintBtree(info) ;
+
+ }
+ else if (sv_isa(sv, "DB_File::RECNOINFO"))
+ {
+ if (isHASH)
+ croak("DB_File can only tie an array to a DB_RECNO database");
+
+ RETVAL->type = DB_RECNO ;
+ openinfo = (void *)info ;
+
+ info->db_RE_flags = 0 ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ info->db_RE_flags = (u_long) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "reclen", 6, FALSE);
+ info->db_RE_reclen = (size_t) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_RE_cachesize = (u_int) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ info->db_RE_psize = (u_int) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_RE_lorder = (int) (svp ? SvIV(*svp) : 0);
+
+#ifdef DB_VERSION_MAJOR
+ info->re_source = name ;
+ name = NULL ;
+#endif
+ svp = hv_fetch(action, "bfname", 6, FALSE);
+ if (svp && SvOK(*svp)) {
+ char * ptr = SvPV(*svp,n_a) ;
+#ifdef DB_VERSION_MAJOR
+ name = (char*) n_a ? ptr : NULL ;
+#else
+ info->db_RE_bfname = (char*) (n_a ? ptr : NULL) ;
+#endif
+ }
+ else
+#ifdef DB_VERSION_MAJOR
+ name = NULL ;
+#else
+ info->db_RE_bfname = NULL ;
+#endif
+
+ svp = hv_fetch(action, "bval", 4, FALSE);
+#ifdef DB_VERSION_MAJOR
+ if (svp && SvOK(*svp))
+ {
+ int value ;
+ if (SvPOK(*svp))
+ value = (int)*SvPV(*svp, n_a) ;
+ else
+ value = SvIV(*svp) ;
+
+ if (info->flags & DB_FIXEDLEN) {
+ info->re_pad = value ;
+ info->flags |= DB_PAD ;
+ }
+ else {
+ info->re_delim = value ;
+ info->flags |= DB_DELIMITER ;
+ }
+
+ }
+#else
+ if (svp && SvOK(*svp))
+ {
+ if (SvPOK(*svp))
+ info->db_RE_bval = (u_char)*SvPV(*svp, n_a) ;
+ else
+ info->db_RE_bval = (u_char)(unsigned long) SvIV(*svp) ;
+ DB_flags(info->flags, DB_DELIMITER) ;
+
+ }
+ else
+ {
+ if (info->db_RE_flags & R_FIXEDLEN)
+ info->db_RE_bval = (u_char) ' ' ;
+ else
+ info->db_RE_bval = (u_char) '\n' ;
+ DB_flags(info->flags, DB_DELIMITER) ;
+ }
+#endif
+
+#ifdef DB_RENUMBER
+ info->flags |= DB_RENUMBER ;
+#endif
+
+ PrintRecno(info) ;
+ }
+ else
+ croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
+ }
+
+
+ /* OS2 Specific Code */
+#ifdef OS2
+#ifdef __EMX__
+ flags |= O_BINARY;
+#endif /* __EMX__ */
+#endif /* OS2 */
+
+#ifdef DB_VERSION_MAJOR
+
+ {
+ int Flags = 0 ;
+ int status ;
+
+ /* Map 1.x flags to 2.x flags */
+ if ((flags & O_CREAT) == O_CREAT)
+ Flags |= DB_CREATE ;
+
+#if O_RDONLY == 0
+ if (flags == O_RDONLY)
+#else
+ if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
+#endif
+ Flags |= DB_RDONLY ;
+
+#ifdef O_TRUNC
+ if ((flags & O_TRUNC) == O_TRUNC)
+ Flags |= DB_TRUNCATE ;
+#endif
+
+ status = db_open(name, RETVAL->type, Flags, mode, NULL, openinfo, &RETVAL->dbp) ;
+ if (status == 0)
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor) ;
+#else
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
+ 0) ;
+#endif
+
+ if (status)
+ RETVAL->dbp = NULL ;
+
+ }
+#else
+
+#if defined(DB_LIBRARY_COMPATIBILITY_API) && DB_VERSION_MAJOR > 2
+ RETVAL->dbp = __db185_open(name, flags, mode, RETVAL->type, openinfo) ;
+#else
+ RETVAL->dbp = dbopen(name, flags, mode, RETVAL->type, openinfo) ;
+#endif /* DB_LIBRARY_COMPATIBILITY_API */
+
+#endif
+
+ return (RETVAL) ;
+
+#else /* Berkeley DB Version > 2 */
+
+ SV ** svp;
+ HV * action ;
+ DB_File RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
+ DB * dbp ;
+ STRLEN n_a;
+ int status ;
+
+/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
+ Zero(RETVAL, 1, DB_File_type) ;
+
+ /* Default to HASH */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = 0 ;
+ RETVAL->filter_fetch_key = RETVAL->filter_store_key =
+ RETVAL->filter_fetch_value = RETVAL->filter_store_value =
+#endif /* DBM_FILTERING */
+ RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
+ RETVAL->type = DB_HASH ;
+
+ /* DGH - Next line added to avoid SEGV on existing hash DB */
+ CurrentDB = RETVAL;
+
+ /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
+ RETVAL->in_memory = (name == NULL) ;
+
+ status = db_create(&RETVAL->dbp, NULL,0) ;
+ /* printf("db_create returned %d %s\n", status, db_strerror(status)) ; */
+ if (status) {
+ RETVAL->dbp = NULL ;
+ return (RETVAL) ;
+ }
+ dbp = RETVAL->dbp ;
+
+ if (sv)
+ {
+ if (! SvROK(sv) )
+ croak ("type parameter is not a reference") ;
+
+ svp = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
+ if (svp && SvOK(*svp))
+ action = (HV*) SvRV(*svp) ;
+ else
+ croak("internal error") ;
+
+ if (sv_isa(sv, "DB_File::HASHINFO"))
+ {
+
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_HASH database") ;
+
+ RETVAL->type = DB_HASH ;
+
+ svp = hv_fetch(action, "hash", 4, FALSE);
+
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_h_hash(dbp, hash_cb) ;
+ RETVAL->hash = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "ffactor", 7, FALSE);
+ if (svp)
+ (void)dbp->set_h_ffactor(dbp, SvIV(*svp)) ;
+
+ svp = hv_fetch(action, "nelem", 5, FALSE);
+ if (svp)
+ (void)dbp->set_h_nelem(dbp, SvIV(*svp)) ;
+
+ svp = hv_fetch(action, "bsize", 5, FALSE);
+ if (svp)
+ (void)dbp->set_pagesize(dbp, SvIV(*svp));
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp)
+ (void)dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp)
+ (void)dbp->set_lorder(dbp, SvIV(*svp)) ;
+
+ PrintHash(info) ;
+ }
+ else if (sv_isa(sv, "DB_File::BTREEINFO"))
+ {
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_BTREE database");
+
+ RETVAL->type = DB_BTREE ;
+
+ svp = hv_fetch(action, "compare", 7, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_bt_compare(dbp, btree_compare) ;
+ RETVAL->compare = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "prefix", 6, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_bt_prefix(dbp, btree_prefix) ;
+ RETVAL->prefix = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ if (svp)
+ (void)dbp->set_flags(dbp, SvIV(*svp)) ;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp)
+ (void)dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ if (svp)
+ (void)dbp->set_pagesize(dbp, SvIV(*svp)) ;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp)
+ (void)dbp->set_lorder(dbp, SvIV(*svp)) ;
+
+ PrintBtree(info) ;
+
+ }
+ else if (sv_isa(sv, "DB_File::RECNOINFO"))
+ {
+ int fixed = FALSE ;
+
+ if (isHASH)
+ croak("DB_File can only tie an array to a DB_RECNO database");
+
+ RETVAL->type = DB_RECNO ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ if (svp) {
+ int flags = SvIV(*svp) ;
+ /* remove FIXDLEN, if present */
+ if (flags & DB_FIXEDLEN) {
+ fixed = TRUE ;
+ flags &= ~DB_FIXEDLEN ;
+ }
+ }
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp) {
+ status = dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+ }
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ if (svp) {
+ status = dbp->set_pagesize(dbp, SvIV(*svp)) ;
+ }
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp) {
+ status = dbp->set_lorder(dbp, SvIV(*svp)) ;
+ }
+
+ svp = hv_fetch(action, "bval", 4, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ int value ;
+ if (SvPOK(*svp))
+ value = (int)*SvPV(*svp, n_a) ;
+ else
+ value = SvIV(*svp) ;
+
+ if (fixed) {
+ status = dbp->set_re_pad(dbp, value) ;
+ }
+ else {
+ status = dbp->set_re_delim(dbp, value) ;
+ }
+
+ }
+
+ if (fixed) {
+ svp = hv_fetch(action, "reclen", 6, FALSE);
+ if (svp) {
+ u_int32_t len = (u_int32_t)SvIV(*svp) ;
+ status = dbp->set_re_len(dbp, len) ;
+ }
+ }
+
+ if (name != NULL) {
+ status = dbp->set_re_source(dbp, name) ;
+ name = NULL ;
+ }
+
+ svp = hv_fetch(action, "bfname", 6, FALSE);
+ if (svp && SvOK(*svp)) {
+ char * ptr = SvPV(*svp,n_a) ;
+ name = (char*) n_a ? ptr : NULL ;
+ }
+ else
+ name = NULL ;
+
+
+ status = dbp->set_flags(dbp, DB_RENUMBER) ;
+
+ if (flags){
+ (void)dbp->set_flags(dbp, flags) ;
+ }
+ PrintRecno(info) ;
+ }
+ else
+ croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
+ }
+
+ {
+ int Flags = 0 ;
+ int status ;
+
+ /* Map 1.x flags to 3.x flags */
+ if ((flags & O_CREAT) == O_CREAT)
+ Flags |= DB_CREATE ;
+
+#if O_RDONLY == 0
+ if (flags == O_RDONLY)
+#else
+ if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
+#endif
+ Flags |= DB_RDONLY ;
+
+#ifdef O_TRUNC
+ if ((flags & O_TRUNC) == O_TRUNC)
+ Flags |= DB_TRUNCATE ;
+#endif
+
+ status = (RETVAL->dbp->open)(RETVAL->dbp, name, NULL, RETVAL->type,
+ Flags, mode) ;
+ /* printf("open returned %d %s\n", status, db_strerror(status)) ; */
+
+ if (status == 0)
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
+ 0) ;
+ /* printf("cursor returned %d %s\n", status, db_strerror(status)) ; */
+
+ if (status)
+ RETVAL->dbp = NULL ;
+
+ }
+
+ return (RETVAL) ;
+
+#endif /* Berkeley DB Version > 2 */
+
+} /* ParseOpenInfo */
+
+
+static double
+#ifdef CAN_PROTOTYPE
+constant(char *name, int arg)
+#else
+constant(name, arg)
+char *name;
+int arg;
+#endif
+{
+ errno = 0;
+ switch (*name) {
+ case 'A':
+ break;
+ case 'B':
+ if (strEQ(name, "BTREEMAGIC"))
+#ifdef BTREEMAGIC
+ return BTREEMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "BTREEVERSION"))
+#ifdef BTREEVERSION
+ return BTREEVERSION;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'C':
+ break;
+ case 'D':
+ if (strEQ(name, "DB_LOCK"))
+#ifdef DB_LOCK
+ return DB_LOCK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SHMEM"))
+#ifdef DB_SHMEM
+ return DB_SHMEM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN"))
+#ifdef DB_TXN
+ return (U32)DB_TXN;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'E':
+ break;
+ case 'F':
+ break;
+ case 'G':
+ break;
+ case 'H':
+ if (strEQ(name, "HASHMAGIC"))
+#ifdef HASHMAGIC
+ return HASHMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "HASHVERSION"))
+#ifdef HASHVERSION
+ return HASHVERSION;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'I':
+ break;
+ case 'J':
+ break;
+ case 'K':
+ break;
+ case 'L':
+ break;
+ case 'M':
+ if (strEQ(name, "MAX_PAGE_NUMBER"))
+#ifdef MAX_PAGE_NUMBER
+ return (U32)MAX_PAGE_NUMBER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "MAX_PAGE_OFFSET"))
+#ifdef MAX_PAGE_OFFSET
+ return MAX_PAGE_OFFSET;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "MAX_REC_NUMBER"))
+#ifdef MAX_REC_NUMBER
+ return (U32)MAX_REC_NUMBER;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'N':
+ break;
+ case 'O':
+ break;
+ case 'P':
+ break;
+ case 'Q':
+ break;
+ case 'R':
+ if (strEQ(name, "RET_ERROR"))
+#ifdef RET_ERROR
+ return RET_ERROR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "RET_SPECIAL"))
+#ifdef RET_SPECIAL
+ return RET_SPECIAL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "RET_SUCCESS"))
+#ifdef RET_SUCCESS
+ return RET_SUCCESS;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_CURSOR"))
+#ifdef R_CURSOR
+ return R_CURSOR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_DUP"))
+#ifdef R_DUP
+ return R_DUP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_FIRST"))
+#ifdef R_FIRST
+ return R_FIRST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_FIXEDLEN"))
+#ifdef R_FIXEDLEN
+ return R_FIXEDLEN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_IAFTER"))
+#ifdef R_IAFTER
+ return R_IAFTER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_IBEFORE"))
+#ifdef R_IBEFORE
+ return R_IBEFORE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_LAST"))
+#ifdef R_LAST
+ return R_LAST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_NEXT"))
+#ifdef R_NEXT
+ return R_NEXT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_NOKEY"))
+#ifdef R_NOKEY
+ return R_NOKEY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_NOOVERWRITE"))
+#ifdef R_NOOVERWRITE
+ return R_NOOVERWRITE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_PREV"))
+#ifdef R_PREV
+ return R_PREV;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_RECNOSYNC"))
+#ifdef R_RECNOSYNC
+ return R_RECNOSYNC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_SETCURSOR"))
+#ifdef R_SETCURSOR
+ return R_SETCURSOR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_SNAPSHOT"))
+#ifdef R_SNAPSHOT
+ return R_SNAPSHOT;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'S':
+ break;
+ case 'T':
+ break;
+ case 'U':
+ break;
+ case 'V':
+ break;
+ case 'W':
+ break;
+ case 'X':
+ break;
+ case 'Y':
+ break;
+ case 'Z':
+ break;
+ case '_':
+ break;
+ }
+ errno = EINVAL;
+ return 0;
+
+not_there:
+ errno = ENOENT;
+ return 0;
+}
+
+MODULE = DB_File PACKAGE = DB_File PREFIX = db_
+
+BOOT:
+ {
+ __getBerkeleyDBInfo() ;
+
+ DBT_clear(empty) ;
+ empty.data = &zero ;
+ empty.size = sizeof(recno_t) ;
+ }
+
+double
+constant(name,arg)
+ char * name
+ int arg
+
+
+DB_File
+db_DoTie_(isHASH, dbtype, name=undef, flags=O_CREAT|O_RDWR, mode=0666, type=DB_HASH)
+ int isHASH
+ char * dbtype
+ int flags
+ int mode
+ CODE:
+ {
+ char * name = (char *) NULL ;
+ SV * sv = (SV *) NULL ;
+ STRLEN n_a;
+
+ if (items >= 3 && SvOK(ST(2)))
+ name = (char*) SvPV(ST(2), n_a) ;
+
+ if (items == 6)
+ sv = ST(5) ;
+
+ RETVAL = ParseOpenInfo(aTHX_ isHASH, name, flags, mode, sv) ;
+ if (RETVAL->dbp == NULL)
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+int
+db_DESTROY(db)
+ DB_File db
+ INIT:
+ CurrentDB = db ;
+ CLEANUP:
+ if (db->hash)
+ SvREFCNT_dec(db->hash) ;
+ if (db->compare)
+ SvREFCNT_dec(db->compare) ;
+ if (db->prefix)
+ SvREFCNT_dec(db->prefix) ;
+#ifdef DBM_FILTERING
+ if (db->filter_fetch_key)
+ SvREFCNT_dec(db->filter_fetch_key) ;
+ if (db->filter_store_key)
+ SvREFCNT_dec(db->filter_store_key) ;
+ if (db->filter_fetch_value)
+ SvREFCNT_dec(db->filter_fetch_value) ;
+ if (db->filter_store_value)
+ SvREFCNT_dec(db->filter_store_value) ;
+#endif /* DBM_FILTERING */
+ safefree(db) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+#endif
+
+
+int
+db_DELETE(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ INIT:
+ CurrentDB = db ;
+
+
+int
+db_EXISTS(db, key)
+ DB_File db
+ DBTKEY key
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = (((db->dbp)->get)(db->dbp, TXN &key, &value, 0) == 0) ;
+ }
+ OUTPUT:
+ RETVAL
+
+int
+db_FETCH(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ /* RETVAL = ((db->dbp)->get)(db->dbp, TXN &key, &value, flags) ; */
+ RETVAL = db_get(db, key, value, flags) ;
+ ST(0) = sv_newmortal();
+ OutputValue(ST(0), value)
+ }
+
+int
+db_STORE(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value
+ u_int flags
+ INIT:
+ CurrentDB = db ;
+
+
+int
+db_FIRSTKEY(db)
+ DB_File db
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = do_SEQ(db, key, value, R_FIRST) ;
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key) ;
+ }
+
+int
+db_NEXTKEY(db, key)
+ DB_File db
+ DBTKEY key
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = do_SEQ(db, key, value, R_NEXT) ;
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key) ;
+ }
+
+#
+# These would be nice for RECNO
+#
+
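+# The unshift/pop/shift/push/length functions below emulate Perl array
+# operations on a RECNO database, using the stored record numbers as the
+# array indices.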
+int
+unshift(db, ...)
+ DB_File db
+ ALIAS: UNSHIFT = 1
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ int i ;
+ int One ;
+ DB * Db = db->dbp ;
+ STRLEN n_a;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+#ifdef DB_VERSION_MAJOR
+ /* get the first value */
+ RETVAL = do_SEQ(db, key, value, DB_FIRST) ;
+ RETVAL = 0 ;
+#else
+ RETVAL = -1 ;
+#endif
+ for (i = items-1 ; i > 0 ; --i)
+ {
+ value.data = SvPV(ST(i), n_a) ;
+ value.size = n_a ;
+ One = 1 ;
+ key.data = &One ;
+ key.size = sizeof(int) ;
+#ifdef DB_VERSION_MAJOR
+ RETVAL = (db->cursor->c_put)(db->cursor, &key, &value, DB_BEFORE) ;
+#else
+ RETVAL = (Db->put)(Db, &key, &value, R_IBEFORE) ;
+#endif
+ if (RETVAL != 0)
+ break;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+I32
+pop(db)
+ DB_File db
+ ALIAS: POP = 1
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+
+ /* First get the final value */
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+ ST(0) = sv_newmortal();
+ /* Now delete it */
+ if (RETVAL == 0)
+ {
+ /* the call to del will trash value, so take a copy now */
+ OutputValue(ST(0), value) ;
+ RETVAL = db_del(db, key, R_CURSOR) ;
+ if (RETVAL != 0)
+ sv_setsv(ST(0), &PL_sv_undef);
+ }
+ }
+
+I32
+shift(db)
+ DB_File db
+ ALIAS: SHIFT = 1
+ CODE:
+ {
+ DBT value ;
+ DBTKEY key ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ /* get the first value */
+ RETVAL = do_SEQ(db, key, value, R_FIRST) ;
+ ST(0) = sv_newmortal();
+ /* Now delete it */
+ if (RETVAL == 0)
+ {
+ /* the call to del will trash value, so take a copy now */
+ OutputValue(ST(0), value) ;
+ RETVAL = db_del(db, key, R_CURSOR) ;
+ if (RETVAL != 0)
+ sv_setsv (ST(0), &PL_sv_undef) ;
+ }
+ }
+
+
+I32
+push(db, ...)
+ DB_File db
+ ALIAS: PUSH = 1
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ DB * Db = db->dbp ;
+ int i ;
+ STRLEN n_a;
+ int keyval ;
+
+ DBT_flags(key) ;
+ DBT_flags(value) ;
+ CurrentDB = db ;
+ /* Set the Cursor to the Last element */
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+#ifndef DB_VERSION_MAJOR
+ if (RETVAL >= 0)
+#endif
+ {
+ if (RETVAL == 0)
+ keyval = *(int*)key.data ;
+ else
+ keyval = 0 ;
+ for (i = 1 ; i < items ; ++i)
+ {
+ value.data = SvPV(ST(i), n_a) ;
+ value.size = n_a ;
+ ++ keyval ;
+ key.data = &keyval ;
+ key.size = sizeof(int) ;
+ RETVAL = (Db->put)(Db, TXN &key, &value, 0) ;
+ if (RETVAL != 0)
+ break;
+ }
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+I32
+length(db)
+ DB_File db
+ ALIAS: FETCHSIZE = 1
+ CODE:
+ CurrentDB = db ;
+ RETVAL = GetArrayLength(aTHX_ db) ;
+ OUTPUT:
+ RETVAL
+
+
+#
+# Now provide an interface to the rest of the DB functionality
+#
+
+int
+db_del(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_del(db, key, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+
+
+int
+db_get(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value = NO_INIT
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ RETVAL = db_get(db, key, value, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ value
+
+int
+db_put(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_put(db, key, value, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_KEYEXIST)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ key if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) OutputKey(ST(1), key);
+
+int
+db_fd(db)
+ DB_File db
+ int status = 0 ;
+ CODE:
+ CurrentDB = db ;
+#ifdef DB_VERSION_MAJOR
+ RETVAL = -1 ;
+ status = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp, &RETVAL) ) ;
+ if (status != 0)
+ RETVAL = -1 ;
+#else
+ RETVAL = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp) ) ;
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+db_sync(db, flags=0)
+ DB_File db
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_sync(db, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+#endif
+ OUTPUT:
+ RETVAL
+
+
+int
+db_seq(db, key, value, flags)
+ DB_File db
+ DBTKEY key
+ DBT value = NO_INIT
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ RETVAL = db_seq(db, key, value, flags);
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ key
+ value
+
+#ifdef DBM_FILTERING
+
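+/*
+ * setFilter returns the current filter in the given slot (as a mortal
+ * copy) and, when a code reference is supplied, installs it; passing
+ * undef removes any existing filter.
+ */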
+#define setFilter(type) \
+ { \
+ if (db->type) \
+ RETVAL = sv_mortalcopy(db->type) ; \
+ ST(0) = RETVAL ; \
+ if (db->type && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db->type) ; \
+ db->type = NULL ; \
+ } \
+ else if (code) { \
+ if (db->type) \
+ sv_setsv(db->type, code) ; \
+ else \
+ db->type = newSVsv(code) ; \
+ } \
+ }
+
+
+SV *
+filter_fetch_key(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_fetch_key) ;
+
+SV *
+filter_store_key(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_store_key) ;
+
+SV *
+filter_fetch_value(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_fetch_value) ;
+
+SV *
+filter_store_value(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_store_value) ;
+
+#endif /* DBM_FILTERING */
diff --git a/bdb/perl.DB_File/DB_File_BS b/bdb/perl.DB_File/DB_File_BS
new file mode 100644
index 00000000000..9282c498811
--- /dev/null
+++ b/bdb/perl.DB_File/DB_File_BS
@@ -0,0 +1,6 @@
+# NeXT needs /usr/lib/libposix.a to load along with DB_File.so
+if ( $dlsrc eq "dl_next.xs" ) {
+ @DynaLoader::dl_resolve_using = ( '/usr/lib/libposix.a' );
+}
+
+1;
diff --git a/bdb/perl.DB_File/MANIFEST b/bdb/perl.DB_File/MANIFEST
new file mode 100644
index 00000000000..0cc30dbfb47
--- /dev/null
+++ b/bdb/perl.DB_File/MANIFEST
@@ -0,0 +1,27 @@
+Makefile.PL
+DB_File.pm
+DB_File.xs
+DB_File_BS
+Changes
+config.in
+dbinfo
+hints/dynixptx.pl
+hints/sco.pl
+MANIFEST
+README
+typemap
+t/db-btree.t
+t/db-hash.t
+t/db-recno.t
+version.c
+patches/5.004
+patches/5.004_01
+patches/5.004_02
+patches/5.004_03
+patches/5.004_04
+patches/5.004_05
+patches/5.005
+patches/5.005_01
+patches/5.005_02
+patches/5.005_03
+patches/5.6.0
diff --git a/bdb/perl.DB_File/Makefile.PL b/bdb/perl.DB_File/Makefile.PL
new file mode 100644
index 00000000000..25e707df6ea
--- /dev/null
+++ b/bdb/perl.DB_File/Makefile.PL
@@ -0,0 +1,187 @@
+#! perl -w
+use strict ;
+use ExtUtils::MakeMaker 5.16 ;
+use Config ;
+
+my $VER_INFO ;
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+my $COMPAT185 = "" ;
+
+my @files = ('DB_File.pm', glob "t/*.t") ;
+# See if warnings is available
+eval 'use warnings;';
+if ($@) {
+    # not there, so rewrite the files to use $^W instead
+ oldWarnings(@files) ;
+} else {
+    # it is there, so make sure the files use the warnings pragma
+ newWarnings(@files) ;
+}
+
+ParseCONFIG() ;
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# Solaris is special.
+#$LIBS .= " -lthread" if $^O eq 'solaris' ;
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $Config{'osname'} eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'DB_File',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ MAN3PODS => ' ', # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'DB_File.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "$OS2 $VER_INFO $COMPAT185",
+ OBJECT => 'version$(OBJ_EXT) DB_File$(OBJ_EXT)',
+ OPTIMIZE => '-g',
+ 'macro' => { INSTALLDIRS => 'perl' },
+ 'dist' => {COMPRESS=>'gzip', SUFFIX=>'gz'},
+ );
+
+
+sub MY::postamble {
+ '
+version$(OBJ_EXT): version.c
+
+$(NAME).xs: typemap
+ @$(TOUCH) $(NAME).xs
+
+Makefile: config.in
+
+' ;
+}
+
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB PREFIX HASH DBNAME COMPAT185 ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME & COMPAT185 are optional, so pretend they have
+ # been parsed.
+ delete $Parsed{'DBNAME'} ;
+ delete $Parsed{'COMPAT185'} ;
+ $Info{COMPAT185} = "No" ;
+
+
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'DB_FILE_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'DB_FILE_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ $COMPAT185 = "-DCOMPAT185 -DDB_LIBRARY_COMPATIBILITY_API"
+ if (defined $ENV{'DB_FILE_COMPAT185'} &&
+ $ENV{'DB_FILE_COMPAT185'} =~ /^\s*(on|true|1)\s*$/i) ||
+ $Info{'COMPAT185'} =~ /^\s*(on|true|1)\s*$/i ;
+ my $PREFIX = $Info{'PREFIX'} ;
+ my $HASH = $Info{'HASH'} ;
+
+ $VER_INFO = "-DmDB_Prefix_t=${PREFIX} -DmDB_Hash_t=${HASH}" ;
+
+ print <<EOM if 0 ;
+ INCLUDE [$INC_DIR]
+ LIB [$LIB_DIR]
+ HASH [$HASH]
+ PREFIX [$PREFIX]
+ DBNAME [$DB_NAME]
+
+EOM
+
+ print "Looks Good.\n" ;
+
+}
+
+sub oldWarnings
+{
+ local ($^I) = ".bak" ;
+ local (@ARGV) = @_ ;
+
+ while (<>)
+ {
+ if (/^__END__/)
+ {
+ print ;
+ my $this = $ARGV ;
+ while (<>)
+ {
+ last if $ARGV ne $this ;
+ print ;
+ }
+ }
+
+ s/^(\s*)(no\s+warnings)/${1}local (\$^W) = 0; #$2/ ;
+ s/^(\s*)(use\s+warnings)/${1}local (\$^W) = 1; #$2/ ;
+ print ;
+ }
+}
+
+sub newWarnings
+{
+ local ($^I) = ".bak" ;
+ local (@ARGV) = @_ ;
+
+ while (<>)
+ {
+ if (/^__END__/)
+ {
+ my $this = $ARGV ;
+ print ;
+ while (<>)
+ {
+ last if $ARGV ne $this ;
+ print ;
+ }
+ }
+
+ s/^(\s*)local\s*\(\$\^W\)\s*=\s*\d+\s*;\s*#\s*((no|use)\s+warnings.*)/$1$2/ ;
+ print ;
+ }
+}
+
+# end of file Makefile.PL
diff --git a/bdb/perl.DB_File/README b/bdb/perl.DB_File/README
new file mode 100644
index 00000000000..e780111b2e9
--- /dev/null
+++ b/bdb/perl.DB_File/README
@@ -0,0 +1,396 @@
+ DB_File
+
+ Version 1.76
+
+ 15th January 2001
+
+ Copyright (c) 1995-2001 Paul Marquess. All rights reserved. This
+ program is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself.
+
+
+IMPORTANT NOTICE
+================
+
+If you are using the locking technique described in older versions of
+DB_File, please read the section called "Locking: The Trouble with fd"
+in DB_File.pm immediately. The locking method has been found to be
+unsafe. You risk corrupting your data if you continue to use it.
+
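+One safer pattern is to hold an exclusive flock() on a separate lock file
+for the whole time the database is tied (a minimal sketch; the file names
+are purely illustrative):
+
+    use DB_File ;
+    use Fcntl qw(:DEFAULT :flock) ;
+
+    open(LOCK, ">mydb.lock") or die "Cannot open mydb.lock: $!\n" ;
+    flock(LOCK, LOCK_EX) or die "Cannot lock mydb.lock: $!\n" ;
+
+    my %h ;
+    tie %h, 'DB_File', "mydb", O_CREAT|O_RDWR, 0666, $DB_HASH
+        or die "Cannot open mydb: $!\n" ;
+
+    # ... read and write %h ...
+
+    untie %h ;
+    close LOCK ;    # releases the lock
+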
+DESCRIPTION
+-----------
+
+DB_File is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 1. (DB_File can be built
+with version 2 or 3 of Berkeley DB, but it will only support the 1.x
+features.)
+
+If you want to make use of the new features available in Berkeley DB
+2.x or 3.x, use the Perl module BerkeleyDB instead.
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. DB_File provides an interface to all three
+of the database types (hash, btree and recno) currently supported by
+Berkeley DB.
+
+For further details see the documentation included at the end of the
+file DB_File.pm.
+
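+As a quick illustration, tying a hash to a Berkeley DB hash file takes
+only a few lines (a minimal sketch; the file name is illustrative):
+
+    use DB_File ;
+    use Fcntl ;
+
+    my %h ;
+    tie %h, 'DB_File', "fruit.db", O_CREAT|O_RDWR, 0666, $DB_HASH
+        or die "Cannot open fruit.db: $!\n" ;
+    $h{"apple"} = "red" ;
+    print $h{"apple"}, "\n" ;
+    untie %h ;
+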
+PREREQUISITES
+-------------
+
+Before you can build DB_File you must have the following installed on
+your system:
+
+ * Perl 5.004 or greater.
+
+ * Berkeley DB.
+
+ The official web site for Berkeley DB is http://www.sleepycat.com.
+ The latest version of Berkeley DB is always available there. It
+ is recommended that you use the most recent version available at
+ the Sleepycat site.
+
+ The one exception to this advice is where you want to use DB_File
+ to access database files created by a third-party application, like
+ Sendmail or Netscape. In these cases you must build DB_File with a
+ compatible version of Berkeley DB.
+
+ If you want to use Berkeley DB 2.x, you must have version 2.3.4
+ or greater. If you want to use Berkeley DB 3.x, any version will
+ do. For Berkeley DB 1.x, use either version 1.85 or 1.86.
+
+
+BUILDING THE MODULE
+-------------------
+
+Assuming you have met all the prerequisites, building the module should
+be relatively straightforward.
+
+Step 1 : If you are running either Solaris 2.5 or HP-UX 10 and want
+ to use Berkeley DB version 2 or 3, read either the Solaris Notes
+ or HP-UX Notes sections below. If you are running Linux please
+ read the Linux Notes section before proceeding.
+
+Step 2 : Edit the file config.in to suit your local installation.
+ Instructions are given in the file.
+
+Step 3 : Build and test the module using this sequence of commands:
+
+ perl Makefile.PL
+ make
+ make test
+
+
+ NOTE:
+ If you have a very old version of Berkeley DB (i.e. pre 1.85),
+ three of the tests in the recno test harness may fail (tests 51,
+ 53 and 55). You can safely ignore the errors if you're never
+ going to use the broken functionality (recno databases with a
+ modified bval). Otherwise you'll have to upgrade your DB
+ library.
+
+
+INSTALLATION
+------------
+
+ make install
+
+
+TROUBLESHOOTING
+===============
+
+Here are some of the common problems people encounter when building
+DB_File.
+
+Missing db.h or libdb.a
+-----------------------
+
+If you get an error like this:
+
+ cc -c -I/usr/local/include -Dbool=char -DHAS_BOOL
+ -O2 -DVERSION=\"1.64\" -DXS_VERSION=\"1.64\" -fpic
+ -I/usr/local/lib/perl5/i586-linux/5.00404/CORE -DmDB_Prefix_t=size_t
+ -DmDB_Hash_t=u_int32_t DB_File.c
+ DB_File.xs:101: db.h: No such file or directory
+
+or this:
+
+ LD_RUN_PATH="/lib" cc -o blib/arch/auto/DB_File/DB_File.so -shared
+ -L/usr/local/lib DB_File.o -L/usr/local/lib -ldb
+ ld: cannot open -ldb: No such file or directory
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB installed, but it isn't in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables to point
+ to the directories where libdb.a and db.h are installed.
+
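+     For example, if Berkeley DB lives under /usr/local/BerkeleyDB (an
+     illustrative path), the config.in entries would look like this:
+
+         INCLUDE = /usr/local/BerkeleyDB/include
+         LIB     = /usr/local/BerkeleyDB/lib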
+
+Undefined symbol db_version
+---------------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /usr/bin/perl5.00404 -I./blib/arch -I./blib/lib
+ -I/usr/local/lib/perl5/i586-linux/5.00404 -I/usr/local/lib/perl5 -e 'use
+ Test::Harness qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/db-btree..........Can't load './blib/arch/auto/DB_File/DB_File.so' for
+ module DB_File: ./blib/arch/auto/DB_File/DB_File.so: undefined symbol:
+ db_version at /usr/local/lib/perl5/i586-linux/5.00404/DynaLoader.pm
+ line 166.
+
+ at t/db-btree.t line 21
+ BEGIN failed--compilation aborted at t/db-btree.t line 21.
+ dubious Test returned status 2 (wstat 512, 0x200)
+
+This error usually happens when you have both version 1 and version
+2 of Berkeley DB installed on your system and DB_File attempts to
+build using the db.h for Berkeley DB version 2 and the version 1
+library. Unfortunately the two versions aren't compatible with each
+other. The undefined symbol error is actually caused because Berkeley
+DB version 1 doesn't have the symbol db_version.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want DB_File to use.
+
+Incompatible versions of db.h and libdb
+---------------------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00560 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_60/lib/5.00560/i586-linux
+ -I/home/paul/perl/install/5.005_60/lib/5.00560 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/db-btree..........
+ DB_File needs compatible versions of libdb & db.h
+ you have db.h version 2.3.7 and libdb version 2.7.5
+ BEGIN failed--compilation aborted at t/db-btree.t line 21.
+ ...
+
+Another variation on the theme of having two versions of Berkeley DB on
+your system.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+ If you are running Linux, please read the Linux Notes section
+ below.
+
+
+Linux Notes
+-----------
+
+Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
+that has version 2.x of Berkeley DB linked into it. This makes it
+difficult to build this module with anything other than the version of
+Berkeley DB that shipped with your Linux release. If you do try to use
+a different version of Berkeley DB you will most likely get the error
+described in the "Incompatible versions of db.h and libdb" section of
+this file.
+
+To make matters worse, prior to Perl 5.6.1, the perl binary itself
+*always* included the Berkeley DB library.
+
+If you want to use a newer version of Berkeley DB with this module, the
+easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
+(or better).
+
+There are two approaches you can use to get older versions of Perl to
+work with specific versions of Berkeley DB. Both have their advantages
+and disadvantages.
+
+The first approach will only work when you want to build a version of
+Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
+Berkeley DB 2.x, you must use the next approach. This approach involves
+rebuilding your existing version of Perl after applying an unofficial
+patch. The "patches" directory in this module's source distribution
+contains a number of patch files. There is one patch file for every
+stable version of Perl since 5.004. Apply the appropriate patch to your
+Perl source tree before re-building and installing Perl from scratch.
+For example, assuming you are in the top-level source directory for
+Perl 5.6.0, the command below will apply the necessary patch. Remember
+to replace the path shown below with one that points to this module's
+patches directory.
+
+ patch -p1 -N </path/to/DB_File/patches/5.6.0
+
+Now rebuild & install perl. You should now have a perl binary that can
+be used to build this module. Follow the instructions in "BUILDING THE
+MODULE", remembering to set the INCLUDE and LIB variables in config.in.
+
+
+The second approach will work with both Berkeley DB 2.x and 3.x.
+Start by building Berkeley DB as a shared library. This is from
+the Berkeley DB build instructions:
+
+ Building Shared Libraries for the GNU GCC compiler
+
+ If you're using gcc and there's no better shared library example for
+ your architecture, the following shared library build procedure will
+ probably work.
+
+ Add the -fpic option to the CFLAGS value in the Makefile.
+
+ Rebuild all of your .o files. This will create a Berkeley DB library
+ that contains .o files with PIC code. To build the shared library,
+ then take the following steps in the library build directory:
+
+ % mkdir tmp
+ % cd tmp
+ % ar xv ../libdb.a
+ % gcc -shared -o libdb.so *.o
+ % mv libdb.so ..
+ % cd ..
+ % rm -rf tmp
+
+ Note, you may have to change the gcc line depending on the
+ requirements of your system.
+
+ The file libdb.so is your shared library
+
+Once you have built libdb.so, you will need to store it somewhere safe.
+
+ cp libdb.so /usr/local/BerkeleyDB/lib
+
+If you now set the LD_PRELOAD environment variable to point to this
+shared library, Perl will use it instead of the version of Berkeley DB
+that shipped with your Linux distribution.
+
+ export LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so
+
+Finally follow the instructions in "BUILDING THE MODULE" to build,
+test and install this module. Don't forget to set the INCLUDE and LIB
+variables in config.in.
+
+Remember, you will need to have the LD_PRELOAD variable set anytime you
+want to use Perl with Berkeley DB. Also note that if you have LD_PRELOAD
+permanently set it will affect ALL commands you execute. This may be a
+problem if you run any commands that access a database created by the
+version of Berkeley DB that shipped with your Linux distribution.
+
+
+Solaris Notes
+-------------
+
+If you are running Solaris 2.5, and you get this error when you run the
+DB_File test harness:
+
+ libc internal error: _rmutex_unlock: rmutex not held.
+
+you probably need to install a Sun patch. It has been reported that
+Sun patch 103187-25 (or later revisions) fixes this problem.
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+
+HP-UX Notes
+-----------
+
+Some people running HP-UX 10 have reported getting an error like this
+when building DB_File with the native HP-UX compiler.
+
+ ld: (Warning) At least one PA 2.0 object file (DB_File.o) was detected.
+ The linked output may not run on a PA 1.x system.
+ ld: Invalid loader fixup for symbol "$000000A5".
+
+If this is the case for you, Berkeley DB needs to be recompiled with
+the +z or +Z option and the resulting library placed in a .sl file. The
+following steps should do the trick:
+
+ 1: Configure the Berkeley DB distribution with the +z or +Z C compiler
+ flag:
+
+ env "CFLAGS=+z" ../dist/configure ...
+
+ 2: Edit the Berkeley DB Makefile and change:
+
+ "libdb= libdb.a" to "libdb= libdb.sl".
+
+
+ 3: Build and install the Berkeley DB distribution as usual.
+
+
+IRIX NOTES
+----------
+
+If you are running IRIX, and want to use Berkeley DB version 1, you can
+get it from http://reality.sgi.com/ariel. It has the patches necessary
+to compile properly on IRIX 5.3.
+
+
+FEEDBACK
+========
+
+How to report a problem with DB_File.
+
+To help me help you, I need the following information:
+
+ 1. The version of Perl and the operating system name and version you
+ are running. The *complete* output from running "perl -V" will
+ tell me all I need to know. Don't edit the output in any way. Note,
+ I want you to run "perl -V" and NOT "perl -v".
+
+ If your perl does not understand the "-V" option it is too old. DB_File
+ needs Perl version 5.004 or better.
+
+ 2. The version of DB_File you have.
+ If you have successfully installed DB_File, this one-liner will
+ tell you:
+
+ perl -e 'use DB_File; print "DB_File ver $DB_File::VERSION\n"'
+
+ If you haven't installed DB_File then search DB_File.pm for a line
+ like this:
+
+ $VERSION = "1.20" ;
+
+ 3. The version of Berkeley DB you are using.
+ If you are using a version older than 1.85, think about upgrading. One
+ point to note if you are considering upgrading Berkeley DB - the
+ file formats for 1.85, 1.86, 2.0, 3.0 & 3.1 are all different.
+
+ If you have successfully installed DB_File, this command will display
+ the version of Berkeley DB it was built with:
+
+ perl -e 'use DB_File; print "Berkeley DB ver $DB_File::db_ver\n"'
+
+ 4. If you are having problems building DB_File, send me a complete log
+ of what happened.
+
+ 5. Now the difficult one. If you think you have found a bug in DB_File
+ and you want me to fix it, you will *greatly* enhance the chances
+ of me being able to track it down by sending me a small
+ self-contained Perl script that illustrates the problem you are
+ encountering. Include a summary of what you think the problem is
+ and a log of what happens when you run the script, in case I can't
+ reproduce your problem on my system. If possible, don't have the
+ script dependent on an existing 20Meg database. If the script you
+ send me can create the database itself then that is preferred.
+
+ I realise that in some cases this is easier said than done, so if
+ you can only reproduce the problem in your existing script, then
+ you can post me that if you want. Just don't expect me to find your
+ problem in a hurry, or at all. :-)
+
+
+CHANGES
+-------
+
+See the Changes file.
+
+Paul Marquess <Paul.Marquess@btinternet.com>
diff --git a/bdb/perl.DB_File/config.in b/bdb/perl.DB_File/config.in
new file mode 100644
index 00000000000..5bda4a66762
--- /dev/null
+++ b/bdb/perl.DB_File/config.in
@@ -0,0 +1,99 @@
+# Filename: config.in
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+# last modified 9th Sept 1997
+# version 1.55
+
+# 1. Where is the file db.h?
+#
+# Change the path below to point to the directory where db.h is
+# installed on your system.
+
+#INCLUDE = /usr/local/include
+#INCLUDE = /usr/local/BerkeleyDB/include
+#INCLUDE = /usr/include
+INCLUDE = ./libraries/3.2.7
+
+# 2. Where is libdb?
+#
+# Change the path below to point to the directory where libdb is
+# installed on your system.
+
+#LIB = /usr/local/lib
+#LIB = /usr/local/BerkeleyDB/lib
+#LIB = /usr/lib
+LIB = ./libraries/3.2.7
+
+# 3. What version of Berkeley DB have you got?
+#
+# If you have version 2.0 or greater, you can skip this question.
+#
+# If you have Berkeley DB 1.78 or greater you shouldn't have to
+# change the definitions for PREFIX and HASH below.
+#
+# For older versions of Berkeley DB change both PREFIX and HASH to int.
+# Versions 1.71, 1.72 and 1.73 are known to need this change.
+#
+# If you don't know what version you have, have a look in the file db.h.
+#
+# Search for the string "DB_VERSION_MAJOR". If it is present, you
+# have Berkeley DB version 2 (or greater).
+#
+# If that didn't work, find the definition of the BTREEINFO typedef.
+# Check the return type from the prefix element. It should look like
+# this in an older copy of db.h:
+#
+# int (*prefix) __P((const DBT *, const DBT *));
+#
+# and like this in a more recent copy:
+#
+# size_t (*prefix) /* prefix function */
+# __P((const DBT *, const DBT *));
+#
+# Change the definition of PREFIX, below, to reflect the return type
+# of the prefix function in your db.h.
+#
+# Now find the definition of the HASHINFO typedef. Check the return
+# type of the hash element. Older versions look like this:
+#
+# int (*hash) __P((const void *, size_t));
+#
+# newer like this:
+#
+# u_int32_t /* hash function */
+# (*hash) __P((const void *, size_t));
+#
+# Change the definition of HASH, below, to reflect the return type of
+# the hash function in your db.h.
+#
+
+PREFIX = size_t
+HASH = u_int32_t
+
+# 4. Is the library called libdb?
+#
+# If you have copies of both 1.x and 2.x Berkeley DB installed on
+# your system it can sometimes be tricky to make sure you are using
+# the correct one. Renaming one (or creating a symbolic link) to
+# include the version number of the library can help.
+#
+# For example, if you have both Berkeley DB 2.3.12 and 1.85 on your
+# system and you want to use the Berkeley DB version 2 library you
+# could rename the version 2 library from libdb.a to libdb-2.3.12.a and
+# change the DBNAME line below to look like this:
+#
+# DBNAME = -ldb-2.3.12
+#
+# That will ensure you are linking the correct version of the DB
+# library.
+#
+# Note: If you are building this module with Win32, -llibdb will be
+# used by default.
+#
+# If you have changed the name of the library, uncomment the line
+# below (by removing the leading #) and edit the line to use the name
+# you have picked.
+
+#DBNAME = -ldb-2.4.10
+
+# end of file config.in
diff --git a/bdb/perl.DB_File/dbinfo b/bdb/perl.DB_File/dbinfo
new file mode 100644
index 00000000000..5a4df15907e
--- /dev/null
+++ b/bdb/perl.DB_File/dbinfo
@@ -0,0 +1,109 @@
+#!/usr/local/bin/perl
+
+# Name: dbinfo -- identify Berkeley DB version used to create
+# a database file
+#
+# Author: Paul Marquess <Paul.Marquess@btinternet.com>
+# Version: 1.03
+# Date 17th September 2000
+#
+# Copyright (c) 1998-2000 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+# Todo: Print more stats on a db file, e.g. no of records
+# add log/txn/lock files
+
+use strict ;
+
+my %Data =
+ (
+ 0x053162 => {
+ Type => "Btree",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "Unknown (older than 1.71)",
+ 3 => "1.71 -> 1.85, 1.86",
+ 4 => "Unknown",
+ 5 => "2.0.0 -> 2.3.0",
+ 6 => "2.3.1 -> 2.7.7",
+ 7 => "3.0.x",
+ 8 => "3.1.x or greater",
+ }
+ },
+ 0x061561 => {
+ Type => "Hash",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "1.71 -> 1.85",
+ 3 => "1.86",
+ 4 => "2.0.0 -> 2.1.0",
+ 5 => "2.2.6 -> 2.7.7",
+ 6 => "3.0.x",
+ 7 => "3.1.x or greater",
+ }
+ },
+ 0x042253 => {
+ Type => "Queue",
+ Versions =>
+ {
+ 1 => "3.0.x",
+ 2 => "3.1.x",
+ 3 => "3.2.x or greater",
+ }
+ },
+ ) ;
+
+die "Usage: dbinfo file\n" unless @ARGV == 1 ;
+
+print "testing file $ARGV[0]...\n\n" ;
+open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
+
+my $buff ;
+read F, $buff, 20 ;
+
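+# The 20-byte header is unpacked both big-endian (N) and little-endian (V).
+# DB 1.x files keep the magic number in the first 32-bit word; DB 2.x and
+# later keep it in the fourth word of the metadata page.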
+my (@info) = unpack("NNNNN", $buff) ;
+my (@info1) = unpack("VVVVV", $buff) ;
+my ($magic, $version, $endian) ;
+
+if ($Data{$info[0]}) # first try DB 1.x format
+{
+ $magic = $info[0] ;
+ $version = $info[1] ;
+ $endian = "Unknown" ;
+}
+elsif ($Data{$info[3]}) # next DB 2.x big endian
+{
+ $magic = $info[3] ;
+ $version = $info[4] ;
+ $endian = "Big Endian" ;
+}
+elsif ($Data{$info1[3]}) # next DB 2.x little endian
+{
+ $magic = $info1[3] ;
+ $version = $info1[4] ;
+ $endian = "Little Endian" ;
+}
+else
+ { die "not a Berkeley DB database file.\n" }
+
+my $type = $Data{$magic} ;
+$magic = sprintf "%06X", $magic ;
+
+my $ver_string = "Unknown" ;
+$ver_string = $type->{Versions}{$version}
+ if defined $type->{Versions}{$version} ;
+
+print <<EOM ;
+File Type: Berkeley DB $type->{Type} file.
+File Version ID: $version
+Built with Berkeley DB: $ver_string
+Byte Order: $endian
+Magic: $magic
+EOM
+
+close F ;
+
+exit ;
diff --git a/bdb/perl.DB_File/hints/dynixptx.pl b/bdb/perl.DB_File/hints/dynixptx.pl
new file mode 100644
index 00000000000..bb5ffa56e6b
--- /dev/null
+++ b/bdb/perl.DB_File/hints/dynixptx.pl
@@ -0,0 +1,3 @@
+# Need to add an extra '-lc' to the end to work around a DYNIX/ptx bug
+
+$self->{LIBS} = ['-lm -lc'];
diff --git a/bdb/perl.DB_File/hints/sco.pl b/bdb/perl.DB_File/hints/sco.pl
new file mode 100644
index 00000000000..ff604409496
--- /dev/null
+++ b/bdb/perl.DB_File/hints/sco.pl
@@ -0,0 +1,2 @@
+# osr5 needs to explicitly link against libc to pull in some static symbols
+$self->{LIBS} = ['-ldb -lc'] if $Config{'osvers'} =~ '3\.2v5\.0\..' ;
diff --git a/bdb/perl.DB_File/patches/5.004 b/bdb/perl.DB_File/patches/5.004
new file mode 100644
index 00000000000..143ec95afbc
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.004
@@ -0,0 +1,44 @@
+diff perl5.004.orig/Configure perl5.004/Configure
+190a191
+> perllibs=''
+9904a9906,9913
+> : Remove libraries needed only for extensions
+> : The appropriate ext/Foo/Makefile.PL will add them back in, if
+> : necessary.
+> set X `echo " $libs " |
+> sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
+> shift
+> perllibs="$*"
+>
+10372a10382
+> perllibs='$perllibs'
+diff perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
+122c122
+< libs = $libs $cryptlib
+---
+> libs = $perllibs $cryptlib
+Common subdirectories: perl5.004.orig/Porting and perl5.004/Porting
+Common subdirectories: perl5.004.orig/cygwin32 and perl5.004/cygwin32
+Common subdirectories: perl5.004.orig/eg and perl5.004/eg
+Common subdirectories: perl5.004.orig/emacs and perl5.004/emacs
+Common subdirectories: perl5.004.orig/ext and perl5.004/ext
+Common subdirectories: perl5.004.orig/h2pl and perl5.004/h2pl
+Common subdirectories: perl5.004.orig/hints and perl5.004/hints
+Common subdirectories: perl5.004.orig/lib and perl5.004/lib
+diff perl5.004.orig/myconfig perl5.004/myconfig
+38c38
+< libs=$libs
+---
+> libs=$perllibs
+Common subdirectories: perl5.004.orig/os2 and perl5.004/os2
+diff perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
+40a41
+> ,"NODB-1.0 - remove -ldb from core perl binary."
+Common subdirectories: perl5.004.orig/plan9 and perl5.004/plan9
+Common subdirectories: perl5.004.orig/pod and perl5.004/pod
+Common subdirectories: perl5.004.orig/qnx and perl5.004/qnx
+Common subdirectories: perl5.004.orig/t and perl5.004/t
+Common subdirectories: perl5.004.orig/utils and perl5.004/utils
+Common subdirectories: perl5.004.orig/vms and perl5.004/vms
+Common subdirectories: perl5.004.orig/win32 and perl5.004/win32
+Common subdirectories: perl5.004.orig/x2p and perl5.004/x2p
diff --git a/bdb/perl.DB_File/patches/5.004_01 b/bdb/perl.DB_File/patches/5.004_01
new file mode 100644
index 00000000000..1b05eb4e02b
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.004_01
@@ -0,0 +1,217 @@
+diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
+*** perl5.004_01.orig/Configure Wed Jun 11 00:28:03 1997
+--- perl5.004_01/Configure Sun Nov 12 22:12:35 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9907,9912 ****
+--- 9908,9921 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10375,10380 ****
+--- 10384,10390 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
+*** perl5.004_01.orig/Makefile.SH Thu Jun 12 23:27:56 1997
+--- perl5.004_01/Makefile.SH Sun Nov 12 22:12:35 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
+*** perl5.004_01.orig/lib/ExtUtils/Embed.pm Wed Apr 2 22:12:04 1997
+--- perl5.004_01/lib/ExtUtils/Embed.pm Sun Nov 12 22:12:35 2000
+***************
+*** 170,176 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 170,176 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
+*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm Sat Jun 7 01:19:44 1997
+--- perl5.004_01/lib/ExtUtils/Liblist.pm Sun Nov 12 22:13:27 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm Thu Jun 12 22:06:18 1997
+--- perl5.004_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:12:35 2000
+***************
+*** 2137,2143 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2137,2143 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
+*** perl5.004_01.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_01/myconfig Sun Nov 12 22:12:35 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
+*** perl5.004_01.orig/patchlevel.h Wed Jun 11 03:06:10 1997
+--- perl5.004_01/patchlevel.h Sun Nov 12 22:12:35 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.DB_File/patches/5.004_02 b/bdb/perl.DB_File/patches/5.004_02
new file mode 100644
index 00000000000..238f8737941
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.004_02
@@ -0,0 +1,217 @@
+diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
+*** perl5.004_02.orig/Configure Thu Aug 7 15:08:44 1997
+--- perl5.004_02/Configure Sun Nov 12 22:06:24 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
+*** perl5.004_02.orig/Makefile.SH Thu Aug 7 13:10:53 1997
+--- perl5.004_02/Makefile.SH Sun Nov 12 22:06:24 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
+*** perl5.004_02.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_02/lib/ExtUtils/Embed.pm Sun Nov 12 22:06:24 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
+*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_02/lib/ExtUtils/Liblist.pm Sun Nov 12 22:06:24 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm Tue Aug 5 14:28:08 1997
+--- perl5.004_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:06:25 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
+*** perl5.004_02.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_02/myconfig Sun Nov 12 22:06:25 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
+*** perl5.004_02.orig/patchlevel.h Fri Aug 1 15:07:34 1997
+--- perl5.004_02/patchlevel.h Sun Nov 12 22:06:25 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.DB_File/patches/5.004_03 b/bdb/perl.DB_File/patches/5.004_03
new file mode 100644
index 00000000000..06331eac922
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.004_03
@@ -0,0 +1,223 @@
+diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
+*** perl5.004_03.orig/Configure Wed Aug 13 16:09:46 1997
+--- perl5.004_03/Configure Sun Nov 12 21:56:18 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.004_03: Configure.orig
+diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
+*** perl5.004_03.orig/Makefile.SH Mon Aug 18 19:24:29 1997
+--- perl5.004_03/Makefile.SH Sun Nov 12 21:56:18 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.004_03: Makefile.SH.orig
+diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
+*** perl5.004_03.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_03/lib/ExtUtils/Embed.pm Sun Nov 12 21:56:18 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
+*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_03/lib/ExtUtils/Liblist.pm Sun Nov 12 21:57:17 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
+diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm Mon Aug 18 19:16:12 1997
+--- perl5.004_03/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:56:19 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
+*** perl5.004_03.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_03/myconfig Sun Nov 12 21:56:19 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
+*** perl5.004_03.orig/patchlevel.h Wed Aug 13 11:42:01 1997
+--- perl5.004_03/patchlevel.h Sun Nov 12 21:56:19 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
+Only in perl5.004_03: patchlevel.h.orig
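The point of the NODB-1.0 patch above is that the dbm-family libraries are kept out of the core perl link line; the DB_File extension is then expected to ask for -ldb on its own. A minimal sketch of that request, as a hypothetical Makefile.PL fragment (only the module and library names come from the patch context; the rest is standard ExtUtils::MakeMaker usage):

    # Hypothetical ext/DB_File/Makefile.PL fragment: the extension, not the
    # core perl binary, requests -ldb.  ExtUtils::Liblist resolves the entry
    # against $Config{libpth} plus any -L directories supplied here.
    use ExtUtils::MakeMaker;

    WriteMakefile(
        NAME => 'DB_File',
        LIBS => ['-ldb'],
    );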
diff --git a/bdb/perl.DB_File/patches/5.004_04 b/bdb/perl.DB_File/patches/5.004_04
new file mode 100644
index 00000000000..a227dc700d9
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.004_04
@@ -0,0 +1,209 @@
+diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
+*** perl5.004_04.orig/Configure Fri Oct 3 18:57:39 1997
+--- perl5.004_04/Configure Sun Nov 12 21:50:51 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9910,9915 ****
+--- 9911,9924 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10378,10383 ****
+--- 10387,10393 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
+*** perl5.004_04.orig/Makefile.SH Wed Oct 15 10:33:16 1997
+--- perl5.004_04/Makefile.SH Sun Nov 12 21:50:51 2000
+***************
+*** 129,135 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 129,135 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
+*** perl5.004_04.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_04/lib/ExtUtils/Embed.pm Sun Nov 12 21:50:51 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
+*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm Tue Sep 9 17:41:32 1997
+--- perl5.004_04/lib/ExtUtils/Liblist.pm Sun Nov 12 21:51:33 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 189,195 ****
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 189,195 ----
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 539,545 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 539,545 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm Wed Oct 8 14:13:51 1997
+--- perl5.004_04/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:50:51 2000
+***************
+*** 2229,2235 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2229,2235 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
+*** perl5.004_04.orig/myconfig Mon Oct 6 18:26:49 1997
+--- perl5.004_04/myconfig Sun Nov 12 21:50:51 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
+*** perl5.004_04.orig/patchlevel.h Wed Oct 15 10:55:19 1997
+--- perl5.004_04/patchlevel.h Sun Nov 12 21:50:51 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
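The Configure hunk above strips the libraries needed only by extensions out of $libs and records the remainder as $perllibs. The same filtering, sketched in Perl rather than sed (the starting value of $libs is illustrative; the four library names are the ones the patch removes):

    # Equivalent of the sed pipeline in the Configure hunk: keep everything
    # in $libs except the dbm-family entries, and use the result to link
    # the core perl binary.
    my $libs = '-lgdbm -ldb -ldl -lm -lc';                    # example value only
    my %drop = map { $_ => 1 } qw(-lndbm -lgdbm -ldbm -ldb);  # the four names the patch strips
    my $perllibs = join ' ', grep { !$drop{$_} } split ' ', $libs;
    print "perllibs=$perllibs\n";                             # perllibs=-ldl -lm -lc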
diff --git a/bdb/perl.DB_File/patches/5.004_05 b/bdb/perl.DB_File/patches/5.004_05
new file mode 100644
index 00000000000..51c8bf35009
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.004_05
@@ -0,0 +1,209 @@
+diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
+*** perl5.004_05.orig/Configure Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Configure Sun Nov 12 21:36:25 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 10164,10169 ****
+--- 10165,10178 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10648,10653 ****
+--- 10657,10663 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
+*** perl5.004_05.orig/Makefile.SH Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Makefile.SH Sun Nov 12 21:36:25 2000
+***************
+*** 151,157 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 151,157 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
+*** perl5.004_05.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_05/lib/ExtUtils/Embed.pm Sun Nov 12 21:36:25 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
+*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/Liblist.pm Sun Nov 12 21:45:31 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 590,596 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 590,596 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:36:25 2000
+***************
+*** 2246,2252 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2246,2252 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
+*** perl5.004_05.orig/myconfig Thu Jan 6 22:05:55 2000
+--- perl5.004_05/myconfig Sun Nov 12 21:43:54 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
+*** perl5.004_05.orig/patchlevel.h Thu Jan 6 22:05:48 2000
+--- perl5.004_05/patchlevel.h Sun Nov 12 21:36:25 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.DB_File/patches/5.005 b/bdb/perl.DB_File/patches/5.005
new file mode 100644
index 00000000000..effee3e8275
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.005
@@ -0,0 +1,209 @@
+diff -rc perl5.005.orig/Configure perl5.005/Configure
+*** perl5.005.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005/Configure Sun Nov 12 21:30:40 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
+*** perl5.005.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005/Makefile.SH Sun Nov 12 21:30:40 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
+*** perl5.005.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005/lib/ExtUtils/Embed.pm Sun Nov 12 21:30:40 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
+*** perl5.005.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005/lib/ExtUtils/Liblist.pm Sun Nov 12 21:30:40 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
+*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:30:41 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005.orig/myconfig perl5.005/myconfig
+*** perl5.005.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005/myconfig Sun Nov 12 21:30:41 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
+*** perl5.005.orig/patchlevel.h Wed Jul 22 19:22:01 1998
+--- perl5.005/patchlevel.h Sun Nov 12 21:30:41 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.DB_File/patches/5.005_01 b/bdb/perl.DB_File/patches/5.005_01
new file mode 100644
index 00000000000..2a05dd545f6
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.005_01
@@ -0,0 +1,209 @@
+diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
+*** perl5.005_01.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005_01/Configure Sun Nov 12 20:55:58 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
+*** perl5.005_01.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_01/Makefile.SH Sun Nov 12 20:55:58 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
+*** perl5.005_01.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_01/lib/ExtUtils/Embed.pm Sun Nov 12 20:55:58 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
+*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005_01/lib/ExtUtils/Liblist.pm Sun Nov 12 20:55:58 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:55:58 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
+*** perl5.005_01.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_01/myconfig Sun Nov 12 20:55:58 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
+*** perl5.005_01.orig/patchlevel.h Mon Jan 3 11:07:45 2000
+--- perl5.005_01/patchlevel.h Sun Nov 12 20:55:58 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.DB_File/patches/5.005_02 b/bdb/perl.DB_File/patches/5.005_02
new file mode 100644
index 00000000000..5dd57ddc03f
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.005_02
@@ -0,0 +1,264 @@
+diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
+*** perl5.005_02.orig/Configure Mon Jan 3 11:12:20 2000
+--- perl5.005_02/Configure Sun Nov 12 20:50:51 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11334,11339 ****
+--- 11335,11348 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11859,11864 ****
+--- 11868,11874 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.005_02: Configure.orig
+diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
+*** perl5.005_02.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_02/Makefile.SH Sun Nov 12 20:50:51 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.005_02: Makefile.SH.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
+*** perl5.005_02.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_02/lib/ExtUtils/Embed.pm Sun Nov 12 20:50:51 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
+*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm Mon Jan 3 11:12:21 2000
+--- perl5.005_02/lib/ExtUtils/Liblist.pm Sun Nov 12 20:50:51 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 333,339 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 333,339 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 623,629 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 623,629 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+***************
+*** 666,672 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 666,672 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 676,682 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 676,682 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:50:51 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
+*** perl5.005_02.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_02/myconfig Sun Nov 12 20:50:51 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
+*** perl5.005_02.orig/patchlevel.h Mon Jan 3 11:12:19 2000
+--- perl5.005_02/patchlevel.h Sun Nov 12 20:50:51 2000
+***************
+*** 40,45 ****
+--- 40,46 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
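The Liblist.pm documentation changed above describes the :nodefault, :nosearch and :search entries, which after the patch are phrased in terms of $Config{perllibs}. A hypothetical Win32-style Makefile.PL fragment showing how such entries look in practice (the module name, path and libraries are placeholders):

    # Hypothetical LIBS value using the flags documented above: ':nosearch'
    # passes the entries after it through without searching for them,
    # ':search' turns searching back on, and a leading ':nodefault' would
    # stop the default $Config{perllibs} list from being appended at all.
    use ExtUtils::MakeMaker;

    WriteMakefile(
        NAME => 'Foo',                                        # placeholder
        LIBS => [':nosearch -Lc:\db\lib libdb.lib :search -lm'],
    );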
diff --git a/bdb/perl.DB_File/patches/5.005_03 b/bdb/perl.DB_File/patches/5.005_03
new file mode 100644
index 00000000000..115f9f5b909
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.005_03
@@ -0,0 +1,250 @@
+diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
+*** perl5.005_03.orig/Configure Sun Mar 28 17:12:57 1999
+--- perl5.005_03/Configure Sun Sep 17 22:19:16 2000
+***************
+*** 208,213 ****
+--- 208,214 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11642,11647 ****
+--- 11643,11656 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 12183,12188 ****
+--- 12192,12198 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
+*** perl5.005_03.orig/Makefile.SH Thu Mar 4 02:35:25 1999
+--- perl5.005_03/Makefile.SH Sun Sep 17 22:21:01 2000
+***************
+*** 58,67 ****
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+--- 58,67 ----
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+***************
+*** 155,161 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 155,161 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
+*** perl5.005_03.orig/lib/ExtUtils/Embed.pm Wed Jan 6 02:17:50 1999
+--- perl5.005_03/lib/ExtUtils/Embed.pm Sun Sep 17 22:19:16 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
+*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm Wed Jan 6 02:17:47 1999
+--- perl5.005_03/lib/ExtUtils/Liblist.pm Sun Sep 17 22:19:16 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 336,342 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 336,342 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 626,632 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 626,632 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 670,676 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 670,676 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 680,686 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 680,686 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm Fri Mar 5 00:34:20 1999
+--- perl5.005_03/lib/ExtUtils/MM_Unix.pm Sun Sep 17 22:19:16 2000
+***************
+*** 2284,2290 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2284,2290 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
diff --git a/bdb/perl.DB_File/patches/5.6.0 b/bdb/perl.DB_File/patches/5.6.0
new file mode 100644
index 00000000000..1f9b3b620de
--- /dev/null
+++ b/bdb/perl.DB_File/patches/5.6.0
@@ -0,0 +1,294 @@
+diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
+*** perl-5.6.0.orig/Configure Wed Mar 22 20:36:37 2000
+--- perl-5.6.0/Configure Sun Sep 17 23:40:15 2000
+***************
+*** 217,222 ****
+--- 217,223 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 14971,14976 ****
+--- 14972,14985 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 15640,15645 ****
+--- 15649,15655 ----
+ path_sep='$path_sep'
+ perl5='$perl5'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
+*** perl-5.6.0.orig/Makefile.SH Sat Mar 11 16:05:24 2000
+--- perl-5.6.0/Makefile.SH Sun Sep 17 23:40:15 2000
+***************
+*** 70,76 ****
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+--- 70,76 ----
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+***************
+*** 176,182 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 176,182 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+***************
+*** 333,339 ****
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $libs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+--- 333,339 ----
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $perllibs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Embed.pm perl-5.6.0/lib/ExtUtils/Embed.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Embed.pm Sun Jan 23 12:08:32 2000
+--- perl-5.6.0/lib/ExtUtils/Embed.pm Sun Sep 17 23:40:15 2000
+***************
+*** 193,199 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 193,199 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Liblist.pm perl-5.6.0/lib/ExtUtils/Liblist.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Liblist.pm Wed Mar 22 16:16:31 2000
+--- perl-5.6.0/lib/ExtUtils/Liblist.pm Sun Sep 17 23:40:15 2000
+***************
+*** 17,34 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 17,34 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 198,204 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 198,204 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 338,344 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 338,344 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 624,630 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 624,630 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 668,674 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 668,674 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 678,684 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 678,684 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
+*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm Thu Mar 2 17:52:52 2000
+--- perl-5.6.0/lib/ExtUtils/MM_Unix.pm Sun Sep 17 23:40:15 2000
+***************
+*** 2450,2456 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2450,2456 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
+*** perl-5.6.0.orig/myconfig.SH Sat Feb 26 06:34:49 2000
+--- perl-5.6.0/myconfig.SH Sun Sep 17 23:41:17 2000
+***************
+*** 48,54 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 48,54 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
+*** perl-5.6.0.orig/patchlevel.h Wed Mar 22 20:23:11 2000
+--- perl-5.6.0/patchlevel.h Sun Sep 17 23:40:15 2000
+***************
+*** 70,75 ****
+--- 70,76 ----
+ #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/bdb/perl.DB_File/t/db-btree.t b/bdb/perl.DB_File/t/db-btree.t
new file mode 100644
index 00000000000..377cfe45c9b
--- /dev/null
+++ b/bdb/perl.DB_File/t/db-btree.t
@@ -0,0 +1,1306 @@
+#!./perl -w
+
+use warnings;
+use strict;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..157\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+
+print "1..157\n";
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub lexical
+{
+ my(@a) = unpack ("C*", $a) ;
+ my(@b) = unpack ("C*", $b) ;
+
+ my $len = (@a > @b ? @b : @a) ;
+ my $i = 0 ;
+
+ foreach $i ( 0 .. $len -1) {
+ return $a[$i] - $b[$i] if $a[$i] != $b[$i] ;
+ }
+
+ return @a - @b ;
+}
+
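+# The Redirect class below temporarily captures STDOUT: new() selects a
+# filehandle opened on the given file, and DESTROY closes it and restores the
+# previous default handle.  The POD examples later in this file print their
+# output inside a Redirect scope so it can be read back with docat/docat_del
+# and compared against the expected text.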
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ #local $/ = undef unless wantarray ;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my @result = <CAT>;
+ close(CAT);
+ wantarray ? @result : join("", @result) ;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ #local $/ = undef unless wantarray ;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my @result = <CAT>;
+ close(CAT);
+ unlink $file ;
+ wantarray ? @result : join("", @result) ;
+}
+
+
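+# $db185mode is true when a native DB 1.x library (1.85/1.86) is in use; it
+# is used further down to select the expected output of the duplicate-key
+# POD examples, which differs between DB 1.x and later releases.
+# $null_keys_allowed records that zero-length keys are rejected by Berkeley
+# DB 2.4.10 up to (but not including) 3.1, as noted before the null-key test.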
+my $db185mode = ($DB_File::db_version == 1 && ! $DB_File::db_185_compat) ;
+my $null_keys_allowed = ($DB_File::db_ver < 2.004010
+ || $DB_File::db_ver >= 3.1 );
+
+my $Dfile = "dbbtree.tmp";
+unlink $Dfile;
+
+umask(0);
+
+# Check the interface to BTREEINFO
+
+my $dbh = new DB_File::BTREEINFO ;
+ok(1, ! defined $dbh->{flags}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{lorder}) ;
+ok(5, ! defined $dbh->{minkeypage}) ;
+ok(6, ! defined $dbh->{maxkeypage}) ;
+ok(7, ! defined $dbh->{compare}) ;
+ok(8, ! defined $dbh->{prefix}) ;
+
+$dbh->{flags} = 3000 ;
+ok(9, $dbh->{flags} == 3000) ;
+
+$dbh->{cachesize} = 9000 ;
+ok(10, $dbh->{cachesize} == 9000);
+
+$dbh->{psize} = 400 ;
+ok(11, $dbh->{psize} == 400) ;
+
+$dbh->{lorder} = 65 ;
+ok(12, $dbh->{lorder} == 65) ;
+
+$dbh->{minkeypage} = 123 ;
+ok(13, $dbh->{minkeypage} == 123) ;
+
+$dbh->{maxkeypage} = 1234 ;
+ok(14, $dbh->{maxkeypage} == 1234 );
+
+$dbh->{compare} = 1234 ;
+ok(15, $dbh->{compare} == 1234) ;
+
+$dbh->{prefix} = 1234 ;
+ok(16, $dbh->{prefix} == 1234 );
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(17, $@ =~ /^DB_File::BTREEINFO::STORE - Unknown element 'fred' at/ ) ;
+eval 'my $q = $dbh->{fred}' ;
+ok(18, $@ =~ /^DB_File::BTREEINFO::FETCH - Unknown element 'fred' at/ ) ;
+
+# Now check the interface to BTREE
+
+my ($X, %h) ;
+ok(19, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE )) ;
+
+my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(20, ($mode & 0777) == ($^O eq 'os2' ? 0666 : 0640) || $^O eq 'amigaos' || $^O eq 'MSWin32');
+
+my ($key, $value, $i);
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(21, !$i ) ;
+
+$h{'goner1'} = 'snork';
+
+$h{'abc'} = 'ABC';
+ok(22, $h{'abc'} eq 'ABC' );
+ok(23, ! defined $h{'jimmy'} ) ;
+ok(24, ! exists $h{'jimmy'} ) ;
+ok(25, defined $h{'abc'} ) ;
+
+$h{'def'} = 'DEF';
+$h{'jkl','mno'} = "JKL\034MNO";
+$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
+$h{'a'} = 'A';
+
+#$h{'b'} = 'B';
+$X->STORE('b', 'B') ;
+
+$h{'c'} = 'C';
+
+#$h{'d'} = 'D';
+$X->put('d', 'D') ;
+
+$h{'e'} = 'E';
+$h{'f'} = 'F';
+$h{'g'} = 'X';
+$h{'h'} = 'H';
+$h{'i'} = 'I';
+
+$h{'goner2'} = 'snork';
+delete $h{'goner2'};
+
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(%h);
+
+# tie to the same file again
+ok(26, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE)) ;
+
+# Modify an entry from the previous tie
+$h{'g'} = 'G';
+
+$h{'j'} = 'J';
+$h{'k'} = 'K';
+$h{'l'} = 'L';
+$h{'m'} = 'M';
+$h{'n'} = 'N';
+$h{'o'} = 'O';
+$h{'p'} = 'P';
+$h{'q'} = 'Q';
+$h{'r'} = 'R';
+$h{'s'} = 'S';
+$h{'t'} = 'T';
+$h{'u'} = 'U';
+$h{'v'} = 'V';
+$h{'w'} = 'W';
+$h{'x'} = 'X';
+$h{'y'} = 'Y';
+$h{'z'} = 'Z';
+
+$h{'goner3'} = 'snork';
+
+delete $h{'goner1'};
+$X->DELETE('goner3');
+
+my @keys = keys(%h);
+my @values = values(%h);
+
+ok(27, $#keys == 29 && $#values == 29) ;
+
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
+ $key =~ y/a-z/A-Z/;
+ $i++ if $key eq $value;
+ }
+}
+
+ok(28, $i == 30) ;
+
+@keys = ('blurfl', keys(%h), 'dyick');
+ok(29, $#keys == 31) ;
+
+# Check that the keys can be retrieved in order
+my @b = keys %h ;
+my @c = sort lexical @b ;
+ok(30, ArrayCompare(\@b, \@c)) ;
+
+$h{'foo'} = '';
+ok(31, $h{'foo'} eq '' ) ;
+
+# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
+# This feature was reenabled in version 3.1 of Berkeley DB.
+my $result = 0 ;
+if ($null_keys_allowed) {
+ $h{''} = 'bar';
+ $result = ( $h{''} eq 'bar' );
+}
+else
+ { $result = 1 }
+ok(32, $result) ;
+
+# check cache overflow and numeric keys and contents
+my $ok = 1;
+for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
+for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
+ok(33, $ok);
+
+($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(34, $size > 0 );
+
+@h{0..200} = 200..400;
+my @foo = @h{0..200};
+ok(35, join(':',200..400) eq join(':',@foo) );
+
+# Now check all the non-tie specific stuff
+
+
+# Check R_NOOVERWRITE flag will make put fail when attempting to overwrite
+# an existing record.
+
+my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
+ok(36, $status == 1 );
+
+# check that the value of the key 'x' has not been changed by the
+# previous test
+ok(37, $h{'x'} eq 'X' );
+
+# standard put
+$status = $X->put('key', 'value') ;
+ok(38, $status == 0 );
+
+# Check that the previous put can be retrieved
+$value = 0 ;
+$status = $X->get('key', $value) ;
+ok(39, $status == 0 );
+ok(40, $value eq 'value' );
+
+# Attempting to delete an existing key should work
+
+$status = $X->del('q') ;
+ok(41, $status == 0 );
+if ($null_keys_allowed) {
+ $status = $X->del('') ;
+} else {
+ $status = 0 ;
+}
+ok(42, $status == 0 );
+
+# Make sure that the deleted key cannot be retrieved
+ok(43, ! defined $h{'q'}) ;
+ok(44, ! defined $h{''}) ;
+
+undef $X ;
+untie %h ;
+
+ok(45, $X = tie(%h, 'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE ));
+
+# Attempting to delete a non-existent key should fail
+
+$status = $X->del('joe') ;
+ok(46, $status == 1 );
+
+# Check the get interface
+
+# First a non-existing key
+$status = $X->get('aaaa', $value) ;
+ok(47, $status == 1 );
+
+# Next an existing key
+$status = $X->get('a', $value) ;
+ok(48, $status == 0 );
+ok(49, $value eq 'A' );
+
+# seq
+# ###
+
+# use seq to find an approximate match
+$key = 'ke' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(50, $status == 0 );
+ok(51, $key eq 'key' );
+ok(52, $value eq 'value' );
+
+# seq when the key does not match
+$key = 'zzz' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(53, $status == 1 );
+
+
+# use seq to set the cursor, then delete the record @ the cursor.
+
+$key = 'x' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(54, $status == 0 );
+ok(55, $key eq 'x' );
+ok(56, $value eq 'X' );
+$status = $X->del(0, R_CURSOR) ;
+ok(57, $status == 0 );
+$status = $X->get('x', $value) ;
+ok(58, $status == 1 );
+
+# ditto, but use put to replace the key/value pair.
+$key = 'y' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(59, $status == 0 );
+ok(60, $key eq 'y' );
+ok(61, $value eq 'Y' );
+
+$key = "replace key" ;
+$value = "replace value" ;
+$status = $X->put($key, $value, R_CURSOR) ;
+ok(62, $status == 0 );
+ok(63, $key eq 'replace key' );
+ok(64, $value eq 'replace value' );
+$status = $X->get('y', $value) ;
+ok(65, 1) ; # hard-wired to always pass; the previous test ($status == 1)
+            # only worked because of a bug in DB 1.85/1.86
+
+# use seq to walk forwards through a file
+
+$status = $X->seq($key, $value, R_FIRST) ;
+ok(66, $status == 0 );
+my $previous = $key ;
+
+$ok = 1 ;
+while (($status = $X->seq($key, $value, R_NEXT)) == 0)
+{
+ ($ok = 0), last if ($previous cmp $key) == 1 ;
+}
+
+ok(67, $status == 1 );
+ok(68, $ok == 1 );
+
+# use seq to walk backwards through a file
+$status = $X->seq($key, $value, R_LAST) ;
+ok(69, $status == 0 );
+$previous = $key ;
+
+$ok = 1 ;
+while (($status = $X->seq($key, $value, R_PREV)) == 0)
+{
+ ($ok = 0), last if ($previous cmp $key) == -1 ;
+ #print "key = [$key] value = [$value]\n" ;
+}
+
+ok(70, $status == 1 );
+ok(71, $ok == 1 );
+
+
+# check seq FIRST/LAST
+
+# sync
+# ####
+
+$status = $X->sync ;
+ok(72, $status == 0 );
+
+
+# fd
+# ##
+
+$status = $X->fd ;
+ok(73, $status != 0 );
+
+
+undef $X ;
+untie %h ;
+
+unlink $Dfile;
+
+# Now try an in memory file
+my $Y;
+ok(74, $Y = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_BTREE ));
+
+# fd with an in memory file should return failure
+$status = $Y->fd ;
+ok(75, $status == -1 );
+
+
+undef $Y ;
+untie %h ;
+
+# Duplicate keys
+my $bt = new DB_File::BTREEINFO ;
+$bt->{flags} = R_DUP ;
+my ($YY, %hh);
+ok(76, $YY = tie(%hh, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $bt )) ;
+
+$hh{'Wall'} = 'Larry' ;
+$hh{'Wall'} = 'Stone' ; # Note the duplicate key
+$hh{'Wall'} = 'Brick' ; # Note the duplicate key
+$hh{'Wall'} = 'Brick' ; # Note the duplicate key and value
+$hh{'Smith'} = 'John' ;
+$hh{'mouse'} = 'mickey' ;
+
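+# get_dup can be called three ways: in scalar context it returns the number
+# of records stored under the key, in list context it returns the values
+# themselves, and with a true second argument it returns a hash mapping each
+# distinct value to the number of times it occurs.
+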
+# first work in scalar context
+ok(77, scalar $YY->get_dup('Unknown') == 0 );
+ok(78, scalar $YY->get_dup('Smith') == 1 );
+ok(79, scalar $YY->get_dup('Wall') == 4 );
+
+# now in list context
+my @unknown = $YY->get_dup('Unknown') ;
+ok(80, "@unknown" eq "" );
+
+my @smith = $YY->get_dup('Smith') ;
+ok(81, "@smith" eq "John" );
+
+{
+my @wall = $YY->get_dup('Wall') ;
+my %wall ;
+@wall{@wall} = @wall ;
+ok(82, (@wall == 4 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'}) );
+}
+
+# hash
+my %unknown = $YY->get_dup('Unknown', 1) ;
+ok(83, keys %unknown == 0 );
+
+my %smith = $YY->get_dup('Smith', 1) ;
+ok(84, keys %smith == 1 && $smith{'John'}) ;
+
+my %wall = $YY->get_dup('Wall', 1) ;
+ok(85, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 2);
+
+undef $YY ;
+untie %hh ;
+unlink $Dfile;
+
+
+# test multiple callbacks
+my $Dfile1 = "btree1" ;
+my $Dfile2 = "btree2" ;
+my $Dfile3 = "btree3" ;
+
+my $dbh1 = new DB_File::BTREEINFO ;
+$dbh1->{compare} = sub {
+ no warnings 'numeric' ;
+ $_[0] <=> $_[1] } ;
+
+my $dbh2 = new DB_File::BTREEINFO ;
+$dbh2->{compare} = sub { $_[0] cmp $_[1] } ;
+
+my $dbh3 = new DB_File::BTREEINFO ;
+$dbh3->{compare} = sub { length $_[0] <=> length $_[1] } ;
+
+
+my (%g, %k);
+tie(%h, 'DB_File',$Dfile1, O_RDWR|O_CREAT, 0640, $dbh1 ) ;
+tie(%g, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) ;
+tie(%k, 'DB_File',$Dfile3, O_RDWR|O_CREAT, 0640, $dbh3 ) ;
+
+my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
+my (@srt_1, @srt_2, @srt_3);
+{
+ no warnings 'numeric' ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+}
+@srt_2 = sort { $a cmp $b } @Keys ;
+@srt_3 = sort { length $a <=> length $b } @Keys ;
+
+foreach (@Keys) {
+ $h{$_} = 1 ;
+ $g{$_} = 1 ;
+ $k{$_} = 1 ;
+}
+
+sub ArrayCompare
+{
+ my($a, $b) = @_ ;
+
+ return 0 if @$a != @$b ;
+
+    foreach (0 .. @$a - 1)   # compare element by element
+ {
+ return 0 unless $$a[$_] eq $$b[$_] ;
+ }
+
+ 1 ;
+}
+
+ok(86, ArrayCompare (\@srt_1, [keys %h]) );
+ok(87, ArrayCompare (\@srt_2, [keys %g]) );
+ok(88, ArrayCompare (\@srt_3, [keys %k]) );
+
+untie %h ;
+untie %g ;
+untie %k ;
+unlink $Dfile1, $Dfile2, $Dfile3 ;
+
+# clear
+# #####
+
+ok(89, tie(%h, 'DB_File', $Dfile1, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+foreach (1 .. 10)
+ { $h{$_} = $_ * 100 }
+
+# check that there are 10 elements in the hash
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(90, $i == 10);
+
+# now clear the hash
+%h = () ;
+
+# check it is empty
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(91, $i == 0);
+
+untie %h ;
+unlink $Dfile1 ;
+
+{
+ # check that attempting to tie an array to a DB_BTREE will fail
+
+ my $filename = "xyz" ;
+ my @x ;
+ eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE ; } ;
+ ok(92, $@ =~ /^DB_File can only tie an associative array to a DB_BTREE database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(93, $@ eq "") ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB","dbbtree.tmp", O_RDWR|O_CREAT, 0640, $DB_BTREE );
+ ' ;
+
+ main::ok(94, $@ eq "") ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok(95, $@ eq "") ;
+ main::ok(96, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
+ main::ok(97, $@ eq "") ;
+ main::ok(98, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(99, $@ eq "" ) ;
+ main::ok(100, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok(101, $@ eq "") ;
+ main::ok(102, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(%h);
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
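+    # checkOutput reports which of the four filters fired for the preceding
+    # operation: each argument is compared against the value captured by the
+    # corresponding filter, with "" meaning that filter was not called.  It
+    # also confirms that $_ still holds "original", i.e. the filters did not
+    # leak a modified $_ back to the caller.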
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(103, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok(104, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(105, $h{"fred"} eq "joe");
+ # fk sk fv sv
+ ok(106, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(107, $db->FIRSTKEY() eq "fred") ;
+ # fk sk fv sv
+ ok(108, checkOutput( "fred", "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok(109, checkOutput( "", "fred", "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(110, $h{"Fred"} eq "[Jxe]");
+ # fk sk fv sv
+ ok(111, checkOutput( "", "fred", "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(112, $db->FIRSTKEY() eq "FRED") ;
+ # fk sk fv sv
+ ok(113, checkOutput( "FRED", "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(114, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(115, $h{"fred"} eq "joe");
+ ok(116, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(117, $db->FIRSTKEY() eq "fred") ;
+ ok(118, checkOutput( "fred", "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(119, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(120, $h{"fred"} eq "joe");
+ ok(121, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(122, $db->FIRSTKEY() eq "fred") ;
+ ok(123, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
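+
+# The filter hooks exercised above are normally used to transform keys and
+# values transparently on their way to and from the database.  A minimal
+# sketch of one common use (not part of the test plan; the file name
+# "filtered.db" is only illustrative): append a trailing NUL when writing so
+# the records stay compatible with C programs that expect NUL-terminated
+# strings, and strip it again when reading.
+#
+#    my %c ;
+#    my $cdb = tie %c, 'DB_File', "filtered.db", O_RDWR|O_CREAT, 0640, $DB_BTREE
+#        or die "Cannot open filtered.db: $!\n" ;
+#    $cdb->filter_store_key  (sub { $_ .= "\0" }) ;
+#    $cdb->filter_store_value(sub { $_ .= "\0" }) ;
+#    $cdb->filter_fetch_key  (sub { s/\0$// }) ;
+#    $cdb->filter_fetch_value(sub { s/\0$// }) ;
+#    $c{"abc"} = "def" ;        # stored on disk as "abc\0" => "def\0"
+#    print $c{"abc"}, "\n" ;    # prints "def" - the NUL is stripped on fetch
+#    undef $cdb ;
+#    untie %c ;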
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok(124, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(125, $result{"store key"} eq "store key - 1: [fred]");
+ ok(126, $result{"store value"} eq "store value - 1: [joe]");
+ ok(127, ! defined $result{"fetch key"} );
+ ok(128, ! defined $result{"fetch value"} );
+ ok(129, $_ eq "original") ;
+
+ ok(130, $db->FIRSTKEY() eq "fred") ;
+ ok(131, $result{"store key"} eq "store key - 1: [fred]");
+ ok(132, $result{"store value"} eq "store value - 1: [joe]");
+ ok(133, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(134, ! defined $result{"fetch value"} );
+ ok(135, $_ eq "original") ;
+
+ $h{"jim"} = "john" ;
+ ok(136, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(137, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(138, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(139, ! defined $result{"fetch value"} );
+ ok(140, $_ eq "original") ;
+
+ ok(141, $h{"fred"} eq "joe");
+ ok(142, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(143, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(144, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(145, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(146, $_ eq "original") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(147, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok(148, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 1
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my %h ;
+
+ sub Compare
+ {
+ my ($key1, $key2) = @_ ;
+ "\L$key1" cmp "\L$key2" ;
+ }
+
+ # specify the Perl sub that will do the comparison
+ $DB_BTREE->{'compare'} = \&Compare ;
+
+ unlink "tree" ;
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open file 'tree': $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+ unlink "tree" ;
+ }
+
+ delete $DB_BTREE->{'compare'} ;
+
+ ok(149, docat_del($file) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 2
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename %h ) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the associative array
+ # and print each key/value pair.
+ foreach (keys %h)
+ { print "$_ -> $h{$_}\n" }
+
+ untie %h ;
+
+ unlink $filename ;
+ }
+
+ ok(150, docat_del($file) eq ($db185mode ? <<'EOM' : <<'EOM') ) ;
+Smith -> John
+Wall -> Brick
+Wall -> Brick
+Wall -> Brick
+mouse -> mickey
+EOM
+Smith -> John
+Wall -> Larry
+Wall -> Larry
+Wall -> Larry
+mouse -> mickey
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 3
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $status $key $value) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the btree using seq
+ # and print each key/value pair.
+ $key = $value = 0 ;
+ for ($status = $x->seq($key, $value, R_FIRST) ;
+ $status == 0 ;
+ $status = $x->seq($key, $value, R_NEXT) )
+ { print "$key -> $value\n" }
+
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(151, docat_del($file) eq ($db185mode == 1 ? <<'EOM' : <<'EOM') ) ;
+Smith -> John
+Wall -> Brick
+Wall -> Brick
+Wall -> Larry
+mouse -> mickey
+EOM
+Smith -> John
+Wall -> Larry
+Wall -> Brick
+Wall -> Brick
+mouse -> mickey
+EOM
+
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 4
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h ) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ my $cnt = $x->get_dup("Wall") ;
+ print "Wall occurred $cnt times\n" ;
+
+ my %hash = $x->get_dup("Wall", 1) ;
+ print "Larry is there\n" if $hash{'Larry'} ;
+ print "There are $hash{'Brick'} Brick Walls\n" ;
+
+ my @list = sort $x->get_dup("Wall") ;
+ print "Wall => [@list]\n" ;
+
+ @list = $x->get_dup("Smith") ;
+ print "Smith => [@list]\n" ;
+
+ @list = $x->get_dup("Dog") ;
+ print "Dog => [@list]\n" ;
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(152, docat_del($file) eq <<'EOM') ;
+Wall occurred 3 times
+Larry is there
+There are 2 Brick Walls
+Wall => [Brick Brick Larry]
+Smith => [John]
+Dog => []
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 5
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $found) ;
+
+ my $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
+ print "Harry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(153, docat_del($file) eq <<'EOM') ;
+Larry Wall is there
+Harry Wall is not there
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 6
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $found) ;
+
+ my $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $x->del_dup("Wall", "Larry") ;
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+ unlink $filename ;
+ }
+
+ ok(154, docat_del($file) eq <<'EOM') ;
+Larry Wall is not there
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 7
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ use vars qw($filename $x %h $st $key $value) ;
+
+ sub match
+ {
+ my $key = shift ;
+ my $value = 0;
+ my $orig_key = $key ;
+ $x->seq($key, $value, R_CURSOR) ;
+ print "$orig_key\t-> $key\t-> $value\n" ;
+ }
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'mouse'} = 'mickey' ;
+ $h{'Wall'} = 'Larry' ;
+ $h{'Walls'} = 'Brick' ;
+ $h{'Smith'} = 'John' ;
+
+
+ $key = $value = 0 ;
+ print "IN ORDER\n" ;
+ for ($st = $x->seq($key, $value, R_FIRST) ;
+ $st == 0 ;
+ $st = $x->seq($key, $value, R_NEXT) )
+
+ { print "$key -> $value\n" }
+
+ print "\nPARTIAL MATCH\n" ;
+
+ match "Wa" ;
+ match "A" ;
+ match "a" ;
+
+ undef $x ;
+ untie %h ;
+
+ unlink $filename ;
+
+ }
+
+ ok(155, docat_del($file) eq <<'EOM') ;
+IN ORDER
+Smith -> John
+Wall -> Larry
+Walls -> Brick
+mouse -> mickey
+
+PARTIAL MATCH
+Wa -> Wall -> Larry
+A -> Smith -> John
+a -> mouse -> mickey
+EOM
+
+}
+
+#{
+# # R_SETCURSOR
+# use strict ;
+# my (%h, $db) ;
+# unlink $Dfile;
+#
+# ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+#
+# $h{abc} = 33 ;
+# my $k = "newest" ;
+# my $v = 44 ;
+# my $status = $db->put($k, $v, R_SETCURSOR) ;
+# print "status = [$status]\n" ;
+# ok(157, $status == 0) ;
+# $status = $db->del($k, R_CURSOR) ;
+# print "status = [$status]\n" ;
+# ok(158, $status == 0) ;
+# $k = "newest" ;
+# ok(159, $db->get($k, $v, R_CURSOR)) ;
+#
+# ok(160, keys %h == 1) ;
+#
+# undef $db ;
+# untie %h;
+# unlink $Dfile;
+#}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
+ or die "Can't open file: $!\n" ;
+ $h{ABC} = undef;
+ ok(156, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
+ or die "Can't open file: $!\n" ;
+    %h = () ;
+ ok(157, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/bdb/perl.DB_File/t/db-hash.t b/bdb/perl.DB_File/t/db-hash.t
new file mode 100644
index 00000000000..a6efd981004
--- /dev/null
+++ b/bdb/perl.DB_File/t/db-hash.t
@@ -0,0 +1,753 @@
+#!./perl
+
+use warnings ;
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..111\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+
+print "1..111\n";
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT>;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+my $Dfile = "dbhash.tmp";
+my $null_keys_allowed = ($DB_File::db_ver < 2.004010
+ || $DB_File::db_ver >= 3.1 );
+
+unlink $Dfile;
+
+umask(0);
+
+# Check the interface to HASHINFO
+
+my $dbh = new DB_File::HASHINFO ;
+
+ok(1, ! defined $dbh->{bsize}) ;
+ok(2, ! defined $dbh->{ffactor}) ;
+ok(3, ! defined $dbh->{nelem}) ;
+ok(4, ! defined $dbh->{cachesize}) ;
+ok(5, ! defined $dbh->{hash}) ;
+ok(6, ! defined $dbh->{lorder}) ;
+
+$dbh->{bsize} = 3000 ;
+ok(7, $dbh->{bsize} == 3000 );
+
+$dbh->{ffactor} = 9000 ;
+ok(8, $dbh->{ffactor} == 9000 );
+
+$dbh->{nelem} = 400 ;
+ok(9, $dbh->{nelem} == 400 );
+
+$dbh->{cachesize} = 65 ;
+ok(10, $dbh->{cachesize} == 65 );
+
+$dbh->{hash} = "abc" ;
+ok(11, $dbh->{hash} eq "abc" );
+
+$dbh->{lorder} = 1234 ;
+ok(12, $dbh->{lorder} == 1234 );
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(13, $@ =~ /^DB_File::HASHINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(14, $@ =~ /^DB_File::HASHINFO::FETCH - Unknown element 'fred' at/ );
+
+
+# Now check the interface to HASH
+my ($X, %h);
+ok(15, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(16, ($mode & 0777) == ($^O eq 'os2' ? 0666 : 0640) || $^O eq 'amigaos' || $^O eq 'MSWin32');
+
+my ($key, $value, $i);
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(17, !$i );
+
+$h{'goner1'} = 'snork';
+
+$h{'abc'} = 'ABC';
+ok(18, $h{'abc'} eq 'ABC' );
+ok(19, !defined $h{'jimmy'} );
+ok(20, !exists $h{'jimmy'} );
+ok(21, exists $h{'abc'} );
+
+$h{'def'} = 'DEF';
+$h{'jkl','mno'} = "JKL\034MNO";
+$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
+$h{'a'} = 'A';
+
+#$h{'b'} = 'B';
+$X->STORE('b', 'B') ;
+
+$h{'c'} = 'C';
+
+#$h{'d'} = 'D';
+$X->put('d', 'D') ;
+
+$h{'e'} = 'E';
+$h{'f'} = 'F';
+$h{'g'} = 'X';
+$h{'h'} = 'H';
+$h{'i'} = 'I';
+
+$h{'goner2'} = 'snork';
+delete $h{'goner2'};
+
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(%h);
+
+
+# tie to the same file again, do not supply a type - should default to HASH
+ok(22, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640) );
+
+# Modify an entry from the previous tie
+$h{'g'} = 'G';
+
+$h{'j'} = 'J';
+$h{'k'} = 'K';
+$h{'l'} = 'L';
+$h{'m'} = 'M';
+$h{'n'} = 'N';
+$h{'o'} = 'O';
+$h{'p'} = 'P';
+$h{'q'} = 'Q';
+$h{'r'} = 'R';
+$h{'s'} = 'S';
+$h{'t'} = 'T';
+$h{'u'} = 'U';
+$h{'v'} = 'V';
+$h{'w'} = 'W';
+$h{'x'} = 'X';
+$h{'y'} = 'Y';
+$h{'z'} = 'Z';
+
+$h{'goner3'} = 'snork';
+
+delete $h{'goner1'};
+$X->DELETE('goner3');
+
+my @keys = keys(%h);
+my @values = values(%h);
+
+ok(23, $#keys == 29 && $#values == 29) ;
+
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
+ $key =~ y/a-z/A-Z/;
+ $i++ if $key eq $value;
+ }
+}
+
+ok(24, $i == 30) ;
+
+@keys = ('blurfl', keys(%h), 'dyick');
+ok(25, $#keys == 31) ;
+
+$h{'foo'} = '';
+ok(26, $h{'foo'} eq '' );
+
+# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
+# This feature was reenabled in version 3.1 of Berkeley DB.
+my $result = 0 ;
+if ($null_keys_allowed) {
+ $h{''} = 'bar';
+ $result = ( $h{''} eq 'bar' );
+}
+else
+ { $result = 1 }
+ok(27, $result) ;
+
+# check cache overflow and numeric keys and contents
+my $ok = 1;
+for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
+for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
+ok(28, $ok );
+
+($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(29, $size > 0 );
+
+@h{0..200} = 200..400;
+my @foo = @h{0..200};
+ok(30, join(':',200..400) eq join(':',@foo) );
+
+
+# Now check all the non-tie specific stuff
+
+# Check R_NOOVERWRITE will make put fail when attempting to overwrite
+# an existing record.
+
+my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
+ok(31, $status == 1 );
+
+# check that the value of the key 'x' has not been changed by the
+# previous test
+ok(32, $h{'x'} eq 'X' );
+
+# standard put
+$status = $X->put('key', 'value') ;
+ok(33, $status == 0 );
+
+# Check that the previous put can be retrieved
+$value = 0 ;
+$status = $X->get('key', $value) ;
+ok(34, $status == 0 );
+ok(35, $value eq 'value' );
+
+# Attempting to delete an existing key should work
+
+$status = $X->del('q') ;
+ok(36, $status == 0 );
+
+# Make sure that the deleted key cannot be retrieved
+{
+ no warnings 'uninitialized' ;
+ ok(37, $h{'q'} eq undef );
+}
+
+# Attempting to delete a non-existent key should fail
+
+$status = $X->del('joe') ;
+ok(38, $status == 1 );
+
+# Check the get interface
+
+# First a non-existing key
+$status = $X->get('aaaa', $value) ;
+ok(39, $status == 1 );
+
+# Next an existing key
+$status = $X->get('a', $value) ;
+ok(40, $status == 0 );
+ok(41, $value eq 'A' );
+
+# seq
+# ###
+
+# ditto, but use put to replace the key/value pair.
+
+# use seq to walk backwards through a file - check that the order is reversed
+
+# check seq FIRST/LAST
+
+# sync
+# ####
+
+$status = $X->sync ;
+ok(42, $status == 0 );
+
+
+# fd
+# ##
+
+$status = $X->fd ;
+ok(43, $status != 0 );
+
+undef $X ;
+untie %h ;
+
+unlink $Dfile;
+
+# clear
+# #####
+
+ok(44, tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+foreach (1 .. 10)
+ { $h{$_} = $_ * 100 }
+
+# check that there are 10 elements in the hash
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(45, $i == 10);
+
+# now clear the hash
+%h = () ;
+
+# check it is empty
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(46, $i == 0);
+
+untie %h ;
+unlink $Dfile ;
+
+
+# Now try an in memory file
+ok(47, $X = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+# fd with an in memory file should return failure
+$status = $X->fd ;
+ok(48, $status == -1 );
+
+undef $X ;
+untie %h ;
+
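+# The hash entry of a HASHINFO object can be set to a Perl sub to override
+# the default hashing function; the sub receives the key as its argument and
+# returns the hash value to use (a number).  The block below installs one
+# that also counts how often it is called.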
+{
+ # check ability to override the default hashing
+ my %x ;
+ my $filename = "xyz" ;
+ my $hi = new DB_File::HASHINFO ;
+ $::count = 0 ;
+ $hi->{hash} = sub { ++$::count ; length $_[0] } ;
+ ok(49, tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $hi ) ;
+    $x{"abc"} = 123 ;
+    ok(50, $x{"abc"} == 123) ;
+ untie %x ;
+ unlink $filename ;
+ ok(51, $::count >0) ;
+}
+
+{
+ # check that attempting to tie an array to a DB_HASH will fail
+
+ my $filename = "xyz" ;
+ my @x ;
+ eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_HASH ; } ;
+ ok(52, $@ =~ /^DB_File can only tie an associative array to a DB_HASH database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(53, $@ eq "") ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB","dbhash.tmp", O_RDWR|O_CREAT, 0640, $DB_HASH );
+ ' ;
+
+ main::ok(54, $@ eq "") ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok(55, $@ eq "") ;
+ main::ok(56, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
+ main::ok(57, $@ eq "") ;
+ main::ok(58, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(59, $@ eq "" ) ;
+ main::ok(60, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok(61, $@ eq "") ;
+ main::ok(62, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(%h);
+ unlink "SubDB.pm", "dbhash.tmp" ;
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(63, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok(64, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(65, $h{"fred"} eq "joe");
+ # fk sk fv sv
+ ok(66, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(67, $db->FIRSTKEY() eq "fred") ;
+ # fk sk fv sv
+ ok(68, checkOutput( "fred", "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok(69, checkOutput( "", "fred", "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(70, $h{"Fred"} eq "[Jxe]");
+ # fk sk fv sv
+ ok(71, checkOutput( "", "fred", "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(72, $db->FIRSTKEY() eq "FRED") ;
+ # fk sk fv sv
+ ok(73, checkOutput( "FRED", "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(74, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(75, $h{"fred"} eq "joe");
+ ok(76, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(77, $db->FIRSTKEY() eq "fred") ;
+ ok(78, checkOutput( "fred", "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(79, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(80, $h{"fred"} eq "joe");
+ ok(81, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(82, $db->FIRSTKEY() eq "fred") ;
+ ok(83, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok(84, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(85, $result{"store key"} eq "store key - 1: [fred]");
+ ok(86, $result{"store value"} eq "store value - 1: [joe]");
+ ok(87, ! defined $result{"fetch key"} );
+ ok(88, ! defined $result{"fetch value"} );
+ ok(89, $_ eq "original") ;
+
+ ok(90, $db->FIRSTKEY() eq "fred") ;
+ ok(91, $result{"store key"} eq "store key - 1: [fred]");
+ ok(92, $result{"store value"} eq "store value - 1: [joe]");
+ ok(93, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(94, ! defined $result{"fetch value"} );
+ ok(95, $_ eq "original") ;
+
+ $h{"jim"} = "john" ;
+ ok(96, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(97, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(98, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(99, ! defined $result{"fetch value"} );
+ ok(100, $_ eq "original") ;
+
+ ok(101, $h{"fred"} eq "joe");
+ ok(102, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(103, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(104, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(105, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(106, $_ eq "original") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(107, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok(108, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+ use vars qw( %h $k $v ) ;
+
+ unlink "fruit" ;
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0640, $DB_HASH
+ or die "Cannot open file 'fruit': $!\n";
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+ unlink "fruit" ;
+ }
+
+ ok(109, docat_del($file) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
+ $h{ABC} = undef;
+ ok(110, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
+    %h = () ;
+ ok(111, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/bdb/perl.DB_File/t/db-recno.t b/bdb/perl.DB_File/t/db-recno.t
new file mode 100644
index 00000000000..c64d83b5916
--- /dev/null
+++ b/bdb/perl.DB_File/t/db-recno.t
@@ -0,0 +1,899 @@
+#!./perl -w
+
+use warnings;
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..128\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+use vars qw($dbh $Dfile $bad_ones $FA) ;
+
+# full tied array support started in Perl 5.004_57
+# Double check to see if it is available.
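+# The probe below ties @a to the dummy "try" class and then evaluates the
+# array in scalar context; on perls with full tied-array support that
+# evaluation calls FETCHSIZE, which sets $FA to 1, while on older perls with
+# only partial support FETCHSIZE is never called and $FA stays 0.  Later
+# tests use $FA to choose between the native array operations and the
+# equivalent DB_File method calls (push/pop/shift/unshift/length).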
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+
+ return $result ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT>;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+sub bad_one
+{
+ print STDERR <<EOM unless $bad_ones++ ;
+#
+# Some older versions of Berkeley DB version 1 will fail tests 51,
+# 53 and 55.
+#
+# You can safely ignore the errors if you're never going to use the
+# broken functionality (recno databases with a modified bval).
+# Otherwise you'll have to upgrade your DB library.
+#
+# If you want to use Berkeley DB version 1, then 1.85 and 1.86 are the
+# last versions that were released. Berkeley DB version 2 is continually
+# being updated -- Check out http://www.sleepycat.com/ for more details.
+#
+EOM
+}
+
+print "1..128\n";
+
+my $Dfile = "recno.tmp";
+unlink $Dfile ;
+
+umask(0);
+
+# Check the interface to RECNOINFO
+
+my $dbh = new DB_File::RECNOINFO ;
+ok(1, ! defined $dbh->{bval}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{flags}) ;
+ok(5, ! defined $dbh->{lorder}) ;
+ok(6, ! defined $dbh->{reclen}) ;
+ok(7, ! defined $dbh->{bfname}) ;
+
+$dbh->{bval} = 3000 ;
+ok(8, $dbh->{bval} == 3000 );
+
+$dbh->{cachesize} = 9000 ;
+ok(9, $dbh->{cachesize} == 9000 );
+
+$dbh->{psize} = 400 ;
+ok(10, $dbh->{psize} == 400 );
+
+$dbh->{flags} = 65 ;
+ok(11, $dbh->{flags} == 65 );
+
+$dbh->{lorder} = 123 ;
+ok(12, $dbh->{lorder} == 123 );
+
+$dbh->{reclen} = 1234 ;
+ok(13, $dbh->{reclen} == 1234 );
+
+$dbh->{bfname} = 1234 ;
+ok(14, $dbh->{bfname} == 1234 );
+
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(15, $@ =~ /^DB_File::RECNOINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(16, $@ =~ /^DB_File::RECNOINFO::FETCH - Unknown element 'fred' at/ );
+
+# Now check the interface to RECNOINFO
+
+my $X ;
+my @h ;
+ok(17, $X = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+
+ok(18, ((stat($Dfile))[2] & 0777) == ($^O eq 'os2' ? 0666 : 0640)
+ || $^O eq 'MSWin32' || $^O eq 'amigaos') ;
+
+#my $l = @h ;
+my $l = $X->length ;
+ok(19, ($FA ? @h == 0 : !$l) );
+
+my @data = qw( a b c d ever f g h i j k longername m n o p) ;
+
+$h[0] = shift @data ;
+ok(20, $h[0] eq 'a' );
+
+my $i;
+foreach (@data)
+ { $h[++$i] = $_ }
+
+unshift (@data, 'a') ;
+
+ok(21, defined $h[1] );
+ok(22, ! defined $h[16] );
+ok(23, $FA ? @h == @data : $X->length == @data );
+
+
+# Overwrite an entry & check we can fetch it back
+$h[3] = 'replaced' ;
+$data[3] = 'replaced' ;
+ok(24, $h[3] eq 'replaced' );
+
+#PUSH
+my @push_data = qw(added to the end) ;
+($FA ? push(@h, @push_data) : $X->push(@push_data)) ;
+push (@data, @push_data) ;
+ok(25, $h[++$i] eq 'added' );
+ok(26, $h[++$i] eq 'to' );
+ok(27, $h[++$i] eq 'the' );
+ok(28, $h[++$i] eq 'end' );
+
+# POP
+my $popped = pop (@data) ;
+my $value = ($FA ? pop @h : $X->pop) ;
+ok(29, $value eq $popped) ;
+
+# SHIFT
+$value = ($FA ? shift @h : $X->shift) ;
+my $shifted = shift @data ;
+ok(30, $value eq $shifted );
+
+# UNSHIFT
+
+# empty list
+($FA ? unshift @h : $X->unshift) ;
+ok(31, ($FA ? @h == @data : $X->length == @data ));
+
+my @new_data = qw(add this to the start of the array) ;
+$FA ? unshift (@h, @new_data) : $X->unshift (@new_data) ;
+unshift (@data, @new_data) ;
+ok(32, $FA ? @h == @data : $X->length == @data );
+ok(33, $h[0] eq "add") ;
+ok(34, $h[1] eq "this") ;
+ok(35, $h[2] eq "to") ;
+ok(36, $h[3] eq "the") ;
+ok(37, $h[4] eq "start") ;
+ok(38, $h[5] eq "of") ;
+ok(39, $h[6] eq "the") ;
+ok(40, $h[7] eq "array") ;
+ok(41, $h[8] eq $data[8]) ;
+
+# SPLICE
+
+# Now both arrays should be identical
+
+my $ok = 1 ;
+my $j = 0 ;
+foreach (@data)
+{
+ $ok = 0, last if $_ ne $h[$j ++] ;
+}
+ok(42, $ok );
+
+# Negative subscripts
+
+# get the last element of the array
+ok(43, $h[-1] eq $data[-1] );
+ok(44, $h[-1] eq $h[ ($FA ? @h : $X->length) -1] );
+
+# get the first element using a negative subscript
+eval '$h[ - ( $FA ? @h : $X->length)] = "abcd"' ;
+ok(45, $@ eq "" );
+ok(46, $h[0] eq "abcd" );
+
+# now try to read before the start of the array
+eval '$h[ - (1 + ($FA ? @h : $X->length))] = 1234' ;
+ok(47, $@ =~ '^Modification of non-creatable array value attempted' );
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(@h);
+
+unlink $Dfile;
+
+
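+# In a RECNO database each array element is stored as one record.  With
+# variable-length records a record is terminated by bval (newline by
+# default), and with R_FIXEDLEN each record occupies reclen bytes padded out
+# with bval, as the expected strings in the next few blocks show.
+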
+{
+ # Check bval defaults to \n
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ ok(48, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ ok(49, $x eq "abc\ndef\n\nghi\n") ;
+}
+
+{
+ # Change bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{bval} = "-" ;
+ ok(50, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc-def--ghi-") ;
+ bad_one() unless $ok ;
+ ok(51, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with default bval (space)
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{reclen} = 5 ;
+ ok(52, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc def ghi ") ;
+ bad_one() unless $ok ;
+ ok(53, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with user-defined bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{bval} = "-" ;
+ $dbh->{reclen} = 5 ;
+ ok(54, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc--def-------ghi--") ;
+ bad_one() unless $ok ;
+ ok(55, $ok) ;
+}
+
+{
+ # check that attempting to tie an associative array to a DB_RECNO will fail
+
+ my $filename = "xyz" ;
+ my %x ;
+ eval { tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO ; } ;
+ ok(56, $@ =~ /^DB_File can only tie an array to a DB_RECNO database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(57, $@ eq "") ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB","recno.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO );
+ ' ;
+
+ main::ok(58, $@ eq "") ;
+
+ my $ret = eval '$h[3] = 3 ; return $h[3] ' ;
+ main::ok(59, $@ eq "") ;
+ main::ok(60, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put(1, 4) ; $X->get(1, $value) ; return $value' ;
+ main::ok(61, $@ eq "") ;
+ main::ok(62, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(63, $@ eq "" ) ;
+ main::ok(64, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok(65, $@ eq "") ;
+ main::ok(66, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(@h);
+ unlink "SubDB.pm", "recno.tmp" ;
+
+}
+
+{
+
+ # test $#
+ my $self ;
+ unlink $Dfile;
+ ok(67, $self = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[2] = "ghi" ;
+ $h[3] = "jkl" ;
+ ok(68, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ ok(69, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array to same length
+ ok(70, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 3 }
+ else
+ { $self->STORESIZE(4) }
+ ok(71, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ untie @h ;
+ $x = docat($Dfile) ;
+ ok(72, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array to bigger
+ ok(73, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 6 }
+ else
+ { $self->STORESIZE(7) }
+ ok(74, $FA ? $#h == 6 : $self->length() == 7) ;
+ undef $self ;
+ untie @h ;
+ $x = docat($Dfile) ;
+ ok(75, $x eq "abc\ndef\nghi\njkl\n\n\n\n") ;
+
+ # $# sets array smaller
+ ok(76, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 2 }
+ else
+ { $self->STORESIZE(3) }
+ ok(77, $FA ? $#h == 2 : $self->length() == 3) ;
+ undef $self ;
+ untie @h ;
+ $x = docat($Dfile) ;
+ ok(78, $x eq "abc\ndef\nghi\n") ;
+
+ unlink $Dfile;
+
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(79, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ # fk sk fv sv
+ ok(80, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(81, $h[0] eq "joe");
+ # fk sk fv sv
+ ok(82, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(83, $db->FIRSTKEY() == 0) ;
+ # fk sk fv sv
+ ok(84, checkOutput( 0, "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { ++ $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ *= 2 ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[1] = "Joe" ;
+ # fk sk fv sv
+ ok(85, checkOutput( "", 2, "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(86, $h[1] eq "[Jxe]");
+ # fk sk fv sv
+ ok(87, checkOutput( "", 2, "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(88, $db->FIRSTKEY() == 1) ;
+ # fk sk fv sv
+ ok(89, checkOutput( 1, "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(90, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(91, $h[0] eq "joe");
+ ok(92, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(93, $db->FIRSTKEY() == 0) ;
+ ok(94, checkOutput( 0, "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(95, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(96, $h[0] eq "joe");
+ ok(97, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(98, $db->FIRSTKEY() == 0) ;
+ ok(99, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+
+ unlink $Dfile;
+ ok(100, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
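+
+    # Each filter below is given its own closure; %result records how many
+    # times that filter has run and every value it has seen so far.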
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ ok(101, $result{"store key"} eq "store key - 1: [0]");
+ ok(102, $result{"store value"} eq "store value - 1: [joe]");
+ ok(103, ! defined $result{"fetch key"} );
+ ok(104, ! defined $result{"fetch value"} );
+ ok(105, $_ eq "original") ;
+
+ ok(106, $db->FIRSTKEY() == 0 ) ;
+ ok(107, $result{"store key"} eq "store key - 1: [0]");
+ ok(108, $result{"store value"} eq "store value - 1: [joe]");
+ ok(109, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(110, ! defined $result{"fetch value"} );
+ ok(111, $_ eq "original") ;
+
+ $h[7] = "john" ;
+ ok(112, $result{"store key"} eq "store key - 2: [0 7]");
+ ok(113, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(114, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(115, ! defined $result{"fetch value"} );
+ ok(116, $_ eq "original") ;
+
+ ok(117, $h[0] eq "joe");
+ ok(118, $result{"store key"} eq "store key - 3: [0 7 0]");
+ ok(119, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(120, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(121, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(122, $_ eq "original") ;
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ unlink $Dfile;
+
+ ok(123, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_store_key (sub { $_ = $h[0] }) ;
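+
+    # Fetching $h[0] inside the filter sends the key back through
+    # filter_store_key, which DB_File should trap as recursion.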
+
+ eval '$h[1] = 1234' ;
+ ok(124, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
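+	# Redirect is a helper (defined elsewhere in this test script) that
+	# diverts STDOUT into $file so the output can be compared below.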
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $x = tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $FA ? push @h, "green", "black"
+ : $x->push("green", "black") ;
+
+ my $elements = $FA ? scalar @h : $x->length ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $FA ? pop @h : $x->pop ;
+ print "popped $last\n" ;
+
+ $FA ? unshift @h, "white"
+ : $x->unshift("white") ;
+ my $first = $FA ? shift @h : $x->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ undef $x ;
+ untie @h ;
+
+ unlink $filename ;
+ }
+
+ ok(125, docat_del($file) eq <<'EOM') ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+The last element is green
+The 2nd last element is yellow
+EOM
+
+ my $save_output = "xyzt" ;
+ {
+ my $redirect = new Redirect $save_output ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use vars qw(@h $H $file $i) ;
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+ unlink $file ;
+ }
+
+ ok(126, docat_del($save_output) eq <<'EOM') ;
+
+ORIGINAL
+0: zero
+1: one
+2: two
+3: three
+4: four
+
+The last record was [four]
+The first record was [zero]
+
+REVERSE
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+
+REVERSE again
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+EOM
+
+}
+
+{
+ # Bug ID 20001013.009
+ #
+    # test that $h[0] = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my @h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+ $h[0] = undef;
+ ok(127, $a eq "") ;
+ untie @h ;
+ unlink $Dfile;
+}
+
+{
+    # test that @h = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ unlink $Dfile;
+ my @h ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+    @h = ();
+ ok(128, $a eq "") ;
+ untie @h ;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/bdb/perl.DB_File/typemap b/bdb/perl.DB_File/typemap
new file mode 100644
index 00000000000..55439ee76d9
--- /dev/null
+++ b/bdb/perl.DB_File/typemap
@@ -0,0 +1,44 @@
+# typemap for Perl 5 interface to Berkeley DB
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+# last modified 10th December 2000
+# version 1.74
+#
+#################################### DB SECTION
+#
+#
+
+u_int T_U_INT
+DB_File T_PTROBJ
+DBT T_dbtdatum
+DBTKEY T_dbtkeydatum
+
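+# The INPUT maps below convert Perl scalars into DBT/DBTKEY structures on
+# their way into Berkeley DB; for DB_RECNO databases the key is first
+# converted to a record number.  The OUTPUT maps do the reverse on the
+# way back out.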
+INPUT
+T_dbtkeydatum
+ ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->type != DB_RECNO) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+ else {
+ Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(recno_t);
+ }
+T_dbtdatum
+ ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBT_clear($var) ;
+ if (SvOK($arg)) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+OUTPUT
+
+T_dbtkeydatum
+ OutputKey($arg, $var)
+T_dbtdatum
+ OutputValue($arg, $var)
+T_PTROBJ
+ sv_setref_pv($arg, dbtype, (void*)$var);
diff --git a/bdb/perl.DB_File/version.c b/bdb/perl.DB_File/version.c
new file mode 100644
index 00000000000..82b3e8b27b9
--- /dev/null
+++ b/bdb/perl.DB_File/version.c
@@ -0,0 +1,81 @@
+/*
+
+ version.c -- Perl 5 interface to Berkeley DB
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+ last modified 16th January 2000
+ version 1.73
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Changes:
+ 1.71 - Support for Berkeley DB version 3.
+		Support for Berkeley DB 2/3's backward compatibility mode.
+ 1.72 - No change.
+ 1.73 - Added support for threading
+ 1.74 - Added Perl core patch 7801.
+
+
+*/
+
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#include <db.h>
+
+void
+#ifdef CAN_PROTOTYPE
+__getBerkeleyDBInfo(void)
+#else
+__getBerkeleyDBInfo()
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ SV * version_sv = perl_get_sv("DB_File::db_version", GV_ADD|GV_ADDMULTI) ;
+ SV * ver_sv = perl_get_sv("DB_File::db_ver", GV_ADD|GV_ADDMULTI) ;
+ SV * compat_sv = perl_get_sv("DB_File::db_185_compat", GV_ADD|GV_ADDMULTI) ;
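+    /*
+     * These SVs back the Perl-level variables $DB_File::db_version,
+     * $DB_File::db_ver and $DB_File::db_185_compat, which are filled
+     * in below.
+     */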
+
+#ifdef DB_VERSION_MAJOR
+ int Major, Minor, Patch ;
+
+ (void)db_version(&Major, &Minor, &Patch) ;
+
+ /* Check that the versions of db.h and libdb.a are the same */
+ if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
+ || Patch != DB_VERSION_PATCH)
+ croak("\nDB_File needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ Major, Minor, Patch) ;
+
+ /* check that libdb is recent enough -- we need 2.3.4 or greater */
+ if (Major == 2 && (Minor < 3 || (Minor == 3 && Patch < 4)))
+ croak("DB_File needs Berkeley DB 2.3.4 or greater, you have %d.%d.%d\n",
+ Major, Minor, Patch) ;
+
+ {
+ char buffer[40] ;
+ sprintf(buffer, "%d.%d", Major, Minor) ;
+ sv_setpv(version_sv, buffer) ;
+ sprintf(buffer, "%d.%03d%03d", Major, Minor, Patch) ;
+ sv_setpv(ver_sv, buffer) ;
+ }
+
+#else /* ! DB_VERSION_MAJOR */
+ sv_setiv(version_sv, 1) ;
+ sv_setiv(ver_sv, 1) ;
+#endif /* ! DB_VERSION_MAJOR */
+
+#ifdef COMPAT185
+ sv_setiv(compat_sv, 1) ;
+#else /* ! COMPAT185 */
+ sv_setiv(compat_sv, 0) ;
+#endif /* ! COMPAT185 */
+
+}
diff --git a/bdb/qam/qam.c b/bdb/qam/qam.c
new file mode 100644
index 00000000000..0c9f453044f
--- /dev/null
+++ b/bdb/qam/qam.c
@@ -0,0 +1,1357 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam.c,v 11.72 2001/01/16 20:10:55 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "db_am.h"
+#include "mp.h"
+#include "lock.h"
+#include "log.h"
+#include "btree.h"
+#include "qam.h"
+
+static int __qam_c_close __P((DBC *, db_pgno_t, int *));
+static int __qam_c_del __P((DBC *));
+static int __qam_c_destroy __P((DBC *));
+static int __qam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __qam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __qam_getno __P((DB *, const DBT *, db_recno_t *));
+
+/*
+ * __qam_position --
+ * Position a queued access method cursor at a record. This returns
+ * the page locked. *exactp will be set if the record is valid.
+ * PUBLIC: int __qam_position
+ * PUBLIC: __P((DBC *, db_recno_t *, qam_position_mode, int *));
+ */
+int
+__qam_position(dbc, recnop, mode, exactp)
+ DBC *dbc; /* open cursor */
+ db_recno_t *recnop; /* pointer to recno to find */
+ qam_position_mode mode;/* locking: read or write */
+ int *exactp; /* indicate if it was found */
+{
+ QUEUE_CURSOR *cp;
+ DB *dbp;
+ QAMDATA *qp;
+ db_pgno_t pg;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ /* Fetch the page for this recno. */
+ pg = QAM_RECNO_PAGE(dbp, *recnop);
+
+ if ((ret = __db_lget(dbc, 0, pg, mode == QAM_READ ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &cp->lock)) != 0)
+ return (ret);
+ cp->page = NULL;
+ *exactp = 0;
+ if ((ret = __qam_fget(dbp, &pg,
+ mode == QAM_WRITE ? DB_MPOOL_CREATE : 0,
+ &cp->page)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, cp->lock);
+ cp->lock.off = LOCK_INVALID;
+ if (mode != QAM_WRITE && (ret == EINVAL || ret == ENOENT))
+ return (0);
+ return (ret);
+ }
+ cp->pgno = pg;
+ cp->indx = QAM_RECNO_INDEX(dbp, pg, *recnop);
+
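+	/*
+	 * A page number of 0 means the page has never been initialized
+	 * (for QAM_WRITE it was just created by DB_MPOOL_CREATE); set it
+	 * up here unless the database is read-only.
+	 */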
+ if (PGNO(cp->page) == 0) {
+ if (F_ISSET(dbp, DB_AM_RDONLY)) {
+ *exactp = 0;
+ return (0);
+ }
+ PGNO(cp->page) = pg;
+ TYPE(cp->page) = P_QAMDATA;
+ }
+
+ qp = QAM_GET_RECORD(dbp, cp->page, cp->indx);
+ *exactp = F_ISSET(qp, QAM_VALID);
+
+ return (ret);
+}
+
+/*
+ * __qam_pitem --
+ * Put an item on a queue page. Copy the data to the page and set the
+ * VALID and SET bits. If logging and the record was previously set,
+ * log that data, otherwise just log the new data.
+ *
+ * pagep must be write locked
+ *
+ * PUBLIC: int __qam_pitem
+ * PUBLIC: __P((DBC *, QPAGE *, u_int32_t, db_recno_t, DBT *));
+ */
+int
+__qam_pitem(dbc, pagep, indx, recno, data)
+ DBC *dbc;
+ QPAGE *pagep;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT *data;
+{
+ DB *dbp;
+ DBT olddata, pdata, *datap;
+ QAMDATA *qp;
+ QUEUE *t;
+ u_int32_t size;
+ u_int8_t *dest, *p;
+ int alloced, ret;
+
+ alloced = ret = 0;
+
+ dbp = dbc->dbp;
+ t = (QUEUE *)dbp->q_internal;
+
+ if (data->size > t->re_len)
+ goto len_err;
+
+ qp = QAM_GET_RECORD(dbp, pagep, indx);
+
+ p = qp->data;
+ size = data->size;
+ datap = data;
+ if (F_ISSET(data, DB_DBT_PARTIAL)) {
+ if (data->doff + data->dlen > t->re_len) {
+ alloced = data->dlen;
+ goto len_err;
+ }
+ if (data->size != data->dlen) {
+len_err: __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)(alloced ? alloced : data->size));
+ return (EINVAL);
+ }
+ if (data->size == t->re_len)
+ goto no_partial;
+
+ /*
+ * If we are logging, then we have to build the record
+ * first, otherwise, we can simply drop the change
+ * directly on the page. After this clause, make
+ * sure that datap and p are set up correctly so that
+ * copying datap into p does the right thing.
+ *
+		 * Note: if the existing record is not valid, we create
+		 * a complete record to log, so that both this code and
+		 * the recovery code are simpler.
+ */
+
+ if (DB_LOGGING(dbc) || !F_ISSET(qp, QAM_VALID)) {
+ datap = &pdata;
+ memset(datap, 0, sizeof(*datap));
+
+ if ((ret = __os_malloc(dbp->dbenv,
+ t->re_len, NULL, &datap->data)) != 0)
+ return (ret);
+ alloced = 1;
+ datap->size = t->re_len;
+
+ /*
+ * Construct the record if it's valid, otherwise set it
+ * all to the pad character.
+ */
+ dest = datap->data;
+ if (F_ISSET(qp, QAM_VALID))
+ memcpy(dest, p, t->re_len);
+ else
+ memset(dest, t->re_pad, t->re_len);
+
+ dest += data->doff;
+ memcpy(dest, data->data, data->size);
+ } else {
+ datap = data;
+ p += data->doff;
+ }
+ }
+
+no_partial:
+ if (DB_LOGGING(dbc)) {
+ olddata.size = 0;
+ if (F_ISSET(qp, QAM_SET)) {
+ olddata.data = qp->data;
+ olddata.size = t->re_len;
+ }
+ if ((ret = __qam_add_log(dbp->dbenv, dbc->txn, &LSN(pagep),
+ 0, dbp->log_fileid, &LSN(pagep), pagep->pgno,
+ indx, recno, datap, qp->flags,
+ olddata.size == 0 ? NULL : &olddata)) != 0)
+ goto err;
+ }
+
+ F_SET(qp, QAM_VALID | QAM_SET);
+ memcpy(p, datap->data, datap->size);
+ if (!F_ISSET(data, DB_DBT_PARTIAL))
+ memset(p + datap->size, t->re_pad, t->re_len - datap->size);
+
+err: if (alloced)
+ __os_free(datap->data, t->re_len);
+
+ return (ret);
+}
+/*
+ * __qam_c_put
+ * Cursor put for queued access method.
+ * BEFORE and AFTER cannot be specified.
+ */
+static int
+__qam_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ QUEUE_CURSOR *cp;
+ DB *dbp;
+ DB_LOCK lock;
+ QMETA *meta;
+ db_pgno_t pg;
+ db_recno_t new_cur, new_first;
+ u_int32_t opcode;
+ int exact, ret, t_ret;
+
+ COMPQUIET(key, NULL);
+
+ dbp = dbc->dbp;
+ if (pgnop != NULL)
+ *pgnop = PGNO_INVALID;
+
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ /* Write lock the record. */
+ if ((ret = __db_lget(dbc,
+ 0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+ return (ret);
+
+ if ((ret = __qam_position(dbc,
+ &cp->recno, QAM_WRITE, &exact)) != 0) {
+ /* We could not get the page, we can release the record lock. */
+ __LPUT(dbc, lock);
+ return (ret);
+ }
+
+ if (exact && flags == DB_NOOVERWRITE) {
+ ret = __TLPUT(dbc, lock);
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, cp->lock)) == 0)
+ cp->lock.off = LOCK_INVALID;
+ else
+ if (ret == 0)
+ ret = t_ret;
+ if ((t_ret =
+ __qam_fput(dbp, cp->pgno, cp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ return (ret == 0 ? DB_KEYEXIST : ret);
+ }
+
+ /* Put the item on the page. */
+ ret = __qam_pitem(dbc, (QPAGE *)cp->page, cp->indx, cp->recno, data);
+
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret =
+ __qam_fput(dbp, cp->pgno, cp->page, DB_MPOOL_DIRTY)) && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ cp->lock = lock;
+ cp->lock_mode = DB_LOCK_WRITE;
+ if (ret != 0)
+ return (ret);
+
+ /* We may need to reset the head or tail of the queue. */
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+
+ opcode = 0;
+ new_cur = new_first = 0;
+
+ /*
+	 * If the put address is outside the current queue, adjust the
+	 * head and tail of the queue.  If the ordering is inverted, we
+	 * move whichever pointer is closer.  The first case is an empty
+	 * queue: move both first and current to where the new insert
+	 * lands.
+ */
+
+ if (meta->first_recno == meta->cur_recno) {
+ new_first = cp->recno;
+ new_cur = cp->recno + 1;
+ if (new_cur == RECNO_OOB)
+ new_cur++;
+ opcode |= QAM_SETFIRST;
+ opcode |= QAM_SETCUR;
+ } else {
+ if (QAM_BEFORE_FIRST(meta, cp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno - cp->recno < cp->recno - meta->cur_recno)) {
+ new_first = cp->recno;
+ opcode |= QAM_SETFIRST;
+ }
+
+ if (meta->cur_recno == cp->recno ||
+ (QAM_AFTER_CURRENT(meta, cp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ cp->recno - meta->cur_recno <= meta->first_recno - cp->recno))) {
+ new_cur = cp->recno + 1;
+ if (new_cur == RECNO_OOB)
+ new_cur++;
+ opcode |= QAM_SETCUR;
+ }
+ }
+
+ if (opcode != 0 && DB_LOGGING(dbc)) {
+ ret = __qam_mvptr_log(dbp->dbenv, dbc->txn, &meta->dbmeta.lsn,
+ 0, opcode, dbp->log_fileid, meta->first_recno, new_first,
+ meta->cur_recno, new_cur, &meta->dbmeta.lsn);
+ }
+
+ if (opcode & QAM_SETCUR)
+ meta->cur_recno = new_cur;
+ if (opcode & QAM_SETFIRST)
+ meta->first_recno = new_first;
+
+ if ((t_ret =
+ memp_fput(dbp->mpf, meta, opcode != 0 ? DB_MPOOL_DIRTY : 0)) != 0 &&
+ ret == 0)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __qam_put --
+ * Add a record to the queue.
+ * If we are doing anything but appending, just call qam_c_put to do the
+ *	work.  Otherwise we fast-path the append here.
+ *
+ * PUBLIC: int __qam_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__qam_put(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ QUEUE_CURSOR *cp;
+ DBC *dbc;
+ DB_LOCK lock;
+ QMETA *meta;
+ QPAGE *page;
+ QUEUE *qp;
+ db_pgno_t pg;
+ db_recno_t recno;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_CHECK_TXN(dbp, txn);
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, dbc->txn, "qam_put", key, data, flags);
+
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_putchk(dbp,
+ key, data, flags, F_ISSET(dbp, DB_AM_RDONLY), 0)) != 0)
+ goto done;
+
+ /* If not appending, then just call the cursor routine */
+ if (flags != DB_APPEND) {
+ if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
+ goto done;
+
+ ret = __qam_c_put(dbc, NULL, data, flags, NULL);
+ goto done;
+ }
+
+ /* Write lock the meta page. */
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto done;
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, lock);
+ goto done;
+ }
+
+ /* Record that we are going to allocate a record. */
+ if (DB_LOGGING(dbc)) {
+ __qam_inc_log(dbp->dbenv,
+ dbc->txn, &meta->dbmeta.lsn,
+ 0, dbp->log_fileid, &meta->dbmeta.lsn);
+ }
+
+ /* Get the next record number. */
+ recno = meta->cur_recno;
+ meta->cur_recno++;
+ if (meta->cur_recno == RECNO_OOB)
+ meta->cur_recno++;
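+	/*
+	 * If cur_recno has wrapped all the way around to first_recno,
+	 * the queue is full: back the increment out and return EFBIG.
+	 */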
+ if (meta->cur_recno == meta->first_recno) {
+ meta->cur_recno--;
+ if (meta->cur_recno == RECNO_OOB)
+ meta->cur_recno--;
+ (void)__LPUT(dbc, lock);
+ ret = EFBIG;
+ goto err;
+ }
+
+ if (QAM_BEFORE_FIRST(meta, recno))
+ meta->first_recno = recno;
+
+ /* Lock the record and release meta page lock. */
+ if ((ret = __db_lget(dbc,
+ 1, recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+ goto err;
+
+ /*
+ * The application may modify the data based on the selected record
+ * number.
+ */
+ if (flags == DB_APPEND && dbc->dbp->db_append_recno != NULL &&
+ (ret = dbc->dbp->db_append_recno(dbc->dbp, data, recno)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ cp->lock = lock;
+ cp->lock_mode = DB_LOCK_WRITE;
+
+ pg = QAM_RECNO_PAGE(dbp, recno);
+
+ /* Fetch and write lock the data page. */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = __qam_fget(dbp, &pg, DB_MPOOL_CREATE, &page)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ /* See if this is a new page. */
+ if (page->pgno == 0) {
+ page->pgno = pg;
+ page->type = P_QAMDATA;
+ }
+
+ /* Put the item on the page and log it. */
+ ret = __qam_pitem(dbc, page,
+ QAM_RECNO_INDEX(dbp, pg, recno), recno, data);
+
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret
+ = __qam_fput(dbp, pg, page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Return the record number to the user. */
+ if (ret == 0)
+ ret = __db_retcopy(dbp, key,
+ &recno, sizeof(recno), &dbc->rkey.data, &dbc->rkey.ulen);
+
+ /* See if we are leaving the extent. */
+ qp = (QUEUE *) dbp->q_internal;
+ if (qp->page_ext != 0
+ && (recno % (qp->page_ext * qp->rec_page) == 0
+ || recno == UINT32_T_MAX)) {
+ if ((ret =
+ __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if (!QAM_AFTER_CURRENT(meta, recno))
+ ret = __qam_fclose(dbp, pg);
+ (void)__LPUT(dbc, lock);
+ }
+
+err:
+ /* Release the meta page. */
+ if ((t_ret
+ = memp_fput(dbp->mpf, meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+done:
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __qam_c_del --
+ * Qam cursor->am_del function
+ */
+static int
+__qam_c_del(dbc)
+ DBC *dbc;
+{
+ QUEUE_CURSOR *cp;
+ DB *dbp;
+ DBT data;
+ DB_LOCK lock;
+ PAGE *pagep;
+ QAMDATA *qp;
+ QMETA *meta;
+ db_pgno_t pg;
+ int exact, ret, t_ret;
+
+ dbp = dbc->dbp;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_READ, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(dbp->mpf, &pg, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+
+ if (QAM_NOT_VALID(meta, cp->recno))
+ ret = DB_NOTFOUND;
+
+ /* Don't hold the meta page long term. */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = memp_fput(dbp->mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ return (ret);
+
+ if ((ret = __db_lget(dbc,
+ 0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+ return (ret);
+
+ cp->lock_mode = DB_LOCK_WRITE;
+	/* Find the record; delete only deletes exact matches. */
+ if ((ret = __qam_position(dbc,
+ &cp->recno, QAM_WRITE, &exact)) != 0) {
+ cp->lock = lock;
+ return (ret);
+ }
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err1;
+ }
+
+ pagep = cp->page;
+ qp = QAM_GET_RECORD(dbp, pagep, cp->indx);
+
+ if (DB_LOGGING(dbc)) {
+ if (((QUEUE *)dbp->q_internal)->page_ext == 0
+ || ((QUEUE *)dbp->q_internal)->re_len == 0) {
+ if ((ret =
+ __qam_del_log(dbp->dbenv,
+ dbc->txn, &LSN(pagep), 0,
+ dbp->log_fileid, &LSN(pagep),
+ pagep->pgno, cp->indx, cp->recno)) != 0)
+ goto err1;
+ } else {
+ data.size = ((QUEUE *)dbp->q_internal)->re_len;
+ data.data = qp->data;
+ if ((ret =
+ __qam_delext_log(dbp->dbenv, dbc->txn,
+ &LSN(pagep), 0, dbp->log_fileid, &LSN(pagep),
+ pagep->pgno, cp->indx, cp->recno, &data)) != 0)
+ goto err1;
+ }
+ }
+
+ F_CLR(qp, QAM_VALID);
+
+err1:
+ if ((t_ret = __qam_fput(
+ dbp, cp->pgno, cp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0)
+ return (ret ? ret : t_ret);
+ cp->page = NULL;
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, cp->lock)) != 0) {
+ cp->lock = lock;
+ return (ret ? ret : t_ret);
+ }
+ cp->lock = lock;
+ return (ret);
+}
+
+/*
+ * __qam_delete --
+ * Queue db->del function.
+ *
+ * PUBLIC: int __qam_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ */
+int
+__qam_delete(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ QUEUE_CURSOR *cp;
+ DBC *dbc;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_CHECK_TXN(dbp, txn);
+
+ /* Check for invalid flags. */
+ if ((ret =
+ __db_delchk(dbp, key, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
+ return (ret);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, txn, "qam_delete", key, NULL, flags);
+
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
+ goto err;
+
+ ret = __qam_c_del(dbc);
+
+ /* Release the cursor. */
+err: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+#ifdef DEBUG_WOP
+#define QDEBUG
+#endif
+
+/*
+ * __qam_c_get --
+ * Queue cursor->c_get function.
+ */
+static int
+__qam_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DB_LOCK lock, pglock, metalock, save_lock;
+ DBT tmp;
+ PAGE *pg;
+ QAMDATA *qp;
+ QMETA *meta;
+ QUEUE *t;
+ QUEUE_CURSOR *cp;
+ db_indx_t save_indx;
+ db_lockmode_t lock_mode;
+ db_pgno_t metapno, save_page;
+ db_recno_t current, first, save_recno;
+ qam_position_mode mode;
+ u_int32_t rec_extent;
+ int exact, is_first, locked, ret, t_ret, wait, with_delete;
+ int put_mode, meta_dirty, retrying, skip_again, wrapped;
+
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ wait = 0;
+ with_delete = 0;
+ retrying = 0;
+ rec_extent = 0;
+ lock_mode = DB_LOCK_READ;
+ mode = QAM_READ;
+ put_mode = 0;
+ t_ret = 0;
+ *pgnop = 0;
+ pg = NULL;
+ skip_again = 0;
+
+ if (F_ISSET(dbc, DBC_RMW)) {
+ lock_mode = DB_LOCK_WRITE;
+ mode = QAM_WRITE;
+ }
+
+ if (flags == DB_CONSUME_WAIT) {
+ wait = 1;
+ flags = DB_CONSUME;
+ }
+ if (flags == DB_CONSUME) {
+ DB_CHECK_TXN(dbp, dbc->txn);
+ with_delete = 1;
+ flags = DB_FIRST;
+ lock_mode = DB_LOCK_WRITE;
+ mode = QAM_CONSUME;
+ }
+
+ DEBUG_LREAD(dbc, dbc->txn, "qam_c_get",
+ flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
+
+ is_first = 0;
+
+ t = (QUEUE *)dbp->q_internal;
+ /* get the meta page */
+ metapno = t->q_meta;
+ if ((ret = __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ return (ret);
+ locked = 1;
+ if ((ret = memp_fget(dbp->mpf, &metapno, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, metalock);
+ return (ret);
+ }
+
+ first = 0;
+
+ /* Make lint and friends happy. */
+ meta_dirty = 0;
+
+ /* Release any previous lock if not in a transaction. */
+ if (cp->lock.off != LOCK_INVALID) {
+ (void)__TLPUT(dbc, cp->lock);
+ cp->lock.off = LOCK_INVALID;
+ }
+
+retry: /* Update the record number. */
+ switch (flags) {
+ case DB_CURRENT:
+ break;
+ case DB_NEXT_DUP:
+ ret = DB_NOTFOUND;
+ goto err;
+ /* NOTREACHED */
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ if (cp->recno != RECNO_OOB) {
+ ++cp->recno;
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ cp->recno++;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ flags = DB_NEXT;
+ is_first = 1;
+
+ /* get the first record number */
+ cp->recno = first = meta->first_recno;
+
+ break;
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ if (cp->recno != RECNO_OOB) {
+ if (QAM_BEFORE_FIRST(meta, cp->recno)
+ || cp->recno == meta->first_recno) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ --cp->recno;
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ --cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ if (meta->first_recno == meta->cur_recno) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ cp->recno = meta->cur_recno - 1;
+ if (cp->recno == RECNO_OOB)
+ cp->recno--;
+ break;
+ case DB_GET_BOTH:
+ case DB_SET:
+ case DB_SET_RANGE:
+ if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__qam_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * Check to see if we are out of data. Current points to
+ * the first free slot.
+ */
+ if (cp->recno == meta->cur_recno ||
+ QAM_AFTER_CURRENT(meta, cp->recno)) {
+ ret = DB_NOTFOUND;
+ pg = NULL;
+ if (wait) {
+ flags = DB_FIRST;
+ /*
+			 * If first is not set, then we skipped a
+			 * locked record; go back and find it.
+			 * If we find a locked record again,
+			 * wait for it.
+ */
+ if (first == 0) {
+ retrying = 1;
+ goto retry;
+ }
+ if (CDB_LOCKING(dbp->dbenv)) {
+ if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ DB_LOCK_SWITCH, &dbc->lock_dbt,
+ DB_LOCK_WAIT, &dbc->mylock)) != 0)
+ goto err;
+ if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ DB_LOCK_UPGRADE, &dbc->lock_dbt, DB_LOCK_WRITE,
+ &dbc->mylock)) != 0)
+ goto err;
+ goto retry;
+ }
+ /*
+ * Wait for someone to update the meta page.
+ * This will probably mean there is something
+ * in the queue. We then go back up and
+ * try again.
+ */
+ if (locked == 0) {
+ if ((ret = __db_lget( dbc,
+ 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+ if (cp->recno != RECNO_OOB &&
+ !QAM_AFTER_CURRENT(meta, cp->recno))
+ goto retry;
+ }
+ if ((ret = __db_lget(dbc, 0, metapno,
+ DB_LOCK_WAIT, DB_LOCK_SWITCH, &metalock)) != 0)
+ goto err;
+ if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ DB_LOCK_UPGRADE, &dbc->lock_dbt, DB_LOCK_WRITE,
+ &metalock)) != 0)
+ goto err;
+ locked = 1;
+ goto retry;
+ }
+
+ goto err;
+ }
+
+ /* Don't hold the meta page long term. */
+ if (locked) {
+ if ((ret = __LPUT(dbc, metalock)) != 0)
+ goto err;
+ locked = 0;
+ }
+
+ /* Lock the record. */
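+	/*
+	 * For DB_CONSUME the first attempt uses DB_LOCK_NOWAIT so that a
+	 * record held by another consumer can be skipped; once we are
+	 * retrying we wait for the lock instead.
+	 */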
+ if ((ret = __db_lget(dbc, 0, cp->recno, lock_mode,
+ (with_delete && !retrying) ?
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD : DB_LOCK_RECORD,
+ &lock)) == DB_LOCK_NOTGRANTED && with_delete) {
+#ifdef QDEBUG
+ __db_logmsg(dbp->dbenv,
+ dbc->txn, "Queue S", 0, "%x %d %d %d",
+ dbc->locker, cp->recno, first, meta->first_recno);
+#endif
+ first = 0;
+ goto retry;
+ }
+
+ if (ret != 0)
+ goto err;
+
+ /*
+ * In the DB_FIRST or DB_LAST cases we must wait and then start over
+ * since the first/last may have moved while we slept.
+ * We release our locks and try again.
+ */
+ if ((!with_delete && is_first) || flags == DB_LAST) {
+ if ((ret =
+ __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ if (cp->recno !=
+ (is_first ? meta->first_recno : (meta->cur_recno - 1))) {
+ __LPUT(dbc, lock);
+ if (is_first)
+ flags = DB_FIRST;
+ locked = 1;
+ goto retry;
+ }
+ /* Don't hold the meta page long term. */
+ if ((ret = __LPUT(dbc, metalock)) != 0)
+ goto err;
+ }
+
+ /* Position the cursor on the record. */
+ if ((ret = __qam_position(dbc, &cp->recno, mode, &exact)) != 0) {
+ /* We cannot get the page, release the record lock. */
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ pg = cp->page;
+ pglock = cp->lock;
+ cp->lock = lock;
+ cp->lock_mode = lock_mode;
+
+ if (!exact) {
+ if (flags == DB_NEXT || flags == DB_NEXT_NODUP
+ || flags == DB_PREV || flags == DB_PREV_NODUP
+ || flags == DB_LAST) {
+ /* Release locks and try again. */
+ if (pg != NULL)
+ (void)__qam_fput(dbp, cp->pgno, pg, 0);
+ cp->page = pg = NULL;
+ (void)__LPUT(dbc, pglock);
+ (void)__LPUT(dbc, cp->lock);
+ if (flags == DB_LAST)
+ flags = DB_PREV;
+ if (!with_delete)
+ is_first = 0;
+ retrying = 0;
+ goto retry;
+ }
+ /* this is for the SET and SET_RANGE cases */
+ ret = DB_KEYEMPTY;
+ goto err1;
+ }
+
+ /* Return the key if the user didn't give us one. */
+ if (key != NULL && flags != DB_SET && flags != DB_GET_BOTH &&
+ (ret = __db_retcopy(dbp, key, &cp->recno, sizeof(cp->recno),
+ &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ goto err1;
+
+ if (key != NULL)
+ F_SET(key, DB_DBT_ISSET);
+
+ qp = QAM_GET_RECORD(dbp, pg, cp->indx);
+
+ /* Return the data item. */
+ if (flags == DB_GET_BOTH) {
+ /*
+ * Need to compare
+ */
+ tmp.data = qp->data;
+ tmp.size = t->re_len;
+ if ((ret = __bam_defcmp(dbp, data, &tmp)) != 0) {
+ ret = DB_NOTFOUND;
+ goto err1;
+ }
+ }
+ if (data != NULL && (ret = __db_retcopy(dbp, data,
+ qp->data, t->re_len, &dbc->rdata.data, &dbc->rdata.ulen)) != 0)
+ goto err1;
+
+ if (data != NULL)
+ F_SET(data, DB_DBT_ISSET);
+
+ /* Finally, if we are doing DB_CONSUME mark the record. */
+ if (with_delete) {
+ if (DB_LOGGING(dbc)) {
+ if (t->page_ext == 0 || t->re_len == 0) {
+ if ((ret = __qam_del_log(dbp->dbenv, dbc->txn,
+ &LSN(pg), 0, dbp->log_fileid, &LSN(pg),
+ pg->pgno, cp->indx, cp->recno)) != 0)
+ goto err1;
+ } else {
+ tmp.data = qp->data;
+ tmp.size = t->re_len;
+ if ((ret =
+ __qam_delext_log(dbp->dbenv, dbc->txn,
+ &LSN(pg), 0, dbp->log_fileid, &LSN(pg),
+ pg->pgno, cp->indx, cp->recno, &tmp)) != 0)
+ goto err1;
+ }
+ }
+
+ F_CLR(qp, QAM_VALID);
+ put_mode = DB_MPOOL_DIRTY;
+
+ if ((ret = __LPUT(dbc, pglock)) != 0)
+ goto err;
+
+ /*
+ * Now we need to update the metapage
+ * first pointer. If we have deleted
+ * the record that is pointed to by
+ * first_recno then we move it as far
+ * forward as we can without blocking.
+ * The metapage lock must be held for
+ * the whole scan otherwise someone could
+ * do a random insert behind where we are
+ * looking.
+ */
+
+ if (locked == 0 && (ret = __db_lget(
+ dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err1;
+ locked = 1;
+#ifdef QDEBUG
+ __db_logmsg(dbp->dbenv,
+ dbc->txn, "Queue D", 0, "%x %d %d %d",
+ dbc->locker, cp->recno, first, meta->first_recno);
+#endif
+ /*
+		 * See if we deleted the "first" record.  If
+		 * first is zero then we skipped something;
+		 * check whether first_recno has been moved past
+		 * that point to the record we deleted.
+ */
+ if (first == 0)
+ first = cp->recno;
+ if (first != meta->first_recno)
+ goto done;
+
+ save_page = cp->pgno;
+ save_indx = cp->indx;
+ save_recno = cp->recno;
+ save_lock = cp->lock;
+
+ /*
+ * If we skipped some deleted records, we need to
+ * reposition on the first one. Get a lock
+ * in case someone is trying to put it back.
+ */
+ if (first != cp->recno) {
+ ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
+ if (ret == DB_LOCK_NOTGRANTED) {
+ ret = 0;
+ goto done;
+ }
+ if (ret != 0)
+ goto err1;
+ if ((ret =
+ __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ goto err1;
+ cp->page = NULL;
+ put_mode = 0;
+ if ((ret = __qam_position(dbc,
+ &first, QAM_READ, &exact)) != 0 || exact != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err1;
+ }
+			if ((ret = __LPUT(dbc, lock)) != 0)
+ goto err1;
+ if ((ret = __LPUT(dbc, cp->lock)) != 0)
+ goto err1;
+ }
+
+ current = meta->cur_recno;
+ wrapped = 0;
+ if (first > current)
+ wrapped = 1;
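+		/* Records per extent file; zero when extents are not in use. */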
+ rec_extent = meta->page_ext * meta->rec_page;
+
+ /* Loop until we find a record or hit current */
+ for (;;) {
+ /*
+			 * Check whether we are moving off the extent and,
+			 * if so, remove the extent file.
+			 * If we are moving off a page we need to
+			 * discard the buffer.
+			 * Wait for any lagging readers to move off the
+			 * page.
+ */
+ if (rec_extent != 0
+ && ((exact = first % rec_extent == 0)
+ || first % meta->rec_page == 0
+ || first == UINT32_T_MAX)) {
+ if (exact == 1 && (ret = __db_lget(dbc,
+ 0, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0)
+ break;
+
+#ifdef QDEBUG
+ __db_logmsg(dbp->dbenv,
+ dbc->txn, "Queue R", 0, "%x %d %d %d",
+ dbc->locker, cp->pgno, first, meta->first_recno);
+#endif
+ put_mode |= DB_MPOOL_DISCARD;
+ if ((ret = __qam_fput(dbp,
+ cp->pgno, cp->page, put_mode)) != 0)
+ break;
+ cp->page = NULL;
+
+ if (exact == 1) {
+ ret = __qam_fremove(dbp, cp->pgno);
+ t_ret = __LPUT(dbc, cp->lock);
+ }
+ if (ret != 0)
+ break;
+ if (t_ret != 0) {
+ ret = t_ret;
+ break;
+ }
+ } else if ((ret =
+ __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ break;
+ cp->page = NULL;
+ first++;
+ if (first == RECNO_OOB) {
+ wrapped = 0;
+ first++;
+ }
+
+ /*
+			 * LOOP EXIT: stop when we reach the current
+			 * pointer.
+ */
+ if (!wrapped && first >= current)
+ break;
+
+ ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
+ if (ret == DB_LOCK_NOTGRANTED) {
+ ret = 0;
+ break;
+ }
+ if (ret != 0)
+ break;
+
+ if ((ret = __qam_position(dbc,
+ &first, QAM_READ, &exact)) != 0) {
+ (void)__LPUT(dbc, lock);
+ break;
+ }
+ put_mode = 0;
+			if ((ret = __LPUT(dbc, lock)) != 0
+			    || (ret = __LPUT(dbc, cp->lock)) != 0 || exact) {
+ if ((t_ret = __qam_fput(dbp, cp->pgno,
+ cp->page, put_mode)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ break;
+ }
+ }
+
+ cp->pgno = save_page;
+ cp->indx = save_indx;
+ cp->recno = save_recno;
+ cp->lock = save_lock;
+
+ /*
+ * We have advanced as far as we can.
+ * Advance first_recno to this point.
+ */
+ if (meta->first_recno != first) {
+#ifdef QDEBUG
+ __db_logmsg(dbp->dbenv, dbc->txn, "Queue M",
+ 0, "%x %d %d %d", dbc->locker, cp->recno,
+ first, meta->first_recno);
+#endif
+ if (DB_LOGGING(dbc))
+ if ((ret =
+ __qam_incfirst_log(dbp->dbenv,
+ dbc->txn, &meta->dbmeta.lsn, 0,
+ dbp->log_fileid, cp->recno)) != 0)
+ goto err;
+ meta->first_recno = first;
+ meta_dirty = 1;
+ }
+ }
+
+done:
+err1: if (cp->page != NULL) {
+ t_ret = __qam_fput(dbp, cp->pgno, cp->page, put_mode);
+
+ if (!ret)
+ ret = t_ret;
+ /* Doing record locking, release the page lock */
+ t_ret = __LPUT(dbc, pglock);
+ cp->page = NULL;
+ }
+
+err: if (!ret)
+ ret = t_ret;
+ if (meta) {
+
+ /* release the meta page */
+ t_ret = memp_fput(
+ dbp->mpf, meta, meta_dirty ? DB_MPOOL_DIRTY : 0);
+
+ if (!ret)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ if (locked)
+ t_ret = __LPUT(dbc, metalock);
+ }
+ DB_ASSERT(metalock.off == LOCK_INVALID);
+
+ /*
+ * There is no need to keep the record locked if we are
+ * not in a transaction.
+ */
+ if (t_ret == 0)
+ t_ret = __TLPUT(dbc, cp->lock);
+
+ return (ret ? ret : t_ret);
+}
+
+/*
+ * __qam_c_close --
+ * Close down the cursor from a single use.
+ */
+static int
+__qam_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ QUEUE_CURSOR *cp;
+
+ COMPQUIET(root_pgno, 0);
+ COMPQUIET(rmroot, NULL);
+
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ /* Discard any locks not acquired inside of a transaction. */
+ if (cp->lock.off != LOCK_INVALID) {
+ (void)__TLPUT(dbc, cp->lock);
+ cp->lock.off = LOCK_INVALID;
+ }
+
+ cp->page = NULL;
+ cp->pgno = PGNO_INVALID;
+ cp->indx = 0;
+ cp->lock.off = LOCK_INVALID;
+ cp->lock_mode = DB_LOCK_NG;
+ cp->recno = RECNO_OOB;
+ cp->flags = 0;
+
+ return (0);
+}
+
+/*
+ * __qam_c_dup --
+ * Duplicate a queue cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __qam_c_dup __P((DBC *, DBC *));
+ */
+int
+__qam_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ QUEUE_CURSOR *orig, *new;
+
+ orig = (QUEUE_CURSOR *)orig_dbc->internal;
+ new = (QUEUE_CURSOR *)new_dbc->internal;
+
+ new->recno = orig->recno;
+
+	/* Re-acquire the long-term lock if we are not in a transaction. */
+ if (orig_dbc->txn != NULL ||
+ !STD_LOCKING(orig_dbc) || orig->lock.off == LOCK_INVALID)
+ return (0);
+
+ return (__db_lget(new_dbc,
+ 0, new->recno, new->lock_mode, DB_LOCK_RECORD, &new->lock));
+}
+
+/*
+ * __qam_c_init
+ *
+ * PUBLIC: int __qam_c_init __P((DBC *));
+ */
+int
+__qam_c_init(dbc)
+ DBC *dbc;
+{
+ QUEUE_CURSOR *cp;
+ DB *dbp;
+ int ret;
+
+ dbp = dbc->dbp;
+
+ /* Allocate the internal structure. */
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ if (cp == NULL) {
+ if ((ret =
+ __os_calloc(dbp->dbenv, 1, sizeof(QUEUE_CURSOR), &cp)) != 0)
+ return (ret);
+ dbc->internal = (DBC_INTERNAL *)cp;
+ }
+
+ /* Initialize methods. */
+ dbc->c_close = __db_c_close;
+ dbc->c_count = __db_c_count;
+ dbc->c_del = __db_c_del;
+ dbc->c_dup = __db_c_dup;
+ dbc->c_get = __db_c_get;
+ dbc->c_put = __db_c_put;
+ dbc->c_am_close = __qam_c_close;
+ dbc->c_am_del = __qam_c_del;
+ dbc->c_am_destroy = __qam_c_destroy;
+ dbc->c_am_get = __qam_c_get;
+ dbc->c_am_put = __qam_c_put;
+ dbc->c_am_writelock = NULL;
+
+ return (0);
+}
+
+/*
+ * __qam_c_destroy --
+ * Close a single cursor -- internal version.
+ */
+static int
+__qam_c_destroy(dbc)
+ DBC *dbc;
+{
+ /* Discard the structures. */
+ __os_free(dbc->internal, sizeof(QUEUE_CURSOR));
+
+ return (0);
+}
+
+/*
+ * __qam_getno --
+ * Check the user's record number.
+ */
+static int
+__qam_getno(dbp, key, rep)
+ DB *dbp;
+ const DBT *key;
+ db_recno_t *rep;
+{
+ if ((*rep = *(db_recno_t *)key->data) == 0) {
+ __db_err(dbp->dbenv, "illegal record number of 0");
+ return (EINVAL);
+ }
+ return (0);
+}
diff --git a/bdb/qam/qam.src b/bdb/qam/qam.src
new file mode 100644
index 00000000000..507d7a65229
--- /dev/null
+++ b/bdb/qam/qam.src
@@ -0,0 +1,124 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: qam.src,v 11.15 2001/01/16 20:10:55 ubell Exp $
+ */
+
+PREFIX qam
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "db_page.h"
+INCLUDE #include "db_dispatch.h"
+INCLUDE #include "db_am.h"
+INCLUDE #include "qam.h"
+INCLUDE #include "txn.h"
+INCLUDE
+
+/*
+ * inc
+ * Used when we increment a record number. These do not actually
+ * tell you what record number you got, just that you incremented
+ * the record number. These operations are never undone.
+ */
+BEGIN inc 76
+ARG fileid int32_t ld
+POINTER lsn DB_LSN * lu
+END
+
+/*
+ * incfirst
+ * Used when we increment first_recno.
+ */
+BEGIN incfirst 77
+ARG fileid int32_t ld
+ARG recno db_recno_t lu
+END
+
+/*
+ * mvptr
+ * Used when we change one or both of cur_recno and first_recno.
+ */
+BEGIN mvptr 78
+ARG opcode u_int32_t lu
+ARG fileid int32_t ld
+ARG old_first db_recno_t lu
+ARG new_first db_recno_t lu
+ARG old_cur db_recno_t lu
+ARG new_cur db_recno_t lu
+POINTER metalsn DB_LSN * lu
+END
+
+/*
+ * del
+ * Used when we delete a record.
+ * recno is the record that is being deleted.
+ */
+BEGIN del 79
+ARG fileid int32_t ld
+POINTER lsn DB_LSN * lu
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+END
+
+/*
+ * add
+ * Used when we put a record on a page.
+ * recno is the record being added.
+ * data is the record itself.
+ */
+BEGIN add 80
+ARG fileid int32_t ld
+POINTER lsn DB_LSN * lu
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+DBT data DBT s
+ARG vflag u_int32_t lu
+DBT olddata DBT s
+END
+
+/*
+ * delete
+ * Used when we remove a Queue extent file.
+ */
+BEGIN delete 81
+DBT name DBT s
+POINTER lsn DB_LSN * lu
+END
+
+/*
+ * rename
+ * Used when we rename a Queue extent file.
+ */
+BEGIN rename 82
+DBT name DBT s
+DBT newname DBT s
+END
+
+/*
+ * delext
+ * Used when we delete a record in an extent-based queue.
+ * recno is the record that is being deleted.
+ */
+BEGIN delext 83
+ARG fileid int32_t ld
+POINTER lsn DB_LSN * lu
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+DBT data DBT s
+END
diff --git a/bdb/qam/qam_auto.c b/bdb/qam/qam_auto.c
new file mode 100644
index 00000000000..cfdba3195eb
--- /dev/null
+++ b/bdb/qam/qam_auto.c
@@ -0,0 +1,1282 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "qam.h"
+#include "txn.h"
+
+int
+__qam_inc_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ DB_LSN * lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_qam_inc;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(*lsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
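+	/*
+	 * Record layout: record type, transaction id, previous LSN,
+	 * then each field in the order declared in qam.src.
+	 */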
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__qam_inc_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_inc_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_inc_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]qam_inc: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__qam_inc_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_inc_args **argpp;
+{
+ __qam_inc_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__qam_inc_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__qam_incfirst_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, recno)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ db_recno_t recno;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_qam_incfirst;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(recno);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &recno, sizeof(recno));
+ bp += sizeof(recno);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__qam_incfirst_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_incfirst_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_incfirst_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]qam_incfirst: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\trecno: %lu\n", (u_long)argp->recno);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__qam_incfirst_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_incfirst_args **argpp;
+{
+ __qam_incfirst_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__qam_incfirst_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->recno, bp, sizeof(argp->recno));
+ bp += sizeof(argp->recno);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__qam_mvptr_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, fileid, old_first, new_first, old_cur, new_cur,
+ metalsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_recno_t old_first;
+ db_recno_t new_first;
+ db_recno_t old_cur;
+ db_recno_t new_cur;
+ DB_LSN * metalsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_qam_mvptr;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(fileid)
+ + sizeof(old_first)
+ + sizeof(new_first)
+ + sizeof(old_cur)
+ + sizeof(new_cur)
+ + sizeof(*metalsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ memcpy(bp, &old_first, sizeof(old_first));
+ bp += sizeof(old_first);
+ memcpy(bp, &new_first, sizeof(new_first));
+ bp += sizeof(new_first);
+ memcpy(bp, &old_cur, sizeof(old_cur));
+ bp += sizeof(old_cur);
+ memcpy(bp, &new_cur, sizeof(new_cur));
+ bp += sizeof(new_cur);
+ if (metalsn != NULL)
+ memcpy(bp, metalsn, sizeof(*metalsn));
+ else
+ memset(bp, 0, sizeof(*metalsn));
+ bp += sizeof(*metalsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__qam_mvptr_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_mvptr_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_mvptr_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]qam_mvptr: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\told_first: %lu\n", (u_long)argp->old_first);
+ printf("\tnew_first: %lu\n", (u_long)argp->new_first);
+ printf("\told_cur: %lu\n", (u_long)argp->old_cur);
+ printf("\tnew_cur: %lu\n", (u_long)argp->new_cur);
+ printf("\tmetalsn: [%lu][%lu]\n",
+ (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__qam_mvptr_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_mvptr_args **argpp;
+{
+ __qam_mvptr_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__qam_mvptr_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->old_first, bp, sizeof(argp->old_first));
+ bp += sizeof(argp->old_first);
+ memcpy(&argp->new_first, bp, sizeof(argp->new_first));
+ bp += sizeof(argp->new_first);
+ memcpy(&argp->old_cur, bp, sizeof(argp->old_cur));
+ bp += sizeof(argp->old_cur);
+ memcpy(&argp->new_cur, bp, sizeof(argp->new_cur));
+ bp += sizeof(argp->new_cur);
+ memcpy(&argp->metalsn, bp, sizeof(argp->metalsn));
+ bp += sizeof(argp->metalsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__qam_del_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, lsn, pgno, indx, recno)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ DB_LSN * lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_qam_del;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(*lsn)
+ + sizeof(pgno)
+ + sizeof(indx)
+ + sizeof(recno);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &recno, sizeof(recno));
+ bp += sizeof(recno);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__qam_del_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_del_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_del_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]qam_del: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\trecno: %lu\n", (u_long)argp->recno);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__qam_del_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_del_args **argpp;
+{
+ __qam_del_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__qam_del_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->recno, bp, sizeof(argp->recno));
+ bp += sizeof(argp->recno);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__qam_add_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, lsn, pgno, indx, recno, data,
+ vflag, olddata)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ DB_LSN * lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ const DBT *data;
+ u_int32_t vflag;
+ const DBT *olddata;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_qam_add;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(*lsn)
+ + sizeof(pgno)
+ + sizeof(indx)
+ + sizeof(recno)
+ + sizeof(u_int32_t) + (data == NULL ? 0 : data->size)
+ + sizeof(vflag)
+ + sizeof(u_int32_t) + (olddata == NULL ? 0 : olddata->size);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &recno, sizeof(recno));
+ bp += sizeof(recno);
+ if (data == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &data->size, sizeof(data->size));
+ bp += sizeof(data->size);
+ memcpy(bp, data->data, data->size);
+ bp += data->size;
+ }
+ memcpy(bp, &vflag, sizeof(vflag));
+ bp += sizeof(vflag);
+ if (olddata == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &olddata->size, sizeof(olddata->size));
+ bp += sizeof(olddata->size);
+ memcpy(bp, olddata->data, olddata->size);
+ bp += olddata->size;
+ }
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__qam_add_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_add_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_add_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]qam_add: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\trecno: %lu\n", (u_long)argp->recno);
+ printf("\tdata: ");
+ for (i = 0; i < argp->data.size; i++) {
+ ch = ((u_int8_t *)argp->data.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tvflag: %lu\n", (u_long)argp->vflag);
+ printf("\tolddata: ");
+ for (i = 0; i < argp->olddata.size; i++) {
+ ch = ((u_int8_t *)argp->olddata.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__qam_add_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_add_args **argpp;
+{
+ __qam_add_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__qam_add_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->recno, bp, sizeof(argp->recno));
+ bp += sizeof(argp->recno);
+ memset(&argp->data, 0, sizeof(argp->data));
+ memcpy(&argp->data.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->data.data = bp;
+ bp += argp->data.size;
+ memcpy(&argp->vflag, bp, sizeof(argp->vflag));
+ bp += sizeof(argp->vflag);
+ memset(&argp->olddata, 0, sizeof(argp->olddata));
+ memcpy(&argp->olddata.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->olddata.data = bp;
+ bp += argp->olddata.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__qam_delete_log(dbenv, txnid, ret_lsnp, flags,
+ name, lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ DB_LSN * lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_qam_delete;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(*lsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__qam_delete_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_delete_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_delete_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]qam_delete: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__qam_delete_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_delete_args **argpp;
+{
+ __qam_delete_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__qam_delete_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__qam_rename_log(dbenv, txnid, ret_lsnp, flags,
+ name, newname)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ const DBT *newname;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_qam_rename;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t) + (newname == NULL ? 0 : newname->size);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+ if (newname == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &newname->size, sizeof(newname->size));
+ bp += sizeof(newname->size);
+ memcpy(bp, newname->data, newname->size);
+ bp += newname->size;
+ }
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__qam_rename_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_rename_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_rename_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]qam_rename: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tnewname: ");
+ for (i = 0; i < argp->newname.size; i++) {
+ ch = ((u_int8_t *)argp->newname.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__qam_rename_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_rename_args **argpp;
+{
+ __qam_rename_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__qam_rename_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+ memset(&argp->newname, 0, sizeof(argp->newname));
+ memcpy(&argp->newname.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->newname.data = bp;
+ bp += argp->newname.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__qam_delext_log(dbenv, txnid, ret_lsnp, flags,
+ fileid, lsn, pgno, indx, recno, data)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ int32_t fileid;
+ DB_LSN * lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ const DBT *data;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_qam_delext;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(fileid)
+ + sizeof(*lsn)
+ + sizeof(pgno)
+ + sizeof(indx)
+ + sizeof(recno)
+ + sizeof(u_int32_t) + (data == NULL ? 0 : data->size);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &fileid, sizeof(fileid));
+ bp += sizeof(fileid);
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+ memcpy(bp, &pgno, sizeof(pgno));
+ bp += sizeof(pgno);
+ memcpy(bp, &indx, sizeof(indx));
+ bp += sizeof(indx);
+ memcpy(bp, &recno, sizeof(recno));
+ bp += sizeof(recno);
+ if (data == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &data->size, sizeof(data->size));
+ bp += sizeof(data->size);
+ memcpy(bp, data->data, data->size);
+ bp += data->size;
+ }
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__qam_delext_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_delext_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_delext_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]qam_delext: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tfileid: %ld\n", (long)argp->fileid);
+ printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ printf("\tindx: %lu\n", (u_long)argp->indx);
+ printf("\trecno: %lu\n", (u_long)argp->recno);
+ printf("\tdata: ");
+ for (i = 0; i < argp->data.size; i++) {
+ ch = ((u_int8_t *)argp->data.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__qam_delext_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_delext_args **argpp;
+{
+ __qam_delext_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__qam_delext_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->fileid, bp, sizeof(argp->fileid));
+ bp += sizeof(argp->fileid);
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+ memcpy(&argp->pgno, bp, sizeof(argp->pgno));
+ bp += sizeof(argp->pgno);
+ memcpy(&argp->indx, bp, sizeof(argp->indx));
+ bp += sizeof(argp->indx);
+ memcpy(&argp->recno, bp, sizeof(argp->recno));
+ bp += sizeof(argp->recno);
+ memset(&argp->data, 0, sizeof(argp->data));
+ memcpy(&argp->data.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->data.data = bp;
+ bp += argp->data.size;
+ *argpp = argp;
+ return (0);
+}
+
+int
+__qam_init_print(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_inc_print, DB_qam_inc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_incfirst_print, DB_qam_incfirst)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_mvptr_print, DB_qam_mvptr)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_del_print, DB_qam_del)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_add_print, DB_qam_add)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_delete_print, DB_qam_delete)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_rename_print, DB_qam_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_delext_print, DB_qam_delext)) != 0)
+ return (ret);
+ return (0);
+}
+
+int
+__qam_init_recover(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_inc_recover, DB_qam_inc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_incfirst_recover, DB_qam_incfirst)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_mvptr_recover, DB_qam_mvptr)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_del_recover, DB_qam_del)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_add_recover, DB_qam_add)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_delete_recover, DB_qam_delete)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_rename_recover, DB_qam_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __qam_delext_recover, DB_qam_delext)) != 0)
+ return (ret);
+ return (0);
+}
+
diff --git a/bdb/qam/qam_conv.c b/bdb/qam/qam_conv.c
new file mode 100644
index 00000000000..2eb1c7227e6
--- /dev/null
+++ b/bdb/qam/qam_conv.c
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_conv.c,v 11.6 2000/11/16 23:40:57 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "qam.h"
+#include "db_swap.h"
+#include "db_am.h"
+
+/*
+ * __qam_mswap --
+ * Swap the bytes on the queue metadata page.
+ *
+ * PUBLIC: int __qam_mswap __P((PAGE *));
+ */
+int
+__qam_mswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *p;
+
+ __db_metaswap(pg);
+
+ p = (u_int8_t *)pg + sizeof(DBMETA);
+
+ SWAP32(p); /* first_recno */
+ SWAP32(p); /* cur_recno */
+ SWAP32(p); /* re_len */
+ SWAP32(p); /* re_pad */
+ SWAP32(p); /* rec_page */
+ SWAP32(p); /* page_ext */
+
+ return (0);
+}
+
+/*
+ * __qam_pgin_out --
+ * Convert host-specific page layout to/from the host-independent format
+ * stored on disk.
+ * We only need to fix up a few fields in the header
+ *	We only need to fix up a few fields in the header.
+ * PUBLIC: int __qam_pgin_out __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__qam_pgin_out(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ QPAGE *h;
+
+ COMPQUIET(pg, 0);
+ COMPQUIET(dbenv, NULL);
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!pginfo->needswap)
+ return (0);
+
+ h = pp;
+ if (h->type == P_QAMMETA)
+ return (__qam_mswap(pp));
+
+ M_32_SWAP(h->lsn.file);
+ M_32_SWAP(h->lsn.offset);
+ M_32_SWAP(h->pgno);
+
+ return (0);
+}
diff --git a/bdb/qam/qam_files.c b/bdb/qam/qam_files.c
new file mode 100644
index 00000000000..e53a3bf24c0
--- /dev/null
+++ b/bdb/qam/qam_files.c
@@ -0,0 +1,503 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_files.c,v 1.16 2001/01/19 18:01:59 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "db_am.h"
+#include "lock.h"
+#include "btree.h"
+#include "qam.h"
+#include "mp.h"
+
+/*
+ * __qam_fprobe -- calculate and open extent
+ *
+ * Calculate which extent the page is in, open and create
+ * if necessary.
+ *
+ * PUBLIC: int __qam_fprobe __P((DB *, db_pgno_t, void *, qam_probe_mode, int));
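+ *
+ * A typical get-style call looks something like this (sketch only;
+ * "pagep" and "pgno" are the caller's page pointer and page number):
+ *
+ *	if ((ret = __qam_fprobe(dbp,
+ *	    pgno, &pagep, QAM_PROBE_GET, DB_MPOOL_CREATE)) != 0)
+ *		return (ret);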
+ */
+
+int
+__qam_fprobe(dbp, pgno, addrp, mode, flags)
+ DB *dbp;
+ db_pgno_t pgno;
+ void *addrp;
+ qam_probe_mode mode;
+ int flags;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ u_int32_t extid, maxext;
+ char buf[256];
+ int numext, offset, oldext, openflags, ret;
+
+ qp = (QUEUE *)dbp->q_internal;
+ if (qp->page_ext == 0) {
+ mpf = dbp->mpf;
+ if (mode == QAM_PROBE_GET)
+ return (memp_fget(mpf, &pgno, flags, addrp));
+ return (memp_fput(mpf, addrp, flags));
+ }
+
+ dbenv = dbp->dbenv;
+ mpf = NULL;
+ ret = 0;
+
+ /*
+ * Need to lock long enough to find the mpf or create the file.
+ * The file cannot go away because we must have a record locked
+ * in that file.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
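+	/* Page numbers are 1-based: extent 0 holds pages 1..page_ext. */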
+ extid = (pgno - 1) / qp->page_ext;
+
+ /* Array1 will always be in use if array2 is in use. */
+ array = &qp->array1;
+ if (array->n_extent == 0) {
+ /* Start with 4 extents */
+ oldext = 0;
+ array->n_extent = 4;
+ array->low_extent = extid;
+ offset = 0;
+ numext = 0;
+ goto alloc;
+ }
+
+ offset = extid - qp->array1.low_extent;
+ if (qp->array2.n_extent != 0 &&
+ abs(offset) > abs(extid - qp->array2.low_extent)) {
+ array = &qp->array2;
+ offset = extid - array->low_extent;
+ }
+
+ /*
+ * Check to see if the requested extent is outside the range of
+	 * extents in the array.  This is true by default if there are
+ * no extents here yet.
+ */
+ if (offset < 0 || (unsigned) offset >= array->n_extent) {
+ oldext = array->n_extent;
+ numext = array->hi_extent - array->low_extent + 1;
+ if (offset < 0
+ && (unsigned) -offset + numext <= array->n_extent) {
+ /* If we can fit this one in, move the array up */
+ memmove(&array->mpfarray[-offset],
+ array->mpfarray, numext
+ * sizeof(array->mpfarray[0]));
+ memset(array->mpfarray, 0, -offset
+ * sizeof(array->mpfarray[0]));
+ offset = 0;
+ } else if ((u_int32_t)offset == array->n_extent &&
+ mode != QAM_PROBE_MPF && array->mpfarray[0].pinref == 0) {
+ /* We can close the bottom extent. */
+ mpf = array->mpfarray[0].mpf;
+ if (mpf != NULL && (ret = memp_fclose(mpf)) != 0)
+ goto err;
+ memmove(&array->mpfarray[0], &array->mpfarray[1],
+ (array->n_extent - 1) * sizeof (array->mpfarray[0]));
+ array->low_extent++;
+ array->hi_extent++;
+ offset--;
+ array->mpfarray[offset].mpf = NULL;
+ array->mpfarray[offset].pinref = 0;
+ } else {
+ /* See if we have wrapped around the queue. */
+ maxext = (u_int32_t) UINT32_T_MAX
+ / (qp->page_ext * qp->rec_page);
+ if ((u_int32_t) abs(offset) >= maxext/2) {
+ array = &qp->array2;
+ DB_ASSERT(array->n_extent == 0);
+ oldext = 0;
+ array->n_extent = 4;
+ array->low_extent = extid;
+ offset = 0;
+ numext = 0;
+ } else {
+ /*
+ * Increase the size to at least include
+			 * the new one, then quadruple it.
+ */
+ array->n_extent += abs(offset);
+ array->n_extent <<= 2;
+ }
+ alloc:
+ if ((ret = __os_realloc(dbenv,
+ array->n_extent * sizeof(struct __qmpf),
+ NULL, &array->mpfarray)) != 0)
+ goto err;
+
+ if (offset < 0) {
+ offset = -offset;
+ memmove(&array->mpfarray[offset], array->mpfarray,
+ numext * sizeof(array->mpfarray[0]));
+ memset(array->mpfarray, 0,
+ offset * sizeof(array->mpfarray[0]));
+ memset(&array->mpfarray[numext + offset], 0,
+ (array->n_extent - (numext + offset))
+ * sizeof(array->mpfarray[0]));
+ offset = 0;
+ }
+ else
+ memset(&array->mpfarray[oldext], 0,
+ (array->n_extent - oldext) *
+ sizeof(array->mpfarray[0]));
+ }
+ }
+
+ if (extid < array->low_extent)
+ array->low_extent = extid;
+ if (extid > array->hi_extent)
+ array->hi_extent = extid;
+ if (array->mpfarray[offset].mpf == NULL) {
+ snprintf(buf,
+ sizeof(buf), QUEUE_EXTENT, qp->dir, qp->name, extid);
+ openflags = DB_EXTENT;
+ if (LF_ISSET(DB_MPOOL_CREATE))
+ openflags |= DB_CREATE;
+ if (F_ISSET(dbp, DB_AM_RDONLY))
+ openflags |= DB_RDONLY;
+ qp->finfo.fileid = NULL;
+ if ((ret = __memp_fopen(dbenv->mp_handle,
+ NULL, buf, openflags, qp->mode, dbp->pgsize,
+ 1, &qp->finfo, &array->mpfarray[offset].mpf)) != 0)
+ goto err;
+ }
+
+ mpf = array->mpfarray[offset].mpf;
+ if (mode == QAM_PROBE_GET)
+ array->mpfarray[offset].pinref++;
+ if (LF_ISSET(DB_MPOOL_CREATE))
+ __memp_clear_unlink(mpf);
+
+err:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ if (ret == 0) {
+ if (mode == QAM_PROBE_MPF) {
+ *(DB_MPOOLFILE **)addrp = mpf;
+ return (0);
+ }
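+		/* Map the page number to its page within the extent file. */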
+ pgno--;
+ pgno %= qp->page_ext;
+ if (mode == QAM_PROBE_GET)
+ return (memp_fget(mpf,
+ &pgno, flags | DB_MPOOL_EXTENT, addrp));
+ ret = memp_fput(mpf, addrp, flags);
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ array->mpfarray[offset].pinref--;
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ return (ret);
+}
+
+/*
+ * __qam_fclose -- close an extent.
+ *
+ * Calculate which extent the page is in and close it.
+ * We assume the mpf entry is present.
+ *
+ * PUBLIC: int __qam_fclose __P((DB *, db_pgno_t));
+ */
+
+int
+__qam_fclose(dbp, pgnoaddr)
+ DB *dbp;
+ db_pgno_t pgnoaddr;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ u_int32_t extid;
+ int offset, ret;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ qp = (QUEUE *)dbp->q_internal;
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ extid = (pgnoaddr - 1) / qp->page_ext;
+ array = &qp->array1;
+ if (array->low_extent > extid || array->hi_extent < extid)
+ array = &qp->array2;
+ offset = extid - array->low_extent;
+
+ DB_ASSERT(offset >= 0 && (unsigned) offset < array->n_extent);
+
+ /* If other threads are still using this file, leave it. */
+ if (array->mpfarray[offset].pinref != 0)
+ goto done;
+
+ mpf = array->mpfarray[offset].mpf;
+ array->mpfarray[offset].mpf = NULL;
+ ret = memp_fclose(mpf);
+
+done:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ return (ret);
+}
+/*
+ * __qam_fremove -- remove an extent.
+ *
+ * Calculate which extent the page is in and remove it. There is no way
+ * to remove an extent without probing it first and seeing that it is empty,
+ * so we assume the mpf entry is present.
+ *
+ * PUBLIC: int __qam_fremove __P((DB *, db_pgno_t));
+ */
+
+int
+__qam_fremove(dbp, pgnoaddr)
+ DB *dbp;
+ db_pgno_t pgnoaddr;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ u_int32_t extid;
+#if CONFIG_TEST
+ char buf[256], *real_name;
+#endif
+ int offset, ret;
+
+ qp = (QUEUE *)dbp->q_internal;
+ dbenv = dbp->dbenv;
+ ret = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ extid = (pgnoaddr - 1) / qp->page_ext;
+ array = &qp->array1;
+ if (array->low_extent > extid || array->hi_extent < extid)
+ array = &qp->array2;
+ offset = extid - array->low_extent;
+
+ DB_ASSERT(offset >= 0 && (unsigned) offset < array->n_extent);
+
+#if CONFIG_TEST
+ real_name = NULL;
+ /* Find the real name of the file. */
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, qp->dir, qp->name, extid);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, buf, 0, NULL, &real_name)) != 0)
+ goto err;
+#endif
+ mpf = array->mpfarray[offset].mpf;
+ array->mpfarray[offset].mpf = NULL;
+ __memp_set_unlink(mpf);
+ if ((ret = memp_fclose(mpf)) != 0)
+ goto err;
+
+ if (offset == 0) {
+ memmove(array->mpfarray, &array->mpfarray[1],
+ (array->hi_extent - array->low_extent)
+ * sizeof(array->mpfarray[0]));
+ array->mpfarray[array->hi_extent - array->low_extent].mpf = NULL;
+ if (array->low_extent != array->hi_extent)
+ array->low_extent++;
+ } else {
+ if (extid == array->hi_extent)
+ array->hi_extent--;
+ }
+
+err:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+#if CONFIG_TEST
+ if (real_name != NULL)
+ __os_freestr(real_name);
+#endif
+ return (ret);
+}
+
+/*
+ * __qam_sync --
+ * Flush the database cache.
+ *
+ * PUBLIC: int __qam_sync __P((DB *, u_int32_t));
+ */
+int
+__qam_sync(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ QUEUE_FILELIST *filelist;
+ struct __qmpf *mpfp;
+ u_int32_t i;
+ int done, ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync");
+
+ if ((ret = __db_syncchk(dbp, flags)) != 0)
+ return (ret);
+
+ /* Read-only trees never need to be sync'd. */
+ if (F_ISSET(dbp, DB_AM_RDONLY))
+ return (0);
+
+ /* If the tree was never backed by a database file, we're done. */
+ if (F_ISSET(dbp, DB_AM_INMEM))
+ return (0);
+
+ /* Flush any dirty pages from the cache to the backing file. */
+ if ((ret = memp_fsync(dbp->mpf)) != 0)
+ return (ret);
+
+ qp = (QUEUE *)dbp->q_internal;
+ if (qp->page_ext == 0)
+ return (0);
+
+ /* We do this for the side effect of opening all active extents. */
+ if ((ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ return (ret);
+
+ if (filelist == NULL)
+ return (0);
+
+ __os_free(filelist, 0);
+
+ done = 0;
+ qp = (QUEUE *)dbp->q_internal;
+ array = &qp->array1;
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
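+	/* Flush each open extent; a second pass covers array2 if in use. */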
+again:
+ mpfp = array->mpfarray;
+ for (i = array->low_extent; i <= array->hi_extent; i++, mpfp++)
+ if ((mpf = mpfp->mpf) != NULL) {
+ if ((ret = memp_fsync(mpf)) != 0)
+ goto err;
+ /*
+ * If we are the only ones with this file open
+ * then close it so it might be removed.
+ */
+ if (mpfp->pinref == 0) {
+ mpfp->mpf = NULL;
+ if ((ret = memp_fclose(mpf)) != 0)
+ goto err;
+ }
+ }
+
+ if (done == 0 && qp->array2.n_extent != 0) {
+ array = &qp->array2;
+ done = 1;
+ goto again;
+ }
+
+err:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ return (ret);
+}
+
+/*
+ * __qam_gen_filelist -- generate a list of extent files.
+ *	Another thread may close the handle, so this should only
+ *	be used single-threaded or with care.
+ *
+ * PUBLIC: int __qam_gen_filelist __P(( DB *, QUEUE_FILELIST **));
+ */
+int
+__qam_gen_filelist(dbp, filelistp)
+ DB *dbp;
+ QUEUE_FILELIST **filelistp;
+{
+ DB_ENV *dbenv;
+ QUEUE *qp;
+ QMETA *meta;
+ db_pgno_t i, last, start, stop;
+ db_recno_t current, first;
+ QUEUE_FILELIST *fp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ qp = (QUEUE *)dbp->q_internal;
+ *filelistp = NULL;
+ if (qp->page_ext == 0)
+ return (0);
+
+ /* This may happen during metapage recovery. */
+ if (qp->name == NULL)
+ return (0);
+
+ /* Find out the page number of the last page in the database. */
+ i = PGNO_BASE_MD;
+ if ((ret = memp_fget(dbp->mpf, &i, 0, &meta)) != 0) {
+ (void)dbp->close(dbp, 0);
+ return (ret);
+ }
+
+ current = meta->cur_recno;
+ first = meta->first_recno;
+
+ if ((ret = memp_fput(dbp->mpf, meta, 0)) != 0) {
+ (void)dbp->close(dbp, 0);
+ return (ret);
+ }
+
+ last = QAM_RECNO_PAGE(dbp, current);
+ start = QAM_RECNO_PAGE(dbp, first);
+
+ /* Allocate the worst case plus 1 for null termination. */
+ if (last >= start)
+ ret = last - start + 2;
+ else
+ ret = last + (QAM_RECNO_PAGE(dbp, UINT32_T_MAX) - start) + 1;
+ if ((ret = __os_calloc(dbenv,
+ ret, sizeof(QUEUE_FILELIST), filelistp)) != 0)
+ return (ret);
+ fp = *filelistp;
+ i = start;
+ if (last >= start)
+ stop = last;
+ else
+ stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX);
+again:
+	for (; i <= stop; i += qp->page_ext) {
+ if ((ret = __qam_fprobe(dbp,
+ i, &fp->mpf, QAM_PROBE_MPF, 0)) != 0) {
+ if (ret == ENOENT)
+ continue;
+ return (ret);
+ }
+ fp->id = (i - 1) / qp->page_ext;
+ fp++;
+ }
+
+ if (last < start) {
+ i = 1;
+ stop = last;
+ start = 0;
+ goto again;
+ }
+
+ return (0);
+}
diff --git a/bdb/qam/qam_method.c b/bdb/qam/qam_method.c
new file mode 100644
index 00000000000..1c94f4b8db0
--- /dev/null
+++ b/bdb/qam/qam_method.c
@@ -0,0 +1,472 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_method.c,v 11.17 2001/01/10 04:50:54 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_int.h"
+#include "db_shash.h"
+#include "db_am.h"
+#include "qam.h"
+#include "db.h"
+#include "mp.h"
+#include "lock.h"
+#include "log.h"
+
+static int __qam_set_extentsize __P((DB *, u_int32_t));
+static int __qam_remove_callback __P((DB *, void *));
+
+struct __qam_cookie {
+ DB_LSN lsn;
+ QUEUE_FILELIST *filelist;
+};
+
+/*
+ * __qam_db_create --
+ * Queue specific initialization of the DB structure.
+ *
+ * PUBLIC: int __qam_db_create __P((DB *));
+ */
+int
+__qam_db_create(dbp)
+ DB *dbp;
+{
+ QUEUE *t;
+ int ret;
+
+ /* Allocate and initialize the private queue structure. */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(QUEUE), &t)) != 0)
+ return (ret);
+ dbp->q_internal = t;
+ dbp->set_q_extentsize = __qam_set_extentsize;
+
+ t->re_pad = ' ';
+
+ return (0);
+}
+
+/*
+ * __qam_db_close --
+ * Queue specific discard of the DB structure.
+ *
+ * PUBLIC: int __qam_db_close __P((DB *));
+ */
+int
+__qam_db_close(dbp)
+ DB *dbp;
+{
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *t;
+ struct __qmpf *mpfp;
+ u_int32_t i;
+ int ret, t_ret;
+
+ ret = 0;
+ t = dbp->q_internal;
+
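+	/* Close any extent files still open in either mpf array. */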
+ array = &t->array1;
+again:
+ mpfp = array->mpfarray;
+ if (mpfp != NULL) {
+ for (i = array->low_extent;
+ i <= array->hi_extent; i++, mpfp++) {
+ mpf = mpfp->mpf;
+ mpfp->mpf = NULL;
+ if (mpf != NULL &&
+ (t_ret = memp_fclose(mpf)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+ __os_free(array->mpfarray, 0);
+ }
+ if (t->array2.n_extent != 0) {
+ array = &t->array2;
+ array->n_extent = 0;
+ goto again;
+ }
+
+ if (t->path != NULL)
+ __os_free(t->path, 0);
+ __os_free(t, sizeof(QUEUE));
+ dbp->q_internal = NULL;
+
+ return (ret);
+}
+
+static int
+__qam_set_extentsize(dbp, extentsize)
+ DB *dbp;
+ u_int32_t extentsize;
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_extentsize");
+
+ if (extentsize < 1) {
+ __db_err(dbp->dbenv, "Extent size must be at least 1.");
+ return (EINVAL);
+ }
+
+ ((QUEUE*)dbp->q_internal)->page_ext = extentsize;
+
+ return (0);
+}
+
+/*
+ * __db_prqueue --
+ * Print out a queue
+ *
+ * PUBLIC: int __db_prqueue __P((DB *, u_int32_t));
+ */
+int
+__db_prqueue(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ PAGE *h;
+ QMETA *meta;
+ db_pgno_t first, i, last, pg_ext, stop;
+ int ret;
+
+ /* Find out the page number of the last page in the database. */
+ i = PGNO_BASE_MD;
+ if ((ret = memp_fget(dbp->mpf, &i, 0, &meta)) != 0)
+ return (ret);
+
+ first = QAM_RECNO_PAGE(dbp, meta->first_recno);
+ last = QAM_RECNO_PAGE(dbp, meta->cur_recno);
+
+ if ((ret = __db_prpage(dbp, (PAGE *)meta, flags)) != 0)
+ return (ret);
+ if ((ret = memp_fput(dbp->mpf, meta, 0)) != 0)
+ return (ret);
+
+ i = first;
+ if (first > last)
+ stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX);
+ else
+ stop = last;
+
+ /* Dump each page. */
+begin:
+ for (; i <= stop; ++i) {
+ if ((ret = __qam_fget(dbp, &i, DB_MPOOL_EXTENT, &h)) != 0) {
+ pg_ext = ((QUEUE *)dbp->q_internal)->page_ext;
+ if (pg_ext == 0) {
+ if (ret == EINVAL && first == last)
+ return (0);
+ return (ret);
+ }
+ if (ret == ENOENT || ret == EINVAL) {
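+				/*
+				 * The extent file is missing: skip to its
+				 * last page so the loop increment moves to
+				 * the next extent.
+				 */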
+ i += pg_ext - ((i - 1) % pg_ext) - 1;
+ continue;
+ }
+ return (ret);
+ }
+ (void)__db_prpage(dbp, h, flags);
+ if ((ret = __qam_fput(dbp, i, h, 0)) != 0)
+ return (ret);
+ }
+
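+	/* If the queue wrapped, make a second pass over the low-numbered pages. */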
+ if (first > last) {
+ i = 1;
+ stop = last;
+ first = last;
+ goto begin;
+ }
+ return (0);
+}
+
+/*
+ * __qam_remove
+ * Remove method for a Queue.
+ *
+ * PUBLIC: int __qam_remove __P((DB *, const char *,
+ * PUBLIC: const char *, DB_LSN *, int (**)(DB *, void*), void **));
+ */
+int
+__qam_remove(dbp, name, subdb, lsnp, callbackp, cookiep)
+ DB *dbp;
+ const char *name, *subdb;
+ DB_LSN *lsnp;
+ int (**callbackp) __P((DB *, void *));
+ void **cookiep;
+{
+ DBT namedbt;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ MPFARRAY *ap;
+ QUEUE *qp;
+ int ret;
+ char *backup, buf[256], *real_back, *real_name;
+ QUEUE_FILELIST *filelist, *fp;
+ struct __qam_cookie *qam_cookie;
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+ backup = real_back = real_name = NULL;
+ filelist = NULL;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Subdatabases.
+ */
+ if (subdb != NULL) {
+ __db_err(dbenv,
+ "Queue does not support multiple databases per file.");
+ ret = EINVAL;
+ goto done;
+ }
+
+ qp = (QUEUE *)dbp->q_internal;
+
+ if (qp->page_ext != 0 &&
+ (ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ goto done;
+
+ if (filelist == NULL)
+ goto done;
+
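+	/*
+	 * For each open extent: log its removal, close its mpool file and
+	 * either rename it to a backup name (when transactions are on) or
+	 * unlink it outright.
+	 */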
+ for (fp = filelist; fp->mpf != NULL; fp++) {
+ snprintf(buf,
+ sizeof(buf), QUEUE_EXTENT, qp->dir, qp->name, fp->id);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, buf, 0, NULL, &real_name)) != 0)
+ goto done;
+ if (LOGGING_ON(dbenv)) {
+ memset(&namedbt, 0, sizeof(namedbt));
+ namedbt.data = (char *)buf;
+ namedbt.size = strlen(buf) + 1;
+
+ if ((ret =
+ __qam_delete_log(dbenv, dbp->open_txn,
+ &lsn, DB_FLUSH, &namedbt, lsnp)) != 0) {
+ __db_err(dbenv,
+ "%s: %s", name, db_strerror(ret));
+ goto done;
+ }
+ }
+ (void)__memp_fremove(fp->mpf);
+ if ((ret = memp_fclose(fp->mpf)) != 0)
+ goto done;
+ if (qp->array2.n_extent == 0 || qp->array2.low_extent > fp->id)
+ ap = &qp->array1;
+ else
+ ap = &qp->array2;
+ ap->mpfarray[fp->id - ap->low_extent].mpf = NULL;
+
+ /* Create name for backup file. */
+ if (TXN_ON(dbenv)) {
+ if ((ret = __db_backup_name(dbenv,
+ buf, &backup, lsnp)) != 0)
+ goto done;
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, backup, 0, NULL, &real_back)) != 0)
+ goto done;
+ if ((ret = __os_rename(dbenv,
+ real_name, real_back)) != 0)
+ goto done;
+ __os_freestr(real_back);
+ real_back = NULL;
+ }
+ else
+ if ((ret = __os_unlink(dbenv, real_name)) != 0)
+ goto done;
+ __os_freestr(real_name);
+ real_name = NULL;
+ }
+	if ((ret = __os_malloc(dbenv,
+ sizeof(struct __qam_cookie), NULL, &qam_cookie)) != 0)
+ goto done;
+ qam_cookie->lsn = *lsnp;
+ qam_cookie->filelist = filelist;
+ *cookiep = qam_cookie;
+ *callbackp = __qam_remove_callback;
+
+done:
+ if (ret != 0 && filelist != NULL)
+ __os_free(filelist, 0);
+ if (real_back != NULL)
+ __os_freestr(real_back);
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ if (backup != NULL)
+ __os_freestr(backup);
+
+ return (ret);
+}
+
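+/*
+ * __qam_remove_callback --
+ *	Unlink the backup copies of the database and its extents created
+ *	by __qam_remove, then discard the file list and cookie.
+ */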
+static int
+__qam_remove_callback(dbp, cookie)
+ DB *dbp;
+ void *cookie;
+{
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ QUEUE *qp;
+ QUEUE_FILELIST *filelist, *fp;
+ char *backup, buf[256], *real_back;
+ int ret;
+
+ qp = (QUEUE *)dbp->q_internal;
+ if (qp->page_ext == 0)
+ return (__os_unlink(dbp->dbenv, cookie));
+
+ dbenv = dbp->dbenv;
+ lsnp = &((struct __qam_cookie *)cookie)->lsn;
+ filelist = fp = ((struct __qam_cookie *)cookie)->filelist;
+ real_back = backup = NULL;
+ if ((ret =
+ __db_backup_name(dbenv, qp->name, &backup, lsnp)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
+ goto err;
+ if ((ret = __os_unlink(dbp->dbenv, real_back)) != 0)
+ goto err;
+
+ __os_freestr(backup);
+ __os_freestr(real_back);
+
+ if (fp == NULL)
+ return (0);
+
+ for (; fp->mpf != NULL; fp++) {
+ snprintf(buf,
+ sizeof(buf), QUEUE_EXTENT, qp->dir, qp->name, fp->id);
+ real_back = backup = NULL;
+ if ((ret = __db_backup_name(dbenv, buf, &backup, lsnp)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
+ goto err;
+ ret = __os_unlink(dbenv, real_back);
+ __os_freestr(real_back);
+ __os_freestr(backup);
+ }
+ __os_free(filelist, 0);
+ __os_free(cookie, sizeof (struct __qam_cookie));
+
+ return (0);
+
+err:
+ if (backup != NULL)
+ __os_freestr(backup);
+
+ if (real_back != NULL)
+ __os_freestr(real_back);
+
+ return (ret);
+}
+
+/*
+ * __qam_rename
+ * Rename method for Queue.
+ *
+ * PUBLIC: int __qam_rename __P((DB *,
+ * PUBLIC: const char *, const char *, const char *));
+ */
+int
+__qam_rename(dbp, filename, subdb, newname)
+ DB *dbp;
+ const char *filename, *subdb, *newname;
+{
+ DBT namedbt, newnamedbt;
+ DB_ENV *dbenv;
+ DB_LSN newlsn;
+ MPFARRAY *ap;
+ QUEUE *qp;
+ QUEUE_FILELIST *fp, *filelist;
+ char buf[256], nbuf[256], *namep, *real_name, *real_newname;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+ real_name = real_newname = NULL;
+ filelist = NULL;
+
+ qp = (QUEUE *)dbp->q_internal;
+
+ if (subdb != NULL) {
+ __db_err(dbenv,
+ "Queue does not support multiple databases per file.");
+ ret = EINVAL;
+ goto err;
+ }
+ if (qp->page_ext != 0 &&
+ (ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ goto err;
+ if ((namep = __db_rpath(newname)) != NULL)
+ newname = namep + 1;
+
+ for (fp = filelist; fp != NULL && fp->mpf != NULL; fp++) {
+ if ((ret = __memp_fremove(fp->mpf)) != 0)
+ goto err;
+ if ((ret = memp_fclose(fp->mpf)) != 0)
+ goto err;
+ if (qp->array2.n_extent == 0 || qp->array2.low_extent > fp->id)
+ ap = &qp->array1;
+ else
+ ap = &qp->array2;
+ ap->mpfarray[fp->id - ap->low_extent].mpf = NULL;
+ snprintf(buf,
+ sizeof(buf), QUEUE_EXTENT, qp->dir, qp->name, fp->id);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, buf, 0, NULL, &real_name)) != 0)
+ goto err;
+ snprintf(nbuf,
+ sizeof(nbuf), QUEUE_EXTENT, qp->dir, newname, fp->id);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, nbuf, 0, NULL, &real_newname)) != 0)
+ goto err;
+ if (LOGGING_ON(dbenv)) {
+ memset(&namedbt, 0, sizeof(namedbt));
+ namedbt.data = (char *)buf;
+ namedbt.size = strlen(buf) + 1;
+
+			memset(&newnamedbt, 0, sizeof(newnamedbt));
+ newnamedbt.data = (char *)nbuf;
+ newnamedbt.size = strlen(nbuf) + 1;
+
+ if ((ret =
+ __qam_rename_log(dbenv,
+ dbp->open_txn, &newlsn, 0,
+ &namedbt, &newnamedbt)) != 0) {
+ __db_err(dbenv, "%s: %s", filename, db_strerror(ret));
+ goto err;
+ }
+
+ if ((ret = __log_filelist_update(dbenv, dbp,
+ dbp->log_fileid, newname, NULL)) != 0)
+ goto err;
+ }
+ if ((ret = __os_rename(dbenv, real_name, real_newname)) != 0)
+ goto err;
+ __os_freestr(real_name);
+ __os_freestr(real_newname);
+ real_name = real_newname = NULL;
+ }
+
+err:
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ if (real_newname != NULL)
+ __os_freestr(real_newname);
+ if (filelist != NULL)
+ __os_free(filelist, 0);
+
+ return (ret);
+}
diff --git a/bdb/qam/qam_open.c b/bdb/qam/qam_open.c
new file mode 100644
index 00000000000..73346439fd6
--- /dev/null
+++ b/bdb/qam/qam_open.c
@@ -0,0 +1,268 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_open.c,v 11.31 2000/12/20 17:59:29 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "db_swap.h"
+#include "db_am.h"
+#include "lock.h"
+#include "qam.h"
+
+/*
+ * __qam_open
+ *
+ * PUBLIC: int __qam_open __P((DB *, const char *, db_pgno_t, int, u_int32_t));
+ */
+int
+__qam_open(dbp, name, base_pgno, mode, flags)
+ DB *dbp;
+ const char *name;
+ db_pgno_t base_pgno;
+ int mode;
+ u_int32_t flags;
+{
+ QUEUE *t;
+ DBC *dbc;
+ DB_LOCK metalock;
+ DB_LSN orig_lsn;
+ QMETA *qmeta;
+ int locked;
+ int ret, t_ret;
+
+ ret = 0;
+ locked = 0;
+ t = dbp->q_internal;
+
+ if (name == NULL && t->page_ext != 0) {
+ __db_err(dbp->dbenv,
+	    "Extent size may not be specified for an in-memory queue database.");
+ return (EINVAL);
+ }
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->del = __qam_delete;
+ dbp->put = __qam_put;
+ dbp->stat = __qam_stat;
+ dbp->sync = __qam_sync;
+ dbp->db_am_remove = __qam_remove;
+ dbp->db_am_rename = __qam_rename;
+
+ metalock.off = LOCK_INVALID;
+
+ /*
+ * Get a cursor. If DB_CREATE is specified, we may be creating
+ * pages, and to do that safely in CDB we need a write cursor.
+ * In STD_LOCKING mode, we'll synchronize using the meta page
+ * lock instead.
+ */
+ if ((ret = dbp->cursor(dbp, dbp->open_txn,
+ &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbp->dbenv) ?
+ DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ /* Get, and optionally create the metadata page. */
+ if ((ret =
+ __db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = memp_fget(
+ dbp->mpf, &base_pgno, DB_MPOOL_CREATE, (PAGE **)&qmeta)) != 0)
+ goto err;
+
+ /*
+ * If the magic number is correct, we're not creating the tree.
+ * Correct any fields that may not be right. Note, all of the
+ * local flags were set by DB->open.
+ */
+again: if (qmeta->dbmeta.magic == DB_QAMMAGIC) {
+ t->re_pad = qmeta->re_pad;
+ t->re_len = qmeta->re_len;
+ t->rec_page = qmeta->rec_page;
+ t->page_ext = qmeta->page_ext;
+
+ (void)memp_fput(dbp->mpf, (PAGE *)qmeta, 0);
+ goto done;
+ }
+
+	/* If we're doing CDB, we now have to get the write lock. */
+ if (CDB_LOCKING(dbp->dbenv)) {
+ DB_ASSERT(LF_ISSET(DB_CREATE));
+ if ((ret = lock_get(dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &dbc->mylock)) != 0)
+ goto err;
+ }
+
+ /*
+	 * If we are doing locking, release the read lock
+ * and get a write lock. We want to avoid deadlock.
+ */
+ if (locked == 0 && STD_LOCKING(dbc)) {
+ if ((ret = __LPUT(dbc, metalock)) != 0)
+ goto err;
+ if ((ret = __db_lget(dbc,
+ 0, base_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+ goto again;
+ }
+ /* Initialize the tree structure metadata information. */
+ orig_lsn = qmeta->dbmeta.lsn;
+ memset(qmeta, 0, sizeof(QMETA));
+ ZERO_LSN(qmeta->dbmeta.lsn);
+ qmeta->dbmeta.pgno = base_pgno;
+ qmeta->dbmeta.magic = DB_QAMMAGIC;
+ qmeta->dbmeta.version = DB_QAMVERSION;
+ qmeta->dbmeta.pagesize = dbp->pgsize;
+ qmeta->dbmeta.type = P_QAMMETA;
+ qmeta->re_pad = t->re_pad;
+ qmeta->re_len = t->re_len;
+ qmeta->rec_page = CALC_QAM_RECNO_PER_PAGE(dbp);
+ qmeta->cur_recno = 1;
+ qmeta->first_recno = 1;
+ qmeta->page_ext = t->page_ext;
+ t->rec_page = qmeta->rec_page;
+ memcpy(qmeta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ /* Verify that we can fit at least one record per page. */
+ if (QAM_RECNO_PER_PAGE(dbp) < 1) {
+ __db_err(dbp->dbenv,
+ "Record size of %lu too large for page size of %lu",
+ (u_long)t->re_len, (u_long)dbp->pgsize);
+ (void)memp_fput(dbp->mpf, (PAGE *)qmeta, 0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ if ((ret = __db_log_page(dbp,
+ name, &orig_lsn, base_pgno, (PAGE *)qmeta)) != 0)
+ goto err;
+
+ /* Release the metadata page. */
+ if ((ret = memp_fput(dbp->mpf, (PAGE *)qmeta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, name);
+
+ /*
+ * Flush the metadata page to disk.
+ *
+ * !!!
+ * It's not useful to return not-yet-flushed here -- convert it to
+ * an error.
+ */
+ if ((ret = memp_fsync(dbp->mpf)) == DB_INCOMPLETE) {
+ __db_err(dbp->dbenv, "Flush of metapage failed");
+ ret = EINVAL;
+ }
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
+
+done: t->q_meta = base_pgno;
+ t->q_root = base_pgno + 1;
+
+ /* Setup information needed to open extents. */
+ if (t->page_ext != 0) {
+ t->finfo.pgcookie = &t->pgcookie;
+ t->finfo.fileid = NULL;
+ t->finfo.lsn_offset = 0;
+
+ t->pginfo.db_pagesize = dbp->pgsize;
+ t->pginfo.needswap = F_ISSET(dbp, DB_AM_SWAP);
+ t->pgcookie.data = &t->pginfo;
+ t->pgcookie.size = sizeof(DB_PGINFO);
+
+ if ((ret = __os_strdup(dbp->dbenv, name, &t->path)) != 0)
+ goto err;
+ t->dir = t->path;
+ if ((t->name = __db_rpath(t->path)) == NULL) {
+ t->name = t->path;
+ t->dir = PATH_DOT;
+ } else
+ *t->name++ = '\0';
+
+ if (mode == 0)
+ mode = __db_omode("rwrw--");
+ t->mode = mode;
+ }
+
+err:
+DB_TEST_RECOVERY_LABEL
+ /* Don't hold the meta page long term. */
+ (void)__LPUT(dbc, metalock);
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __qam_metachk --
+ *	Check a queue metadata page: verify the version, byte-swap the
+ *	page if necessary, and initialize the DB handle from it.
+ *
+ * PUBLIC: int __qam_metachk __P((DB *, const char *, QMETA *));
+ */
+int
+__qam_metachk(dbp, name, qmeta)
+ DB *dbp;
+ const char *name;
+ QMETA *qmeta;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * At this point, all we know is that the magic number is for a Queue.
+ * Check the version, the database may be out of date.
+ */
+ vers = qmeta->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 1:
+ case 2:
+ __db_err(dbenv,
+ "%s: queue version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 3:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported qam version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __qam_mswap((PAGE *)qmeta)) != 0)
+ return (ret);
+
+ /* Check the type. */
+ if (dbp->type != DB_QUEUE && dbp->type != DB_UNKNOWN)
+ return (EINVAL);
+ dbp->type = DB_QUEUE;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE);
+
+ /* Set the page size. */
+ dbp->pgsize = qmeta->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, qmeta->dbmeta.uid, DB_FILE_ID_LEN);
+
+ return (0);
+}
diff --git a/bdb/qam/qam_rec.c b/bdb/qam/qam_rec.c
new file mode 100644
index 00000000000..4d330f58651
--- /dev/null
+++ b/bdb/qam/qam_rec.c
@@ -0,0 +1,732 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_rec.c,v 11.34 2001/01/19 18:01:59 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "db_am.h"
+#include "qam.h"
+#include "log.h"
+
+/*
+ * __qam_inc_recover --
+ * Recovery function for inc.
+ *
+ * PUBLIC: int __qam_inc_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_inc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_inc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ db_pgno_t metapg;
+ int cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_inc_print);
+ REC_INTRO(__qam_inc_read, 1);
+
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto done;
+ if ((ret = memp_fget(mpf, &metapg, 0, &meta)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &metapg, DB_MPOOL_CREATE, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ meta->dbmeta.pgno = metapg;
+ meta->dbmeta.type = P_QAMMETA;
+
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ }
+
+ modified = 0;
+ cmp_p = log_compare(&LSN(meta), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->lsn);
+
+ /*
+ * The cur_recno never goes backwards. It is a point of
+ * contention among appenders. If one fails cur_recno will
+ * most likely be beyond that one when it aborts.
+ * We move it ahead on either an abort or a commit
+ * and make the LSN reflect that fact.
+ */
+ if (cmp_p == 0) {
+ modified = 1;
+ meta->cur_recno++;
+ if (meta->cur_recno == RECNO_OOB)
+ meta->cur_recno++;
+ meta->dbmeta.lsn = *lsnp;
+ }
+ if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)))
+ goto out;
+
+ (void)__LPUT(dbc, lock);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_incfirst_recover --
+ * Recovery function for incfirst.
+ *
+ * PUBLIC: int __qam_incfirst_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_incfirst_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ QUEUE_CURSOR *cp;
+ db_pgno_t metapg;
+ int exact, modified, ret, rec_ext;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_incfirst_print);
+ REC_INTRO(__qam_incfirst_read, 1);
+
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto done;
+ if ((ret = memp_fget(mpf, &metapg, 0, &meta)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &metapg, DB_MPOOL_CREATE, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ meta->dbmeta.pgno = metapg;
+ meta->dbmeta.type = P_QAMMETA;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ }
+
+ modified = 0;
+
+ /*
+ * Only move first_recno backwards so we pick up the aborted delete.
+ * When going forward we need to be careful since
+ * we may have bumped over a locked record.
+ */
+ if (DB_UNDO(op)) {
+ if (QAM_BEFORE_FIRST(meta, argp->recno)) {
+ meta->first_recno = argp->recno;
+ modified = 1;
+ }
+ } else {
+ if (log_compare(&LSN(meta), lsnp) < 0) {
+ LSN(meta) = *lsnp;
+ modified = 1;
+ }
+ rec_ext = 0;
+ if (meta->page_ext != 0)
+ rec_ext = meta->page_ext * meta->rec_page;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ if (meta->first_recno == RECNO_OOB)
+ meta->first_recno++;
+ while (meta->first_recno != meta->cur_recno
+ && !QAM_BEFORE_FIRST(meta, argp->recno + 1)) {
+ if ((ret = __qam_position(dbc,
+ &meta->first_recno, QAM_READ, &exact)) != 0)
+ goto out;
+ if (cp->page != NULL)
+ __qam_fput(file_dbp, cp->pgno, cp->page, 0);
+
+ if (exact == 1)
+ break;
+ if (cp->page != NULL &&
+ rec_ext != 0 && meta->first_recno % rec_ext == 0)
+ if ((ret =
+ __qam_fremove(file_dbp, cp->pgno)) != 0)
+ goto out;
+ meta->first_recno++;
+ if (meta->first_recno == RECNO_OOB)
+ meta->first_recno++;
+ modified = 1;
+ }
+ }
+
+ if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)))
+ goto out;
+
+ (void)__LPUT(dbc, lock);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_mvptr_recover --
+ * Recovery function for mvptr.
+ *
+ * PUBLIC: int __qam_mvptr_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_mvptr_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ db_pgno_t metapg;
+ int cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_mvptr_print);
+ REC_INTRO(__qam_mvptr_read, 1);
+
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto done;
+ if ((ret = memp_fget(mpf, &metapg, 0, &meta)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = memp_fget(mpf,
+ &metapg, DB_MPOOL_CREATE, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ meta->dbmeta.pgno = metapg;
+ meta->dbmeta.type = P_QAMMETA;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ }
+
+ modified = 0;
+ cmp_p = log_compare(&meta->dbmeta.lsn, &argp->metalsn);
+
+ /*
+ * We never undo a movement of one of the pointers.
+ * Just move them along regardless of abort/commit.
+ */
+ if (cmp_p == 0) {
+ if (argp->opcode & QAM_SETFIRST)
+ meta->first_recno = argp->new_first;
+
+ if (argp->opcode & QAM_SETCUR)
+ meta->cur_recno = argp->new_cur;
+
+ modified = 1;
+ meta->dbmeta.lsn = *lsnp;
+ }
+
+ if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)))
+ goto out;
+
+ (void)__LPUT(dbc, lock);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+/*
+ * __qam_del_recover --
+ * Recovery function for del.
+ *	Used for the non-extent case, or when the record has no data (zero length).
+ *
+ * PUBLIC: int __qam_del_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_del_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_del_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_del_print);
+ REC_INTRO(__qam_del_read, 1);
+
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+
+ modified = 0;
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (DB_UNDO(op)) {
+		/* Make sure first_recno is behind us. */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(file_dbp->mpf, &metapg, 0, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto done;
+ }
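+		/*
+		 * If the aborted delete means the record should be live
+		 * again, pull first_recno back to it; the extra tests appear
+		 * to disambiguate the record's position relative to first and
+		 * cur when the record numbers have wrapped.
+		 */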
+ if (meta->first_recno == RECNO_OOB ||
+ (QAM_BEFORE_FIRST(meta, argp->recno)
+ && (meta->first_recno <= meta->cur_recno
+ || meta->first_recno -
+ argp->recno < argp->recno - meta->cur_recno))) {
+ meta->first_recno = argp->recno;
+ (void)memp_fput(file_dbp->mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ (void)memp_fput(file_dbp->mpf, meta, 0);
+ (void)__LPUT(dbc, lock);
+
+ /* Need to undo delete - mark the record as present */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_SET(qp, QAM_VALID);
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n < 0)
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ } else if (cmp_n > 0 && DB_REDO(op)) {
+ /* Need to redo delete - clear the valid bit */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ }
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)))
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+/*
+ * __qam_delext_recover --
+ * Recovery function for del in an extent based queue.
+ *
+ * PUBLIC: int __qam_delext_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_delext_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_delext_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_delext_print);
+ REC_INTRO(__qam_delext_read, 1);
+
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+
+ modified = 0;
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (DB_UNDO(op)) {
+		/* Make sure first_recno is behind us. */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = memp_fget(file_dbp->mpf, &metapg, 0, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto done;
+ }
+ if (meta->first_recno == RECNO_OOB ||
+ (QAM_BEFORE_FIRST(meta, argp->recno)
+ && (meta->first_recno <= meta->cur_recno
+ || meta->first_recno -
+ argp->recno < argp->recno - meta->cur_recno))) {
+ meta->first_recno = argp->recno;
+ (void)memp_fput(file_dbp->mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ (void)memp_fput(file_dbp->mpf, meta, 0);
+ (void)__LPUT(dbc, lock);
+
+ if ((ret = __qam_pitem(dbc, pagep,
+ argp->indx, argp->recno, &argp->data)) != 0)
+ goto done;
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n < 0)
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ } else if (cmp_n > 0 && DB_REDO(op)) {
+ /* Need to redo delete - clear the valid bit */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ }
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)))
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_add_recover --
+ * Recovery function for add.
+ *
+ * PUBLIC: int __qam_add_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_add_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_add_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_add_print);
+ REC_INTRO(__qam_add_read, 1);
+
+ modified = 0;
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (cmp_n > 0 && DB_REDO(op)) {
+ /* Need to redo add - put the record on page */
+ if ((ret = __qam_pitem(dbc, pagep, argp->indx, argp->recno,
+ &argp->data)) != 0)
+ goto err;
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ /* Make sure first pointer includes this record. */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = memp_fget(mpf, &metapg, 0, &meta)) != 0)
+ goto err;
+ if (QAM_BEFORE_FIRST(meta, argp->recno)) {
+ meta->first_recno = argp->recno;
+ if ((ret = memp_fput(mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ } else
+ if ((ret = memp_fput(mpf, meta, 0)) != 0)
+ goto err;
+
+ } else if (DB_UNDO(op)) {
+		/*
+		 * Need to undo the add.
+		 * If this was an overwrite, put the old record back.
+		 * Otherwise just clear the valid bit.
+		 */
+ if (argp->olddata.size != 0) {
+ if ((ret = __qam_pitem(dbc, pagep,
+ argp->indx, argp->recno, &argp->olddata)) != 0)
+ goto err;
+
+ if (!(argp->vflag & QAM_VALID)) {
+ qp = QAM_GET_RECORD(
+ file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ }
+ modified = 1;
+ } else {
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ qp->flags = 0;
+ modified = 1;
+ }
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n < 0)
+ LSN(pagep) = argp->lsn;
+ }
+
+err: if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)))
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+/*
+ * __qam_delete_recover --
+ * Recovery function for delete of an extent.
+ *
+ * PUBLIC: int __qam_delete_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_delete_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_delete_args *argp;
+ int ret;
+ char *backup, *real_back, *real_name;
+
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__qam_delete_print);
+
+ backup = real_back = real_name = NULL;
+ if ((ret = __qam_delete_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ if (DB_REDO(op)) {
+		/*
+		 * During recovery we replay what originally happened, which
+		 * recreates the file's creation. So even though the delete
+		 * committed, we need to delete the file again. Try to delete
+		 * it, but it is not an error if that delete fails.
+		 */
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+ if (__os_exists(real_name, NULL) == 0) {
+ if ((ret = __os_unlink(dbenv, real_name)) != 0)
+ goto out;
+ }
+ } else if (DB_UNDO(op)) {
+		/*
+		 * Trying to undo. The file may or may not have been
+		 * deleted. Try to move the backup back to the original
+		 * name: if the backup exists, this restores the file; if
+		 * it doesn't, nothing happens and that's OK.
+		 */
+ if ((ret = __db_backup_name(dbenv, argp->name.data,
+ &backup, &argp->lsn)) != 0)
+ goto out;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
+ goto out;
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+ if (__os_exists(real_back, NULL) == 0)
+ if ((ret =
+ __os_rename(dbenv, real_back, real_name)) != 0)
+ goto out;
+ }
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (argp != NULL)
+ __os_free(argp, 0);
+ if (backup != NULL)
+ __os_freestr(backup);
+ if (real_back != NULL)
+ __os_freestr(real_back);
+ if (real_name != NULL)
+ __os_freestr(real_name);
+ return (ret);
+}
+/*
+ * __qam_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __qam_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_rename_args *argp;
+ char *new_name, *real_name;
+ int ret;
+
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__qam_rename_print);
+
+ new_name = real_name = NULL;
+
+ if ((ret = __qam_rename_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ if (DB_REDO(op)) {
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+ if (__os_exists(real_name, NULL) == 0) {
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, argp->newname.data,
+ 0, NULL, &new_name)) != 0)
+ goto out;
+ if ((ret = __os_rename(dbenv,
+ real_name, new_name)) != 0)
+ goto out;
+ }
+ } else {
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ NULL, argp->newname.data, 0, NULL, &new_name)) != 0)
+ goto out;
+ if (__os_exists(new_name, NULL) == 0) {
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, NULL, argp->name.data,
+ 0, NULL, &real_name)) != 0)
+ goto out;
+ if ((ret = __os_rename(dbenv,
+ new_name, real_name)) != 0)
+ goto out;
+ }
+ }
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (argp != NULL)
+ __os_free(argp, 0);
+
+ if (new_name != NULL)
+ __os_free(new_name, 0);
+
+ if (real_name != NULL)
+ __os_free(real_name, 0);
+
+ return (ret);
+}
diff --git a/bdb/qam/qam_stat.c b/bdb/qam/qam_stat.c
new file mode 100644
index 00000000000..865f477c1eb
--- /dev/null
+++ b/bdb/qam/qam_stat.c
@@ -0,0 +1,201 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_stat.c,v 11.16 2001/01/10 04:50:54 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "db_am.h"
+#include "lock.h"
+#include "qam.h"
+
+/*
+ * __qam_stat --
+ * Gather/print the qam statistics
+ *
+ * PUBLIC: int __qam_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+ */
+int
+__qam_stat(dbp, spp, db_malloc, flags)
+ DB *dbp;
+ void *spp;
+ void *(*db_malloc) __P((size_t));
+ u_int32_t flags;
+{
+ QUEUE *t;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_QUEUE_STAT *sp;
+ PAGE *h;
+ QAMDATA *qp, *ep;
+ QMETA *meta;
+ db_indx_t indx;
+ db_pgno_t first, last, pgno, pg_ext, stop;
+ u_int32_t re_len;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ t = dbp->q_internal;
+ sp = NULL;
+ lock.off = LOCK_INVALID;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ if (spp == NULL)
+ return (0);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, NULL, "qam_stat", NULL, NULL, flags);
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_malloc(dbp->dbenv, sizeof(*sp), db_malloc, &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
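+	/*
+	 * DB_CACHED_COUNTS: return the key/record counts already
+	 * stored on the metadata page instead of walking every page.
+	 */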
+ if (flags == DB_CACHED_COUNTS) {
+ if ((ret = __db_lget(dbc,
+ 0, t->q_meta, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret =
+ memp_fget(dbp->mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ sp->qs_nkeys = meta->dbmeta.key_count;
+ sp->qs_ndata = meta->dbmeta.record_count;
+
+ goto done;
+ }
+
+ /* Determine the last page of the database. */
+ if ((ret = __db_lget(dbc,
+ 0, t->q_meta, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ first = QAM_RECNO_PAGE(dbp, meta->first_recno);
+ last = QAM_RECNO_PAGE(dbp, meta->cur_recno);
+
+ if ((ret = memp_fput(dbp->mpf, meta, 0)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+
+ pgno = first;
+ if (first > last)
+ stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX);
+ else
+ stop = last;
+
+ /* Dump each page. */
+ pg_ext = ((QUEUE *)dbp->q_internal)->page_ext;
+begin:
+ /* Walk through the pages and count. */
+ for (; pgno <= stop; ++pgno) {
+ if ((ret =
+ __db_lget(dbc,
+ 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ ret = __qam_fget(dbp, &pgno, DB_MPOOL_EXTENT, &h);
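+		/*
+		 * ENOENT and EINVAL appear to mean that this extent file
+		 * (or the rest of it) is missing; skip ahead so the walk
+		 * resumes in the next extent instead of failing.
+		 */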
+ if (ret == ENOENT) {
+ pgno += pg_ext - 1;
+ continue;
+ }
+ if (ret == EINVAL) {
+ pgno += pg_ext - ((pgno - 1) % pg_ext) - 1;
+ continue;
+ }
+ if (ret == EIO && first == last && pg_ext == 0)
+ break;
+ if (ret != 0)
+ goto err;
+
+ ++sp->qs_pages;
+
+ ep = (QAMDATA *)((u_int8_t *)h + dbp->pgsize - re_len);
+ for (indx = 0, qp = QAM_GET_RECORD(dbp, h, indx);
+ qp <= ep;
+ ++indx, qp = QAM_GET_RECORD(dbp, h, indx)) {
+ if (F_ISSET(qp, QAM_VALID))
+ sp->qs_ndata++;
+ else
+ sp->qs_pgfree += re_len;
+ }
+
+ if ((ret = __qam_fput(dbp, pgno, h, 0)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+ }
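+	/*
+	 * If the queue wrapped (first > last), the pass above scanned
+	 * from the first page to the end of the page space; finish by
+	 * scanning from page 1 up to the old last page.
+	 */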
+ if (first > last) {
+ pgno = 1;
+ stop = last;
+ first = last;
+ goto begin;
+ }
+
+ /* Get the meta-data page. */
+ if ((ret = __db_lget(dbc,
+ 0, t->q_meta, F_ISSET(dbp, DB_AM_RDONLY) ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = memp_fget(dbp->mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ /* Get the metadata fields. */
+ sp->qs_magic = meta->dbmeta.magic;
+ sp->qs_version = meta->dbmeta.version;
+ sp->qs_metaflags = meta->dbmeta.flags;
+ sp->qs_pagesize = meta->dbmeta.pagesize;
+ sp->qs_re_len = meta->re_len;
+ sp->qs_re_pad = meta->re_pad;
+ sp->qs_first_recno = meta->first_recno;
+ sp->qs_cur_recno = meta->cur_recno;
+ sp->qs_nkeys = sp->qs_ndata;
+ if (!F_ISSET(dbp, DB_AM_RDONLY))
+ meta->dbmeta.key_count =
+ meta->dbmeta.record_count = sp->qs_ndata;
+
+done:
+ /* Discard the meta-data page. */
+ if ((ret = memp_fput(dbp->mpf,
+ meta, F_ISSET(dbp, DB_AM_RDONLY) ? 0 : DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+
+ *(DB_QUEUE_STAT **)spp = sp;
+ ret = 0;
+
+ if (0) {
+err: if (sp != NULL)
+ __os_free(sp, sizeof(*sp));
+ }
+
+ if (lock.off != LOCK_INVALID)
+ (void)__LPUT(dbc, lock);
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/bdb/qam/qam_upgrade.c b/bdb/qam/qam_upgrade.c
new file mode 100644
index 00000000000..f49bfe88d90
--- /dev/null
+++ b/bdb/qam/qam_upgrade.c
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_upgrade.c,v 11.7 2000/11/30 00:58:44 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_swap.h"
+#include "db_am.h"
+#include "db_upgrade.h"
+
+/*
+ * __qam_31_qammeta --
+ * Upgrade the database from version 1 to version 2.
+ *
+ * PUBLIC: int __qam_31_qammeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__qam_31_qammeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ QMETA31 *newmeta;
+ QMETA30 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+
+ newmeta = (QMETA31 *)buf;
+ oldmeta = (QMETA30 *)buf;
+
+ /*
+ * Copy the fields to their new locations.
+	 * They may overlap, so start at the bottom and use memmove().
+ */
+ newmeta->rec_page = oldmeta->rec_page;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->cur_recno = oldmeta->cur_recno;
+ newmeta->first_recno = oldmeta->first_recno;
+ newmeta->start = oldmeta->start;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 2;
+
+ return (0);
+}
+
+/*
+ * __qam_32_qammeta --
+ * Upgrade the database from version 2 to version 3.
+ *
+ * PUBLIC: int __qam_32_qammeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__qam_32_qammeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ QMETA32 *newmeta;
+ QMETA31 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+
+ newmeta = (QMETA32 *)buf;
+ oldmeta = (QMETA31 *)buf;
+
+ /*
+ * Copy the fields to their new locations.
+	 * We are dropping the first field, so move
+	 * from the top.
+ */
+ newmeta->first_recno = oldmeta->first_recno;
+ newmeta->cur_recno = oldmeta->cur_recno;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->rec_page = oldmeta->rec_page;
+ newmeta->page_ext = 0;
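+	/*
+	 * page_ext is new in this metadata version; 0 appears to mean
+	 * the database does not use extent files (see how page_ext is
+	 * tested in the recovery functions above).
+	 */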
+ /* cur_recno now points to the first free slot. */
+ newmeta->cur_recno++;
+ if (newmeta->first_recno == 0)
+ newmeta->first_recno = 1;
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 3;
+
+ return (0);
+}
diff --git a/bdb/qam/qam_verify.c b/bdb/qam/qam_verify.c
new file mode 100644
index 00000000000..a9a467d6785
--- /dev/null
+++ b/bdb/qam/qam_verify.c
@@ -0,0 +1,194 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_verify.c,v 1.17 2000/12/12 17:39:35 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_verify.h"
+#include "qam.h"
+#include "db_ext.h"
+
+/*
+ * __qam_vrfy_meta --
+ * Verify the queue-specific part of a metadata page.
+ *
+ * PUBLIC: int __qam_vrfy_meta __P((DB *, VRFY_DBINFO *, QMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__qam_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ QMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ /*
+	 * Queue can't be used in subdatabases, so if the VRFY_INCOMPLETE
+	 * flag isn't set something very odd is going on.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE))
+ EPRINT((dbp->dbenv, "Queue databases must be one-per-file."));
+
+ /*
+	 * cur_recno/rec_page:
+	 * cur_recno may point one past the last record, and record
+	 * numbering starts at 1, hence the cur_recno - 1 below.
+ */
+ if (vdp->last_pgno > 0 && meta->cur_recno > 0 &&
+ meta->cur_recno - 1 > meta->rec_page * vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Current recno %lu references record past last page number %lu",
+ meta->cur_recno, vdp->last_pgno));
+ isbad = 1;
+ }
+
+ /*
+ * re_len: If this is bad, we can't safely verify queue data pages, so
+ * return DB_VERIFY_FATAL
+ */
+ if (ALIGN(meta->re_len + sizeof(QAMDATA) - 1, sizeof(u_int32_t)) *
+ meta->rec_page + sizeof(QPAGE) > dbp->pgsize) {
+ EPRINT((dbp->dbenv,
+ "Queue record length %lu impossibly high for page size and records per page",
+ meta->re_len));
+ ret = DB_VERIFY_FATAL;
+ goto err;
+ } else {
+ vdp->re_len = meta->re_len;
+ vdp->rec_page = meta->rec_page;
+ }
+
+err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __qam_vrfy_data --
+ * Verify a queue data page.
+ *
+ * PUBLIC: int __qam_vrfy_data __P((DB *, VRFY_DBINFO *, QPAGE *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__qam_vrfy_data(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ QPAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB fakedb;
+ struct __queue fakeq;
+ QAMDATA *qp;
+ db_recno_t i;
+ u_int8_t qflags;
+
+ /*
+ * Not much to do here, except make sure that flags are reasonable.
+ *
+ * QAM_GET_RECORD assumes a properly initialized q_internal
+ * structure, however, and we don't have one, so we play
+ * some gross games to fake it out.
+ */
+ fakedb.q_internal = &fakeq;
+ fakeq.re_len = vdp->re_len;
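+	/*
+	 * Only re_len appears to be consulted by QAM_GET_RECORD for
+	 * its offset arithmetic, which is why filling in just that
+	 * field of the fake structure is enough.
+	 */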
+
+ for (i = 0; i < vdp->rec_page; i++) {
+ qp = QAM_GET_RECORD(&fakedb, h, i);
+ if ((u_int8_t *)qp >= (u_int8_t *)h + dbp->pgsize) {
+ EPRINT((dbp->dbenv,
+ "Queue record %lu extends past end of page %lu",
+ i, pgno));
+ return (DB_VERIFY_BAD);
+ }
+
+ qflags = qp->flags;
+		/* Mask off the legal flags; anything left over is bad. */
+		qflags &= ~(QAM_VALID | QAM_SET);
+ if (qflags != 0) {
+ EPRINT((dbp->dbenv,
+ "Queue record %lu on page %lu has bad flags",
+ i, pgno));
+ return (DB_VERIFY_BAD);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * __qam_vrfy_structure --
+ * Verify a queue database structure, such as it is.
+ *
+ * PUBLIC: int __qam_vrfy_structure __P((DB *, VRFY_DBINFO *, u_int32_t));
+ */
+int
+__qam_vrfy_structure(dbp, vdp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ db_pgno_t i;
+ int ret, isbad;
+
+ isbad = 0;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0)
+ return (ret);
+
+ if (pip->type != P_QAMMETA) {
+ EPRINT((dbp->dbenv,
+ "Queue database has no meta page"));
+ isbad = 1;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_pgset_inc(vdp->pgset, 0)) != 0)
+ goto err;
+
+ for (i = 1; i <= vdp->last_pgno; i++) {
+ /* Send feedback to the application about our progress. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 ||
+ (ret = __db_vrfy_getpageinfo(vdp, i, &pip)) != 0)
+ return (ret);
+ if (!F_ISSET(pip, VRFY_IS_ALLZEROES) &&
+ pip->type != P_QAMDATA) {
+ EPRINT((dbp->dbenv,
+ "Queue database page %lu of incorrect type %lu",
+ i, pip->type));
+ isbad = 1;
+ goto err;
+ } else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, i)) != 0)
+ goto err;
+ }
+
+err: if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ return (ret);
+ return (isbad == 1 ? DB_VERIFY_BAD : 0);
+}
diff --git a/bdb/rpc_client/client.c b/bdb/rpc_client/client.c
new file mode 100644
index 00000000000..70744f54b4c
--- /dev/null
+++ b/bdb/rpc_client/client.c
@@ -0,0 +1,371 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: client.c,v 1.21 2000/11/30 00:58:44 ubell Exp $";
+#endif /* not lint */
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+#include "db_server.h"
+
+#include "db_int.h"
+#include "txn.h"
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+
+/*
+ * __dbcl_envserver --
+ * Initialize an environment's server.
+ *
+ * PUBLIC: int __dbcl_envserver __P((DB_ENV *, char *, long, long, u_int32_t));
+ */
+int
+__dbcl_envserver(dbenv, host, tsec, ssec, flags)
+ DB_ENV *dbenv;
+ char *host;
+ long tsec, ssec;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __env_create_msg req;
+ __env_create_reply *replyp;
+ struct timeval tp;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+#ifdef HAVE_VXWORKS
+ if ((ret = rpcTaskInit()) != 0) {
+ __db_err(dbenv, "Could not initialize VxWorks RPC");
+ return (ERROR);
+ }
+#endif
+ if ((cl =
+ clnt_create(host, DB_SERVERPROG, DB_SERVERVERS, "tcp")) == NULL) {
+ __db_err(dbenv, clnt_spcreateerror(host));
+ return (DB_NOSERVER);
+ }
+ dbenv->cl_handle = cl;
+
+ if (tsec != 0) {
+ tp.tv_sec = tsec;
+ tp.tv_usec = 0;
+ (void)clnt_control(cl, CLSET_TIMEOUT, (char *)&tp);
+ }
+
+ req.timeout = ssec;
+ /*
+ * CALL THE SERVER
+ */
+ if ((replyp = __db_env_create_1(&req, cl)) == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ return (DB_NOSERVER);
+ }
+
+ /*
+	 * Process the reply and free up our space from the request.
+	 * On success, store the ID returned by the server.
+ */
+ if ((ret = replyp->status) != 0)
+ return (ret);
+
+ dbenv->cl_id = replyp->envcl_id;
+ return (0);
+}
+
+/*
+ * __dbcl_refresh --
+ * Clean up an environment.
+ *
+ * PUBLIC: int __dbcl_refresh __P((DB_ENV *));
+ */
+int
+__dbcl_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ CLIENT *cl;
+ int ret;
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ ret = 0;
+ if (dbenv->tx_handle != NULL) {
+ /*
+		 * We only need to free up our local state; the caller
+		 * of this function will call the server, which does
+		 * all the real work.
+ */
+ ret = __dbcl_txn_close(dbenv);
+ dbenv->tx_handle = NULL;
+ }
+ if (cl != NULL)
+ clnt_destroy(cl);
+ dbenv->cl_handle = NULL;
+ return (ret);
+}
+
+/*
+ * __dbcl_txn_close --
+ * Clean up an environment's transactions.
+ *
+ * PUBLIC: int __dbcl_txn_close __P((DB_ENV *));
+ */
+int
+__dbcl_txn_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXN *txnp;
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ ret = 0;
+ tmgrp = dbenv->tx_handle;
+
+ /*
+ * This function can only be called once per process (i.e., not
+ * once per thread), so no synchronization is required.
+ * Also this function is called *after* the server has been called,
+ * so the server has already closed/aborted any transactions that
+ * were open on its side. We only need to do local cleanup.
+ */
+ while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL)
+ __dbcl_txn_end(txnp);
+
+ __os_free(tmgrp, sizeof(*tmgrp));
+ return (ret);
+}
+
+/*
+ * __dbcl_txn_end --
+ *	Clean up a transaction.
+ * RECURSIVE FUNCTION: Clean up nested transactions.
+ *
+ * PUBLIC: void __dbcl_txn_end __P((DB_TXN *));
+ */
+void
+__dbcl_txn_end(txnp)
+ DB_TXN *txnp;
+{
+ DB_ENV *dbenv;
+ DB_TXN *kids;
+ DB_TXNMGR *mgr;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+
+ /*
+ * First take care of any kids we have
+ */
+ for (kids = TAILQ_FIRST(&txnp->kids);
+ kids != NULL;
+ kids = TAILQ_FIRST(&txnp->kids))
+ __dbcl_txn_end(kids);
+
+ /*
+ * We are ending this transaction no matter what the parent
+ * may eventually do, if we have a parent. All those details
+ * are taken care of by the server. We only need to make sure
+ * that we properly release resources.
+ */
+ if (txnp->parent != NULL)
+ TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
+ TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
+ __os_free(txnp, sizeof(*txnp));
+
+ return;
+}
+
+/*
+ * __dbcl_c_destroy --
+ * Destroy a cursor.
+ *
+ * PUBLIC: int __dbcl_c_destroy __P((DBC *));
+ */
+int
+__dbcl_c_destroy(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+
+ dbp = dbc->dbp;
+
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ __os_free(dbc, sizeof(*dbc));
+
+ return (0);
+}
+
+/*
+ * __dbcl_c_refresh --
+ * Refresh a cursor. Move it from the active queue to the free queue.
+ *
+ * PUBLIC: void __dbcl_c_refresh __P((DBC *));
+ */
+void
+__dbcl_c_refresh(dbcp)
+ DBC *dbcp;
+{
+ DB *dbp;
+
+ dbp = dbcp->dbp;
+ dbcp->flags = 0;
+ dbcp->cl_id = 0;
+
+ /*
+ * If dbp->cursor fails locally, we use a local dbc so that
+ * we can close it. In that case, dbp will be NULL.
+ */
+ if (dbp != NULL) {
+ TAILQ_REMOVE(&dbp->active_queue, dbcp, links);
+ TAILQ_INSERT_TAIL(&dbp->free_queue, dbcp, links);
+ }
+ return;
+}
+
+/*
+ * __dbcl_c_setup --
+ * Allocate a cursor.
+ *
+ * PUBLIC: int __dbcl_c_setup __P((long, DB *, DBC **));
+ */
+int
+__dbcl_c_setup(cl_id, dbp, dbcpp)
+ long cl_id;
+ DB *dbp;
+ DBC **dbcpp;
+{
+ DBC *dbc, tmpdbc;
+ int ret, t_ret;
+
+ if ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ else {
+ if ((ret =
+ __os_calloc(dbp->dbenv, 1, sizeof(DBC), &dbc)) != 0) {
+ /*
+ * If we die here, set up a tmp dbc to call the
+ * server to shut down that cursor.
+ */
+ tmpdbc.dbp = NULL;
+ tmpdbc.cl_id = cl_id;
+ t_ret = __dbcl_dbc_close(&tmpdbc);
+ return (ret);
+ }
+ dbc->c_close = __dbcl_dbc_close;
+ dbc->c_count = __dbcl_dbc_count;
+ dbc->c_del = __dbcl_dbc_del;
+ dbc->c_dup = __dbcl_dbc_dup;
+ dbc->c_get = __dbcl_dbc_get;
+ dbc->c_put = __dbcl_dbc_put;
+ dbc->c_am_destroy = __dbcl_c_destroy;
+ }
+ dbc->cl_id = cl_id;
+ dbc->dbp = dbp;
+ TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
+ *dbcpp = dbc;
+ return (0);
+}
+
+/*
+ * __dbcl_retcopy --
+ * Copy the returned data into the user's DBT, handling special flags
+ * as they apply to a client. Modeled after __db_retcopy().
+ *
+ * PUBLIC: int __dbcl_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t));
+ */
+int
+__dbcl_retcopy(dbenv, dbt, data, len)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ void *data;
+ u_int32_t len;
+{
+ int ret;
+
+ /*
+	 * No need to handle DB_DBT_PARTIAL here; the server already did.
+ */
+ dbt->size = len;
+
+ /*
+	 * Allocate memory to be owned by the application (DB_DBT_MALLOC
+	 * and DB_DBT_REALLOC); always allocate, even if we're copying 0
+	 * bytes. Or use memory specified by the application (DB_DBT_USERMEM).
+ */
+ if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ if ((ret = __os_malloc(dbenv, len, NULL, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
+ if ((ret = __os_realloc(dbenv, len, NULL, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_USERMEM)) {
+ if (len != 0 && (dbt->data == NULL || dbt->ulen < len))
+ return (ENOMEM);
+ } else {
+ /*
+ * If no user flags, then set the DBT to point to the
+ * returned data pointer and return.
+ */
+ dbt->data = data;
+ return (0);
+ }
+
+ if (len != 0)
+ memcpy(dbt->data, data, len);
+ return (0);
+}
+
+/*
+ * __dbcl_dbclose_common --
+ * Common code for closing/cleaning a dbp.
+ *
+ * PUBLIC: int __dbcl_dbclose_common __P((DB *));
+ */
+int
+__dbcl_dbclose_common(dbp)
+ DB *dbp;
+{
+ int ret, t_ret;
+ DBC *dbc;
+
+ /*
+ * Go through the active cursors and call the cursor recycle routine,
+ * which resolves pending operations and moves the cursors onto the
+ * free list. Then, walk the free list and call the cursor destroy
+ * routine.
+ *
+ * NOTE: We do not need to use the join_queue for join cursors.
+ * See comment in __dbcl_dbjoin_ret.
+ */
+ ret = 0;
+ while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ __dbcl_c_refresh(dbc);
+ while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ if ((t_ret = __dbcl_c_destroy(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+
+ memset(dbp, CLEAR_BYTE, sizeof(*dbp));
+ __os_free(dbp, sizeof(*dbp));
+ return (ret);
+}
+#endif /* HAVE_RPC */
diff --git a/bdb/rpc_client/db_server_clnt.c b/bdb/rpc_client/db_server_clnt.c
new file mode 100644
index 00000000000..680a26144b2
--- /dev/null
+++ b/bdb/rpc_client/db_server_clnt.c
@@ -0,0 +1,692 @@
+#include "db_config.h"
+#ifdef HAVE_RPC
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "db_server.h"
+
+/* Default timeout can be changed using clnt_control() */
+static struct timeval TIMEOUT = { 25, 0 };
+
+__env_cachesize_reply *
+__db_env_cachesize_1(argp, clnt)
+ __env_cachesize_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_cachesize_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_cachesize,
+ (xdrproc_t) xdr___env_cachesize_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_cachesize_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_close_reply *
+__db_env_close_1(argp, clnt)
+ __env_close_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_close_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_close,
+ (xdrproc_t) xdr___env_close_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_close_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_create_reply *
+__db_env_create_1(argp, clnt)
+ __env_create_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_create_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_create,
+ (xdrproc_t) xdr___env_create_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_create_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_flags_reply *
+__db_env_flags_1(argp, clnt)
+ __env_flags_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_flags_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_flags,
+ (xdrproc_t) xdr___env_flags_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_flags_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_open_reply *
+__db_env_open_1(argp, clnt)
+ __env_open_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_open_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_open,
+ (xdrproc_t) xdr___env_open_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_open_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_remove_reply *
+__db_env_remove_1(argp, clnt)
+ __env_remove_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_remove_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_remove,
+ (xdrproc_t) xdr___env_remove_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_remove_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_abort_reply *
+__db_txn_abort_1(argp, clnt)
+ __txn_abort_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_abort_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_abort,
+ (xdrproc_t) xdr___txn_abort_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_abort_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_begin_reply *
+__db_txn_begin_1(argp, clnt)
+ __txn_begin_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_begin_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_begin,
+ (xdrproc_t) xdr___txn_begin_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_begin_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_commit_reply *
+__db_txn_commit_1(argp, clnt)
+ __txn_commit_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_commit_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_commit,
+ (xdrproc_t) xdr___txn_commit_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_commit_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_bt_maxkey_reply *
+__db_db_bt_maxkey_1(argp, clnt)
+ __db_bt_maxkey_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_bt_maxkey_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_bt_maxkey,
+ (xdrproc_t) xdr___db_bt_maxkey_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_bt_maxkey_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_bt_minkey_reply *
+__db_db_bt_minkey_1(argp, clnt)
+ __db_bt_minkey_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_bt_minkey_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_bt_minkey,
+ (xdrproc_t) xdr___db_bt_minkey_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_bt_minkey_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_close_reply *
+__db_db_close_1(argp, clnt)
+ __db_close_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_close_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_close,
+ (xdrproc_t) xdr___db_close_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_close_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_create_reply *
+__db_db_create_1(argp, clnt)
+ __db_create_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_create_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_create,
+ (xdrproc_t) xdr___db_create_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_create_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_del_reply *
+__db_db_del_1(argp, clnt)
+ __db_del_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_del_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_del,
+ (xdrproc_t) xdr___db_del_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_del_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_extentsize_reply *
+__db_db_extentsize_1(argp, clnt)
+ __db_extentsize_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_extentsize_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_extentsize,
+ (xdrproc_t) xdr___db_extentsize_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_extentsize_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_flags_reply *
+__db_db_flags_1(argp, clnt)
+ __db_flags_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_flags_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_flags,
+ (xdrproc_t) xdr___db_flags_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_flags_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_get_reply *
+__db_db_get_1(argp, clnt)
+ __db_get_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_get_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_get,
+ (xdrproc_t) xdr___db_get_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_get_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_h_ffactor_reply *
+__db_db_h_ffactor_1(argp, clnt)
+ __db_h_ffactor_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_h_ffactor_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_h_ffactor,
+ (xdrproc_t) xdr___db_h_ffactor_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_h_ffactor_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_h_nelem_reply *
+__db_db_h_nelem_1(argp, clnt)
+ __db_h_nelem_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_h_nelem_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_h_nelem,
+ (xdrproc_t) xdr___db_h_nelem_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_h_nelem_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_key_range_reply *
+__db_db_key_range_1(argp, clnt)
+ __db_key_range_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_key_range_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_key_range,
+ (xdrproc_t) xdr___db_key_range_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_key_range_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_lorder_reply *
+__db_db_lorder_1(argp, clnt)
+ __db_lorder_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_lorder_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_lorder,
+ (xdrproc_t) xdr___db_lorder_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_lorder_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_open_reply *
+__db_db_open_1(argp, clnt)
+ __db_open_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_open_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_open,
+ (xdrproc_t) xdr___db_open_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_open_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_pagesize_reply *
+__db_db_pagesize_1(argp, clnt)
+ __db_pagesize_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_pagesize_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_pagesize,
+ (xdrproc_t) xdr___db_pagesize_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_pagesize_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_put_reply *
+__db_db_put_1(argp, clnt)
+ __db_put_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_put_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_put,
+ (xdrproc_t) xdr___db_put_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_put_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_re_delim_reply *
+__db_db_re_delim_1(argp, clnt)
+ __db_re_delim_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_re_delim_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_re_delim,
+ (xdrproc_t) xdr___db_re_delim_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_re_delim_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_re_len_reply *
+__db_db_re_len_1(argp, clnt)
+ __db_re_len_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_re_len_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_re_len,
+ (xdrproc_t) xdr___db_re_len_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_re_len_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_re_pad_reply *
+__db_db_re_pad_1(argp, clnt)
+ __db_re_pad_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_re_pad_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_re_pad,
+ (xdrproc_t) xdr___db_re_pad_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_re_pad_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_remove_reply *
+__db_db_remove_1(argp, clnt)
+ __db_remove_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_remove_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_remove,
+ (xdrproc_t) xdr___db_remove_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_remove_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_rename_reply *
+__db_db_rename_1(argp, clnt)
+ __db_rename_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_rename_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_rename,
+ (xdrproc_t) xdr___db_rename_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_rename_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_stat_reply *
+__db_db_stat_1(argp, clnt)
+ __db_stat_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_stat_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_stat,
+ (xdrproc_t) xdr___db_stat_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_stat_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_swapped_reply *
+__db_db_swapped_1(argp, clnt)
+ __db_swapped_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_swapped_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_swapped,
+ (xdrproc_t) xdr___db_swapped_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_swapped_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_sync_reply *
+__db_db_sync_1(argp, clnt)
+ __db_sync_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_sync_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_sync,
+ (xdrproc_t) xdr___db_sync_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_sync_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_cursor_reply *
+__db_db_cursor_1(argp, clnt)
+ __db_cursor_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_cursor_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_cursor,
+ (xdrproc_t) xdr___db_cursor_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_cursor_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_join_reply *
+__db_db_join_1(argp, clnt)
+ __db_join_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_join_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_join,
+ (xdrproc_t) xdr___db_join_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_join_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_close_reply *
+__db_dbc_close_1(argp, clnt)
+ __dbc_close_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_close_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_close,
+ (xdrproc_t) xdr___dbc_close_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_close_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_count_reply *
+__db_dbc_count_1(argp, clnt)
+ __dbc_count_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_count_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_count,
+ (xdrproc_t) xdr___dbc_count_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_count_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_del_reply *
+__db_dbc_del_1(argp, clnt)
+ __dbc_del_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_del_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_del,
+ (xdrproc_t) xdr___dbc_del_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_del_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_dup_reply *
+__db_dbc_dup_1(argp, clnt)
+ __dbc_dup_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_dup_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_dup,
+ (xdrproc_t) xdr___dbc_dup_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_dup_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_get_reply *
+__db_dbc_get_1(argp, clnt)
+ __dbc_get_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_get_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_get,
+ (xdrproc_t) xdr___dbc_get_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_get_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_put_reply *
+__db_dbc_put_1(argp, clnt)
+ __dbc_put_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_put_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_put,
+ (xdrproc_t) xdr___dbc_put_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_put_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+#endif /* HAVE_RPC */
diff --git a/bdb/rpc_client/gen_client.c b/bdb/rpc_client/gen_client.c
new file mode 100644
index 00000000000..7cc598e7dfa
--- /dev/null
+++ b/bdb/rpc_client/gen_client.c
@@ -0,0 +1,2494 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <rpc/rpc.h>
+#include <rpc/xdr.h>
+
+#include <errno.h>
+#include <string.h>
+#endif
+#include "db_server.h"
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_ext.h"
+#include "mp.h"
+#include "rpc_client_ext.h"
+#include "txn.h"
+
+#include "gen_client_ext.h"
+
+int
+__dbcl_env_cachesize(dbenv, gbytes, bytes, ncache)
+ DB_ENV * dbenv;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ int ncache;
+{
+ CLIENT *cl;
+ __env_cachesize_msg req;
+ static __env_cachesize_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___env_cachesize_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ req.dbenvcl_id = 0;
+ else
+ req.dbenvcl_id = dbenv->cl_id;
+ req.gbytes = gbytes;
+ req.bytes = bytes;
+ req.ncache = ncache;
+
+ replyp = __db_env_cachesize_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_env_close(dbenv, flags)
+ DB_ENV * dbenv;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __env_close_msg req;
+ static __env_close_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___env_close_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ req.dbenvcl_id = 0;
+ else
+ req.dbenvcl_id = dbenv->cl_id;
+ req.flags = flags;
+
+ replyp = __db_env_close_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_env_close_ret(dbenv, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_rpc_illegal(dbenv, name)
+ DB_ENV *dbenv;
+ char *name;
+{
+ __db_err(dbenv,
+ "%s method meaningless in RPC environment", name);
+ return (__db_eopnotsup(dbenv));
+}
+
+int
+__dbcl_set_data_dir(dbenv, dir)
+ DB_ENV * dbenv;
+ const char * dir;
+{
+ COMPQUIET(dir, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "set_data_dir"));
+}
+
+int
+__dbcl_env_set_feedback(dbenv, func0)
+ DB_ENV * dbenv;
+ void (*func0) __P((DB_ENV *, int, int));
+{
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "env_set_feedback"));
+}
+
+int
+__dbcl_env_flags(dbenv, flags, onoff)
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ int onoff;
+{
+ CLIENT *cl;
+ __env_flags_msg req;
+ static __env_flags_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___env_flags_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ req.dbenvcl_id = 0;
+ else
+ req.dbenvcl_id = dbenv->cl_id;
+ req.flags = flags;
+ req.onoff = onoff;
+
+ replyp = __db_env_flags_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_set_lg_bsize(dbenv, bsize)
+ DB_ENV * dbenv;
+ u_int32_t bsize;
+{
+ COMPQUIET(bsize, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lg_bsize"));
+}
+
+int
+__dbcl_set_lg_dir(dbenv, dir)
+ DB_ENV * dbenv;
+ const char * dir;
+{
+ COMPQUIET(dir, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "set_lg_dir"));
+}
+
+int
+__dbcl_set_lg_max(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lg_max"));
+}
+
+int
+__dbcl_set_lk_conflict(dbenv, conflicts, modes)
+ DB_ENV * dbenv;
+ u_int8_t * conflicts;
+ int modes;
+{
+ COMPQUIET(conflicts, 0);
+ COMPQUIET(modes, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_conflict"));
+}
+
+int
+__dbcl_set_lk_detect(dbenv, detect)
+ DB_ENV * dbenv;
+ u_int32_t detect;
+{
+ COMPQUIET(detect, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_detect"));
+}
+
+int
+__dbcl_set_lk_max(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_max"));
+}
+
+int
+__dbcl_set_lk_max_locks(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_max_locks"));
+}
+
+int
+__dbcl_set_lk_max_lockers(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_max_lockers"));
+}
+
+int
+__dbcl_set_lk_max_objects(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_max_objects"));
+}
+
+int
+__dbcl_set_mp_mmapsize(dbenv, mmapsize)
+ DB_ENV * dbenv;
+ size_t mmapsize;
+{
+ COMPQUIET(mmapsize, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_mp_mmapsize"));
+}
+
+int
+__dbcl_set_mutex_locks(dbenv, do_lock)
+ DB_ENV * dbenv;
+ int do_lock;
+{
+ COMPQUIET(do_lock, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_mutex_locks"));
+}
+
+int
+__dbcl_env_open(dbenv, home, flags, mode)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ int mode;
+{
+ CLIENT *cl;
+ __env_open_msg req;
+ static __env_open_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___env_open_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ req.dbenvcl_id = 0;
+ else
+ req.dbenvcl_id = dbenv->cl_id;
+ if (home == NULL)
+ req.home = "";
+ else
+ req.home = (char *)home;
+ req.flags = flags;
+ req.mode = mode;
+
+ replyp = __db_env_open_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_env_open_ret(dbenv, home, flags, mode, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_env_paniccall(dbenv, func0)
+ DB_ENV * dbenv;
+ void (*func0) __P((DB_ENV *, int));
+{
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "env_paniccall"));
+}
+
+int
+__dbcl_set_recovery_init(dbenv, func0)
+ DB_ENV * dbenv;
+ int (*func0) __P((DB_ENV *));
+{
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_recovery_init"));
+}
+
+int
+__dbcl_env_remove(dbenv, home, flags)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __env_remove_msg req;
+ static __env_remove_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___env_remove_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ req.dbenvcl_id = 0;
+ else
+ req.dbenvcl_id = dbenv->cl_id;
+ if (home == NULL)
+ req.home = "";
+ else
+ req.home = (char *)home;
+ req.flags = flags;
+
+ replyp = __db_env_remove_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_env_remove_ret(dbenv, home, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_set_shm_key(dbenv, shm_key)
+ DB_ENV * dbenv;
+ long shm_key;
+{
+ COMPQUIET(shm_key, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_shm_key"));
+}
+
+int
+__dbcl_set_tmp_dir(dbenv, dir)
+ DB_ENV * dbenv;
+ const char * dir;
+{
+ COMPQUIET(dir, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "set_tmp_dir"));
+}
+
+int
+__dbcl_set_tx_recover(dbenv, func0)
+ DB_ENV * dbenv;
+ int (*func0) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_tx_recover"));
+}
+
+int
+__dbcl_set_tx_max(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_tx_max"));
+}
+
+int
+__dbcl_set_tx_timestamp(dbenv, max)
+ DB_ENV * dbenv;
+ time_t * max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_tx_timestamp"));
+}
+
+int
+__dbcl_set_verbose(dbenv, which, onoff)
+ DB_ENV * dbenv;
+ u_int32_t which;
+ int onoff;
+{
+ COMPQUIET(which, 0);
+ COMPQUIET(onoff, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_verbose"));
+}
+
+int
+__dbcl_txn_abort(txnp)
+ DB_TXN * txnp;
+{
+ CLIENT *cl;
+ __txn_abort_msg req;
+ static __txn_abort_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = txnp->mgrp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___txn_abort_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (txnp == NULL)
+ req.txnpcl_id = 0;
+ else
+ req.txnpcl_id = txnp->txnid;
+
+ replyp = __db_txn_abort_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_txn_abort_ret(txnp, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_txn_begin(envp, parent, txnpp, flags)
+ DB_ENV * envp;
+ DB_TXN * parent;
+ DB_TXN ** txnpp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __txn_begin_msg req;
+ static __txn_begin_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (envp == NULL || envp->cl_handle == NULL) {
+ __db_err(envp, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___txn_begin_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)envp->cl_handle;
+
+ if (envp == NULL)
+ req.envpcl_id = 0;
+ else
+ req.envpcl_id = envp->cl_id;
+ if (parent == NULL)
+ req.parentcl_id = 0;
+ else
+ req.parentcl_id = parent->txnid;
+ req.flags = flags;
+
+ replyp = __db_txn_begin_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(envp, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_txn_checkpoint(dbenv, kbyte, min)
+ DB_ENV * dbenv;
+ u_int32_t kbyte;
+ u_int32_t min;
+{
+ COMPQUIET(kbyte, 0);
+ COMPQUIET(min, 0);
+ return (__dbcl_rpc_illegal(dbenv, "txn_checkpoint"));
+}
+
+int
+__dbcl_txn_commit(txnp, flags)
+ DB_TXN * txnp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __txn_commit_msg req;
+ static __txn_commit_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = txnp->mgrp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___txn_commit_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (txnp == NULL)
+ req.txnpcl_id = 0;
+ else
+ req.txnpcl_id = txnp->txnid;
+ req.flags = flags;
+
+ replyp = __db_txn_commit_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_txn_commit_ret(txnp, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_txn_prepare(txnp)
+ DB_TXN * txnp;
+{
+ DB_ENV *dbenv;
+
+ dbenv = txnp->mgrp->dbenv;
+ return (__dbcl_rpc_illegal(dbenv, "txn_prepare"));
+}
+
+int
+__dbcl_txn_stat(dbenv, statp, func0)
+ DB_ENV * dbenv;
+ DB_TXN_STAT ** statp;
+ void *(*func0) __P((size_t));
+{
+ COMPQUIET(statp, 0);
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "txn_stat"));
+}
+
+int
+__dbcl_db_bt_compare(dbp, func0)
+ DB * dbp;
+ int (*func0) __P((DB *, const DBT *, const DBT *));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_bt_compare"));
+}
+
+int
+__dbcl_db_bt_maxkey(dbp, maxkey)
+ DB * dbp;
+ u_int32_t maxkey;
+{
+ CLIENT *cl;
+ __db_bt_maxkey_msg req;
+ static __db_bt_maxkey_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_bt_maxkey_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.maxkey = maxkey;
+
+ replyp = __db_db_bt_maxkey_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_bt_minkey(dbp, minkey)
+ DB * dbp;
+ u_int32_t minkey;
+{
+ CLIENT *cl;
+ __db_bt_minkey_msg req;
+ static __db_bt_minkey_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_bt_minkey_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.minkey = minkey;
+
+ replyp = __db_db_bt_minkey_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_bt_prefix(dbp, func0)
+ DB * dbp;
+ size_t (*func0) __P((DB *, const DBT *, const DBT *));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_bt_prefix"));
+}
+
+int
+__dbcl_db_set_append_recno(dbp, func0)
+ DB * dbp;
+ int (*func0) __P((DB *, DBT *, db_recno_t));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_set_append_recno"));
+}
+
+int
+__dbcl_db_cachesize(dbp, gbytes, bytes, ncache)
+ DB * dbp;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ int ncache;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(gbytes, 0);
+ COMPQUIET(bytes, 0);
+ COMPQUIET(ncache, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_cachesize"));
+}
+
+int
+__dbcl_db_close(dbp, flags)
+ DB * dbp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_close_msg req;
+ static __db_close_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_close_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.flags = flags;
+
+ replyp = __db_db_close_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_close_ret(dbp, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_del(dbp, txnp, key, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_del_msg req;
+ static __db_del_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_del_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ req.txnpcl_id = 0;
+ else
+ req.txnpcl_id = txnp->txnid;
+ req.keydlen = key->dlen;
+ req.keydoff = key->doff;
+ req.keyflags = key->flags;
+ req.keydata.keydata_val = key->data;
+ req.keydata.keydata_len = key->size;
+ req.flags = flags;
+
+ replyp = __db_db_del_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_extentsize(dbp, extentsize)
+ DB * dbp;
+ u_int32_t extentsize;
+{
+ CLIENT *cl;
+ __db_extentsize_msg req;
+ static __db_extentsize_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_extentsize_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.extentsize = extentsize;
+
+ replyp = __db_db_extentsize_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_fd(dbp, fdp)
+ DB * dbp;
+ int * fdp;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(fdp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_fd"));
+}
+
+int
+__dbcl_db_feedback(dbp, func0)
+ DB * dbp;
+ void (*func0) __P((DB *, int, int));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_feedback"));
+}
+
+int
+__dbcl_db_flags(dbp, flags)
+ DB * dbp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_flags_msg req;
+ static __db_flags_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_flags_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.flags = flags;
+
+ replyp = __db_db_flags_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_get(dbp, txnp, key, data, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_get_msg req;
+ static __db_get_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_get_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ req.txnpcl_id = 0;
+ else
+ req.txnpcl_id = txnp->txnid;
+ req.keydlen = key->dlen;
+ req.keydoff = key->doff;
+ req.keyflags = key->flags;
+ req.keydata.keydata_val = key->data;
+ req.keydata.keydata_len = key->size;
+ req.datadlen = data->dlen;
+ req.datadoff = data->doff;
+ req.dataflags = data->flags;
+ req.datadata.datadata_val = data->data;
+ req.datadata.datadata_len = data->size;
+ req.flags = flags;
+
+ replyp = __db_db_get_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_h_ffactor(dbp, ffactor)
+ DB * dbp;
+ u_int32_t ffactor;
+{
+ CLIENT *cl;
+ __db_h_ffactor_msg req;
+ static __db_h_ffactor_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_h_ffactor_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.ffactor = ffactor;
+
+ replyp = __db_db_h_ffactor_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_h_hash(dbp, func0)
+ DB * dbp;
+ u_int32_t (*func0) __P((DB *, const void *, u_int32_t));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_h_hash"));
+}
+
+int
+__dbcl_db_h_nelem(dbp, nelem)
+ DB * dbp;
+ u_int32_t nelem;
+{
+ CLIENT *cl;
+ __db_h_nelem_msg req;
+ static __db_h_nelem_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_h_nelem_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.nelem = nelem;
+
+ replyp = __db_db_h_nelem_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_key_range(dbp, txnp, key, range, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DB_KEY_RANGE * range;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_key_range_msg req;
+ static __db_key_range_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_key_range_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ req.txnpcl_id = 0;
+ else
+ req.txnpcl_id = txnp->txnid;
+ req.keydlen = key->dlen;
+ req.keydoff = key->doff;
+ req.keyflags = key->flags;
+ req.keydata.keydata_val = key->data;
+ req.keydata.keydata_len = key->size;
+ req.flags = flags;
+
+ replyp = __db_db_key_range_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_lorder(dbp, lorder)
+ DB * dbp;
+ int lorder;
+{
+ CLIENT *cl;
+ __db_lorder_msg req;
+ static __db_lorder_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_lorder_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.lorder = lorder;
+
+ replyp = __db_db_lorder_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_malloc(dbp, func0)
+ DB * dbp;
+ void *(*func0) __P((size_t));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_malloc"));
+}
+
+int
+__dbcl_db_open(dbp, name, subdb, type, flags, mode)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ CLIENT *cl;
+ __db_open_msg req;
+ static __db_open_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_open_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if (name == NULL)
+ req.name = "";
+ else
+ req.name = (char *)name;
+ if (subdb == NULL)
+ req.subdb = "";
+ else
+ req.subdb = (char *)subdb;
+ req.type = type;
+ req.flags = flags;
+ req.mode = mode;
+
+ replyp = __db_db_open_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_open_ret(dbp, name, subdb, type, flags, mode, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_pagesize(dbp, pagesize)
+ DB * dbp;
+ u_int32_t pagesize;
+{
+ CLIENT *cl;
+ __db_pagesize_msg req;
+ static __db_pagesize_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_pagesize_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.pagesize = pagesize;
+
+ replyp = __db_db_pagesize_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_panic(dbp, func0)
+ DB * dbp;
+ void (*func0) __P((DB_ENV *, int));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_panic"));
+}
+
+int
+__dbcl_db_put(dbp, txnp, key, data, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_put_msg req;
+ static __db_put_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_put_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ req.txnpcl_id = 0;
+ else
+ req.txnpcl_id = txnp->txnid;
+ req.keydlen = key->dlen;
+ req.keydoff = key->doff;
+ req.keyflags = key->flags;
+ req.keydata.keydata_val = key->data;
+ req.keydata.keydata_len = key->size;
+ req.datadlen = data->dlen;
+ req.datadoff = data->doff;
+ req.dataflags = data->flags;
+ req.datadata.datadata_val = data->data;
+ req.datadata.datadata_len = data->size;
+ req.flags = flags;
+
+ replyp = __db_db_put_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_realloc(dbp, func0)
+ DB * dbp;
+ void *(*func0) __P((void *, size_t));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_realloc"));
+}
+
+int
+__dbcl_db_re_delim(dbp, delim)
+ DB * dbp;
+ int delim;
+{
+ CLIENT *cl;
+ __db_re_delim_msg req;
+ static __db_re_delim_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_re_delim_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.delim = delim;
+
+ replyp = __db_db_re_delim_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_re_len(dbp, len)
+ DB * dbp;
+ u_int32_t len;
+{
+ CLIENT *cl;
+ __db_re_len_msg req;
+ static __db_re_len_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_re_len_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.len = len;
+
+ replyp = __db_db_re_len_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_re_pad(dbp, pad)
+ DB * dbp;
+ int pad;
+{
+ CLIENT *cl;
+ __db_re_pad_msg req;
+ static __db_re_pad_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_re_pad_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.pad = pad;
+
+ replyp = __db_db_re_pad_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_re_source(dbp, re_source)
+ DB * dbp;
+ const char * re_source;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(re_source, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "db_re_source"));
+}
+
+int
+__dbcl_db_remove(dbp, name, subdb, flags)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_remove_msg req;
+ static __db_remove_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_remove_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if (name == NULL)
+ req.name = "";
+ else
+ req.name = (char *)name;
+ if (subdb == NULL)
+ req.subdb = "";
+ else
+ req.subdb = (char *)subdb;
+ req.flags = flags;
+
+ replyp = __db_db_remove_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_remove_ret(dbp, name, subdb, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_rename(dbp, name, subdb, newname, flags)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ const char * newname;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_rename_msg req;
+ static __db_rename_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_rename_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if (name == NULL)
+ req.name = "";
+ else
+ req.name = (char *)name;
+ if (subdb == NULL)
+ req.subdb = "";
+ else
+ req.subdb = (char *)subdb;
+ if (newname == NULL)
+ req.newname = "";
+ else
+ req.newname = (char *)newname;
+ req.flags = flags;
+
+ replyp = __db_db_rename_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_stat(dbp, sp, func0, flags)
+ DB * dbp;
+ void * sp;
+ void *(*func0) __P((size_t));
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_stat_msg req;
+ static __db_stat_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_stat_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (func0 != NULL) {
+		__db_err(dbenv, "User functions not supported in RPC.");
+ return (EINVAL);
+ }
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.flags = flags;
+
+ replyp = __db_db_stat_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_stat_ret(dbp, sp, func0, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_swapped(dbp)
+ DB * dbp;
+{
+ CLIENT *cl;
+ __db_swapped_msg req;
+ static __db_swapped_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_swapped_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+
+ replyp = __db_db_swapped_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_sync(dbp, flags)
+ DB * dbp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_sync_msg req;
+ static __db_sync_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_sync_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ req.flags = flags;
+
+ replyp = __db_db_sync_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_db_upgrade(dbp, fname, flags)
+ DB * dbp;
+ const char * fname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(fname, NULL);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_upgrade"));
+}
+
+int
+__dbcl_db_cursor(dbp, txnp, dbcpp, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBC ** dbcpp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_cursor_msg req;
+ static __db_cursor_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_cursor_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ req.txnpcl_id = 0;
+ else
+ req.txnpcl_id = txnp->txnid;
+ req.flags = flags;
+
+ replyp = __db_db_cursor_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_db_cursor_ret(dbp, txnp, dbcpp, flags, replyp));
+out:
+ return (ret);
+}
+
+static int __dbcl_db_join_curslist __P((__db_join_curslist **, DBC **));
+static void __dbcl_db_join_cursfree __P((__db_join_curslist **));
+int
+__dbcl_db_join(dbp, curs, dbcp, flags)
+ DB * dbp;
+ DBC ** curs;
+ DBC ** dbcp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_join_msg req;
+ static __db_join_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___db_join_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ req.dbpcl_id = 0;
+ else
+ req.dbpcl_id = dbp->cl_id;
+ if ((ret = __dbcl_db_join_curslist(&req.curslist, curs)) != 0)
+ goto out;
+ req.flags = flags;
+
+ replyp = __db_db_join_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ __dbcl_db_join_cursfree(&req.curslist);
+ return (__dbcl_db_join_ret(dbp, curs, dbcp, flags, replyp));
+out:
+ __dbcl_db_join_cursfree(&req.curslist);
+ return (ret);
+}
+
+int
+__dbcl_db_join_curslist(locp, pp)
+ __db_join_curslist **locp;
+ DBC ** pp;
+{
+ DBC ** p;
+ u_int32_t *q;
+ int ret;
+ __db_join_curslist *nl, **nlp;
+
+ *locp = NULL;
+ if (pp == NULL)
+ return (0);
+ nlp = locp;
+ for (p = pp; *p != 0; p++) {
+ if ((ret = __os_malloc(NULL, sizeof(*nl), NULL, nlp)) != 0)
+ goto out;
+ nl = *nlp;
+ nl->next = NULL;
+ nl->ent.ent_val = NULL;
+ nl->ent.ent_len = 0;
+ if ((ret = __os_malloc(NULL, sizeof(u_int32_t), NULL, &nl->ent.ent_val)) != 0)
+ goto out;
+ q = (u_int32_t *)nl->ent.ent_val;
+ *q = (*p)->cl_id;
+ nl->ent.ent_len = sizeof(u_int32_t);
+ nlp = &nl->next;
+ }
+ return (0);
+out:
+ __dbcl_db_join_cursfree(locp);
+ return (ret);
+}
+
+void
+__dbcl_db_join_cursfree(locp)
+ __db_join_curslist **locp;
+{
+ __db_join_curslist *nl, *nl1;
+
+ if (locp == NULL)
+ return;
+ for (nl = *locp; nl != NULL; nl = nl1) {
+ nl1 = nl->next;
+ if (nl->ent.ent_val)
+ __os_free(nl->ent.ent_val, nl->ent.ent_len);
+ __os_free(nl, sizeof(*nl));
+ }
+}
+
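+#ifdef DBCL_EXAMPLE
+/*
+ * Illustrative sketch only (DBCL_EXAMPLE is never defined by the build):
+ * __dbcl_db_join() expects "curs" to be a NULL-terminated array of open
+ * cursors, which __dbcl_db_join_curslist() converts into the linked list
+ * carried in the RPC request.  The two secondary cursors here are
+ * placeholders supplied by the caller.
+ */
+static int
+dbcl_example_join(primary, c0, c1, join_dbcp)
+	DB *primary;
+	DBC *c0, *c1;
+	DBC **join_dbcp;
+{
+	DBC *curslist[3];
+
+	curslist[0] = c0;
+	curslist[1] = c1;
+	curslist[2] = NULL;		/* Terminator is required. */
+	return (primary->join(primary, curslist, join_dbcp, 0));
+}
+#endif /* DBCL_EXAMPLE */
+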
+int
+__dbcl_dbc_close(dbc)
+ DBC * dbc;
+{
+ CLIENT *cl;
+ __dbc_close_msg req;
+ static __dbc_close_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___dbc_close_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ req.dbccl_id = 0;
+ else
+ req.dbccl_id = dbc->cl_id;
+
+ replyp = __db_dbc_close_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_dbc_close_ret(dbc, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_dbc_count(dbc, countp, flags)
+ DBC * dbc;
+ db_recno_t * countp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_count_msg req;
+ static __dbc_count_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___dbc_count_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ req.dbccl_id = 0;
+ else
+ req.dbccl_id = dbc->cl_id;
+ req.flags = flags;
+
+ replyp = __db_dbc_count_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_dbc_count_ret(dbc, countp, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_dbc_del(dbc, flags)
+ DBC * dbc;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_del_msg req;
+ static __dbc_del_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___dbc_del_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ req.dbccl_id = 0;
+ else
+ req.dbccl_id = dbc->cl_id;
+ req.flags = flags;
+
+ replyp = __db_dbc_del_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ return (ret);
+}
+
+int
+__dbcl_dbc_dup(dbc, dbcp, flags)
+ DBC * dbc;
+ DBC ** dbcp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_dup_msg req;
+ static __dbc_dup_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___dbc_dup_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ req.dbccl_id = 0;
+ else
+ req.dbccl_id = dbc->cl_id;
+ req.flags = flags;
+
+ replyp = __db_dbc_dup_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_dbc_dup_ret(dbc, dbcp, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_dbc_get(dbc, key, data, flags)
+ DBC * dbc;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_get_msg req;
+ static __dbc_get_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___dbc_get_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ req.dbccl_id = 0;
+ else
+ req.dbccl_id = dbc->cl_id;
+ req.keydlen = key->dlen;
+ req.keydoff = key->doff;
+ req.keyflags = key->flags;
+ req.keydata.keydata_val = key->data;
+ req.keydata.keydata_len = key->size;
+ req.datadlen = data->dlen;
+ req.datadoff = data->doff;
+ req.dataflags = data->flags;
+ req.datadata.datadata_val = data->data;
+ req.datadata.datadata_len = data->size;
+ req.flags = flags;
+
+ replyp = __db_dbc_get_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_dbc_get_ret(dbc, key, data, flags, replyp));
+out:
+ return (ret);
+}
+
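+#ifdef DBCL_EXAMPLE
+/*
+ * Illustrative sketch only (DBCL_EXAMPLE is never defined by the build):
+ * an ordinary cursor walk drives __dbcl_db_cursor(), __dbcl_dbc_get() and
+ * __dbcl_dbc_close() through the usual handle methods; application code
+ * needs no RPC-specific calls.
+ */
+static int
+dbcl_example_walk(dbp)
+	DB *dbp;
+{
+	DBC *dbc;
+	DBT key, data;
+	int ret, t_ret;
+
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+		return (ret);
+	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
+		;			/* Process key/data here. */
+	if (ret == DB_NOTFOUND)
+		ret = 0;
+	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+#endif /* DBCL_EXAMPLE */
+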
+int
+__dbcl_dbc_put(dbc, key, data, flags)
+ DBC * dbc;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_put_msg req;
+ static __dbc_put_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = NULL;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || dbenv->cl_handle == NULL) {
+ __db_err(dbenv, "No server environment.");
+ return (DB_NOSERVER);
+ }
+
+ if (replyp != NULL) {
+ xdr_free((xdrproc_t)xdr___dbc_put_reply, (void *)replyp);
+ replyp = NULL;
+ }
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ req.dbccl_id = 0;
+ else
+ req.dbccl_id = dbc->cl_id;
+ req.keydlen = key->dlen;
+ req.keydoff = key->doff;
+ req.keyflags = key->flags;
+ req.keydata.keydata_val = key->data;
+ req.keydata.keydata_len = key->size;
+ req.datadlen = data->dlen;
+ req.datadoff = data->doff;
+ req.dataflags = data->flags;
+ req.datadata.datadata_val = data->data;
+ req.datadata.datadata_len = data->size;
+ req.flags = flags;
+
+ replyp = __db_dbc_put_1(&req, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ return (__dbcl_dbc_put_ret(dbc, key, data, flags, replyp));
+out:
+ return (ret);
+}
+
+int
+__dbcl_lock_detect(dbenv, flags, atype, aborted)
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ u_int32_t atype;
+ int * aborted;
+{
+ COMPQUIET(flags, 0);
+ COMPQUIET(atype, 0);
+ COMPQUIET(aborted, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_detect"));
+}
+
+int
+__dbcl_lock_get(dbenv, locker, flags, obj, mode, lock)
+ DB_ENV * dbenv;
+ u_int32_t locker;
+ u_int32_t flags;
+ const DBT * obj;
+ db_lockmode_t mode;
+ DB_LOCK * lock;
+{
+ COMPQUIET(locker, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(obj, NULL);
+ COMPQUIET(mode, 0);
+ COMPQUIET(lock, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_get"));
+}
+
+int
+__dbcl_lock_id(dbenv, idp)
+ DB_ENV * dbenv;
+ u_int32_t * idp;
+{
+ COMPQUIET(idp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_id"));
+}
+
+int
+__dbcl_lock_put(dbenv, lock)
+ DB_ENV * dbenv;
+ DB_LOCK * lock;
+{
+ COMPQUIET(lock, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_put"));
+}
+
+int
+__dbcl_lock_stat(dbenv, statp, func0)
+ DB_ENV * dbenv;
+ DB_LOCK_STAT ** statp;
+ void *(*func0) __P((size_t));
+{
+ COMPQUIET(statp, 0);
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_stat"));
+}
+
+int
+__dbcl_lock_vec(dbenv, locker, flags, list, nlist, elistp)
+ DB_ENV * dbenv;
+ u_int32_t locker;
+ u_int32_t flags;
+ DB_LOCKREQ * list;
+ int nlist;
+ DB_LOCKREQ ** elistp;
+{
+ COMPQUIET(locker, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(list, 0);
+ COMPQUIET(nlist, 0);
+ COMPQUIET(elistp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_vec"));
+}
+
+int
+__dbcl_log_archive(dbenv, listp, flags, func0)
+ DB_ENV * dbenv;
+ char *** listp;
+ u_int32_t flags;
+ void *(*func0) __P((size_t));
+{
+ COMPQUIET(listp, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_archive"));
+}
+
+int
+__dbcl_log_file(dbenv, lsn, namep, len)
+ DB_ENV * dbenv;
+ const DB_LSN * lsn;
+ char * namep;
+ size_t len;
+{
+ COMPQUIET(lsn, NULL);
+ COMPQUIET(namep, NULL);
+ COMPQUIET(len, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_file"));
+}
+
+int
+__dbcl_log_flush(dbenv, lsn)
+ DB_ENV * dbenv;
+ const DB_LSN * lsn;
+{
+ COMPQUIET(lsn, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "log_flush"));
+}
+
+int
+__dbcl_log_get(dbenv, lsn, data, flags)
+ DB_ENV * dbenv;
+ DB_LSN * lsn;
+ DBT * data;
+ u_int32_t flags;
+{
+ COMPQUIET(lsn, 0);
+ COMPQUIET(data, NULL);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_get"));
+}
+
+int
+__dbcl_log_put(dbenv, lsn, data, flags)
+ DB_ENV * dbenv;
+ DB_LSN * lsn;
+ const DBT * data;
+ u_int32_t flags;
+{
+ COMPQUIET(lsn, 0);
+ COMPQUIET(data, NULL);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_put"));
+}
+
+int
+__dbcl_log_register(dbenv, dbp, namep)
+ DB_ENV * dbenv;
+ DB * dbp;
+ const char * namep;
+{
+ COMPQUIET(dbp, 0);
+ COMPQUIET(namep, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "log_register"));
+}
+
+int
+__dbcl_log_stat(dbenv, statp, func0)
+ DB_ENV * dbenv;
+ DB_LOG_STAT ** statp;
+ void *(*func0) __P((size_t));
+{
+ COMPQUIET(statp, 0);
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_stat"));
+}
+
+int
+__dbcl_log_unregister(dbenv, dbp)
+ DB_ENV * dbenv;
+ DB * dbp;
+{
+ COMPQUIET(dbp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_unregister"));
+}
+
+int
+__dbcl_memp_fclose(mpf)
+ DB_MPOOLFILE * mpf;
+{
+ DB_ENV *dbenv;
+
+ dbenv = mpf->dbmp->dbenv;
+ return (__dbcl_rpc_illegal(dbenv, "memp_fclose"));
+}
+
+int
+__dbcl_memp_fget(mpf, pgno, flags, pagep)
+ DB_MPOOLFILE * mpf;
+ db_pgno_t * pgno;
+ u_int32_t flags;
+ void ** pagep;
+{
+ DB_ENV *dbenv;
+
+ dbenv = mpf->dbmp->dbenv;
+ COMPQUIET(pgno, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(pagep, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_fget"));
+}
+
+int
+__dbcl_memp_fopen(dbenv, file, flags, mode, pagesize, finfop, mpf)
+ DB_ENV * dbenv;
+ const char * file;
+ u_int32_t flags;
+ int mode;
+ size_t pagesize;
+ DB_MPOOL_FINFO * finfop;
+ DB_MPOOLFILE ** mpf;
+{
+ COMPQUIET(file, NULL);
+ COMPQUIET(flags, 0);
+ COMPQUIET(mode, 0);
+ COMPQUIET(pagesize, 0);
+ COMPQUIET(finfop, 0);
+ COMPQUIET(mpf, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_fopen"));
+}
+
+int
+__dbcl_memp_fput(mpf, pgaddr, flags)
+ DB_MPOOLFILE * mpf;
+ void * pgaddr;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = mpf->dbmp->dbenv;
+ COMPQUIET(pgaddr, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_fput"));
+}
+
+int
+__dbcl_memp_fset(mpf, pgaddr, flags)
+ DB_MPOOLFILE * mpf;
+ void * pgaddr;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = mpf->dbmp->dbenv;
+ COMPQUIET(pgaddr, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_fset"));
+}
+
+int
+__dbcl_memp_fsync(mpf)
+ DB_MPOOLFILE * mpf;
+{
+ DB_ENV *dbenv;
+
+ dbenv = mpf->dbmp->dbenv;
+ return (__dbcl_rpc_illegal(dbenv, "memp_fsync"));
+}
+
+int
+__dbcl_memp_register(dbenv, ftype, func0, func1)
+ DB_ENV * dbenv;
+ int ftype;
+ int (*func0) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*func1) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+{
+ COMPQUIET(ftype, 0);
+ COMPQUIET(func0, 0);
+ COMPQUIET(func1, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_register"));
+}
+
+int
+__dbcl_memp_stat(dbenv, gstatp, fstatp, func0)
+ DB_ENV * dbenv;
+ DB_MPOOL_STAT ** gstatp;
+ DB_MPOOL_FSTAT *** fstatp;
+ void *(*func0) __P((size_t));
+{
+ COMPQUIET(gstatp, 0);
+ COMPQUIET(fstatp, 0);
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_stat"));
+}
+
+int
+__dbcl_memp_sync(dbenv, lsn)
+ DB_ENV * dbenv;
+ DB_LSN * lsn;
+{
+ COMPQUIET(lsn, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_sync"));
+}
+
+int
+__dbcl_memp_trickle(dbenv, pct, nwrotep)
+ DB_ENV * dbenv;
+ int pct;
+ int * nwrotep;
+{
+ COMPQUIET(pct, 0);
+ COMPQUIET(nwrotep, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_trickle"));
+}
+
+#endif /* HAVE_RPC */
diff --git a/bdb/rpc_client/gen_client_ret.c b/bdb/rpc_client/gen_client_ret.c
new file mode 100644
index 00000000000..17e3f195fc3
--- /dev/null
+++ b/bdb/rpc_client/gen_client_ret.c
@@ -0,0 +1,542 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: gen_client_ret.c,v 1.29 2000/12/31 19:26:23 bostic Exp $";
+#endif /* not lint */
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "db_server.h"
+
+#include "db_int.h"
+#include "db_page.h"
+#include "txn.h"
+#include "db_ext.h"
+#include "rpc_client_ext.h"
+
+static void __db_db_stat_statsfree __P((u_int32_t *));
+static int __db_db_stat_statslist __P((__db_stat_statsreplist *, u_int32_t **));
+
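+/*
+ * The __dbcl_*_ret() routines below unpack the replies produced by the
+ * stubs in gen_client.c:  they copy returned key/data/statistics values
+ * back into the caller's structures and perform any client-side
+ * bookkeeping (creating DB_TXN handles, setting up cursors, releasing
+ * handles on close).  A non-zero reply status is simply returned.
+ */
+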
+int
+__dbcl_env_close_ret(dbenv, flags, replyp)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_refresh(dbenv);
+ __os_free(dbenv, sizeof(*dbenv));
+ if (replyp->status == 0 && ret != 0)
+ return (ret);
+ else
+ return (replyp->status);
+}
+
+int
+__dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
+ DB_ENV *dbenv;
+ const char *home;
+ u_int32_t flags;
+ int mode;
+ __env_open_reply *replyp;
+{
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ COMPQUIET(home, NULL);
+ COMPQUIET(mode, 0);
+
+ /*
+ * If error, return it.
+ */
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * If the user requested transactions, then we have some
+ * local client-side setup to do also.
+ */
+ if (LF_ISSET(DB_INIT_TXN)) {
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(DB_TXNMGR), &tmgrp)) != 0)
+ return (ret);
+ TAILQ_INIT(&tmgrp->txn_chain);
+ tmgrp->dbenv = dbenv;
+ dbenv->tx_handle = tmgrp;
+ }
+
+ return (replyp->status);
+}
+
+int
+__dbcl_env_remove_ret(dbenv, home, flags, replyp)
+ DB_ENV *dbenv;
+ const char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(home, NULL);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_refresh(dbenv);
+ __os_free(dbenv, sizeof(*dbenv));
+ if (replyp->status == 0 && ret != 0)
+ return (ret);
+ else
+ return (replyp->status);
+}
+
+int
+__dbcl_txn_abort_ret(txnp, replyp)
+ DB_TXN *txnp;
+ __txn_abort_reply *replyp;
+{
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+int
+__dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp)
+ DB_ENV *envp;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+{
+ DB_TXN *txn;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ if ((ret = __os_calloc(envp, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+ txn->txnid = replyp->txnidcl_id;
+ txn->mgrp = envp->tx_handle;
+ txn->parent = parent;
+ TAILQ_INIT(&txn->kids);
+ txn->flags = TXN_MALLOC;
+ if (parent != NULL)
+ TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
+
+ /*
+ * XXX
+	 * In the DB library the txn_chain is protected by mgrp->mutexp.
+	 * However, that mutex is implemented in the environment's shared
+	 * memory region, and the client library does not include the
+	 * region code; region operations are simply forwarded to the
+	 * server.  The chain is therefore unprotected here, but properly
+	 * protected on the server.
+ */
+ TAILQ_INSERT_TAIL(&txn->mgrp->txn_chain, txn, links);
+
+ *txnpp = txn;
+ return (replyp->status);
+}
+
+int
+__dbcl_txn_commit_ret(txnp, flags, replyp)
+ DB_TXN *txnp;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+int
+__dbcl_db_close_ret(dbp, flags, replyp)
+ DB *dbp;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+int
+__dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key, *data;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldkey;
+
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbp->dbenv;
+
+ oldkey = key->data;
+ ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
+ replyp->keydata.keydata_len);
+ if (ret)
+ return (ret);
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len);
+ /*
+	 * If there was an error copying 'data' and we allocated memory
+	 * for 'key', free it before returning the error.
+ */
+ if (ret && oldkey != NULL)
+ __os_free(key->data, key->size);
+ return (ret);
+}
+
+int
+__dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key;
+ DB_KEY_RANGE *range;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+{
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(key, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ range->less = replyp->less;
+ range->equal = replyp->equal;
+ range->greater = replyp->greater;
+ return (replyp->status);
+}
+
+int
+__dbcl_db_open_ret(dbp, name, subdb, type, flags, mode, replyp)
+ DB *dbp;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+ __db_open_reply *replyp;
+{
+ COMPQUIET(name, NULL);
+ COMPQUIET(subdb, NULL);
+ COMPQUIET(type, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(mode, 0);
+
+ dbp->type = replyp->type;
+
+ /*
+ * XXX
+ * This is only for Tcl which peeks at the dbp flags.
+ * When dbp->get_flags exists, this should go away.
+ */
+ dbp->flags = replyp->dbflags;
+ return (replyp->status);
+}
+
+int
+__dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key, *data;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(data, NULL);
+
+ ret = replyp->status;
+ if (replyp->status == 0 && (flags == DB_APPEND))
+ *(db_recno_t *)key->data =
+ *(db_recno_t *)replyp->keydata.keydata_val;
+ return (ret);
+}
+
+int
+__dbcl_db_remove_ret(dbp, name, subdb, flags, replyp)
+ DB *dbp;
+ const char *name, *subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(name, 0);
+ COMPQUIET(subdb, 0);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+int
+__dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp)
+ DB *dbp;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(name, 0);
+ COMPQUIET(subdb, 0);
+ COMPQUIET(newname, 0);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+int
+__dbcl_db_stat_ret(dbp, sp, func, flags, replyp)
+ DB *dbp;
+ void *sp;
+ void *(*func) __P((size_t));
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+{
+ int ret;
+ u_int32_t *__db_statslist;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(func, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ if ((ret =
+ __db_db_stat_statslist(replyp->statslist, &__db_statslist)) != 0)
+ return (ret);
+
+ if (sp == NULL)
+ __db_db_stat_statsfree(__db_statslist);
+ else
+ *(u_int32_t **)sp = __db_statslist;
+ return (replyp->status);
+}
+
+static int
+__db_db_stat_statslist(locp, ppp)
+ __db_stat_statsreplist *locp;
+ u_int32_t **ppp;
+{
+ u_int32_t *pp;
+ int cnt, ret, size;
+ __db_stat_statsreplist *nl;
+
+ for (cnt = 0, nl = locp; nl != NULL; cnt++, nl = nl->next)
+ ;
+
+ if (cnt == 0) {
+ *ppp = NULL;
+ return (0);
+ }
+ size = sizeof(*pp) * cnt;
+ if ((ret = __os_malloc(NULL, size, NULL, ppp)) != 0)
+ return (ret);
+ memset(*ppp, 0, size);
+ for (pp = *ppp, nl = locp; nl != NULL; nl = nl->next, pp++) {
+ *pp = *(u_int32_t *)nl->ent.ent_val;
+ }
+ return (0);
+}
+
+static void
+__db_db_stat_statsfree(pp)
+ u_int32_t *pp;
+{
+ size_t size;
+ u_int32_t *p;
+
+ if (pp == NULL)
+ return;
+ size = sizeof(*p);
+ for (p = pp; *p != 0; p++)
+ size += sizeof(*p);
+
+ __os_free(pp, size);
+}
+
+int
+__dbcl_db_cursor_ret(dbp, txnp, dbcpp, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBC **dbcpp;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+{
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcpp));
+}
+
+int
+__dbcl_db_join_ret(dbp, curs, dbcpp, flags, replyp)
+ DB *dbp;
+ DBC **curs, **dbcpp;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+{
+ COMPQUIET(curs, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * We set this up as a normal cursor. We do not need
+ * to treat a join cursor any differently than a normal
+ * cursor, even though DB itself must. We only need the
+ * client-side cursor/db relationship to know what cursors
+ * are open in the db, and to store their ID. Nothing else.
+ */
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcpp));
+}
+
+int
+__dbcl_dbc_close_ret(dbcp, replyp)
+ DBC *dbcp;
+ __dbc_close_reply *replyp;
+{
+ DB *dbp;
+
+ dbp = dbcp->dbp;
+ __dbcl_c_refresh(dbcp);
+ return (replyp->status);
+}
+
+int
+__dbcl_dbc_count_ret(dbc, countp, flags, replyp)
+ DBC *dbc;
+ db_recno_t *countp;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+{
+ COMPQUIET(dbc, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ *countp = replyp->dupcount;
+
+ return (replyp->status);
+}
+
+int
+__dbcl_dbc_dup_ret(dbcp, dbcpp, flags, replyp)
+ DBC *dbcp, **dbcpp;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbcp->dbp, dbcpp));
+}
+
+int
+__dbcl_dbc_get_ret(dbcp, key, data, flags, replyp)
+ DBC *dbcp;
+ DBT *key, *data;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldkey;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbcp->dbp->dbenv;
+ oldkey = key->data;
+ ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
+ replyp->keydata.keydata_len);
+ if (ret)
+ return (ret);
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len);
+
+ /*
+	 * If there was an error copying 'data' and we allocated memory
+	 * for 'key', free it before returning the error.
+ */
+ if (ret && oldkey != NULL)
+ __os_free(key->data, key->size);
+ return (ret);
+}
+
+int
+__dbcl_dbc_put_ret(dbcp, key, data, flags, replyp)
+ DBC *dbcp;
+ DBT *key, *data;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+{
+ COMPQUIET(data, NULL);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ if (replyp->status == 0 && dbcp->dbp->type == DB_RECNO &&
+ (flags == DB_AFTER || flags == DB_BEFORE))
+ *(db_recno_t *)key->data =
+ *(db_recno_t *)replyp->keydata.keydata_val;
+ return (replyp->status);
+}
+#endif /* HAVE_RPC */
diff --git a/bdb/rpc_server/clsrv.html b/bdb/rpc_server/clsrv.html
new file mode 100644
index 00000000000..ae089c4b382
--- /dev/null
+++ b/bdb/rpc_server/clsrv.html
@@ -0,0 +1,453 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<CENTER>
+<H1>
+Client/Server Interface for Berkeley DB</H1></CENTER>
+
+<CENTER><I>Susan LoVerso</I>
+<BR><I>sue@sleepycat.com</I>
+<BR><I>Rev 1.3</I>
+<BR><I>1999 Nov 29</I></CENTER>
+
+<P>We provide an interface allowing client/server access to Berkeley DB.&nbsp;
+Our goal is to provide client and server libraries that allow users to separate
+the functionality of their applications while still having access to the full
+benefits of Berkeley DB, through a seamless interface that requires minimal
+modification to existing applications.
+<P>The client/server interface for Berkeley DB can be broken up into several
+layers.&nbsp; At the lowest level there is the transport mechanism to send
+out the messages over the network.&nbsp; Above that layer is the messaging
+layer to interpret what comes over the wire, and bundle/unbundle message
+contents.&nbsp; The next layer is Berkeley DB itself.
+<P>The transport layer uses ONC RPC (RFC 1831) and XDR (RFC 1832).&nbsp;
+We declare our message types and the operations our program supports, and
+the RPC library and utilities take care of most of the rest.&nbsp; The
+<I>rpcgen</I> program generates all of the low-level code needed; we need
+to define both sides of the RPC.
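+<P>For illustration only, this is the general flavor of what <I>rpcgen</I>
+produces from those declarations (the field names follow the generated client
+code; the exact types and the real definitions come from the XDR source and
+db_server.h, so treat this as a sketch):
+<PRE>struct __env_flags_msg {
+        unsigned int dbenvcl_id;   /* server-side environment id */
+        unsigned int flags;
+        unsigned int onoff;
+};
+struct __env_flags_reply {
+        int status;                /* Berkeley DB return value */
+};</PRE>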
+<BR>&nbsp;
+<H2>
+<A NAME="DB Modifications"></A>DB Modifications</H2>
+To achieve the goal of a seamless interface, it is necessary to impose
+a constraint on the application. That constraint is simply that all database
+access must be done through an open environment.&nbsp; That is, this model
+does not support standalone databases.&nbsp; The reason for this constraint
+is so that we have an environment structure internally to store our connection
+to the server.&nbsp; Imposing this constraint means that we can provide
+the seamless interface just by adding a single environment method: <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server()</A>.
+<P>The planned interface for this method is:
+<PRE>DBENV->set_server(dbenv,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* DB_ENV structure */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; hostname,&nbsp;&nbsp; /* Host of server */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; cl_timeout, /* Client timeout (sec) */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; srv_timeout,/* Server timeout (sec) */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; flags);&nbsp;&nbsp;&nbsp;&nbsp; /* Flags: unused */</PRE>
+This new method takes the hostname of the server, establishes our connection
+and an environment on the server.&nbsp; If a server timeout is specified,
+then we send that to the server as well (and the server may or may not
+choose to use that value).&nbsp; This timeout is how long the server will
+allow the environment to remain idle before declaring it dead and releasing
+resources on the server.&nbsp; The pointer to the connection is stored
+on the client in the DBENV structure and is used by all other methods to
+figure out with whom to communicate.&nbsp; If a client timeout is specified,
+it indicates how long the client is willing to wait for a reply from the
+server.&nbsp; If the values are 0, then defaults are used.&nbsp; Flags
+is currently unused; it exists because we always want a placeholder for
+flags, and it could later be used to request authentication (were we to
+provide an authentication scheme at some point) or for other uses not yet
+thought of.
+<P>This client code is part of the monolithic DB library.&nbsp; The user
+accesses the client functions via a new flag to <A HREF="../docs/api_c/db_env_create.html">db_env_create()</A>.&nbsp;
+That flag is DB_CLIENT.&nbsp; By using this flag the user indicates they
+want to have the client methods rather than the standard methods for the
+environment.&nbsp; Also by issuing this flag, the user needs to connect
+to the server via the <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server()</A>
+method.
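+<P>For illustration only, here is a minimal sketch of what a client
+application might look like; the host name and environment home are
+placeholders, and the exact argument types follow the planned interface
+above rather than a final specification:
+<PRE>#include "db.h"
+
+int
+client_example()
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	/* Ask for the client methods rather than the standard ones. */
+	if ((ret = db_env_create(&amp;dbenv, DB_CLIENT)) != 0)
+		return (ret);
+
+	/* Connect to the server; 0 timeouts mean "use the defaults". */
+	if ((ret = dbenv->set_server(dbenv, "myhost", 0, 0, 0)) != 0)
+		return (ret);
+
+	/* From here on, the environment is used exactly as before. */
+	ret = dbenv->open(dbenv, "/home/dbenv", DB_CREATE | DB_INIT_MPOOL, 0);
+	return (ret);
+}</PRE>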
+<P>We need two new fields in the <I>DB_ENV</I> structure: one is the
+socket descriptor used to communicate with the server, the other is the
+client identifier the server gives to us.&nbsp; The <I>DB</I> and
+<I>DBC</I> structures only need one additional field, the client
+identifier.&nbsp; The <I>DB_TXN</I> structure does not need modification;
+we overload the <I>txn_id</I> field.
+<H2>
+Issues</H2>
+We need to figure out what to do in case of client and server crashes.&nbsp;
+Both the client library and the server program are stateful.&nbsp; They
+both consume local resources during the lifetime of the connection.&nbsp;
+Should one end drop that connection, the other side needs to release those
+resources.
+<P>If the server crashes, then the client will get an error back.&nbsp;
+I have chosen to implement time-outs on the client side, using a default
+or allowing the application to specify one through the <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server()</A>
+method.&nbsp; Either the current operation will time-out waiting for the
+reply or the next operation called will time out (or get back some other
+kind of error regarding the server's non-existence).&nbsp; In any case,
+if the client application gets back such an error, it should abort any
+open transactions locally, close any databases, and close its environment.&nbsp;
+It may then decide to retry connecting to the server periodically or whenever
+it comes back.&nbsp; If the last operation a client did was a transaction
+commit that did not return or that timed out on the server, the client cannot
+determine whether the transaction was committed, but it must still release the
+local transaction resources.&nbsp; Once the server is back up, recovery must
+be run on the server.&nbsp; If the transaction commit completed on the server
+before the crash, then the operation is redone; if the commit did not reach
+the server, the pieces of the transaction are undone during recovery.&nbsp;
+The client can then re-establish its connection and begin again, effectively
+starting over.&nbsp; The client cannot use IDs from its previous connection
+to the server.&nbsp; However, if recovery is run, then consistency is assured.
+<P>If the client crashes, the server needs to somehow figure this out.&nbsp;
+The server is just sitting there waiting for a request to come in.&nbsp;
+A server must be able to time-out a client.&nbsp; Similar to ftpd, if a
+connection is idle for N seconds, then the server decides the client is
+dead and releases that client's resources, aborting any open transactions,
+closing any open databases and environments.&nbsp;&nbsp; The server timing
+out a client is not a trivial issue however.&nbsp; The generated function
+for the server just calls <I>svc_run()</I>.&nbsp; The server code I write
+contains procedures to do specific things.&nbsp; We do not have access
+to the code calling <I>select()</I>.&nbsp; Timing out the select is not
+good enough even if we could do so.&nbsp; We want to time-out idle environments,
+not simply cause a time-out if the server is idle a while.&nbsp; See the
+discussion of the <A HREF="#The Server Program">server program</A> for
+a description of how we accomplish this.
+<P>Since rpcgen generates the main() function of the server, I do not yet
+know how we are going to have the server multi-threaded or multi-process
+without changing the generated code.&nbsp; The RPC book indicates that
+the only way to accomplish this is through modifying the generated code
+in the server.&nbsp; <B>For the moment we will ignore this issue while
+we get the core server working, as it is only a performance issue.</B>
+<P>We do not do any security or authentication.&nbsp; Someone could get
+the code and modify it to spoof messages, trick the server, etc.&nbsp;
+RPC has some amount of authentication built into it.&nbsp; I haven't yet
+looked into it much to know if we want to use it or just point a user at
+it.&nbsp; The changes to the client code are fairly minor, the changes
+to our server procs are fairly minor.&nbsp; We would have to add code to
+a <I>sed</I> script or <I>awk</I> script to change the generated server
+code (yet again) in the dispatch routine to perform authentication.
+<P>We will need to get an official program number from Sun.&nbsp; We can
+get this by sending mail to <I>rpc@sun.com</I> and presumably at some point
+they will send us back a program number that we will encode into our XDR
+description file.&nbsp; Until we release this we can use a program number
+in the "user defined" number space.
+<BR>&nbsp;
+<H2>
+<A NAME="The Server Program"></A>The Server Program</H2>
+The server is a standalone program that the user builds and runs, probably
+as a daemon like process.&nbsp; This program is linked against the Berkeley
+DB library and the RPC library (which is part of the C library on my FreeBSD
+machine, others may have/need <I>-lrpclib</I>).&nbsp; The server basically
+is a slave to the client process.&nbsp; All messages from the client are
+synchronous and two-way.&nbsp; The server handles messages one at a time,
+and sends a reply back before getting another message.&nbsp; There are
+no asynchronous messages generated by the server to the client.
+<P>We have made a choice to modify the generated code for the server.&nbsp;
+The changes will be minimal, generally calling functions we write, that
+are in other source files.&nbsp; The first change is adding a call to our
+time-out function as described below.&nbsp; The second change is changing
+the name of the generated <I>main()</I> function to <I>__dbsrv_main()</I>,
+and adding our own <I>main()</I> function so that we can parse options,
+and set up other initialization we require.&nbsp; I have a <I>sed</I> script
+that is run from the distribution scripts that massages the generated code
+to make these minor changes.
+<P>Primarily the code needed for the server is the collection of the specified
+RPC functions.&nbsp; Each function receives the structure indicated, and
+our code takes out what it needs and passes the information into DB itself.&nbsp;
+The server needs to maintain a translation table for identifiers that we
+pass back to the client for the environment, transaction and database handles.
+<P>The table that the server maintains, assuming one client per server
+process/thread, should contain the handle to the environment, database
+or transaction, a link to maintain parent/child relationships between transactions,
+or databases and cursors, this handle's identifier, a type so that we can
+error if the client passes us a bad id for this call, and a link to this
+handle's environment entry (for time out/activity purposes).&nbsp; The
+table contains, in entries used by environments, a time-out value and an
+activity time stamp.&nbsp; Its use is described below for timing out idle
+clients.
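+<P>For concreteness, one plausible shape for a table entry is sketched
+below; the field names are only illustrative and the real structure is
+defined in the server's internal header:
+<PRE>typedef struct ct_entry {
+	long		ct_id;		/* Identifier handed to the client. */
+	int		ct_type;	/* Environment, database, txn or cursor. */
+	void		*ct_anyp;	/* The DB_ENV, DB, DB_TXN or DBC handle. */
+	struct ct_entry	*ct_parent;	/* Parent txn, or database for a cursor. */
+	struct ct_entry	*ct_envparent;	/* This handle's environment entry. */
+	u_int32_t	ct_timeout;	/* Idle time-out (environment entries). */
+	time_t		ct_active;	/* Last-activity time stamp. */
+} ct_entry;</PRE>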
+<P>Here is how we time out clients in the server.&nbsp; We have to modify
+the generated server code, but only to add one line during the dispatch
+function to run the time-out function.&nbsp; The call is made right before
+the return of the dispatch function, after the reply is sent to the client,
+so that clients aren't kept waiting for server bookkeeping activities.&nbsp;
+This time-out function then runs every time the server processes a request.&nbsp;
+In the time-out function we maintain a time-out hint, which is the time at which
+the youngest environment will time out.&nbsp; If the current time is less than the hint
+we know we do not need to run through the list of open handles.&nbsp; If
+the hint is expired, then we go through the list of open environment handles,
+and if they are past their expiration, then we close them and clean up.&nbsp;
+If they are not, we set up the hint for the next time.
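+<P>In rough C (the list-walking helpers here are hypothetical stand-ins
+for the server's own table traversal and cleanup routines), the check run
+at the end of every dispatch looks something like this:
+<PRE>static time_t hint;			/* Earliest pending time-out. */
+
+void
+timeout_check()
+{
+	ct_entry *ctp, *next;
+	time_t now, t;
+
+	(void)time(&amp;now);
+	if (hint != 0 &amp;&amp; now &lt; hint)	/* Nothing can have expired yet. */
+		return;
+	hint = 0;
+	/* first_env_entry, next_env_entry and close_env are illustrative. */
+	for (ctp = first_env_entry(); ctp != NULL; ctp = next) {
+		next = next_env_entry(ctp);
+		t = ctp->ct_active + ctp->ct_timeout;
+		if (t &lt;= now)		/* Idle too long: close and clean up. */
+			close_env(ctp);
+		else if (hint == 0 || t &lt; hint)
+			hint = t;	/* Remember the next time-out. */
+	}
+}</PRE>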
+<P>Each entry in the open handle table has a pointer back to its environment's
+entry.&nbsp; Every operation within this environment can then update the
+single environment activity record.&nbsp; Every environment can have a
+different time-out.&nbsp; The <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server
+</A>call
+takes a server time-out value.&nbsp; If this value is 0 then a default
+(currently 5 minutes) is used.&nbsp; This time-out value is only a hint
+to the server.&nbsp; It may choose to disregard this value or set the time-out
+based on its own implementation.
+<P>For completeness, the flaws of this time-out implementation should be
+pointed out.&nbsp; First, it is possible that a client could crash with
+open handles, and no other requests come in to the server.&nbsp; Therefore
+the time-out function never gets run and those resources are not released
+(until a request does come in).&nbsp; Similarly, this time-out is not exact.&nbsp;
+The time-out function uses its hint and if it computes a hint on one run,
+an earlier time-out might be created before that time-out expires.&nbsp;
+This issue simply yields a handle that doesn't get released until that
+original hint expires.&nbsp; To illustrate, consider that at the time that
+the time-out function is run, the youngest time-out is 5 minutes in the
+future.&nbsp; Soon after, a new environment is opened that has a time-out
+of 1 minute.&nbsp; If this environment becomes idle (and other operations
+are going on), the time-out function will not release that environment
+until the original 5 minute hint expires.&nbsp; This is not a problem since
+the resources will eventually be released.
+<P>On a similar note, if a client crashes during an RPC, our reply generates
+a SIGPIPE, and our server crashes unless we catch it.&nbsp; Using <I>signal(SIGPIPE,
+SIG_IGN) </I>we can ignore it, and the server will go on.&nbsp; This is
+a call&nbsp; in our <I>main()</I> function that we write.&nbsp; Eventually
+this client's handles would be timed out as described above.&nbsp; We need
+this only for the unfortunate window of a client crashing during the RPC.
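+<P>The corresponding fragment of our hand-written <I>main()</I> is just
+the following (a sketch):
+<PRE>#include &lt;signal.h>
+
+	/* A client dying mid-RPC must not take the server down with it. */
+	(void)signal(SIGPIPE, SIG_IGN);</PRE>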
+<P>The options below are primarily for control of the program itself.&nbsp;
+Details relating to databases and environments should be passed from the
+client to the server, since the server can serve many clients, many environments
+and many databases.&nbsp; Therefore it makes more sense for the client
+to set the cache size of its own environment, rather than setting a default
+cachesize on the server that applies as a blanket to any environment it
+may be called upon to open.&nbsp; Options are:
+<UL>
+<LI>
+<B>-t&nbsp;</B> to set the default time-out given to an environment.</LI>
+
+<LI>
+<B>-T</B> to set the maximum time-out allowed for the server.</LI>
+
+<LI>
+<B>-L</B> to log the execution of the server process to a specified file.</LI>
+
+<LI>
+<B>-v</B> to run in verbose mode.</LI>
+
+<LI>
+<B>-M</B>&nbsp; to specify the maximum number of outstanding child server
+processes/threads we can have at any given time.&nbsp; The default is 10.
+<B>[We
+are not yet doing multiple threads/processes.]</B></LI>
+</UL>
+
+<H2>
+The Client Code</H2>
+The client code contains all of the supported functions and methods used
+in this model.&nbsp; There are several methods in the <I>__db_env
+</I>and
+<I>__db</I>
+structures that currently do not apply, such as the callbacks.&nbsp; Those
+fields that are not applicable to the client model point to NULL so that
+the user is notified of the error.&nbsp; Some method functions, such as the
+error calls, remain unchanged as well.
+<P>The client code contains each method function that goes along with the
+<A HREF="#Remote Procedure Calls">RPC
+calls</A> described elsewhere.&nbsp; The client library also contains its
+own version of <A HREF="../docs/api_c/env_create.html">db_env_create()</A>,
+which does not result in any messages going over to the server (since we
+do not yet know what server we are talking to).&nbsp; This function sets
+up the pointers to the correct client functions.
+<P>All of the method functions that handle the messaging have a basic flow
+similar to this (a simplified example follows the list):
+<UL>
+<LI>
+Local arg parsing that may be needed</LI>
+
+<LI>
+Marshalling the message header and the arguments we need to send to the
+server</LI>
+
+<LI>
+Sending the message</LI>
+
+<LI>
+Receiving a reply</LI>
+
+<LI>
+Unmarshalling the reply</LI>
+
+<LI>
+Local results processing that may be needed</LI>
+</UL>
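+<P>As a simplified example (field and stub names here are illustrative;
+the real client functions are produced by the code generator described
+below), the client side of DB->sync() follows the flow above:
+<PRE>int
+__dbcl_db_sync(dbp, flags)
+	DB *dbp;
+	u_int32_t flags;
+{
+	CLIENT *cl;			/* RPC connection stored in the DB_ENV. */
+	__db_sync_msg msg;		/* Marshalled request. */
+	__db_sync_reply *replyp;	/* Unmarshalled reply. */
+
+	/* Local argument parsing would go here. */
+	cl = (CLIENT *)dbp->dbenv->cl_handle;
+
+	/* Marshal: the server needs only our database ID and the flags. */
+	msg.dbpcl_id = dbp->cl_id;
+	msg.flags = flags;
+
+	/* Send the request and wait for the reply. */
+	if ((replyp = __db_db_sync_1(&amp;msg, cl)) == NULL)
+		return (DB_NOSERVER);
+
+	/* Unmarshal and do any local results processing; here, just status. */
+	return (replyp->status);
+}</PRE>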
+
+<H2>
+Generated Code</H2>
+Almost all of the code is generated from a source file describing the interface
+and an <I>awk</I> script.&nbsp;&nbsp; This awk script generates six (6)
+files for us.&nbsp; It also modifies one.&nbsp; The files are:
+<OL>
+<LI>
+Client file - The C source file created containing the client code.</LI>
+
+<LI>
+Client template file - The C template source file created containing interfaces
+for handling client-local issues such as resource allocation, but with
+a consistent interface with the client code generated.</LI>
+
+<LI>
+Server file - The C source file created containing the server code.</LI>
+
+<LI>
+Server template file - The C template source file created containing interfaces
+for handling server-local issues such as resource allocation, calling into
+the DB library but with a consistent interface with the server code generated.</LI>
+
+<LI>
+XDR file - The XDR message description file created.</LI>
+
+<LI>
+Server sed file - A sed script that contains commands to apply to the server
+procedure file (i.e. the real source file that the server template file
+becomes) so that minor interface changes can be consistently and easily
+applied to the real code.</LI>
+
+<LI>
+Server procedure file - This is the file that is modified by the sed script
+generated.&nbsp; It originated from the server template file.</LI>
+</OL>
+The awk script reads a source file, <I>db_server/rpc.src </I>that describes
+each operation and what sorts of arguments it takes and what it returns
+from the server.&nbsp; The syntax of the source file describes the interface
+to that operation.&nbsp; There are four (4) parts to the syntax:
+<OL>
+<LI>
+<B>BEGIN</B> <B><I>function version# codetype</I></B> - begins a new functional
+interface for the given <B><I>function</I></B>.&nbsp; Each function has
+a <B><I>version number</I></B>, currently all of them are at version number
+one (1).&nbsp; The <B><I>code type</I></B> indicates to the awk script
+what kind of code to generate.&nbsp; The choices are:</LI>
+
+<UL>
+<LI>
+<B>CODE </B>- Generate all code, and return a status value.&nbsp; If specified,
+the client code will simply return the status to the user upon completion
+of the RPC call.</LI>
+
+<LI>
+<B>RETCODE </B>- Generate all code and call a return function in the client
+template file to deal with client issues or with other returned items.&nbsp;
+If specified, the client code generated will call a function of the form
+<I>__dbcl_&lt;name>_ret()
+</I>where
+&lt;name> is replaced with the function name given here.&nbsp; This function
+is placed in the template file because this indicates that something special
+must occur on return.&nbsp; The arguments to this function are the same
+as those for the client function, with the addition of the reply message
+structure.</LI>
+
+<LI>
+<B>NOCLNTCODE - </B>Generate XDR and server code, but no corresponding
+client code. (This is used for functions that are not named the same thing
+on both sides.&nbsp; The only use of this at the moment is db_env_create
+and db_create.&nbsp; The environment create call to the server is actually
+called from the <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server()</A>
+method.&nbsp; The db_create code exists elsewhere in the library and we
+modify that code for the client call.)</LI>
+</UL>
+
+<LI>
+<B>ARG <I>RPC-type C-type varname [list-type]</I></B>- each line of this
+describes an argument to the function.&nbsp; The argument is called <B><I>varname</I></B>.&nbsp;
+The <B><I>C-type</I></B> given is what it should look like in the C code
+generated, such as <B>DB *, u_int32_t, const char *</B>.&nbsp; The
+<B><I>RPC-type</I></B>
+is an indication about how the RPC request message should be constructed.&nbsp;
+The RPC-types allowed are described below.</LI>
+
+<LI>
+<B>RET <I>RPC-type C-type varname [list-type]</I></B>- each line of this
+describes what the server should return from this procedure call (in addition
+to a status, which is always returned and should not be specified).&nbsp;
+The argument is called <B><I>varname</I></B>.&nbsp; The <B><I>C-type</I></B>
+given is what it should look like in the C code generated, such as <B>DB
+*, u_int32_t, const char *</B>.&nbsp; The <B><I>RPC-type</I></B> is an
+indication about how the RPC reply message should be constructed.&nbsp;
+The RPC-types are described below.</LI>
+
+<LI>
+<B>END </B>- End the description of this function.&nbsp; The result is
+that when the awk script encounters the <B>END</B> tag, it now has all
+the information it needs to construct the generated code for this function.</LI>
+</OL>
+The <B><I>RPC-type</I></B> must be one of the following:
+<UL>
+<LI>
+<B>IGNORE </B>- This argument is not passed to the server and should be
+ignored when constructing the XDR code.&nbsp; <B>Only allowed for an ARG
+specification.</B></LI>
+
+<LI>
+<B>STRING</B> - This argument is a string.</LI>
+
+<LI>
+<B>INT </B>- This argument is an integer of some sort.</LI>
+
+<LI>
+<B>DBT </B>- This argument is a DBT, resulting in its decomposition into
+the request message.</LI>
+
+<LI>
+<B>LIST</B> - This argument is an opaque list passed to the server (NULL-terminated).&nbsp;
+If an argument of this type is given, it must have a <B><I>list-type</I></B>
+specified that is one of:</LI>
+
+<UL>
+<LI>
+<B>STRING</B></LI>
+
+<LI>
+<B>INT</B></LI>
+
+<LI>
+<B>ID</B></LI>
+</UL>
+
+<LI>
+<B>ID</B> - This argument is an identifier.</LI>
+</UL>
+So, for example, the source for the DB->join RPC call looks like:
+<PRE>BEGIN	dbjoin		1	RETCODE
+ARG	ID	DB *		dbp
+ARG	LIST	DBC **		curs	ID
+ARG	IGNORE	DBC **		dbcpp
+ARG	INT	u_int32_t	flags
+RET	ID	long		dbcid
+END</PRE>
+Our first line tells us we are writing the dbjoin function.&nbsp; It requires
+special code on the client so we indicate that with the RETCODE.&nbsp;
+This method takes four arguments.&nbsp; For the RPC request we need the
+database ID from the dbp, we construct a NULL-terminated list of IDs for
+the cursor list, we ignore the argument to return the cursor handle to
+the user, and we pass along the flags.&nbsp; On the return, the reply contains
+a status, by default, and additionally, it contains the ID of the newly
+created cursor.
+<H2>
+Building and Installing</H2>
+I need to verify with Don Anderson, but I believe we should just build
+the server program, just like we do for db_stat, db_checkpoint, etc.&nbsp;
+Basically it can be treated as a utility program from the building and
+installation perspective.
+<P>As mentioned early on, in the section on <A HREF="#DB Modifications">DB
+Modifications</A>, we have a single library, but allowing the user to access
+the client portion by sending a flag to <A HREF="../docs/api_c/env_create.html">db_env_create()</A>.&nbsp;
+The Makefile is modified to include the new files.
+<P>Testing is performed in two ways.&nbsp; First, I have a new example program
+that should become part of the examples directory.&nbsp; It is basically
+a merging of ex_access.c and ex_env.c.&nbsp; This example is adequate to
+test basic functionality, as it just does database put/get calls and
+appropriate open and close calls.&nbsp; However, in order to test the full
+set of functions a more generalized scheme is required.&nbsp; For the moment,
+I am going to modify the Tcl interface to accept the server information.&nbsp;
+Nothing else should need to change in Tcl.&nbsp; Then we can either write
+our own test modules or use a subset of the existing ones to test functionality
+on a regular basis.
+</BODY>
+</HTML>
diff --git a/bdb/rpc_server/db_server.sed b/bdb/rpc_server/db_server.sed
new file mode 100644
index 00000000000..f028f778e21
--- /dev/null
+++ b/bdb/rpc_server/db_server.sed
@@ -0,0 +1,5 @@
+1i\
+\#include "db_config.h"\
+\#ifdef HAVE_RPC
+$a\
+\#endif /* HAVE_RPC */
diff --git a/bdb/rpc_server/db_server.x b/bdb/rpc_server/db_server.x
new file mode 100644
index 00000000000..6bdff201f5a
--- /dev/null
+++ b/bdb/rpc_server/db_server.x
@@ -0,0 +1,492 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+
+struct __env_cachesize_msg {
+ unsigned int dbenvcl_id;
+ unsigned int gbytes;
+ unsigned int bytes;
+ unsigned int ncache;
+};
+
+struct __env_cachesize_reply {
+ unsigned int status;
+};
+
+struct __env_close_msg {
+ unsigned int dbenvcl_id;
+ unsigned int flags;
+};
+
+struct __env_close_reply {
+ unsigned int status;
+};
+
+struct __env_create_msg {
+ unsigned int timeout;
+};
+
+struct __env_create_reply {
+ unsigned int status;
+ unsigned int envcl_id;
+};
+
+struct __env_flags_msg {
+ unsigned int dbenvcl_id;
+ unsigned int flags;
+ unsigned int onoff;
+};
+
+struct __env_flags_reply {
+ unsigned int status;
+};
+
+struct __env_open_msg {
+ unsigned int dbenvcl_id;
+ string home<>;
+ unsigned int flags;
+ unsigned int mode;
+};
+
+struct __env_open_reply {
+ unsigned int status;
+};
+
+struct __env_remove_msg {
+ unsigned int dbenvcl_id;
+ string home<>;
+ unsigned int flags;
+};
+
+struct __env_remove_reply {
+ unsigned int status;
+};
+
+struct __txn_abort_msg {
+ unsigned int txnpcl_id;
+};
+
+struct __txn_abort_reply {
+ unsigned int status;
+};
+
+struct __txn_begin_msg {
+ unsigned int envpcl_id;
+ unsigned int parentcl_id;
+ unsigned int flags;
+};
+
+struct __txn_begin_reply {
+ unsigned int status;
+ unsigned int txnidcl_id;
+};
+
+struct __txn_commit_msg {
+ unsigned int txnpcl_id;
+ unsigned int flags;
+};
+
+struct __txn_commit_reply {
+ unsigned int status;
+};
+
+struct __db_bt_maxkey_msg {
+ unsigned int dbpcl_id;
+ unsigned int maxkey;
+};
+
+struct __db_bt_maxkey_reply {
+ unsigned int status;
+};
+
+struct __db_bt_minkey_msg {
+ unsigned int dbpcl_id;
+ unsigned int minkey;
+};
+
+struct __db_bt_minkey_reply {
+ unsigned int status;
+};
+
+struct __db_close_msg {
+ unsigned int dbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_close_reply {
+ unsigned int status;
+};
+
+struct __db_create_msg {
+ unsigned int flags;
+ unsigned int envpcl_id;
+};
+
+struct __db_create_reply {
+ unsigned int status;
+ unsigned int dbpcl_id;
+};
+
+struct __db_del_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int flags;
+};
+
+struct __db_del_reply {
+ unsigned int status;
+};
+
+struct __db_extentsize_msg {
+ unsigned int dbpcl_id;
+ unsigned int extentsize;
+};
+
+struct __db_extentsize_reply {
+ unsigned int status;
+};
+
+struct __db_flags_msg {
+ unsigned int dbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_flags_reply {
+ unsigned int status;
+};
+
+struct __db_get_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __db_get_reply {
+ unsigned int status;
+ opaque keydata<>;
+ opaque datadata<>;
+};
+
+struct __db_h_ffactor_msg {
+ unsigned int dbpcl_id;
+ unsigned int ffactor;
+};
+
+struct __db_h_ffactor_reply {
+ unsigned int status;
+};
+
+struct __db_h_nelem_msg {
+ unsigned int dbpcl_id;
+ unsigned int nelem;
+};
+
+struct __db_h_nelem_reply {
+ unsigned int status;
+};
+
+struct __db_key_range_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int flags;
+};
+
+struct __db_key_range_reply {
+ unsigned int status;
+ double less;
+ double equal;
+ double greater;
+};
+
+struct __db_lorder_msg {
+ unsigned int dbpcl_id;
+ unsigned int lorder;
+};
+
+struct __db_lorder_reply {
+ unsigned int status;
+};
+
+struct __db_open_msg {
+ unsigned int dbpcl_id;
+ string name<>;
+ string subdb<>;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int mode;
+};
+
+struct __db_open_reply {
+ unsigned int status;
+ unsigned int type;
+ unsigned int dbflags;
+};
+
+struct __db_pagesize_msg {
+ unsigned int dbpcl_id;
+ unsigned int pagesize;
+};
+
+struct __db_pagesize_reply {
+ unsigned int status;
+};
+
+struct __db_put_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __db_put_reply {
+ unsigned int status;
+ opaque keydata<>;
+};
+
+struct __db_re_delim_msg {
+ unsigned int dbpcl_id;
+ unsigned int delim;
+};
+
+struct __db_re_delim_reply {
+ unsigned int status;
+};
+
+struct __db_re_len_msg {
+ unsigned int dbpcl_id;
+ unsigned int len;
+};
+
+struct __db_re_len_reply {
+ unsigned int status;
+};
+
+struct __db_re_pad_msg {
+ unsigned int dbpcl_id;
+ unsigned int pad;
+};
+
+struct __db_re_pad_reply {
+ unsigned int status;
+};
+
+struct __db_remove_msg {
+ unsigned int dbpcl_id;
+ string name<>;
+ string subdb<>;
+ unsigned int flags;
+};
+
+struct __db_remove_reply {
+ unsigned int status;
+};
+
+struct __db_rename_msg {
+ unsigned int dbpcl_id;
+ string name<>;
+ string subdb<>;
+ string newname<>;
+ unsigned int flags;
+};
+
+struct __db_rename_reply {
+ unsigned int status;
+};
+
+struct __db_stat_msg {
+ unsigned int dbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_stat_statsreplist {
+ opaque ent<>;
+ __db_stat_statsreplist *next;
+};
+
+struct __db_stat_reply {
+ unsigned int status;
+ __db_stat_statsreplist *statslist;
+};
+
+struct __db_swapped_msg {
+ unsigned int dbpcl_id;
+};
+
+struct __db_swapped_reply {
+ unsigned int status;
+};
+
+struct __db_sync_msg {
+ unsigned int dbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_sync_reply {
+ unsigned int status;
+};
+
+struct __db_cursor_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int flags;
+};
+
+struct __db_cursor_reply {
+ unsigned int status;
+ unsigned int dbcidcl_id;
+};
+
+struct __db_join_curslist {
+ opaque ent<>;
+ __db_join_curslist *next;
+};
+
+struct __db_join_msg {
+ unsigned int dbpcl_id;
+ __db_join_curslist *curslist;
+ unsigned int flags;
+};
+
+struct __db_join_reply {
+ unsigned int status;
+ unsigned int dbcidcl_id;
+};
+
+struct __dbc_close_msg {
+ unsigned int dbccl_id;
+};
+
+struct __dbc_close_reply {
+ unsigned int status;
+};
+
+struct __dbc_count_msg {
+ unsigned int dbccl_id;
+ unsigned int flags;
+};
+
+struct __dbc_count_reply {
+ unsigned int status;
+ unsigned int dupcount;
+};
+
+struct __dbc_del_msg {
+ unsigned int dbccl_id;
+ unsigned int flags;
+};
+
+struct __dbc_del_reply {
+ unsigned int status;
+};
+
+struct __dbc_dup_msg {
+ unsigned int dbccl_id;
+ unsigned int flags;
+};
+
+struct __dbc_dup_reply {
+ unsigned int status;
+ unsigned int dbcidcl_id;
+};
+
+struct __dbc_get_msg {
+ unsigned int dbccl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __dbc_get_reply {
+ unsigned int status;
+ opaque keydata<>;
+ opaque datadata<>;
+};
+
+struct __dbc_put_msg {
+ unsigned int dbccl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __dbc_put_reply {
+ unsigned int status;
+ opaque keydata<>;
+};
+program DB_SERVERPROG {
+ version DB_SERVERVERS {
+ __env_cachesize_reply __DB_env_cachesize(__env_cachesize_msg) = 1;
+ __env_close_reply __DB_env_close(__env_close_msg) = 2;
+ __env_create_reply __DB_env_create(__env_create_msg) = 3;
+ __env_flags_reply __DB_env_flags(__env_flags_msg) = 4;
+ __env_open_reply __DB_env_open(__env_open_msg) = 5;
+ __env_remove_reply __DB_env_remove(__env_remove_msg) = 6;
+ __txn_abort_reply __DB_txn_abort(__txn_abort_msg) = 7;
+ __txn_begin_reply __DB_txn_begin(__txn_begin_msg) = 8;
+ __txn_commit_reply __DB_txn_commit(__txn_commit_msg) = 9;
+ __db_bt_maxkey_reply __DB_db_bt_maxkey(__db_bt_maxkey_msg) = 10;
+ __db_bt_minkey_reply __DB_db_bt_minkey(__db_bt_minkey_msg) = 11;
+ __db_close_reply __DB_db_close(__db_close_msg) = 12;
+ __db_create_reply __DB_db_create(__db_create_msg) = 13;
+ __db_del_reply __DB_db_del(__db_del_msg) = 14;
+ __db_extentsize_reply __DB_db_extentsize(__db_extentsize_msg) = 15;
+ __db_flags_reply __DB_db_flags(__db_flags_msg) = 16;
+ __db_get_reply __DB_db_get(__db_get_msg) = 17;
+ __db_h_ffactor_reply __DB_db_h_ffactor(__db_h_ffactor_msg) = 18;
+ __db_h_nelem_reply __DB_db_h_nelem(__db_h_nelem_msg) = 19;
+ __db_key_range_reply __DB_db_key_range(__db_key_range_msg) = 20;
+ __db_lorder_reply __DB_db_lorder(__db_lorder_msg) = 21;
+ __db_open_reply __DB_db_open(__db_open_msg) = 22;
+ __db_pagesize_reply __DB_db_pagesize(__db_pagesize_msg) = 23;
+ __db_put_reply __DB_db_put(__db_put_msg) = 24;
+ __db_re_delim_reply __DB_db_re_delim(__db_re_delim_msg) = 25;
+ __db_re_len_reply __DB_db_re_len(__db_re_len_msg) = 26;
+ __db_re_pad_reply __DB_db_re_pad(__db_re_pad_msg) = 27;
+ __db_remove_reply __DB_db_remove(__db_remove_msg) = 28;
+ __db_rename_reply __DB_db_rename(__db_rename_msg) = 29;
+ __db_stat_reply __DB_db_stat(__db_stat_msg) = 30;
+ __db_swapped_reply __DB_db_swapped(__db_swapped_msg) = 31;
+ __db_sync_reply __DB_db_sync(__db_sync_msg) = 32;
+ __db_cursor_reply __DB_db_cursor(__db_cursor_msg) = 33;
+ __db_join_reply __DB_db_join(__db_join_msg) = 34;
+ __dbc_close_reply __DB_dbc_close(__dbc_close_msg) = 35;
+ __dbc_count_reply __DB_dbc_count(__dbc_count_msg) = 36;
+ __dbc_del_reply __DB_dbc_del(__dbc_del_msg) = 37;
+ __dbc_dup_reply __DB_dbc_dup(__dbc_dup_msg) = 38;
+ __dbc_get_reply __DB_dbc_get(__dbc_get_msg) = 39;
+ __dbc_put_reply __DB_dbc_put(__dbc_put_msg) = 40;
+ } = 1;
+} = 351457;
diff --git a/bdb/rpc_server/db_server_proc.c b/bdb/rpc_server/db_server_proc.c
new file mode 100644
index 00000000000..108a00fb371
--- /dev/null
+++ b/bdb/rpc_server/db_server_proc.c
@@ -0,0 +1,1546 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef lint
+static const char revid[] = "$Id: db_server_proc.c,v 1.48 2001/01/06 16:08:01 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "db_server.h"
+
+#include "db_int.h"
+#include "db_server_int.h"
+#include "rpc_server_ext.h"
+
+static int __db_stats_list __P((DB_ENV *,
+ __db_stat_statsreplist **, u_int32_t *, int));
+
+/* BEGIN __env_cachesize_1_proc */
+void
+__env_cachesize_1_proc(dbenvcl_id, gbytes, bytes,
+ ncache, replyp)
+ long dbenvcl_id;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ u_int32_t ncache;
+ __env_cachesize_reply *replyp;
+/* END __env_cachesize_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_cachesize(dbenv, gbytes, bytes, ncache);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_1_proc */
+void
+__env_close_1_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+/* END __env_close_1_proc */
+{
+ replyp->status = __dbenv_close_int(dbenvcl_id, flags);
+ return;
+}
+
+/* BEGIN __env_create_1_proc */
+void
+__env_create_1_proc(timeout, replyp)
+ u_int32_t timeout;
+ __env_create_reply *replyp;
+/* END __env_create_1_proc */
+{
+ int ret;
+ DB_ENV *dbenv;
+ ct_entry *ctp;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+ if ((ret = db_env_create(&dbenv, 0)) == 0) {
+ ctp->ct_envp = dbenv;
+ ctp->ct_type = CT_ENV;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = ctp;
+ __dbsrv_settimeout(ctp, timeout);
+ __dbsrv_active(ctp);
+ replyp->envcl_id = ctp->ct_id;
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_1_proc */
+void
+__env_flags_1_proc(dbenvcl_id, flags, onoff, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ u_int32_t onoff;
+ __env_flags_reply *replyp;
+/* END __env_flags_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_flags(dbenv, flags, onoff);
+
+ replyp->status = ret;
+ return;
+}
+/* BEGIN __env_open_1_proc */
+void
+__env_open_1_proc(dbenvcl_id, home, flags,
+ mode, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ u_int32_t mode;
+ __env_open_reply *replyp;
+/* END __env_open_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ char *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ replyp->status = DB_NOSERVER_HOME;
+ return;
+ }
+
+ ret = dbenv->open(dbenv, fullhome, flags, mode);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_remove_1_proc */
+void
+__env_remove_1_proc(dbenvcl_id, home, flags, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+/* END __env_remove_1_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ char *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ replyp->status = DB_NOSERVER_HOME;
+ return;
+ }
+
+ ret = dbenv->remove(dbenv, fullhome, flags);
+ __dbdel_ctp(dbenv_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_1_proc */
+void
+__txn_abort_1_proc(txnpcl_id, replyp)
+ long txnpcl_id;
+ __txn_abort_reply *replyp;
+/* END __txn_abort_1_proc */
+{
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txn_abort(txnp);
+ __dbdel_ctp(txnp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_1_proc */
+void
+__txn_begin_1_proc(envpcl_id, parentcl_id,
+ flags, replyp)
+ long envpcl_id;
+ long parentcl_id;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+/* END __txn_begin_1_proc */
+{
+ int ret;
+ DB_ENV * envp;
+ ct_entry *envp_ctp;
+ DB_TXN * parent;
+ ct_entry *parent_ctp;
+ DB_TXN *txnp;
+ ct_entry *ctp;
+
+ ACTIVATE_CTP(envp_ctp, envpcl_id, CT_ENV);
+ envp = (DB_ENV *)envp_ctp->ct_anyp;
+ parent_ctp = NULL;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ if (parentcl_id != 0) {
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DB_TXN *)parent_ctp->ct_anyp;
+ ctp->ct_activep = parent_ctp->ct_activep;
+ } else
+ parent = NULL;
+
+ ret = txn_begin(envp, parent, &txnp, flags);
+ if (ret == 0) {
+ ctp->ct_txnp = txnp;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = parent_ctp;
+ ctp->ct_envparent = envp_ctp;
+ replyp->txnidcl_id = ctp->ct_id;
+ __dbsrv_settimeout(ctp, envp_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_1_proc */
+void
+__txn_commit_1_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+/* END __txn_commit_1_proc */
+{
+ int ret;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txn_commit(txnp, flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_maxkey_1_proc */
+void
+__db_bt_maxkey_1_proc(dbpcl_id, maxkey, replyp)
+ long dbpcl_id;
+ u_int32_t maxkey;
+ __db_bt_maxkey_reply *replyp;
+/* END __db_bt_maxkey_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_maxkey(dbp, maxkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_1_proc */
+void
+__db_bt_minkey_1_proc(dbpcl_id, minkey, replyp)
+ long dbpcl_id;
+ u_int32_t minkey;
+ __db_bt_minkey_reply *replyp;
+/* END __db_bt_minkey_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_minkey(dbp, minkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_1_proc */
+void
+__db_close_1_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+/* END __db_close_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->close(dbp, flags);
+ __dbdel_ctp(dbp_ctp);
+
+	replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_create_1_proc */
+void
+__db_create_1_proc(flags, envpcl_id, replyp)
+ u_int32_t flags;
+ long envpcl_id;
+ __db_create_reply *replyp;
+/* END __db_create_1_proc */
+{
+ int ret;
+ DB_ENV * envp;
+ DB *dbp;
+ ct_entry *envp_ctp, *dbp_ctp;
+
+ ACTIVATE_CTP(envp_ctp, envpcl_id, CT_ENV);
+ envp = (DB_ENV *)envp_ctp->ct_anyp;
+
+ dbp_ctp = new_ct_ent(&replyp->status);
+ if (dbp_ctp == NULL)
+ return ;
+ /*
+ * We actually require env's for databases. The client should
+ * have caught it, but just in case.
+ */
+ DB_ASSERT(envp != NULL);
+ if ((ret = db_create(&dbp, envp, flags)) == 0) {
+ dbp_ctp->ct_dbp = dbp;
+ dbp_ctp->ct_type = CT_DB;
+ dbp_ctp->ct_parent = envp_ctp;
+ dbp_ctp->ct_envparent = envp_ctp;
+ replyp->dbpcl_id = dbp_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_del_1_proc */
+void
+__db_del_1_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyflags, keydata, keysize,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_del_reply *replyp;
+/* END __db_del_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+ DBT key;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ memset(&key, 0, sizeof(key));
+
+ /* Set up key DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ key.flags = keyflags;
+ key.size = keysize;
+ key.data = keydata;
+
+ ret = dbp->del(dbp, txnp, &key, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_1_proc */
+void
+__db_extentsize_1_proc(dbpcl_id, extentsize, replyp)
+ long dbpcl_id;
+ u_int32_t extentsize;
+ __db_extentsize_reply *replyp;
+/* END __db_extentsize_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_q_extentsize(dbp, extentsize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_1_proc */
+void
+__db_flags_1_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_flags_reply *replyp;
+/* END __db_flags_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_flags(dbp, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_1_proc */
+void
+__db_get_1_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyflags, keydata, keysize,
+ datadlen, datadoff, dataflags, datadata,
+ datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+ int * freep;
+/* END __db_get_1_proc */
+{
+ int key_alloc, ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+ DBT key, data;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.doff = datadoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ data.flags = DB_DBT_MALLOC;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->get(dbp, txnp, &key, &data, flags);
+	/*
+	 * If the get succeeded, set up the key and data in the reply.
+	 * Otherwise just return the status.
+	 */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.data == keydata) {
+ ret = __os_malloc(dbp->dbenv,
+ key.size, NULL, &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_free(key.data, key.size);
+ __os_free(data.data, data.size);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_malloc(dbp->dbenv,
+ data.size, NULL, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_free(key.data, key.size);
+ __os_free(data.data, data.size);
+ if (key_alloc)
+ __os_free(replyp->keydata.keydata_val,
+ key.size);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_ffactor_1_proc */
+void
+__db_h_ffactor_1_proc(dbpcl_id, ffactor, replyp)
+ long dbpcl_id;
+ u_int32_t ffactor;
+ __db_h_ffactor_reply *replyp;
+/* END __db_h_ffactor_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_ffactor(dbp, ffactor);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_1_proc */
+void
+__db_h_nelem_1_proc(dbpcl_id, nelem, replyp)
+ long dbpcl_id;
+ u_int32_t nelem;
+ __db_h_nelem_reply *replyp;
+/* END __db_h_nelem_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_nelem(dbp, nelem);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_1_proc */
+void
+__db_key_range_1_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyflags, keydata, keysize,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+/* END __db_key_range_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+ DBT key;
+ DB_KEY_RANGE range;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ memset(&key, 0, sizeof(key));
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ key.size = keysize;
+ key.data = keydata;
+ key.flags = keyflags;
+
+ ret = dbp->key_range(dbp, txnp, &key, &range, flags);
+
+ replyp->status = ret;
+ replyp->less = range.less;
+ replyp->equal = range.equal;
+ replyp->greater = range.greater;
+ return;
+}
+
+/* BEGIN __db_lorder_1_proc */
+void
+__db_lorder_1_proc(dbpcl_id, lorder, replyp)
+ long dbpcl_id;
+ u_int32_t lorder;
+ __db_lorder_reply *replyp;
+/* END __db_lorder_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_lorder(dbp, lorder);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_1_proc */
+void
+__db_open_1_proc(dbpcl_id, name, subdb,
+ type, flags, mode, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t type;
+ u_int32_t flags;
+ u_int32_t mode;
+ __db_open_reply *replyp;
+/* END __db_open_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->open(dbp, name, subdb, (DBTYPE)type, flags, mode);
+ if (ret == 0) {
+ replyp->type = (int) dbp->get_type(dbp);
+ /* XXX
+ * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send
+ * this dbp's flags back.
+ */
+ replyp->dbflags = (int) dbp->flags;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pagesize_1_proc */
+void
+__db_pagesize_1_proc(dbpcl_id, pagesize, replyp)
+ long dbpcl_id;
+ u_int32_t pagesize;
+ __db_pagesize_reply *replyp;
+/* END __db_pagesize_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_pagesize(dbp, pagesize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_put_1_proc */
+void
+__db_put_1_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyflags, keydata, keysize,
+ datadlen, datadoff, dataflags, datadata,
+ datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+ int * freep;
+/* END __db_put_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+ DBT key, data;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.doff = datadoff;
+ data.flags = dataflags;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the put */
+ ret = dbp->put(dbp, txnp, &key, &data, flags);
+ /*
+ * If the client did a DB_APPEND, set up key in reply.
+ * Otherwise just status.
+ */
+ if (ret == 0 && (flags == DB_APPEND)) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ if (key.data == keydata) {
+ ret = __os_malloc(dbp->dbenv,
+ key.size, NULL, &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_free(key.data, key.size);
+ goto err;
+ }
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_delim_1_proc */
+void
+__db_re_delim_1_proc(dbpcl_id, delim, replyp)
+ long dbpcl_id;
+ u_int32_t delim;
+ __db_re_delim_reply *replyp;
+/* END __db_re_delim_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_delim(dbp, delim);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_len_1_proc */
+void
+__db_re_len_1_proc(dbpcl_id, len, replyp)
+ long dbpcl_id;
+ u_int32_t len;
+ __db_re_len_reply *replyp;
+/* END __db_re_len_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_len(dbp, len);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_pad_1_proc */
+void
+__db_re_pad_1_proc(dbpcl_id, pad, replyp)
+ long dbpcl_id;
+ u_int32_t pad;
+ __db_re_pad_reply *replyp;
+/* END __db_re_pad_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_pad(dbp, pad);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_remove_1_proc */
+void
+__db_remove_1_proc(dbpcl_id, name, subdb,
+ flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+/* END __db_remove_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->remove(dbp, name, subdb, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_rename_1_proc */
+void
+__db_rename_1_proc(dbpcl_id, name, subdb,
+ newname, flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+/* END __db_rename_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->rename(dbp, name, subdb, newname, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_stat_1_proc */
+void
+__db_stat_1_proc(dbpcl_id,
+ flags, replyp, freep)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+ int * freep;
+/* END __db_stat_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DBTYPE type;
+ void *sp;
+ int len;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->stat(dbp, &sp, NULL, flags);
+ replyp->status = ret;
+ if (ret != 0)
+ return;
+ /*
+ * We get here, we have success. Allocate an array so that
+ * we can use the list generator. Generate the reply, free
+ * up the space.
+ */
+ /*
+ * XXX This assumes that all elements of all stat structures
+ * are u_int32_t fields. They are, currently.
+ */
+ type = dbp->get_type(dbp);
+ if (type == DB_HASH)
+ len = sizeof(DB_HASH_STAT) / sizeof(u_int32_t);
+ else if (type == DB_QUEUE)
+ len = sizeof(DB_QUEUE_STAT) / sizeof(u_int32_t);
+ else /* BTREE or RECNO are same stats */
+ len = sizeof(DB_BTREE_STAT) / sizeof(u_int32_t);
+ /*
+ * Set up our list of stats.
+ */
+ ret = __db_stats_list(dbp->dbenv,
+ &replyp->statslist, (u_int32_t*)sp, len);
+
+ __os_free(sp, 0);
+ if (ret == 0)
+ *freep = 1;
+ replyp->status = ret;
+ return;
+}
+
+int
+__db_stats_list(dbenv, locp, pp, len)
+ DB_ENV *dbenv;
+ __db_stat_statsreplist **locp;
+ u_int32_t *pp;
+ int len;
+{
+ u_int32_t *p, *q;
+ int i, ret;
+ __db_stat_statsreplist *nl, **nlp;
+
+ nlp = locp;
+ for (i = 0; i < len; i++) {
+ p = pp+i;
+ if ((ret = __os_malloc(dbenv, sizeof(*nl), NULL, nlp)) != 0)
+ goto out;
+ nl = *nlp;
+ nl->next = NULL;
+ if ((ret = __os_malloc(dbenv,
+ sizeof(u_int32_t), NULL, &nl->ent.ent_val)) != 0)
+ goto out;
+ q = (u_int32_t *)nl->ent.ent_val;
+ *q = *p;
+ nl->ent.ent_len = sizeof(u_int32_t);
+ nlp = &nl->next;
+ }
+ return (0);
+out:
+ __db_stats_freelist(locp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: void __db_stats_freelist __P((__db_stat_statsreplist **));
+ */
+void
+__db_stats_freelist(locp)
+ __db_stat_statsreplist **locp;
+{
+ __db_stat_statsreplist *nl, *nl1;
+
+ for (nl = *locp; nl != NULL; nl = nl1) {
+ nl1 = nl->next;
+ if (nl->ent.ent_val)
+ __os_free(nl->ent.ent_val, nl->ent.ent_len);
+ __os_free(nl, sizeof(*nl));
+ }
+ *locp = NULL;
+}
+
+/* BEGIN __db_swapped_1_proc */
+void
+__db_swapped_1_proc(dbpcl_id, replyp)
+ long dbpcl_id;
+ __db_swapped_reply *replyp;
+/* END __db_swapped_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->get_byteswapped(dbp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_sync_1_proc */
+void
+__db_sync_1_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_sync_reply *replyp;
+/* END __db_sync_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->sync(dbp, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_cursor_1_proc */
+void
+__db_cursor_1_proc(dbpcl_id, txnpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+/* END __db_cursor_1_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+ DBC *dbc;
+ ct_entry *dbc_ctp, *env_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ dbc_ctp->ct_activep = txnp_ctp->ct_activep;
+ } else
+ txnp = NULL;
+
+ if ((ret = dbp->cursor(dbp, txnp, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = CT_CURSOR;
+ dbc_ctp->ct_parent = dbp_ctp;
+ env_ctp = dbp_ctp->ct_envparent;
+ dbc_ctp->ct_envparent = env_ctp;
+ __dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbc_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_join_1_proc */
+void
+__db_join_1_proc(dbpcl_id, curslist,
+ flags, replyp)
+ long dbpcl_id;
+ u_int32_t * curslist;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+/* END __db_join_1_proc */
+{
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DBC *dbc;
+ DBC **jcurs, **c;
+ ct_entry *dbc_ctp, *ctp;
+ size_t size;
+ int ret;
+ u_int32_t *cl;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+	for (size = sizeof(DBC *), cl = curslist;
+	    *cl != 0; size += sizeof(DBC *), cl++)
+		;
+ if ((ret = __os_malloc(dbp->dbenv, size, NULL, &jcurs)) != 0) {
+ replyp->status = ret;
+ __dbclear_ctp(dbc_ctp);
+ return;
+ }
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ ctp = get_tableent(*curslist);
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ /*
+ * If we are using a transaction, set the join activity timer
+ * to point to the parent transaction.
+ */
+ if (ctp->ct_activep != &ctp->ct_active)
+ dbc_ctp->ct_activep = ctp->ct_activep;
+ for (cl = curslist, c = jcurs; *cl != 0; cl++, c++) {
+ ctp = get_tableent(*cl);
+ if (ctp == NULL) {
+ replyp->status = DB_NOSERVER_ID;
+ goto out;
+ }
+ /*
+ * If we are using a txn, the join cursor points to the
+ * transaction timeout. If we are not using a transaction,
+ * then all the curslist cursors must point to the join
+ * cursor's timeout so that we do not timeout any of the
+	 * curslist cursors while the join cursor is active.
+ * Change the type of the curslist ctps to CT_JOIN so that
+ * we know they are part of a join list and we can distinguish
+ * them and later restore them when the join cursor is closed.
+ */
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ ctp->ct_type |= CT_JOIN;
+ ctp->ct_origp = ctp->ct_activep;
+ /*
+ * Setting this to the ct_active field of the dbc_ctp is
+ * really just a way to distinguish which join dbc this
+ * cursor is part of. The ct_activep of this cursor is
+ * not used at all during its lifetime as part of a join
+ * cursor.
+ */
+ ctp->ct_activep = &dbc_ctp->ct_active;
+ *c = ctp->ct_dbc;
+ }
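+	/* DB->join expects a NULL-terminated array of cursors. */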
+ *c = NULL;
+ if ((ret = dbp->join(dbp, jcurs, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
+ dbc_ctp->ct_parent = dbp_ctp;
+ dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
+ __dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else {
+ __dbclear_ctp(dbc_ctp);
+ /*
+ * If we get an error, undo what we did above to any cursors.
+ */
+ for (cl = curslist; *cl != 0; cl++) {
+ ctp = get_tableent(*cl);
+ ctp->ct_type = CT_CURSOR;
+ ctp->ct_activep = ctp->ct_origp;
+ }
+ }
+
+ replyp->status = ret;
+out:
+ __os_free(jcurs, size);
+ return;
+}
+
+/* BEGIN __dbc_close_1_proc */
+void
+__dbc_close_1_proc(dbccl_id, replyp)
+ long dbccl_id;
+ __dbc_close_reply *replyp;
+/* END __dbc_close_1_proc */
+{
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ replyp->status = __dbc_close_int(dbc_ctp);
+ return;
+}
+
+/* BEGIN __dbc_count_1_proc */
+void
+__dbc_count_1_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+/* END __dbc_count_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+ db_recno_t num;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_count(dbc, &num, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->dupcount = num;
+ return;
+}
+
+/* BEGIN __dbc_del_1_proc */
+void
+__dbc_del_1_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_del_reply *replyp;
+/* END __dbc_del_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_del(dbc, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_dup_1_proc */
+void
+__dbc_dup_1_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+/* END __dbc_dup_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+ DBC *newdbc;
+ ct_entry *new_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ new_ctp = new_ct_ent(&replyp->status);
+ if (new_ctp == NULL)
+ return;
+
+ if ((ret = dbc->c_dup(dbc, &newdbc, flags)) == 0) {
+ new_ctp->ct_dbc = newdbc;
+ new_ctp->ct_type = CT_CURSOR;
+ new_ctp->ct_parent = dbc_ctp->ct_parent;
+ new_ctp->ct_envparent = dbc_ctp->ct_envparent;
+ /*
+ * If our cursor has a parent txn, we need to use it too.
+ */
+ if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
+ new_ctp->ct_activep = dbc_ctp->ct_activep;
+ __dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout);
+ __dbsrv_active(new_ctp);
+ replyp->dbcidcl_id = new_ctp->ct_id;
+ } else
+ __dbclear_ctp(new_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_get_1_proc */
+void
+__dbc_get_1_proc(dbccl_id, keydlen, keydoff,
+ keyflags, keydata, keysize, datadlen,
+ datadoff, dataflags, datadata, datasize,
+ flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+ int * freep;
+/* END __dbc_get_1_proc */
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ DBT key, data;
+ ct_entry *dbc_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+ dbenv = dbc->dbp->dbenv;
+
+ *freep = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.doff = datadoff;
+ data.flags = DB_DBT_MALLOC;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->c_get(dbc, &key, &data, flags);
+
+	/*
+	 * On success return the key and data; otherwise return just
+	 * the status.
+	 */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.data == keydata) {
+ ret = __os_malloc(dbenv, key.size, NULL,
+ &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_free(key.data, key.size);
+ __os_free(data.data, data.size);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_malloc(dbenv, data.size, NULL,
+ &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_free(key.data, key.size);
+ __os_free(data.data, data.size);
+ if (key_alloc)
+ __os_free(replyp->keydata.keydata_val,
+ key.size);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_put_1_proc */
+void
+__dbc_put_1_proc(dbccl_id, keydlen, keydoff,
+ keyflags, keydata, keysize, datadlen,
+ datadoff, dataflags, datadata, datasize,
+ flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+ int * freep;
+/* END __dbc_put_1_proc */
+{
+ int ret;
+ DBC * dbc;
+ DB *dbp;
+ ct_entry *dbc_ctp;
+ DBT key, data;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+ dbp = (DB *)dbc_ctp->ct_parent->ct_anyp;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = 0;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.doff = datadoff;
+ data.flags = dataflags;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the put */
+ ret = dbc->c_put(dbc, &key, &data, flags);
+
+ *freep = 0;
+ if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE) &&
+ dbp->type == DB_RECNO) {
+ /*
+ * We need to xdr_free whatever we are returning, next time.
+ */
+ replyp->keydata.keydata_val = key.data;
+ replyp->keydata.keydata_len = key.size;
+ } else {
+ replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+#endif /* HAVE_RPC */
diff --git a/bdb/rpc_server/db_server_proc.sed b/bdb/rpc_server/db_server_proc.sed
new file mode 100644
index 00000000000..7266275b2b6
--- /dev/null
+++ b/bdb/rpc_server/db_server_proc.sed
@@ -0,0 +1,418 @@
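+# Each c\ command below replaces the rpcgen-generated prototype found
+# between a pair of BEGIN/END markers with the server's own argument
+# list for that procedure.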
+/^\/\* BEGIN __env_cachesize_1_proc/,/^\/\* END __env_cachesize_1_proc/c\
+/* BEGIN __env_cachesize_1_proc */\
+void\
+__env_cachesize_1_proc(dbenvcl_id, gbytes, bytes,\
+\ \ ncache, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t gbytes;\
+\ u_int32_t bytes;\
+\ u_int32_t ncache;\
+\ __env_cachesize_reply *replyp;\
+/* END __env_cachesize_1_proc */
+/^\/\* BEGIN __env_close_1_proc/,/^\/\* END __env_close_1_proc/c\
+/* BEGIN __env_close_1_proc */\
+void\
+__env_close_1_proc(dbenvcl_id, flags, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t flags;\
+\ __env_close_reply *replyp;\
+/* END __env_close_1_proc */
+/^\/\* BEGIN __env_create_1_proc/,/^\/\* END __env_create_1_proc/c\
+/* BEGIN __env_create_1_proc */\
+void\
+__env_create_1_proc(timeout, replyp)\
+\ u_int32_t timeout;\
+\ __env_create_reply *replyp;\
+/* END __env_create_1_proc */
+/^\/\* BEGIN __env_flags_1_proc/,/^\/\* END __env_flags_1_proc/c\
+/* BEGIN __env_flags_1_proc */\
+void\
+__env_flags_1_proc(dbenvcl_id, flags, onoff, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t flags;\
+\ u_int32_t onoff;\
+\ __env_flags_reply *replyp;\
+/* END __env_flags_1_proc */
+/^\/\* BEGIN __env_open_1_proc/,/^\/\* END __env_open_1_proc/c\
+/* BEGIN __env_open_1_proc */\
+void\
+__env_open_1_proc(dbenvcl_id, home, flags,\
+\ \ mode, replyp)\
+\ long dbenvcl_id;\
+\ char *home;\
+\ u_int32_t flags;\
+\ u_int32_t mode;\
+\ __env_open_reply *replyp;\
+/* END __env_open_1_proc */
+/^\/\* BEGIN __env_remove_1_proc/,/^\/\* END __env_remove_1_proc/c\
+/* BEGIN __env_remove_1_proc */\
+void\
+__env_remove_1_proc(dbenvcl_id, home, flags, replyp)\
+\ long dbenvcl_id;\
+\ char *home;\
+\ u_int32_t flags;\
+\ __env_remove_reply *replyp;\
+/* END __env_remove_1_proc */
+/^\/\* BEGIN __txn_abort_1_proc/,/^\/\* END __txn_abort_1_proc/c\
+/* BEGIN __txn_abort_1_proc */\
+void\
+__txn_abort_1_proc(txnpcl_id, replyp)\
+\ long txnpcl_id;\
+\ __txn_abort_reply *replyp;\
+/* END __txn_abort_1_proc */
+/^\/\* BEGIN __txn_begin_1_proc/,/^\/\* END __txn_begin_1_proc/c\
+/* BEGIN __txn_begin_1_proc */\
+void\
+__txn_begin_1_proc(envpcl_id, parentcl_id,\
+\ \ flags, replyp)\
+\ long envpcl_id;\
+\ long parentcl_id;\
+\ u_int32_t flags;\
+\ __txn_begin_reply *replyp;\
+/* END __txn_begin_1_proc */
+/^\/\* BEGIN __txn_commit_1_proc/,/^\/\* END __txn_commit_1_proc/c\
+/* BEGIN __txn_commit_1_proc */\
+void\
+__txn_commit_1_proc(txnpcl_id, flags, replyp)\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __txn_commit_reply *replyp;\
+/* END __txn_commit_1_proc */
+/^\/\* BEGIN __db_bt_maxkey_1_proc/,/^\/\* END __db_bt_maxkey_1_proc/c\
+/* BEGIN __db_bt_maxkey_1_proc */\
+void\
+__db_bt_maxkey_1_proc(dbpcl_id, maxkey, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t maxkey;\
+\ __db_bt_maxkey_reply *replyp;\
+/* END __db_bt_maxkey_1_proc */
+/^\/\* BEGIN __db_bt_minkey_1_proc/,/^\/\* END __db_bt_minkey_1_proc/c\
+/* BEGIN __db_bt_minkey_1_proc */\
+void\
+__db_bt_minkey_1_proc(dbpcl_id, minkey, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t minkey;\
+\ __db_bt_minkey_reply *replyp;\
+/* END __db_bt_minkey_1_proc */
+/^\/\* BEGIN __db_close_1_proc/,/^\/\* END __db_close_1_proc/c\
+/* BEGIN __db_close_1_proc */\
+void\
+__db_close_1_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_close_reply *replyp;\
+/* END __db_close_1_proc */
+/^\/\* BEGIN __db_create_1_proc/,/^\/\* END __db_create_1_proc/c\
+/* BEGIN __db_create_1_proc */\
+void\
+__db_create_1_proc(flags, envpcl_id, replyp)\
+\ u_int32_t flags;\
+\ long envpcl_id;\
+\ __db_create_reply *replyp;\
+/* END __db_create_1_proc */
+/^\/\* BEGIN __db_del_1_proc/,/^\/\* END __db_del_1_proc/c\
+/* BEGIN __db_del_1_proc */\
+void\
+__db_del_1_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyflags, keydata, keysize,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t flags;\
+\ __db_del_reply *replyp;\
+/* END __db_del_1_proc */
+/^\/\* BEGIN __db_extentsize_1_proc/,/^\/\* END __db_extentsize_1_proc/c\
+/* BEGIN __db_extentsize_1_proc */\
+void\
+__db_extentsize_1_proc(dbpcl_id, extentsize, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t extentsize;\
+\ __db_extentsize_reply *replyp;\
+/* END __db_extentsize_1_proc */
+/^\/\* BEGIN __db_flags_1_proc/,/^\/\* END __db_flags_1_proc/c\
+/* BEGIN __db_flags_1_proc */\
+void\
+__db_flags_1_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_flags_reply *replyp;\
+/* END __db_flags_1_proc */
+/^\/\* BEGIN __db_get_1_proc/,/^\/\* END __db_get_1_proc/c\
+/* BEGIN __db_get_1_proc */\
+void\
+__db_get_1_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyflags, keydata, keysize,\
+\ \ datadlen, datadoff, dataflags, datadata,\
+\ \ datasize, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __db_get_reply *replyp;\
+\ int * freep;\
+/* END __db_get_1_proc */
+/^\/\* BEGIN __db_h_ffactor_1_proc/,/^\/\* END __db_h_ffactor_1_proc/c\
+/* BEGIN __db_h_ffactor_1_proc */\
+void\
+__db_h_ffactor_1_proc(dbpcl_id, ffactor, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t ffactor;\
+\ __db_h_ffactor_reply *replyp;\
+/* END __db_h_ffactor_1_proc */
+/^\/\* BEGIN __db_h_nelem_1_proc/,/^\/\* END __db_h_nelem_1_proc/c\
+/* BEGIN __db_h_nelem_1_proc */\
+void\
+__db_h_nelem_1_proc(dbpcl_id, nelem, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t nelem;\
+\ __db_h_nelem_reply *replyp;\
+/* END __db_h_nelem_1_proc */
+/^\/\* BEGIN __db_key_range_1_proc/,/^\/\* END __db_key_range_1_proc/c\
+/* BEGIN __db_key_range_1_proc */\
+void\
+__db_key_range_1_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyflags, keydata, keysize,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t flags;\
+\ __db_key_range_reply *replyp;\
+/* END __db_key_range_1_proc */
+/^\/\* BEGIN __db_lorder_1_proc/,/^\/\* END __db_lorder_1_proc/c\
+/* BEGIN __db_lorder_1_proc */\
+void\
+__db_lorder_1_proc(dbpcl_id, lorder, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t lorder;\
+\ __db_lorder_reply *replyp;\
+/* END __db_lorder_1_proc */
+/^\/\* BEGIN __db_open_1_proc/,/^\/\* END __db_open_1_proc/c\
+/* BEGIN __db_open_1_proc */\
+void\
+__db_open_1_proc(dbpcl_id, name, subdb,\
+\ \ type, flags, mode, replyp)\
+\ long dbpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ u_int32_t type;\
+\ u_int32_t flags;\
+\ u_int32_t mode;\
+\ __db_open_reply *replyp;\
+/* END __db_open_1_proc */
+/^\/\* BEGIN __db_pagesize_1_proc/,/^\/\* END __db_pagesize_1_proc/c\
+/* BEGIN __db_pagesize_1_proc */\
+void\
+__db_pagesize_1_proc(dbpcl_id, pagesize, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t pagesize;\
+\ __db_pagesize_reply *replyp;\
+/* END __db_pagesize_1_proc */
+/^\/\* BEGIN __db_put_1_proc/,/^\/\* END __db_put_1_proc/c\
+/* BEGIN __db_put_1_proc */\
+void\
+__db_put_1_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyflags, keydata, keysize,\
+\ \ datadlen, datadoff, dataflags, datadata,\
+\ \ datasize, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __db_put_reply *replyp;\
+\ int * freep;\
+/* END __db_put_1_proc */
+/^\/\* BEGIN __db_re_delim_1_proc/,/^\/\* END __db_re_delim_1_proc/c\
+/* BEGIN __db_re_delim_1_proc */\
+void\
+__db_re_delim_1_proc(dbpcl_id, delim, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t delim;\
+\ __db_re_delim_reply *replyp;\
+/* END __db_re_delim_1_proc */
+/^\/\* BEGIN __db_re_len_1_proc/,/^\/\* END __db_re_len_1_proc/c\
+/* BEGIN __db_re_len_1_proc */\
+void\
+__db_re_len_1_proc(dbpcl_id, len, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t len;\
+\ __db_re_len_reply *replyp;\
+/* END __db_re_len_1_proc */
+/^\/\* BEGIN __db_re_pad_1_proc/,/^\/\* END __db_re_pad_1_proc/c\
+/* BEGIN __db_re_pad_1_proc */\
+void\
+__db_re_pad_1_proc(dbpcl_id, pad, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t pad;\
+\ __db_re_pad_reply *replyp;\
+/* END __db_re_pad_1_proc */
+/^\/\* BEGIN __db_remove_1_proc/,/^\/\* END __db_remove_1_proc/c\
+/* BEGIN __db_remove_1_proc */\
+void\
+__db_remove_1_proc(dbpcl_id, name, subdb,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ u_int32_t flags;\
+\ __db_remove_reply *replyp;\
+/* END __db_remove_1_proc */
+/^\/\* BEGIN __db_rename_1_proc/,/^\/\* END __db_rename_1_proc/c\
+/* BEGIN __db_rename_1_proc */\
+void\
+__db_rename_1_proc(dbpcl_id, name, subdb,\
+\ \ newname, flags, replyp)\
+\ long dbpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ char *newname;\
+\ u_int32_t flags;\
+\ __db_rename_reply *replyp;\
+/* END __db_rename_1_proc */
+/^\/\* BEGIN __db_stat_1_proc/,/^\/\* END __db_stat_1_proc/c\
+/* BEGIN __db_stat_1_proc */\
+void\
+__db_stat_1_proc(dbpcl_id,\
+\ \ flags, replyp, freep)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_stat_reply *replyp;\
+\ int * freep;\
+/* END __db_stat_1_proc */
+/^\/\* BEGIN __db_swapped_1_proc/,/^\/\* END __db_swapped_1_proc/c\
+/* BEGIN __db_swapped_1_proc */\
+void\
+__db_swapped_1_proc(dbpcl_id, replyp)\
+\ long dbpcl_id;\
+\ __db_swapped_reply *replyp;\
+/* END __db_swapped_1_proc */
+/^\/\* BEGIN __db_sync_1_proc/,/^\/\* END __db_sync_1_proc/c\
+/* BEGIN __db_sync_1_proc */\
+void\
+__db_sync_1_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_sync_reply *replyp;\
+/* END __db_sync_1_proc */
+/^\/\* BEGIN __db_cursor_1_proc/,/^\/\* END __db_cursor_1_proc/c\
+/* BEGIN __db_cursor_1_proc */\
+void\
+__db_cursor_1_proc(dbpcl_id, txnpcl_id,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __db_cursor_reply *replyp;\
+/* END __db_cursor_1_proc */
+/^\/\* BEGIN __db_join_1_proc/,/^\/\* END __db_join_1_proc/c\
+/* BEGIN __db_join_1_proc */\
+void\
+__db_join_1_proc(dbpcl_id, curslist,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t * curslist;\
+\ u_int32_t flags;\
+\ __db_join_reply *replyp;\
+/* END __db_join_1_proc */
+/^\/\* BEGIN __dbc_close_1_proc/,/^\/\* END __dbc_close_1_proc/c\
+/* BEGIN __dbc_close_1_proc */\
+void\
+__dbc_close_1_proc(dbccl_id, replyp)\
+\ long dbccl_id;\
+\ __dbc_close_reply *replyp;\
+/* END __dbc_close_1_proc */
+/^\/\* BEGIN __dbc_count_1_proc/,/^\/\* END __dbc_count_1_proc/c\
+/* BEGIN __dbc_count_1_proc */\
+void\
+__dbc_count_1_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_count_reply *replyp;\
+/* END __dbc_count_1_proc */
+/^\/\* BEGIN __dbc_del_1_proc/,/^\/\* END __dbc_del_1_proc/c\
+/* BEGIN __dbc_del_1_proc */\
+void\
+__dbc_del_1_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_del_reply *replyp;\
+/* END __dbc_del_1_proc */
+/^\/\* BEGIN __dbc_dup_1_proc/,/^\/\* END __dbc_dup_1_proc/c\
+/* BEGIN __dbc_dup_1_proc */\
+void\
+__dbc_dup_1_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_dup_reply *replyp;\
+/* END __dbc_dup_1_proc */
+/^\/\* BEGIN __dbc_get_1_proc/,/^\/\* END __dbc_get_1_proc/c\
+/* BEGIN __dbc_get_1_proc */\
+void\
+__dbc_get_1_proc(dbccl_id, keydlen, keydoff,\
+\ \ keyflags, keydata, keysize, datadlen,\
+\ \ datadoff, dataflags, datadata, datasize,\
+\ \ flags, replyp, freep)\
+\ long dbccl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __dbc_get_reply *replyp;\
+\ int * freep;\
+/* END __dbc_get_1_proc */
+/^\/\* BEGIN __dbc_put_1_proc/,/^\/\* END __dbc_put_1_proc/c\
+/* BEGIN __dbc_put_1_proc */\
+void\
+__dbc_put_1_proc(dbccl_id, keydlen, keydoff,\
+\ \ keyflags, keydata, keysize, datadlen,\
+\ \ datadoff, dataflags, datadata, datasize,\
+\ \ flags, replyp, freep)\
+\ long dbccl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __dbc_put_reply *replyp;\
+\ int * freep;\
+/* END __dbc_put_1_proc */
diff --git a/bdb/rpc_server/db_server_svc.c b/bdb/rpc_server/db_server_svc.c
new file mode 100644
index 00000000000..96d8a4a5dd9
--- /dev/null
+++ b/bdb/rpc_server/db_server_svc.c
@@ -0,0 +1,359 @@
+#include "db_config.h"
+#ifdef HAVE_RPC
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "db_server.h"
+#include <stdio.h>
+#include <stdlib.h> /* getenv, exit */
+#include <memory.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+extern void __dbsrv_timeout();
+
+#ifdef DEBUG
+#define RPC_SVC_FG
+#endif
+
+static void
+db_serverprog_1(rqstp, transp)
+ struct svc_req *rqstp;
+ register SVCXPRT *transp;
+{
+ union {
+ __env_cachesize_msg __db_env_cachesize_1_arg;
+ __env_close_msg __db_env_close_1_arg;
+ __env_create_msg __db_env_create_1_arg;
+ __env_flags_msg __db_env_flags_1_arg;
+ __env_open_msg __db_env_open_1_arg;
+ __env_remove_msg __db_env_remove_1_arg;
+ __txn_abort_msg __db_txn_abort_1_arg;
+ __txn_begin_msg __db_txn_begin_1_arg;
+ __txn_commit_msg __db_txn_commit_1_arg;
+ __db_bt_maxkey_msg __db_db_bt_maxkey_1_arg;
+ __db_bt_minkey_msg __db_db_bt_minkey_1_arg;
+ __db_close_msg __db_db_close_1_arg;
+ __db_create_msg __db_db_create_1_arg;
+ __db_del_msg __db_db_del_1_arg;
+ __db_extentsize_msg __db_db_extentsize_1_arg;
+ __db_flags_msg __db_db_flags_1_arg;
+ __db_get_msg __db_db_get_1_arg;
+ __db_h_ffactor_msg __db_db_h_ffactor_1_arg;
+ __db_h_nelem_msg __db_db_h_nelem_1_arg;
+ __db_key_range_msg __db_db_key_range_1_arg;
+ __db_lorder_msg __db_db_lorder_1_arg;
+ __db_open_msg __db_db_open_1_arg;
+ __db_pagesize_msg __db_db_pagesize_1_arg;
+ __db_put_msg __db_db_put_1_arg;
+ __db_re_delim_msg __db_db_re_delim_1_arg;
+ __db_re_len_msg __db_db_re_len_1_arg;
+ __db_re_pad_msg __db_db_re_pad_1_arg;
+ __db_remove_msg __db_db_remove_1_arg;
+ __db_rename_msg __db_db_rename_1_arg;
+ __db_stat_msg __db_db_stat_1_arg;
+ __db_swapped_msg __db_db_swapped_1_arg;
+ __db_sync_msg __db_db_sync_1_arg;
+ __db_cursor_msg __db_db_cursor_1_arg;
+ __db_join_msg __db_db_join_1_arg;
+ __dbc_close_msg __db_dbc_close_1_arg;
+ __dbc_count_msg __db_dbc_count_1_arg;
+ __dbc_del_msg __db_dbc_del_1_arg;
+ __dbc_dup_msg __db_dbc_dup_1_arg;
+ __dbc_get_msg __db_dbc_get_1_arg;
+ __dbc_put_msg __db_dbc_put_1_arg;
+ } argument;
+ char *result;
+ bool_t (*xdr_argument)(), (*xdr_result)();
+ char *(*local)();
+
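+	/*
+	 * Select the argument and result XDR routines and the local
+	 * procedure that implements the requested RPC.
+	 */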
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp, xdr_void,
+ (char *)NULL);
+ return;
+
+ case __DB_env_cachesize:
+ xdr_argument = xdr___env_cachesize_msg;
+ xdr_result = xdr___env_cachesize_reply;
+ local = (char *(*)()) __db_env_cachesize_1;
+ break;
+
+ case __DB_env_close:
+ xdr_argument = xdr___env_close_msg;
+ xdr_result = xdr___env_close_reply;
+ local = (char *(*)()) __db_env_close_1;
+ break;
+
+ case __DB_env_create:
+ xdr_argument = xdr___env_create_msg;
+ xdr_result = xdr___env_create_reply;
+ local = (char *(*)()) __db_env_create_1;
+ break;
+
+ case __DB_env_flags:
+ xdr_argument = xdr___env_flags_msg;
+ xdr_result = xdr___env_flags_reply;
+ local = (char *(*)()) __db_env_flags_1;
+ break;
+
+ case __DB_env_open:
+ xdr_argument = xdr___env_open_msg;
+ xdr_result = xdr___env_open_reply;
+ local = (char *(*)()) __db_env_open_1;
+ break;
+
+ case __DB_env_remove:
+ xdr_argument = xdr___env_remove_msg;
+ xdr_result = xdr___env_remove_reply;
+ local = (char *(*)()) __db_env_remove_1;
+ break;
+
+ case __DB_txn_abort:
+ xdr_argument = xdr___txn_abort_msg;
+ xdr_result = xdr___txn_abort_reply;
+ local = (char *(*)()) __db_txn_abort_1;
+ break;
+
+ case __DB_txn_begin:
+ xdr_argument = xdr___txn_begin_msg;
+ xdr_result = xdr___txn_begin_reply;
+ local = (char *(*)()) __db_txn_begin_1;
+ break;
+
+ case __DB_txn_commit:
+ xdr_argument = xdr___txn_commit_msg;
+ xdr_result = xdr___txn_commit_reply;
+ local = (char *(*)()) __db_txn_commit_1;
+ break;
+
+ case __DB_db_bt_maxkey:
+ xdr_argument = xdr___db_bt_maxkey_msg;
+ xdr_result = xdr___db_bt_maxkey_reply;
+ local = (char *(*)()) __db_db_bt_maxkey_1;
+ break;
+
+ case __DB_db_bt_minkey:
+ xdr_argument = xdr___db_bt_minkey_msg;
+ xdr_result = xdr___db_bt_minkey_reply;
+ local = (char *(*)()) __db_db_bt_minkey_1;
+ break;
+
+ case __DB_db_close:
+ xdr_argument = xdr___db_close_msg;
+ xdr_result = xdr___db_close_reply;
+ local = (char *(*)()) __db_db_close_1;
+ break;
+
+ case __DB_db_create:
+ xdr_argument = xdr___db_create_msg;
+ xdr_result = xdr___db_create_reply;
+ local = (char *(*)()) __db_db_create_1;
+ break;
+
+ case __DB_db_del:
+ xdr_argument = xdr___db_del_msg;
+ xdr_result = xdr___db_del_reply;
+ local = (char *(*)()) __db_db_del_1;
+ break;
+
+ case __DB_db_extentsize:
+ xdr_argument = xdr___db_extentsize_msg;
+ xdr_result = xdr___db_extentsize_reply;
+ local = (char *(*)()) __db_db_extentsize_1;
+ break;
+
+ case __DB_db_flags:
+ xdr_argument = xdr___db_flags_msg;
+ xdr_result = xdr___db_flags_reply;
+ local = (char *(*)()) __db_db_flags_1;
+ break;
+
+ case __DB_db_get:
+ xdr_argument = xdr___db_get_msg;
+ xdr_result = xdr___db_get_reply;
+ local = (char *(*)()) __db_db_get_1;
+ break;
+
+ case __DB_db_h_ffactor:
+ xdr_argument = xdr___db_h_ffactor_msg;
+ xdr_result = xdr___db_h_ffactor_reply;
+ local = (char *(*)()) __db_db_h_ffactor_1;
+ break;
+
+ case __DB_db_h_nelem:
+ xdr_argument = xdr___db_h_nelem_msg;
+ xdr_result = xdr___db_h_nelem_reply;
+ local = (char *(*)()) __db_db_h_nelem_1;
+ break;
+
+ case __DB_db_key_range:
+ xdr_argument = xdr___db_key_range_msg;
+ xdr_result = xdr___db_key_range_reply;
+ local = (char *(*)()) __db_db_key_range_1;
+ break;
+
+ case __DB_db_lorder:
+ xdr_argument = xdr___db_lorder_msg;
+ xdr_result = xdr___db_lorder_reply;
+ local = (char *(*)()) __db_db_lorder_1;
+ break;
+
+ case __DB_db_open:
+ xdr_argument = xdr___db_open_msg;
+ xdr_result = xdr___db_open_reply;
+ local = (char *(*)()) __db_db_open_1;
+ break;
+
+ case __DB_db_pagesize:
+ xdr_argument = xdr___db_pagesize_msg;
+ xdr_result = xdr___db_pagesize_reply;
+ local = (char *(*)()) __db_db_pagesize_1;
+ break;
+
+ case __DB_db_put:
+ xdr_argument = xdr___db_put_msg;
+ xdr_result = xdr___db_put_reply;
+ local = (char *(*)()) __db_db_put_1;
+ break;
+
+ case __DB_db_re_delim:
+ xdr_argument = xdr___db_re_delim_msg;
+ xdr_result = xdr___db_re_delim_reply;
+ local = (char *(*)()) __db_db_re_delim_1;
+ break;
+
+ case __DB_db_re_len:
+ xdr_argument = xdr___db_re_len_msg;
+ xdr_result = xdr___db_re_len_reply;
+ local = (char *(*)()) __db_db_re_len_1;
+ break;
+
+ case __DB_db_re_pad:
+ xdr_argument = xdr___db_re_pad_msg;
+ xdr_result = xdr___db_re_pad_reply;
+ local = (char *(*)()) __db_db_re_pad_1;
+ break;
+
+ case __DB_db_remove:
+ xdr_argument = xdr___db_remove_msg;
+ xdr_result = xdr___db_remove_reply;
+ local = (char *(*)()) __db_db_remove_1;
+ break;
+
+ case __DB_db_rename:
+ xdr_argument = xdr___db_rename_msg;
+ xdr_result = xdr___db_rename_reply;
+ local = (char *(*)()) __db_db_rename_1;
+ break;
+
+ case __DB_db_stat:
+ xdr_argument = xdr___db_stat_msg;
+ xdr_result = xdr___db_stat_reply;
+ local = (char *(*)()) __db_db_stat_1;
+ break;
+
+ case __DB_db_swapped:
+ xdr_argument = xdr___db_swapped_msg;
+ xdr_result = xdr___db_swapped_reply;
+ local = (char *(*)()) __db_db_swapped_1;
+ break;
+
+ case __DB_db_sync:
+ xdr_argument = xdr___db_sync_msg;
+ xdr_result = xdr___db_sync_reply;
+ local = (char *(*)()) __db_db_sync_1;
+ break;
+
+ case __DB_db_cursor:
+ xdr_argument = xdr___db_cursor_msg;
+ xdr_result = xdr___db_cursor_reply;
+ local = (char *(*)()) __db_db_cursor_1;
+ break;
+
+ case __DB_db_join:
+ xdr_argument = xdr___db_join_msg;
+ xdr_result = xdr___db_join_reply;
+ local = (char *(*)()) __db_db_join_1;
+ break;
+
+ case __DB_dbc_close:
+ xdr_argument = xdr___dbc_close_msg;
+ xdr_result = xdr___dbc_close_reply;
+ local = (char *(*)()) __db_dbc_close_1;
+ break;
+
+ case __DB_dbc_count:
+ xdr_argument = xdr___dbc_count_msg;
+ xdr_result = xdr___dbc_count_reply;
+ local = (char *(*)()) __db_dbc_count_1;
+ break;
+
+ case __DB_dbc_del:
+ xdr_argument = xdr___dbc_del_msg;
+ xdr_result = xdr___dbc_del_reply;
+ local = (char *(*)()) __db_dbc_del_1;
+ break;
+
+ case __DB_dbc_dup:
+ xdr_argument = xdr___dbc_dup_msg;
+ xdr_result = xdr___dbc_dup_reply;
+ local = (char *(*)()) __db_dbc_dup_1;
+ break;
+
+ case __DB_dbc_get:
+ xdr_argument = xdr___dbc_get_msg;
+ xdr_result = xdr___dbc_get_reply;
+ local = (char *(*)()) __db_dbc_get_1;
+ break;
+
+ case __DB_dbc_put:
+ xdr_argument = xdr___dbc_put_msg;
+ xdr_result = xdr___dbc_put_reply;
+ local = (char *(*)()) __db_dbc_put_1;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ result = (*local)(&argument, rqstp);
+ if (result != NULL && !svc_sendreply(transp, xdr_result, result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, &argument)) {
+ fprintf(stderr, "unable to free arguments");
+ exit(1);
+ }
+ __dbsrv_timeout(0);
+ return;
+}
+
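+/*
+ * __dbsrv_main --
+ *	Unregister any stale portmapper entry, register the service over
+ *	TCP, and hand control to svc_run(); this function does not return
+ *	to its caller.
+ */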
+void __dbsrv_main()
+{
+ register SVCXPRT *transp;
+
+ (void) pmap_unset(DB_SERVERPROG, DB_SERVERVERS);
+
+ transp = svctcp_create(RPC_ANYSOCK, 0, 0);
+ if (transp == NULL) {
+ fprintf(stderr, "cannot create tcp service.");
+ exit(1);
+ }
+ if (!svc_register(transp, DB_SERVERPROG, DB_SERVERVERS, db_serverprog_1, IPPROTO_TCP)) {
+ fprintf(stderr, "unable to register (DB_SERVERPROG, DB_SERVERVERS, tcp).");
+ exit(1);
+ }
+
+ svc_run();
+ fprintf(stderr, "svc_run returned");
+ exit(1);
+ /* NOTREACHED */
+}
+#endif /* HAVE_RPC */
diff --git a/bdb/rpc_server/db_server_svc.sed b/bdb/rpc_server/db_server_svc.sed
new file mode 100644
index 00000000000..9d540e51af6
--- /dev/null
+++ b/bdb/rpc_server/db_server_svc.sed
@@ -0,0 +1,5 @@
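+# Post-process the rpcgen output: declare __dbsrv_timeout(), invoke it
+# before each return from the dispatch routine, and rename main() to
+# __dbsrv_main() so the server can provide its own main().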
+/^#include <netinet.in.h>/a\
+\extern void __dbsrv_timeout();
+/^ return;/i\
+\ __dbsrv_timeout(0);
+s/^main/void __dbsrv_main/
diff --git a/bdb/rpc_server/db_server_util.c b/bdb/rpc_server/db_server_util.c
new file mode 100644
index 00000000000..862bbd05efb
--- /dev/null
+++ b/bdb/rpc_server/db_server_util.c
@@ -0,0 +1,612 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_server_util.c,v 1.32 2001/01/18 18:36:59 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <rpc/rpc.h>
+
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+#include "db_server.h"
+
+#include "db_int.h"
+#include "clib_ext.h"
+#include "db_server_int.h"
+#include "rpc_server_ext.h"
+#include "common_ext.h"
+
+extern int __dbsrv_main __P((void));
+static int add_home __P((char *));
+static int env_recover __P((char *));
+static void __dbclear_child __P((ct_entry *));
+
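+/*
+ * Global state: the list of all active client handles (ct_entry), the
+ * list of environment homes given on the command line, and the server's
+ * timeout defaults.
+ */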
+static LIST_HEAD(cthead, ct_entry) __dbsrv_head;
+static LIST_HEAD(homehead, home_entry) __dbsrv_home;
+static long __dbsrv_defto = DB_SERVER_TIMEOUT;
+static long __dbsrv_maxto = DB_SERVER_MAXTIMEOUT;
+static long __dbsrv_idleto = DB_SERVER_IDLETIMEOUT;
+static char *logfile = NULL;
+static char *prog;
+
+static void usage __P((char *));
+static void version_check __P((void));
+
+int __dbsrv_verbose = 0;
+
+int
+main(argc, argv)
+ int argc;
+ char **argv;
+{
+ extern char *optarg;
+ extern int optind;
+ CLIENT *cl;
+ int ch, ret;
+
+ prog = argv[0];
+
+ version_check();
+
+ /*
+ * Check whether another server is running or not. There
+ * is a race condition where two servers could be racing to
+ * register with the portmapper. The goal of this check is to
+ * forbid running additional servers (like those started from
+ * the test suite) if the user is already running one.
+ *
+ * XXX
+	 * This does not prevent two servers from being started at
+	 * the same time and both running recovery on the same
+	 * environments.
+ */
+ if ((cl = clnt_create("localhost",
+ DB_SERVERPROG, DB_SERVERVERS, "tcp")) != NULL) {
+ fprintf(stderr,
+ "%s: Berkeley DB RPC server already running.\n", prog);
+ clnt_destroy(cl);
+ exit(1);
+ }
+
+ LIST_INIT(&__dbsrv_home);
+ while ((ch = getopt(argc, argv, "h:I:L:t:T:Vv")) != EOF)
+ switch (ch) {
+ case 'h':
+ (void)add_home(optarg);
+ break;
+ case 'I':
+ (void)__db_getlong(NULL, prog, optarg, 1,
+ LONG_MAX, &__dbsrv_idleto);
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 't':
+ (void)__db_getlong(NULL, prog, optarg, 1,
+ LONG_MAX, &__dbsrv_defto);
+ break;
+ case 'T':
+ (void)__db_getlong(NULL, prog, optarg, 1,
+ LONG_MAX, &__dbsrv_maxto);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ exit(0);
+ case 'v':
+ __dbsrv_verbose = 1;
+ break;
+ default:
+ usage(prog);
+ }
+ /*
+	 * Check the default timeout against the maximum timeout.
+ */
+ if (__dbsrv_defto > __dbsrv_maxto)
+ __dbsrv_defto = __dbsrv_maxto;
+
+ /*
+	 * Check the default timeout against the idle timeout.
+	 * It would be bad to time out environments sooner than txns.
+ */
+ if (__dbsrv_defto > __dbsrv_idleto)
+printf("%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
+ prog, __dbsrv_idleto, __dbsrv_defto);
+
+ LIST_INIT(&__dbsrv_head);
+
+ /*
+ * If a client crashes during an RPC, our reply to it
+ * generates a SIGPIPE. Ignore SIGPIPE so we don't exit unnecessarily.
+ */
+#ifdef SIGPIPE
+ signal(SIGPIPE, SIG_IGN);
+#endif
+
+ if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile))
+ exit(1);
+
+ /*
+ * Now that we are ready to start, run recovery on all the
+ * environments specified.
+ */
+ if ((ret = env_recover(prog)) != 0)
+ exit(1);
+
+ /*
+ * We've done our setup, now call the generated server loop
+ */
+ if (__dbsrv_verbose)
+ printf("%s: Ready to receive requests\n", prog);
+ __dbsrv_main();
+
+ /* NOTREACHED */
+ abort();
+}
+
+static void
+usage(prog)
+ char *prog;
+{
+ fprintf(stderr, "usage: %s %s\n\t%s\n", prog,
+ "[-Vv] [-h home]",
+ "[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
+ exit(1);
+}
+
+static void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ prog, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit (1);
+ }
+}
+
+/*
+ * PUBLIC: void __dbsrv_settimeout __P((ct_entry *, u_int32_t));
+ */
+void
+__dbsrv_settimeout(ctp, to)
+ ct_entry *ctp;
+ u_int32_t to;
+{
+ if (to > (u_int32_t)__dbsrv_maxto)
+ ctp->ct_timeout = __dbsrv_maxto;
+ else if (to <= 0)
+ ctp->ct_timeout = __dbsrv_defto;
+ else
+ ctp->ct_timeout = to;
+}
+
+/*
+ * PUBLIC: void __dbsrv_timeout __P((int));
+ */
+void
+__dbsrv_timeout(force)
+ int force;
+{
+ static long to_hint = -1;
+ DBC *dbcp;
+ time_t t;
+ long to;
+ ct_entry *ctp, *nextctp;
+
+ if ((t = time(NULL)) == -1)
+ return;
+
+ /*
+ * Check hint. If hint is further in the future
+ * than now, no work to do.
+ */
+ if (!force && to_hint > 0 && t < to_hint)
+ return;
+ to_hint = -1;
+ /*
+ * Timeout transactions or cursors holding DB resources.
+ * Do this before timing out envs to properly release resources.
+ *
+ * !!!
+ * We can just loop through this list looking for cursors and txns.
+ * We do not need to verify txn and cursor relationships at this
+ * point because we maintain the list in LIFO order *and* we
+ * maintain activity in the ultimate txn parent of any cursor
+ * so either everything in a txn is timing out, or nothing.
+ * So, since we are LIFO, we will correctly close/abort all the
+ * appropriate handles, in the correct order.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ switch (ctp->ct_type) {
+ case CT_TXN:
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out txn id %ld\n",
+ ctp->ct_id);
+ (void)txn_abort((DB_TXN *)ctp->ct_anyp);
+ __dbdel_ctp(ctp);
+ /*
+				 * If we timed out a txn, we may have closed
+ * all sorts of ctp's.
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ case CT_CURSOR:
+ case (CT_JOINCUR | CT_CURSOR):
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out cursor %ld\n",
+ ctp->ct_id);
+ dbcp = (DBC *)ctp->ct_anyp;
+ (void)__dbc_close_int(ctp);
+ /*
+ * Start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Timeout idle handles.
+ * If we are forcing a timeout, we'll close all env handles.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ to = *(ctp->ct_activep) + ctp->ct_idle;
+ /* TIMEOUT */
+ if (to < t || force) {
+ if (__dbsrv_verbose)
+ printf("Timing out env id %ld\n", ctp->ct_id);
+ (void)__dbenv_close_int(ctp->ct_id, 0);
+ /*
+ * If we timed out an env, we may have closed
+			 * all sorts of ctp's (maybe even all of them).
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ }
+ }
+}
+
+/*
+ * RECURSIVE FUNCTION. We need to clear/free any number of levels of nested
+ * layers.
+ */
+static void
+__dbclear_child(parent)
+ ct_entry *parent;
+{
+ ct_entry *ctp, *nextctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type == 0)
+ continue;
+ if (ctp->ct_parent == parent) {
+ __dbclear_child(ctp);
+ /*
+ * Need to do this here because le_next may
+ * have changed with the recursive call and we
+ * don't want to point to a removed entry.
+ */
+ nextctp = LIST_NEXT(ctp, entries);
+ __dbclear_ctp(ctp);
+ }
+ }
+}
+
+/*
+ * PUBLIC: void __dbclear_ctp __P((ct_entry *));
+ */
+void
+__dbclear_ctp(ctp)
+ ct_entry *ctp;
+{
+ LIST_REMOVE(ctp, entries);
+ __os_free(ctp, sizeof(ct_entry));
+}
+
+/*
+ * PUBLIC: void __dbdel_ctp __P((ct_entry *));
+ */
+void
+__dbdel_ctp(parent)
+ ct_entry *parent;
+{
+ __dbclear_child(parent);
+ __dbclear_ctp(parent);
+}
+
+/*
+ * PUBLIC: ct_entry *new_ct_ent __P((u_int32_t *));
+ */
+ct_entry *
+new_ct_ent(errp)
+ u_int32_t *errp;
+{
+ time_t t;
+ ct_entry *ctp, *octp;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(ct_entry), NULL, &ctp)) != 0) {
+ *errp = ret;
+ return (NULL);
+ }
+ /*
+	 * Use the current time as the ID.  However, we may service more
+	 * than one request per second; if so, increment the id value
+	 * until we find an unused one.  Entries are inserted at the head
+	 * of the list, so if the first entry's id doesn't collide, we
+	 * know for certain that our id is unused.
+ */
+ if ((t = time(NULL)) == -1) {
+ *errp = t;
+ __os_free(ctp, sizeof(ct_entry));
+ return (NULL);
+ }
+ octp = LIST_FIRST(&__dbsrv_head);
+ if (octp != NULL && octp->ct_id >= t)
+ t = octp->ct_id + 1;
+ ctp->ct_id = t;
+ ctp->ct_idle = __dbsrv_idleto;
+ ctp->ct_activep = &ctp->ct_active;
+ ctp->ct_origp = NULL;
+
+ LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries);
+ return (ctp);
+}
+
+/*
+ * PUBLIC: ct_entry *get_tableent __P((long));
+ */
+ct_entry *
+get_tableent(id)
+ long id;
+{
+ ct_entry *ctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries))
+ if (ctp->ct_id == id)
+ return (ctp);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: void __dbsrv_active __P((ct_entry *));
+ */
+void
+__dbsrv_active(ctp)
+ ct_entry *ctp;
+{
+ time_t t;
+ ct_entry *envctp;
+
+ if (ctp == NULL)
+ return;
+ if ((t = time(NULL)) == -1)
+ return;
+ *(ctp->ct_activep) = t;
+ if ((envctp = ctp->ct_envparent) == NULL)
+ return;
+ *(envctp->ct_activep) = t;
+ return;
+}
+
+/*
+ * PUBLIC: int __dbc_close_int __P((ct_entry *));
+ */
+int
+__dbc_close_int(dbc_ctp)
+ ct_entry *dbc_ctp;
+{
+ DBC *dbc;
+ int ret;
+ ct_entry *ctp;
+
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_close(dbc);
+ /*
+ * If this cursor is a join cursor then we need to fix up the
+ * cursors that it was joined from so that they are independent again.
+ */
+ if (dbc_ctp->ct_type & CT_JOINCUR)
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Test if it is a join cursor, and if it is part
+ * of this one.
+ */
+ if ((ctp->ct_type & CT_JOIN) &&
+ ctp->ct_activep == &dbc_ctp->ct_active) {
+ ctp->ct_type &= ~CT_JOIN;
+ ctp->ct_activep = ctp->ct_origp;
+ __dbsrv_active(ctp);
+ }
+ }
+ __dbclear_ctp(dbc_ctp);
+	return (ret);
+}
+
+/*
+ * PUBLIC: int __dbenv_close_int __P((long, int));
+ */
+int
+__dbenv_close_int(id, flags)
+ long id;
+ int flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+ ct_entry *ctp;
+
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_ENV);
+ dbenv = ctp->ct_envp;
+
+ ret = dbenv->close(dbenv, flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+static int
+add_home(home)
+ char *home;
+{
+ home_entry *hp, *homep;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(home_entry), NULL, &hp)) != 0)
+ return (ret);
+ if ((ret = __os_malloc(NULL, strlen(home)+1, NULL, &hp->home)) != 0)
+ return (ret);
+ memcpy(hp->home, home, strlen(home)+1);
+ hp->dir = home;
+ /*
+	 * This loop removes any trailing path separators, to ensure
+	 * that hp->name points to the last component.
+ */
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ while (*(hp->name) == '\0') {
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ }
+ /*
+	 * Before inserting it, make sure there are no identical names
+	 * already in the list.
+ */
+ for (homep = LIST_FIRST(&__dbsrv_home); homep != NULL;
+ homep = LIST_NEXT(homep, entries))
+ if (strcmp(homep->name, hp->name) == 0) {
+ printf("Already added home name %s, at directory %s\n",
+ hp->name, homep->dir);
+ return (-1);
+ }
+ LIST_INSERT_HEAD(&__dbsrv_home, hp, entries);
+ if (__dbsrv_verbose)
+ printf("Added home %s in dir %s\n", hp->name, hp->dir);
+ return (0);
+}
+
+/*
+ * PUBLIC: char *get_home __P((char *));
+ */
+char *
+get_home(name)
+ char *name;
+{
+ home_entry *hp;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries))
+ if (strcmp(name, hp->name) == 0)
+ return (hp->home);
+ return (NULL);
+}
+
+static int
+env_recover(progname)
+ char *progname;
+{
+ DB_ENV *dbenv;
+ home_entry *hp;
+ u_int32_t flags;
+ int exitval, ret;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries)) {
+ exitval = 0;
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ exit(1);
+ }
+ if (__dbsrv_verbose == 1) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ /*
+ * Initialize the env with DB_RECOVER. That is all we
+ * have to do to run recovery.
+ */
+ if (__dbsrv_verbose)
+ printf("Running recovery on %s\n", hp->home);
+ flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON | DB_RECOVER;
+ if ((ret = dbenv->open(dbenv, hp->home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DBENV->open");
+ goto error;
+ }
+
+ if (0) {
+error: exitval = 1;
+ }
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr, "%s: dbenv->close: %s\n",
+ progname, db_strerror(ret));
+ }
+ if (exitval)
+ return (exitval);
+ }
+ return (0);
+}
diff --git a/bdb/rpc_server/db_server_xdr.c b/bdb/rpc_server/db_server_xdr.c
new file mode 100644
index 00000000000..f403f862e5d
--- /dev/null
+++ b/bdb/rpc_server/db_server_xdr.c
@@ -0,0 +1,1149 @@
+#include "db_config.h"
+#ifdef HAVE_RPC
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "db_server.h"
+
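+/*
+ * Each xdr_* routine below encodes or decodes one RPC message or reply
+ * structure, field by field, returning FALSE as soon as any field
+ * fails to translate.
+ */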
+bool_t
+xdr___env_cachesize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_cachesize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->gbytes))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->bytes))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->ncache))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_cachesize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_cachesize_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_close_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_create_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_create_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->timeout))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_create_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_create_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->envcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_flags_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_flags_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->onoff))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_flags_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_flags_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_open_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_open_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->home, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->mode))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_open_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_open_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_remove_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_remove_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->home, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_remove_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_remove_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_abort_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_abort_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_abort_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_abort_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_begin_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_begin_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->envpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->parentcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_begin_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_begin_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_commit_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_commit_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_commit_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_commit_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_maxkey_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_maxkey_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->maxkey))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_maxkey_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_maxkey_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_minkey_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_minkey_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->minkey))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_minkey_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_minkey_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_close_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_create_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_create_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->envpcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_create_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_create_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_del_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_del_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_del_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_del_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_extentsize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_extentsize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->extentsize))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_extentsize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_extentsize_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_flags_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_flags_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_flags_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_flags_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_get_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_get_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_get_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_get_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_ffactor_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_ffactor_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->ffactor))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_ffactor_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_ffactor_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_nelem_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_nelem_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->nelem))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_nelem_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_nelem_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_key_range_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_key_range_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_key_range_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_key_range_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->less))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->equal))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->greater))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_lorder_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_lorder_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->lorder))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_lorder_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_lorder_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_open_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_open_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->type))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->mode))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_open_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_open_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->type))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbflags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pagesize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_pagesize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pagesize))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pagesize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_pagesize_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_put_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_put_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_put_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_put_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_delim_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_delim_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->delim))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_delim_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_delim_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_len_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_len_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_len_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_len_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_pad_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_pad_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pad))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_pad_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_pad_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_remove_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_remove_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_remove_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_remove_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_rename_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_rename_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->newname, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_rename_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_rename_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_stat_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_stat_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_stat_statsreplist(xdrs, objp)
+ register XDR *xdrs;
+ __db_stat_statsreplist *objp;
+{
+
+ if (!xdr_bytes(xdrs, (char **)&objp->ent.ent_val, (u_int *) &objp->ent.ent_len, ~0))
+ return (FALSE);
+ if (!xdr_pointer(xdrs, (char **)&objp->next, sizeof (__db_stat_statsreplist), (xdrproc_t) xdr___db_stat_statsreplist))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_stat_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_stat_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_pointer(xdrs, (char **)&objp->statslist, sizeof (__db_stat_statsreplist), (xdrproc_t) xdr___db_stat_statsreplist))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_swapped_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_swapped_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_swapped_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_swapped_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_sync_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_sync_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_sync_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_sync_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_cursor_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_cursor_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_cursor_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_cursor_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_join_curslist(xdrs, objp)
+ register XDR *xdrs;
+ __db_join_curslist *objp;
+{
+
+ if (!xdr_bytes(xdrs, (char **)&objp->ent.ent_val, (u_int *) &objp->ent.ent_len, ~0))
+ return (FALSE);
+ if (!xdr_pointer(xdrs, (char **)&objp->next, sizeof (__db_join_curslist), (xdrproc_t) xdr___db_join_curslist))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_join_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_join_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_pointer(xdrs, (char **)&objp->curslist, sizeof (__db_join_curslist), (xdrproc_t) xdr___db_join_curslist))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_join_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_join_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_close_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_count_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_count_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_count_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_count_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dupcount))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_del_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_del_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_del_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_del_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_dup_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_dup_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_dup_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_dup_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_get_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_get_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_get_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_get_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_put_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_put_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_put_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_put_reply *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+#endif /* HAVE_RPC */
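The filters above are plain XDR routines, so any standard XDR stream can drive them. As a reading aid, here is a minimal sketch — assuming only the generated __db_sync_msg type and filter from the file above plus the stock Sun RPC xdrmem_create()/xdr_getpos() calls — of encoding one request into a flat buffer. The helper name encode_sync_msg, the buffer handling, and the field values are illustrative, not part of the Berkeley DB sources.

#include <rpc/rpc.h>
#include <rpc/xdr.h>

#include "db_server.h"	/* generated message/reply types */

/* Hypothetical helper: encode a __db_sync_msg into a caller-supplied buffer. */
int
encode_sync_msg(char *buf, u_int buflen, u_int *lenp)
{
	XDR xdrs;
	__db_sync_msg msg;

	msg.dbpcl_id = 1;		/* example client-side DB handle id */
	msg.flags = 0;

	/* Serialize the request with the generated filter. */
	xdrmem_create(&xdrs, buf, buflen, XDR_ENCODE);
	if (!xdr___db_sync_msg(&xdrs, &msg))
		return (-1);
	*lenp = xdr_getpos(&xdrs);	/* number of bytes produced */
	xdr_destroy(&xdrs);
	return (0);
}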
diff --git a/bdb/rpc_server/gen_db_server.c b/bdb/rpc_server/gen_db_server.c
new file mode 100644
index 00000000000..20da69c4b1b
--- /dev/null
+++ b/bdb/rpc_server/gen_db_server.c
@@ -0,0 +1,703 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+#include <rpc/xdr.h>
+
+#include <errno.h>
+#include <string.h>
+#endif
+#include "db_server.h"
+
+#include "db_int.h"
+#include "db_server_int.h"
+#include "rpc_server_ext.h"
+
+#include "gen_server_ext.h"
+
+__env_cachesize_reply *
+__db_env_cachesize_1(req)
+ __env_cachesize_msg *req;
+{
+ static __env_cachesize_reply reply; /* must be static */
+
+ __env_cachesize_1_proc(req->dbenvcl_id,
+ req->gbytes,
+ req->bytes,
+ req->ncache,
+ &reply);
+
+ return (&reply);
+}
+
+__env_close_reply *
+__db_env_close_1(req)
+ __env_close_msg *req;
+{
+ static __env_close_reply reply; /* must be static */
+
+ __env_close_1_proc(req->dbenvcl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__env_create_reply *
+__db_env_create_1(req)
+ __env_create_msg *req;
+{
+ static __env_create_reply reply; /* must be static */
+
+ __env_create_1_proc(req->timeout,
+ &reply);
+
+ return (&reply);
+}
+
+__env_flags_reply *
+__db_env_flags_1(req)
+ __env_flags_msg *req;
+{
+ static __env_flags_reply reply; /* must be static */
+
+ __env_flags_1_proc(req->dbenvcl_id,
+ req->flags,
+ req->onoff,
+ &reply);
+
+ return (&reply);
+}
+
+__env_open_reply *
+__db_env_open_1(req)
+ __env_open_msg *req;
+{
+ static __env_open_reply reply; /* must be static */
+
+ __env_open_1_proc(req->dbenvcl_id,
+ (*req->home == '\0') ? NULL : req->home,
+ req->flags,
+ req->mode,
+ &reply);
+
+ return (&reply);
+}
+
+__env_remove_reply *
+__db_env_remove_1(req)
+ __env_remove_msg *req;
+{
+ static __env_remove_reply reply; /* must be static */
+
+ __env_remove_1_proc(req->dbenvcl_id,
+ (*req->home == '\0') ? NULL : req->home,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__txn_abort_reply *
+__db_txn_abort_1(req)
+ __txn_abort_msg *req;
+{
+ static __txn_abort_reply reply; /* must be static */
+
+ __txn_abort_1_proc(req->txnpcl_id,
+ &reply);
+
+ return (&reply);
+}
+
+__txn_begin_reply *
+__db_txn_begin_1(req)
+ __txn_begin_msg *req;
+{
+ static __txn_begin_reply reply; /* must be static */
+
+ __txn_begin_1_proc(req->envpcl_id,
+ req->parentcl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__txn_commit_reply *
+__db_txn_commit_1(req)
+ __txn_commit_msg *req;
+{
+ static __txn_commit_reply reply; /* must be static */
+
+ __txn_commit_1_proc(req->txnpcl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__db_bt_maxkey_reply *
+__db_db_bt_maxkey_1(req)
+ __db_bt_maxkey_msg *req;
+{
+ static __db_bt_maxkey_reply reply; /* must be static */
+
+ __db_bt_maxkey_1_proc(req->dbpcl_id,
+ req->maxkey,
+ &reply);
+
+ return (&reply);
+}
+
+__db_bt_minkey_reply *
+__db_db_bt_minkey_1(req)
+ __db_bt_minkey_msg *req;
+{
+ static __db_bt_minkey_reply reply; /* must be static */
+
+ __db_bt_minkey_1_proc(req->dbpcl_id,
+ req->minkey,
+ &reply);
+
+ return (&reply);
+}
+
+__db_close_reply *
+__db_db_close_1(req)
+ __db_close_msg *req;
+{
+ static __db_close_reply reply; /* must be static */
+
+ __db_close_1_proc(req->dbpcl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__db_create_reply *
+__db_db_create_1(req)
+ __db_create_msg *req;
+{
+ static __db_create_reply reply; /* must be static */
+
+ __db_create_1_proc(req->flags,
+ req->envpcl_id,
+ &reply);
+
+ return (&reply);
+}
+
+__db_del_reply *
+__db_db_del_1(req)
+ __db_del_msg *req;
+{
+ static __db_del_reply reply; /* must be static */
+
+ __db_del_1_proc(req->dbpcl_id,
+ req->txnpcl_id,
+ req->keydlen,
+ req->keydoff,
+ req->keyflags,
+ req->keydata.keydata_val,
+ req->keydata.keydata_len,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__db_extentsize_reply *
+__db_db_extentsize_1(req)
+ __db_extentsize_msg *req;
+{
+ static __db_extentsize_reply reply; /* must be static */
+
+ __db_extentsize_1_proc(req->dbpcl_id,
+ req->extentsize,
+ &reply);
+
+ return (&reply);
+}
+
+__db_flags_reply *
+__db_db_flags_1(req)
+ __db_flags_msg *req;
+{
+ static __db_flags_reply reply; /* must be static */
+
+ __db_flags_1_proc(req->dbpcl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__db_get_reply *
+__db_db_get_1(req)
+ __db_get_msg *req;
+{
+ static __db_get_reply reply; /* must be static */
+ static int __db_get_free = 0; /* must be static */
+
+ if (__db_get_free)
+ xdr_free((xdrproc_t)xdr___db_get_reply, (void *)&reply);
+ __db_get_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __db_get_1_proc(req->dbpcl_id,
+ req->txnpcl_id,
+ req->keydlen,
+ req->keydoff,
+ req->keyflags,
+ req->keydata.keydata_val,
+ req->keydata.keydata_len,
+ req->datadlen,
+ req->datadoff,
+ req->dataflags,
+ req->datadata.datadata_val,
+ req->datadata.datadata_len,
+ req->flags,
+ &reply,
+ &__db_get_free);
+ return (&reply);
+}
+
+__db_h_ffactor_reply *
+__db_db_h_ffactor_1(req)
+ __db_h_ffactor_msg *req;
+{
+ static __db_h_ffactor_reply reply; /* must be static */
+
+ __db_h_ffactor_1_proc(req->dbpcl_id,
+ req->ffactor,
+ &reply);
+
+ return (&reply);
+}
+
+__db_h_nelem_reply *
+__db_db_h_nelem_1(req)
+ __db_h_nelem_msg *req;
+{
+ static __db_h_nelem_reply reply; /* must be static */
+
+ __db_h_nelem_1_proc(req->dbpcl_id,
+ req->nelem,
+ &reply);
+
+ return (&reply);
+}
+
+__db_key_range_reply *
+__db_db_key_range_1(req)
+ __db_key_range_msg *req;
+{
+ static __db_key_range_reply reply; /* must be static */
+
+ __db_key_range_1_proc(req->dbpcl_id,
+ req->txnpcl_id,
+ req->keydlen,
+ req->keydoff,
+ req->keyflags,
+ req->keydata.keydata_val,
+ req->keydata.keydata_len,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__db_lorder_reply *
+__db_db_lorder_1(req)
+ __db_lorder_msg *req;
+{
+ static __db_lorder_reply reply; /* must be static */
+
+ __db_lorder_1_proc(req->dbpcl_id,
+ req->lorder,
+ &reply);
+
+ return (&reply);
+}
+
+__db_open_reply *
+__db_db_open_1(req)
+ __db_open_msg *req;
+{
+ static __db_open_reply reply; /* must be static */
+
+ __db_open_1_proc(req->dbpcl_id,
+ (*req->name == '\0') ? NULL : req->name,
+ (*req->subdb == '\0') ? NULL : req->subdb,
+ req->type,
+ req->flags,
+ req->mode,
+ &reply);
+
+ return (&reply);
+}
+
+__db_pagesize_reply *
+__db_db_pagesize_1(req)
+ __db_pagesize_msg *req;
+{
+ static __db_pagesize_reply reply; /* must be static */
+
+ __db_pagesize_1_proc(req->dbpcl_id,
+ req->pagesize,
+ &reply);
+
+ return (&reply);
+}
+
+__db_put_reply *
+__db_db_put_1(req)
+ __db_put_msg *req;
+{
+ static __db_put_reply reply; /* must be static */
+ static int __db_put_free = 0; /* must be static */
+
+ if (__db_put_free)
+ xdr_free((xdrproc_t)xdr___db_put_reply, (void *)&reply);
+ __db_put_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+
+ __db_put_1_proc(req->dbpcl_id,
+ req->txnpcl_id,
+ req->keydlen,
+ req->keydoff,
+ req->keyflags,
+ req->keydata.keydata_val,
+ req->keydata.keydata_len,
+ req->datadlen,
+ req->datadoff,
+ req->dataflags,
+ req->datadata.datadata_val,
+ req->datadata.datadata_len,
+ req->flags,
+ &reply,
+ &__db_put_free);
+ return (&reply);
+}
+
+__db_re_delim_reply *
+__db_db_re_delim_1(req)
+ __db_re_delim_msg *req;
+{
+ static __db_re_delim_reply reply; /* must be static */
+
+ __db_re_delim_1_proc(req->dbpcl_id,
+ req->delim,
+ &reply);
+
+ return (&reply);
+}
+
+__db_re_len_reply *
+__db_db_re_len_1(req)
+ __db_re_len_msg *req;
+{
+ static __db_re_len_reply reply; /* must be static */
+
+ __db_re_len_1_proc(req->dbpcl_id,
+ req->len,
+ &reply);
+
+ return (&reply);
+}
+
+__db_re_pad_reply *
+__db_db_re_pad_1(req)
+ __db_re_pad_msg *req;
+{
+ static __db_re_pad_reply reply; /* must be static */
+
+ __db_re_pad_1_proc(req->dbpcl_id,
+ req->pad,
+ &reply);
+
+ return (&reply);
+}
+
+__db_remove_reply *
+__db_db_remove_1(req)
+ __db_remove_msg *req;
+{
+ static __db_remove_reply reply; /* must be static */
+
+ __db_remove_1_proc(req->dbpcl_id,
+ (*req->name == '\0') ? NULL : req->name,
+ (*req->subdb == '\0') ? NULL : req->subdb,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__db_rename_reply *
+__db_db_rename_1(req)
+ __db_rename_msg *req;
+{
+ static __db_rename_reply reply; /* must be static */
+
+ __db_rename_1_proc(req->dbpcl_id,
+ (*req->name == '\0') ? NULL : req->name,
+ (*req->subdb == '\0') ? NULL : req->subdb,
+ (*req->newname == '\0') ? NULL : req->newname,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__db_stat_reply *
+__db_db_stat_1(req)
+ __db_stat_msg *req;
+{
+ static __db_stat_reply reply; /* must be static */
+ static int __db_stat_free = 0; /* must be static */
+
+ if (__db_stat_free)
+ xdr_free((xdrproc_t)xdr___db_stat_reply, (void *)&reply);
+ __db_stat_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.statslist = NULL;
+
+ __db_stat_1_proc(req->dbpcl_id,
+ req->flags,
+ &reply,
+ &__db_stat_free);
+ return (&reply);
+}
+
+__db_swapped_reply *
+__db_db_swapped_1(req)
+ __db_swapped_msg *req;
+{
+ static __db_swapped_reply reply; /* must be static */
+
+ __db_swapped_1_proc(req->dbpcl_id,
+ &reply);
+
+ return (&reply);
+}
+
+__db_sync_reply *
+__db_db_sync_1(req)
+ __db_sync_msg *req;
+{
+ static __db_sync_reply reply; /* must be static */
+
+ __db_sync_1_proc(req->dbpcl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__db_cursor_reply *
+__db_db_cursor_1(req)
+ __db_cursor_msg *req;
+{
+ static __db_cursor_reply reply; /* must be static */
+
+ __db_cursor_1_proc(req->dbpcl_id,
+ req->txnpcl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+int __db_db_join_curslist __P((__db_join_curslist *, u_int32_t **));
+void __db_db_join_cursfree __P((u_int32_t *));
+
+__db_join_reply *
+__db_db_join_1(req)
+ __db_join_msg *req;
+{
+ u_int32_t *__db_curslist;
+ int ret;
+ static __db_join_reply reply; /* must be static */
+
+ if ((ret = __db_db_join_curslist(req->curslist, &__db_curslist)) != 0)
+ goto out;
+
+ __db_join_1_proc(req->dbpcl_id,
+ __db_curslist,
+ req->flags,
+ &reply);
+
+ __db_db_join_cursfree(__db_curslist);
+
+out:
+ return (&reply);
+}
+
+int
+__db_db_join_curslist(locp, ppp)
+ __db_join_curslist *locp;
+ u_int32_t **ppp;
+{
+ u_int32_t *pp;
+ int cnt, ret, size;
+ __db_join_curslist *nl;
+
+ for (cnt = 0, nl = locp; nl != NULL; cnt++, nl = nl->next)
+ ;
+
+ if (cnt == 0) {
+ *ppp = NULL;
+ return (0);
+ }
+ size = sizeof(*pp) * (cnt + 1);
+ if ((ret = __os_malloc(NULL, size, NULL, ppp)) != 0)
+ return (ret);
+ memset(*ppp, 0, size);
+ for (pp = *ppp, nl = locp; nl != NULL; nl = nl->next, pp++) {
+ *pp = *(u_int32_t *)nl->ent.ent_val;
+ }
+ return (0);
+}
+
+void
+__db_db_join_cursfree(pp)
+ u_int32_t *pp;
+{
+ size_t size;
+ u_int32_t *p;
+
+ if (pp == NULL)
+ return;
+ size = sizeof(*p);
+ for (p = pp; *p != 0; p++) {
+ size += sizeof(*p);
+ }
+ __os_free(pp, size);
+}
+
+__dbc_close_reply *
+__db_dbc_close_1(req)
+ __dbc_close_msg *req;
+{
+ static __dbc_close_reply reply; /* must be static */
+
+ __dbc_close_1_proc(req->dbccl_id,
+ &reply);
+
+ return (&reply);
+}
+
+__dbc_count_reply *
+__db_dbc_count_1(req)
+ __dbc_count_msg *req;
+{
+ static __dbc_count_reply reply; /* must be static */
+
+ __dbc_count_1_proc(req->dbccl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__dbc_del_reply *
+__db_dbc_del_1(req)
+ __dbc_del_msg *req;
+{
+ static __dbc_del_reply reply; /* must be static */
+
+ __dbc_del_1_proc(req->dbccl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__dbc_dup_reply *
+__db_dbc_dup_1(req)
+ __dbc_dup_msg *req;
+{
+ static __dbc_dup_reply reply; /* must be static */
+
+ __dbc_dup_1_proc(req->dbccl_id,
+ req->flags,
+ &reply);
+
+ return (&reply);
+}
+
+__dbc_get_reply *
+__db_dbc_get_1(req)
+ __dbc_get_msg *req;
+{
+ static __dbc_get_reply reply; /* must be static */
+ static int __dbc_get_free = 0; /* must be static */
+
+ if (__dbc_get_free)
+ xdr_free((xdrproc_t)xdr___dbc_get_reply, (void *)&reply);
+ __dbc_get_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __dbc_get_1_proc(req->dbccl_id,
+ req->keydlen,
+ req->keydoff,
+ req->keyflags,
+ req->keydata.keydata_val,
+ req->keydata.keydata_len,
+ req->datadlen,
+ req->datadoff,
+ req->dataflags,
+ req->datadata.datadata_val,
+ req->datadata.datadata_len,
+ req->flags,
+ &reply,
+ &__dbc_get_free);
+ return (&reply);
+}
+
+__dbc_put_reply *
+__db_dbc_put_1(req)
+ __dbc_put_msg *req;
+{
+ static __dbc_put_reply reply; /* must be static */
+ static int __dbc_put_free = 0; /* must be static */
+
+ if (__dbc_put_free)
+ xdr_free((xdrproc_t)xdr___dbc_put_reply, (void *)&reply);
+ __dbc_put_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+
+ __dbc_put_1_proc(req->dbccl_id,
+ req->keydlen,
+ req->keydoff,
+ req->keyflags,
+ req->keydata.keydata_val,
+ req->keydata.keydata_len,
+ req->datadlen,
+ req->datadoff,
+ req->dataflags,
+ req->datadata.datadata_val,
+ req->datadata.datadata_len,
+ req->flags,
+ &reply,
+ &__dbc_put_free);
+ return (&reply);
+}
+
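How these handlers get invoked is not shown in this diff; the dispatch routine is produced separately by rpcgen and is not part of this file. As a rough sketch under that assumption, using only standard ONC RPC calls (svc_getargs(), svc_sendreply(), svc_freeargs()), a db_sync request would be driven roughly as below; the function name handle_db_sync and the transp argument are illustrative only.

#include <rpc/rpc.h>
#include <string.h>

#include "db_server.h"

/* Hypothetical fragment of an rpcgen-style dispatcher for db_sync. */
void
handle_db_sync(SVCXPRT *transp)
{
	__db_sync_msg argument;
	__db_sync_reply *result;

	memset(&argument, 0, sizeof(argument));
	if (!svc_getargs(transp,
	    (xdrproc_t)xdr___db_sync_msg, (caddr_t)&argument)) {
		svcerr_decode(transp);		/* malformed request */
		return;
	}

	/* The handler fills in and returns its static reply structure. */
	result = __db_db_sync_1(&argument);

	if (result != NULL && !svc_sendreply(transp,
	    (xdrproc_t)xdr___db_sync_reply, (caddr_t)result))
		svcerr_systemerr(transp);

	(void)svc_freeargs(transp,
	    (xdrproc_t)xdr___db_sync_msg, (caddr_t)&argument);
}

The static reply plus the *_free flag in routines such as __db_db_get_1() exists so that, on the next call, any memory the previous reply handed to the XDR layer can be released with xdr_free() before the structure is reused.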
diff --git a/bdb/rpc_server/rpc.src b/bdb/rpc_server/rpc.src
new file mode 100644
index 00000000000..5dd25205136
--- /dev/null
+++ b/bdb/rpc_server/rpc.src
@@ -0,0 +1,599 @@
+#
+# $Id: rpc.src,v 1.30 2000/12/20 21:53:05 ubell Exp $
+# Syntax:
+# BEGIN function_name RPC # {CODE | NOCLNTCODE | RETCODE | NOFUNC}
+# CODE: generate XDR and client code, return status
+# Used for functions that just return a status and nothing else.
+# RETCODE:generate XDR and client code, call return function
+# (generate template return function)
+#	Used for functions that return data.
+# NOCLNTCODE: generate only XDR and server functions
+# Used for functions that are "different" on the client.
+# Primarily used for envcreate (which is called from
+# the dbenv->set_server method on the client side) and
+# dbcreate, which is called from non-generated code.
+# NOFUNC: generate a client "unsupported function" with right args
+# Used for unsupported functions.
+#
+# ARG {IGNORE | STRING | INT | DBT | LIST | ID | CONST} C-type varname
+# IGNORE: not passed to server
+# STRING: string passed to server
+# DBT: DBT arg passed to server
+# LIST: opaque list passed to server (NULL-terminated opaque list)
+# INT: integer passed to server
+# ID: cl_id from arg passed to server
+# CONST: do not generate COMPQUIET (for NOFUNC only)
+# FUNCPROT prototype
+# FUNCARG functiontype
+# These two *MUST* go together and FUNCPROT *MUST* be first. These
+# are for the tricky user-supplied functions to some methods. They
+# are not supported in RPC, so will be ignored, but the complicated
+# syntax of their argument requires we have a special flag for them
+# that contains the verbatim text to use in the prototype and the
+# c-type, respectively. The FUNCARG must include the function, and
+# must call it 'funcN', where N is the count of functions. Almost
+# always it must be func0. A *very* few methods have more than one
+# user-supplied function; in those cases, it must be func0, func1, etc.
+#
+# All messages automatically return "status" and return that from
+# the call to the function. RET's are additional things the server
+# may return. RET is like ARG but does not need the IGNORE option.
+# RET {STRING | INT | DBT | LIST | ID} varname [STRING | INT | ID]
+# STRING: string from server
+# DBT: DBT arg from server
+# LIST: opaque list from server (NULL-terminated opaque list)
+# Must have list type of STRING, ID or INT specified
+# INT: integer from server
+# ID: id from server stored in cl_id
+# END function end.
+
+#
+# Environment functions
+#
+BEGIN env_cachesize 1 CODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t gbytes
+ARG INT u_int32_t bytes
+ARG INT int ncache
+END
+BEGIN env_close 1 RETCODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+END
+BEGIN env_create 1 NOCLNTCODE
+ARG INT long timeout
+RET ID long env
+END
+BEGIN set_data_dir 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN env_set_feedback 1 NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void (*)(DB_ENV *, int, int)
+FUNCARG void (*func0) __P((DB_ENV *, int, int))
+END
+BEGIN env_flags 1 CODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+ARG INT int onoff
+END
+BEGIN set_lg_bsize 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t bsize
+END
+BEGIN set_lg_dir 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN set_lg_max 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_conflict 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int8_t * conflicts
+ARG INT int modes
+END
+BEGIN set_lk_detect 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t detect
+END
+BEGIN set_lk_max 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_locks 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_lockers 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_objects 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_mp_mmapsize 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT size_t mmapsize
+END
+BEGIN set_mutex_locks 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int do_lock
+END
+BEGIN env_open 1 RETCODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * home
+ARG INT u_int32_t flags
+ARG INT int mode
+END
+BEGIN env_paniccall 1 NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void (*)(DB_ENV *, int)
+FUNCARG void (*func0) __P((DB_ENV *, int))
+END
+BEGIN set_recovery_init 1 NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT int (*)(DB_ENV *)
+FUNCARG int (*func0) __P((DB_ENV *))
+END
+BEGIN env_remove 1 RETCODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * home
+ARG INT u_int32_t flags
+END
+BEGIN set_shm_key 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT long shm_key
+END
+BEGIN set_tmp_dir 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN set_tx_recover 1 NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)
+FUNCARG int (*func0) __P((DB_ENV *, DBT *, DB_LSN *, db_recops))
+END
+BEGIN set_tx_max 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_tx_timestamp 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT time_t * max
+END
+BEGIN set_verbose 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t which
+ARG INT int onoff
+END
+#
+# Transaction functions
+#
+BEGIN txn_abort 1 RETCODE
+ARG ID DB_TXN * txnp
+END
+BEGIN txn_begin 1 RETCODE
+ARG ID DB_ENV * envp
+ARG ID DB_TXN * parent
+ARG IGNORE DB_TXN ** txnpp
+ARG INT u_int32_t flags
+RET ID long txnid
+END
+BEGIN txn_checkpoint 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t kbyte
+ARG INT u_int32_t min
+END
+BEGIN txn_commit 1 RETCODE
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t flags
+END
+BEGIN txn_prepare 1 NOFUNC
+ARG ID DB_TXN * txnp
+END
+BEGIN txn_stat 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_TXN_STAT ** statp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+END
+
+#
+# Database functions
+#
+BEGIN db_bt_compare 1 NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_bt_maxkey 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t maxkey
+END
+BEGIN db_bt_minkey 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t minkey
+END
+BEGIN db_bt_prefix 1 NOFUNC
+ARG ID DB * dbp
+FUNCPROT size_t(*)(DB *, const DBT *, const DBT *)
+FUNCARG size_t (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_set_append_recno 1 NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, DBT *, db_recno_t)
+FUNCARG int (*func0) __P((DB *, DBT *, db_recno_t))
+END
+BEGIN db_cachesize 1 NOFUNC
+ARG ID DB * dbp
+ARG INT u_int32_t gbytes
+ARG INT u_int32_t bytes
+ARG INT int ncache
+END
+BEGIN db_close 1 RETCODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_create 1 NOCLNTCODE
+ARG INT u_int32_t flags
+ARG ID DB_ENV * envp
+RET ID long dbp
+END
+BEGIN db_del 1 CODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG INT u_int32_t flags
+END
+BEGIN db_extentsize 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t extentsize
+END
+BEGIN db_fd 1 NOFUNC
+ARG ID DB * dbp
+ARG IGNORE int * fdp
+END
+BEGIN db_feedback 1 NOFUNC
+ARG ID DB * dbp
+FUNCPROT void (*)(DB *, int, int)
+FUNCARG void (*func0) __P((DB *, int, int))
+END
+BEGIN db_flags 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_get 1 RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+RET DBT DBT * data
+END
+BEGIN db_h_ffactor 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t ffactor
+END
+BEGIN db_h_hash 1 NOFUNC
+ARG ID DB * dbp
+FUNCPROT u_int32_t(*)(DB *, const void *, u_int32_t)
+FUNCARG u_int32_t (*func0) __P((DB *, const void *, u_int32_t))
+END
+BEGIN db_h_nelem 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t nelem
+END
+BEGIN db_key_range 1 RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG IGNORE DB_KEY_RANGE * range
+ARG INT u_int32_t flags
+RET DBL double less
+RET DBL double equal
+RET DBL double greater
+END
+BEGIN db_lorder 1 CODE
+ARG ID DB * dbp
+ARG INT int lorder
+END
+BEGIN db_malloc 1 NOFUNC
+ARG ID DB * dbp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+END
+# XXX
+# The line:
+# RET INT u_int32_t dbflags
+# should go away when a get_flags method exists. It is
+# needed now because Tcl looks at dbp->flags.
+#
+BEGIN db_open 1 RETCODE
+ARG ID DB * dbp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT DBTYPE type
+ARG INT u_int32_t flags
+ARG INT int mode
+RET INT DBTYPE type
+RET INT u_int32_t dbflags
+END
+BEGIN db_pagesize 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t pagesize
+END
+BEGIN db_panic 1 NOFUNC
+ARG ID DB * dbp
+FUNCPROT void (*)(DB_ENV *, int)
+FUNCARG void (*func0) __P((DB_ENV *, int))
+END
+BEGIN db_put 1 RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+END
+BEGIN db_realloc 1 NOFUNC
+ARG ID DB * dbp
+FUNCPROT void *(*)(void *, size_t)
+FUNCARG void *(*func0) __P((void *, size_t))
+END
+BEGIN db_re_delim 1 CODE
+ARG ID DB * dbp
+ARG INT int delim
+END
+BEGIN db_re_len 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t len
+END
+BEGIN db_re_pad 1 CODE
+ARG ID DB * dbp
+ARG INT int pad
+END
+BEGIN db_re_source 1 NOFUNC
+ARG ID DB * dbp
+ARG STRING const char * re_source
+END
+BEGIN db_remove 1 RETCODE
+ARG ID DB * dbp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT u_int32_t flags
+END
+BEGIN db_rename 1 RETCODE
+ARG ID DB * dbp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG STRING const char * newname
+ARG INT u_int32_t flags
+END
+BEGIN db_stat 1 RETCODE
+ARG ID DB * dbp
+ARG IGNORE void * sp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+ARG INT u_int32_t flags
+RET LIST u_int32_t * stats INT
+END
+BEGIN db_swapped 1 CODE
+ARG ID DB * dbp
+END
+BEGIN db_sync 1 CODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_upgrade 1 NOFUNC
+ARG ID DB * dbp
+ARG STRING const char * fname
+ARG INT u_int32_t flags
+END
+#
+# Cursor functions
+#
+BEGIN db_cursor 1 RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG IGNORE DBC ** dbcpp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN db_join 1 RETCODE
+ARG ID DB * dbp
+ARG LIST DBC ** curs ID
+ARG IGNORE DBC ** dbcp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN dbc_close 1 RETCODE
+ARG ID DBC * dbc
+END
+BEGIN dbc_count 1 RETCODE
+ARG ID DBC * dbc
+ARG IGNORE db_recno_t * countp
+ARG INT u_int32_t flags
+RET INT db_recno_t dupcount
+END
+BEGIN dbc_del 1 CODE
+ARG ID DBC * dbc
+ARG INT u_int32_t flags
+END
+BEGIN dbc_dup 1 RETCODE
+ARG ID DBC * dbc
+ARG IGNORE DBC ** dbcp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN dbc_get 1 RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+RET DBT DBT * data
+END
+BEGIN dbc_put 1 RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+END
+
+#
+# Unsupported environment subsystems
+#
+#
+# Locking subsystem
+#
+BEGIN lock_detect 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+ARG INT u_int32_t atype
+ARG IGNORE int * aborted
+END
+BEGIN lock_get 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t locker
+ARG INT u_int32_t flags
+ARG CONST const DBT * obj
+ARG INT db_lockmode_t mode
+ARG IGNORE DB_LOCK * lock
+END
+BEGIN lock_id 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t * idp
+END
+BEGIN lock_put 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG ID DB_LOCK * lock
+END
+BEGIN lock_stat 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOCK_STAT ** statp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+END
+BEGIN lock_vec 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t locker
+ARG INT u_int32_t flags
+ARG IGNORE DB_LOCKREQ * list
+ARG INT int nlist
+ARG IGNORE DB_LOCKREQ ** elistp
+END
+#
+# Logging subsystem
+#
+BEGIN log_archive 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE char *** listp
+ARG INT u_int32_t flags
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+END
+#
+# Don't do log_compare. It doesn't have an env we can get at,
+# and it doesn't manipulate DB internal information.
+#
+BEGIN log_file 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG CONST const DB_LSN * lsn
+ARG STRING char * namep
+ARG INT size_t len
+END
+BEGIN log_flush 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG CONST const DB_LSN * lsn
+END
+BEGIN log_get 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LSN * lsn
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+END
+BEGIN log_put 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LSN * lsn
+ARG CONST const DBT * data
+ARG INT u_int32_t flags
+END
+BEGIN log_register 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG ID DB * dbp
+ARG CONST const char * namep
+END
+BEGIN log_stat 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOG_STAT ** statp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+END
+BEGIN log_unregister 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG ID DB * dbp
+END
+#
+# Mpool Subsystem
+#
+BEGIN memp_fclose 1 NOFUNC
+ARG ID DB_MPOOLFILE * mpf
+END
+BEGIN memp_fget 1 NOFUNC
+ARG ID DB_MPOOLFILE * mpf
+ARG IGNORE db_pgno_t * pgno
+ARG INT u_int32_t flags
+ARG IGNORE void ** pagep
+END
+BEGIN memp_fopen 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG CONST const char * file
+ARG INT u_int32_t flags
+ARG INT int mode
+ARG INT size_t pagesize
+ARG IGNORE DB_MPOOL_FINFO * finfop
+ARG IGNORE DB_MPOOLFILE ** mpf
+END
+BEGIN memp_fput 1 NOFUNC
+ARG ID DB_MPOOLFILE * mpf
+ARG IGNORE void * pgaddr
+ARG INT u_int32_t flags
+END
+BEGIN memp_fset 1 NOFUNC
+ARG ID DB_MPOOLFILE * mpf
+ARG IGNORE void * pgaddr
+ARG INT u_int32_t flags
+END
+BEGIN memp_fsync 1 NOFUNC
+ARG ID DB_MPOOLFILE * mpf
+END
+BEGIN memp_register 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int ftype
+FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *)
+FUNCARG int (*func0) __P((DB_ENV *, db_pgno_t, void *, DBT *))
+FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *)
+FUNCARG int (*func1) __P((DB_ENV *, db_pgno_t, void *, DBT *))
+END
+BEGIN memp_stat 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_MPOOL_STAT ** gstatp
+ARG IGNORE DB_MPOOL_FSTAT *** fstatp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+END
+BEGIN memp_sync 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LSN * lsn
+END
+BEGIN memp_trickle 1 NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int pct
+ARG IGNORE int * nwrotep
+END
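To tie this specification back to the generated sources earlier in this diff: each CODE entry yields a request/reply message pair, two XDR filters, and a server stub. For the db_sync entry, for example, the pieces line up as follows (prototypes reconstructed here as a reading aid; the authoritative declarations live in the generated db_server.h and extern headers):

#include <rpc/rpc.h>

#include "db_server.h"

/* BEGIN db_sync 1 CODE:  ARG ID DB *dbp;  ARG INT u_int32_t flags */
bool_t xdr___db_sync_msg(XDR *, __db_sync_msg *);	/* request filter */
bool_t xdr___db_sync_reply(XDR *, __db_sync_reply *);	/* status-only reply filter */
__db_sync_reply *__db_db_sync_1(__db_sync_msg *);	/* server stub; wraps __db_sync_1_proc() */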
diff --git a/bdb/tcl/docs/db.html b/bdb/tcl/docs/db.html
new file mode 100644
index 00000000000..c75ab6ecf4f
--- /dev/null
+++ b/bdb/tcl/docs/db.html
@@ -0,0 +1,266 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Database Commands"></A>Database Commands</H2>
+The database commands provide a conduit into the DB method functions.&nbsp;
+They are all fairly straightforward and I describe them in terms of their
+DB functions briefly here, with a link to the DB page where appropriate.&nbsp;
+The first set of commands are those I believe will be the primary functions
+used by most databases.&nbsp; Some are directly related to their DB counterparts,
+and some are higher-level functions provided for the user's convenience.
+<P><B>> berkdb open [-env <I>env</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-btree|-hash|-recno|-queue|-unknown]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-create] [-excl] [-nommap] [-rdonly] [-truncate]
+[-mode
+<I>mode</I>] [-errfile <I>filename</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-dup] [-dupsort] [-recnum] [-renumber] [-revsplitoff]
+[-snapshot]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-extent <I>size</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-ffactor <I>density</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-nelem <I>size</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-lorder <I>order</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-delim <I>delim</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-len <I>len</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-pad <I>pad</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-source <I>file</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-minkey <I>minkey</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-cachesize {<I>gbytes bytes ncaches</I>}]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-pagesize <I>pagesize</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [--]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [<I>filename </I>[<I>subdbname</I>]]</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_create.html">db_create</A>
+function.&nbsp; If the command is given the <B>-env</B> option, then we
+will accordingly create the database within the context of that environment.&nbsp;
+After it successfully gets a handle to a database, we bind it to a new
+Tcl command of the form <B><I>dbX, </I></B>where X is an integer starting
+at 0 (e.g. <B>db0, db1, </B>etc).&nbsp; We use the <I>Tcl_CreateObjCommand()&nbsp;</I>
+to create the top level database function.&nbsp; It is through this handle
+that the user can access all of the commands described in the <A HREF="#Database Commands">Database
+Commands</A> section.&nbsp; Internally, the database handle is sent as
+the <I>ClientData</I> portion of the new command set so that all future
+database calls access the appropriate handle.
+<P>After parsing all of the optional arguments affecting the setup of the
+database and making the appropriate calls to DB to manipulate those values,
+we open the database for the user.&nbsp; This translates to the
+<A HREF="../../docs/api_c/db_open.html">DB->open</A>
+method call.&nbsp;
+We automatically set the DB_THREAD flag.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-- </B>- Terminate the list of options and use remaining arguments as
+the file or subdb names (thus allowing the use of filenames beginning with
+a dash '-')</LI>
+
+<LI>
+<B>-btree</B> - DB_BTREE database</LI>
+
+<LI>
+<B>-hash</B> -&nbsp; DB_HASH database</LI>
+
+<LI>
+<B>-recno&nbsp;</B> - DB_RECNO database</LI>
+
+<LI>
+<B>-queue</B> - DB_QUEUE database</LI>
+
+<LI>
+<B>-create</B> selects the DB_CREATE flag&nbsp; to create underlying files</LI>
+
+<LI>
+<B>-excl</B> selects the DB_EXCL flag&nbsp; to exclusively create underlying
+files</LI>
+
+<LI>
+<B>-nommap</B> selects the DB_NOMMAP flag to forbid mmaping of files</LI>
+
+<LI>
+<B>-rdonly</B> selects the DB_RDONLY flag for opening in read-only mode</LI>
+
+<LI>
+<B>-truncate</B> selects the DB_TRUNCATE flag to truncate the database</LI>
+
+<LI>
+<B>-mode<I> mode</I></B> specifies the mode for created files</LI>
+
+<LI>
+<B>-errfile </B>specifies the error file to use for this environment to
+<B><I>filename</I></B>
+by calling <A HREF="../../docs/api_c/db_set_errfile.html">DB->set_errfile</A><B><I>.
+</I></B>If
+the file already exists then we will append to the end of the file</LI>
+
+<LI>
+<B>-dup </B>selects the DB_DUP flag to permit duplicates in the database</LI>
+
+<LI>
+<B>-dupsort</B> selects the DB_DUPSORT flag to support sorted duplicates</LI>
+
+<LI>
+<B>-recnum</B> selects the DB_RECNUM flag to support record numbers in
+btrees</LI>
+
+<LI>
+<B>-renumber </B>selects the DB_RENUMBER flag to support mutable record
+numbers</LI>
+
+<LI>
+<B>-revsplitoff </B>selects the DB_REVSPLITOFF flag to suppress reverse
+splitting of pages on deletion</LI>
+
+<LI>
+<B>-snapshot </B>selects the DB_SNAPSHOT flag to support database snapshots</LI>
+
+<LI>
+<B>-extent </B>sets the size of a Queue database extent to the given <B><I>size
+</I></B>using
+the <A HREF="../../docs/api_c/db_set_q_extentsize.html">DB->set_q_extentsize</A>
+method</LI>
+
+<LI>
+<B>-ffactor</B> sets the hash table key density to the given <B><I>density
+</I></B>using
+the <A HREF="../../docs/api_c/db_set_h_ffactor.html">DB->set_h_ffactor</A>
+method</LI>
+
+<LI>
+<B>-nelem </B>sets the hash table size estimate to the given <B><I>size
+</I></B>using
+the <A HREF="../../docs/api_c/db_set_h_nelem.html">DB->set_h_nelem</A>
+method</LI>
+
+<LI>
+<B>-lorder </B>sets the byte order for integers stored in the database
+meta-data to the given <B><I>order</I></B> using the <A HREF="../../docs/api_c/db_set_lorder.html">DB->set_lorder</A>
+method</LI>
+
+<LI>
+<B>-delim </B>sets the delimiting byte for variable length records to
+<B><I>delim</I></B>
+using the <A HREF="../../docs/api_c/db_set_re_delim.html">DB->set_re_delim</A>
+method</LI>
+
+<LI>
+<B>-len </B>sets the length of fixed-length records to <B><I>len</I></B>
+using the <A HREF="../../docs/api_c/db_set_re_len.html">DB->set_re_len</A>
+method</LI>
+
+<LI>
+<B>-pad </B>sets the pad character used for fixed length records to
+<B><I>pad</I></B>&nbsp;
+using the <A HREF="../../docs/api_c/db_set_re_pad.html">DB->set_re_pad</A> method</LI>
+
+<LI>
+<B>-source </B>sets the backing source file name to <B><I>file</I></B>
+using the <A HREF="../../docs/api_c/db_set_re_source.html">DB->set_re_source</A>
+method</LI>
+
+<LI>
+<B>-minkey </B>sets the minimum number of keys per Btree page to <B><I>minkey</I></B>
+using the <A HREF="../../docs/api_c/db_set_bt_minkey.html">DB->set_bt_minkey</A>
+method</LI>
+
+<LI>
+<B>-cachesize </B>sets the size of the database cache to the size&nbsp;
+specified by <B><I>gbytes </I></B>and <B><I>bytes, </I></B>broken up into
+<B><I>ncaches</I></B>
+number of caches using the <A HREF="../../docs/api_c/db_set_cachesize.html">DB->set_cachesize</A>
+method</LI>
+
+<LI>
+<B>-pagesize </B>sets the size of the database page to <B><I>pagesize </I></B>using
+the <A HREF="../../docs/api_c/db_set_pagesize.html">DB->set_pagesize</A>
+method</LI>
+
+<LI>
+<B><I>filename</I></B> indicates the name of the database</LI>
+
+<LI>
+<B><I>subdbname</I></B> indicates the name of the sub-database</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>&nbsp;berkdb upgrade [-dupsort] [-env <I>env</I>] [--] [<I>filename</I>]</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_upgrade.html">DB->upgrade</A>
+function.&nbsp; If the command is given the <B>-env</B> option, then we
+will accordingly upgrade the database filename within the context of that
+environment. The <B>-dupsort</B> option selects the DB_DUPSORT flag for
+upgrading. The use of --<B> </B>terminates the list of options, thus allowing
+filenames beginning with a dash.
+<P>
+<HR WIDTH="100%"><B>> berkdb verify [-env <I>env</I>] [--] [<I>filename</I>]</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_verify.html">DB->verify</A>
+function.&nbsp; If the command is given the <B>-env</B> option, then we
+will accordingly verify the database filename within the context of that
+environment.&nbsp; The use of --<B> </B>terminates the list of options,
+thus allowing filenames beginning with a dash.
+<P>
+<HR WIDTH="100%"><B>> <I>db</I> join [-nosort] <I>db0.c0 db1.c0</I> ...</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_join.html">db_join</A>
+function.&nbsp; After it successfully joins a database, we bind it to a
+new Tcl command of the form <B><I>dbN.cX, </I></B>where X is an integer
+starting at 0 (e.g. <B>db2.c0, db3.c0, </B>etc).&nbsp; We use the <I>Tcl_CreateObjCommand()&nbsp;</I>
+to create the top level database function.&nbsp; It is through this cursor
+handle that the user can access the joined data items.
+<P>The options are:
+<UL>
+<LI>
+<B>-nosort -</B> This flag causes DB not to sort the cursors based on the
+number of data items they reference.&nbsp; It results in the DB_JOIN_NOSORT
+flag being set.</LI>
+</UL>
+
+<HR WIDTH="100%"><B>> <I>db</I> get_join [-nosort] {db key} {db key} ...</B>
+<P>This command performs a join operation on the keys specified and returns
+a list of the joined {key data} pairs.
+<P>The options are:
+<UL>
+<LI>
+<B>-nosort</B> This flag causes DB not to sort the cursors based on the
+number of data items they reference.&nbsp; It results in the DB_JOIN_NOSORT
+flag being set.</LI>
+</UL>
+
+<HR WIDTH="100%"><B>> <I>db</I> keyrange [-txn <I>id</I>] key</B>
+<P>This command returns the range for the given <B>key</B>.&nbsp; It returns
+a list of 3 double elements of the form {<B><I>less equal greater</I></B>}
+where <B><I>less</I></B> is the percentage of keys less than the given
+key, <B><I>equal</I></B> is the percentage equal to the given key and <B><I>greater</I></B>
+is the percentage greater than the given key.&nbsp; If the -txn option
+is specified it performs this operation under transaction protection.
+<BR>
+<HR WIDTH="100%"><B>> <I>db</I> put</B>
+<P>The <B>undocumented</B> options are:
+<UL>
+<LI>
+<B>-nodupdata</B> This flag causes DB not to insert the key/data pair if
+it already exists, that is, both the key and data items are already in
+the database. The -nodupdata flag may only be specified if the underlying
+database has been configured to support sorted duplicates.</LI>
+</UL>
+
+<HR WIDTH="100%"><B>> <I>db</I> stat</B>
+<P>The <B>undocumented</B> options are:
+<UL>
+<LI>
+<B>-cachedcounts</B> This flag causes DB to return the cached key/record
+counts, similar to the DB_CACHED_COUNTS flags to DB->stat.</LI>
+</UL>
+
+<HR WIDTH="100%"><B>> <I>dbc</I> put</B>
+<P>The <B>undocumented</B> options are:
+<UL>
+<LI>
+<B>-nodupdata</B> This flag causes DB not to insert the key/data pair if
+it already exists, that is, both the key and data items are already in
+the database. The -nodupdata flag may only be specified if the underlying
+database has been configured to support sorted duplicates.</LI>
+</UL>
+
+</BODY>
+</HTML>
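Both this page and env.html below describe the same binding pattern: an open handle is stored as the ClientData of a freshly created Tcl command (db0, env0, and so on) so that every later subcommand can reach it. A minimal sketch of that pattern using only the public Tcl C API follows; the names db_cmd_proc and bind_db_handle are illustrative, and the real binding code lives elsewhere under bdb/tcl/.

#include <tcl.h>

#include "db.h"

/* Command procedure behind a generated name such as "db0". */
static int
db_cmd_proc(ClientData clientData, Tcl_Interp *interp,
    int objc, Tcl_Obj *const objv[])
{
	DB *dbp;

	dbp = (DB *)clientData;	/* handle stored when the command was bound */
	/* objv[1] selects the subcommand ("get", "put", "cursor", ...). */
	return (TCL_OK);
}

/* Bind an open DB handle to a new top-level command name. */
static void
bind_db_handle(Tcl_Interp *interp, char *name, DB *dbp)
{
	(void)Tcl_CreateObjCommand(interp,
	    name, db_cmd_proc, (ClientData)dbp, NULL);
}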
diff --git a/bdb/tcl/docs/env.html b/bdb/tcl/docs/env.html
new file mode 100644
index 00000000000..a1bd08fd163
--- /dev/null
+++ b/bdb/tcl/docs/env.html
@@ -0,0 +1,303 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+Environment Commands</H2>
+Environments provide a structure for creating a consistent environment
+for processes using one or more of the features of Berkeley DB.&nbsp; Unlike
+some of the database commands, the environment commands are very low level.
+<BR>
+<HR WIDTH="100%">
+<P>The user may create and open a new DB environment&nbsp; by invoking:
+<P><B>> berkdb env</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-cdb] [-cdb_alldb] [-lock] [-log] [-txn [nosync]]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-create] [-home<I> directory</I>] [-mode <I>mode</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-data_dir <I>directory</I>] [-log_dir <I>directory</I>]
+[-tmp_dir <I>directory</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-nommap] [-private] [-recover] [-recover_fatal]
+[-system_mem] [-errfile <I>filename</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-use_environ] [-use_environ_root] [-verbose
+{<I>which </I>on|off}]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-region_init]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-cachesize {<I>gbytes bytes ncaches</I>}]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-mmapsize<I> size</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-log_max <I>max</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-log_buffer <I>size</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-lock_conflict {<I>nmodes </I>{<I>matrix</I>}}]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-lock_detect default|oldest|random|youngest]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-lock_max <I>max</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-lock_max_locks <I>max</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-lock_max_lockers <I>max</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-lock_max_objects <I>max</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-txn_max <I>max</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-client_timeout <I>seconds</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-server_timeout <I>seconds</I>]</B>
+<BR><B>&nbsp;&nbsp;&nbsp; [-server <I>hostname</I>]</B>
+<BR>&nbsp;
+<P>This command opens up an environment.&nbsp;&nbsp; We automatically set
+the DB_THREAD and the DB_INIT_MPOOL flags.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-cdb</B> selects the DB_INIT_CDB flag for Concurrent Data Store</LI>
+
+<LI>
+<B>-cdb_alldb</B> selects the DB_CDB_ALLDB flag for Concurrent Data Store</LI>
+
+<LI>
+<B>-lock</B> selects the DB_INIT_LOCK flag for the locking subsystem</LI>
+
+<LI>
+<B>-log</B> selects the DB_INIT_LOG flag for the logging subsystem</LI>
+
+<LI>
+<B>-txn</B> selects the DB_INIT_TXN, DB_INIT_LOCK and DB_INIT_LOG flags
+for the transaction subsystem.&nbsp; If <B>nosync</B> is specified, then
+it will also select DB_TXN_NOSYNC to indicate no flushes of log on commits</LI>
+
+<LI>
+<B>-create </B>selects the DB_CREATE flag to create underlying files</LI>
+
+<LI>
+<B>-home <I>directory </I></B>selects the home directory of the environment</LI>
+
+<LI>
+<B>-data_dir <I>directory </I></B>selects the data file directory of the
+environment by calling <A HREF="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</A>.</LI>
+
+<LI>
+<B>-log_dir <I>directory </I></B>selects the log file directory of the
+environment&nbsp; by calling <A HREF="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</A>.</LI>
+
+<LI>
+<B>-tmp_dir <I>directory </I></B>selects the temporary file directory of
+the environment&nbsp; by calling <A HREF="../../docs/api_c/env_set_tmp_dir.html">DBENV->set_tmp_dir</A>.</LI>
+
+<LI>
+<B>-mode <I>mode </I></B>sets the permissions of created files to <B><I>mode</I></B></LI>
+
+<LI>
+<B>-nommap</B> selects the DB_NOMMAP flag to disallow using mmap'ed files</LI>
+
+<LI>
+<B>-private</B> selects the DB_PRIVATE flag for a private environment</LI>
+
+<LI>
+<B>-recover</B> selects the DB_RECOVER flag for recovery</LI>
+
+<LI>
+<B>-recover_fatal</B> selects the DB_RECOVER_FATAL flag for catastrophic
+recovery</LI>
+
+<LI>
+<B>-system_mem</B> selects the DB_SYSTEM_MEM flag to use system memory</LI>
+
+<LI>
+<B>-errfile </B>specifies the error file to use for this environment to
+<B><I>filename</I></B>
+by calling <A HREF="../../docs/api_c/env_set_errfile.html">DBENV->set_errfile</A><B><I>.
+</I></B>If
+the file already exists then we will append to the end of the file</LI>
+
+<LI>
+<B>-use_environ</B> selects the DB_USE_ENVIRON flag to affect file naming</LI>
+
+<LI>
+<B>-use_environ_root</B> selects the DB_USE_ENVIRON_ROOT flag to have the
+root environment affect file naming</LI>
+
+<LI>
+<B>-verbose</B> produces verbose error output for the given <B><I>which</I></B> subsystem,
+using the <A HREF="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</A>
+method.&nbsp;&nbsp; See the description of <A HREF="#> <env> verbose which on|off">verbose</A>
+below for valid <B><I>which </I></B>values</LI>
+
+<LI>
+<B>-region_init </B>specifies that the user wants to page fault the region
+in on startup using the <A HREF="../../docs/api_c/env_set_region_init.html">DBENV->set_region_init</A>
+method call</LI>
+
+<LI>
+<B>-cachesize </B>sets the size of the database cache to the size&nbsp;
+specified by <B><I>gbytes </I></B>and <B><I>bytes, </I></B>broken up into
+<B><I>ncaches</I></B>
+number of caches using the <A HREF="../../docs/api_c/env_set_cachesize.html">DBENV->set_cachesize</A>
+method</LI>
+
+<LI>
+<B>-mmapsize </B>sets the maximum size of a file to be memory-mapped into the process address space to <B><I>size </I></B>using
+the <A HREF="../../docs/api_c/env_set_mp_mmapsize.html">DBENV->set_mp_mmapsize</A>
+method</LI>
+
+<LI>
+<B>-log_max </B>sets the maximum size of the log file to <B><I>max</I></B>
+using the <A HREF="../../docs/api_c/env_set_lg_max.html">DBENV->set_lg_max</A>
+call</LI>
+
+<LI>
+<B>-log_buffer </B>sets the size of the log buffer in bytes to <B><I>size</I></B>
+using the <A HREF="../../docs/api_c/env_set_lg_bsize.html">DBENV->set_lg_bsize</A>
+call</LI>
+
+<LI>
+<B>-lock_conflict </B>sets the number of lock modes to <B><I>nmodes</I></B>
+and sets the locking policy for those modes to the <B><I>conflict_matrix</I></B>
+given using the <A HREF="../../docs/api_c/env_set_lk_conflict.html">DBENV->set_lk_conflict</A>
+method call</LI>
+
+<LI>
+<B>-lock_detect </B>sets the deadlock detection policy to the given policy
+using the <A HREF="../../docs/env_set_lk_detect.html">DBENV->set_lk_detect</A>
+method call.&nbsp; The policy choices are:</LI>
+
+<UL>
+<LI>
+<B>default</B> selects the DB_LOCK_DEFAULT policy for default detection</LI>
+
+<LI>
+<B>oldest </B>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</LI>
+
+<LI>
+<B>random</B> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</LI>
+
+<LI>
+<B>youngest</B> selects DB_LOCK_YOUNGEST to abort the youngest locker on
+a deadlock</LI>
+</UL>
+
+<LI>
+<B>-lock_max </B>sets the maximum size of the lock table to <B><I>max </I></B>using
+the <A HREF="../../docs/api_c/env_set_lk_max.html">DBENV->set_lk_max</A>
+method call</LI>
+
+<LI>
+<B>-lock_max_locks </B>sets the maximum number of locks to <B><I>max </I></B>using
+the <A HREF="../../docs/api_c/env_set_lk_max_locks.html">DBENV->set_lk_max_locks</A>
+method call</LI>
+
+<LI>
+<B>-lock_max_lockers </B>sets the maximum number of locking entities to
+<B><I>max </I></B>using the <A HREF="../../docs/api_c/env_set_lk_max_lockers.html">DBENV->set_lk_max_lockers</A>
+method call</LI>
+
+<LI>
+<B>-lock_max_objects </B>sets the maximum number of simultaneously locked
+objects to <B><I>max </I></B>using the <A HREF="../../docs/api_c/env_set_lk_max_objects.html">DBENV->set_lk_max_objects</A>
+method call</LI>
+
+<LI>
+<B>-txn_max </B>sets the maximum size of the transaction table to <B><I>max</I></B>
+using the <A HREF="../../docs/api_c/env_set_txn_max.html">DBENV->set_txn_max</A>
+method call</LI>
+
+<LI>
+<B>-client_timeout</B> sets the timeout value for the client waiting for
+a reply from the server for RPC operations to <B><I>seconds</I></B>.</LI>
+
+<LI>
+<B>-server_timeout</B> sets the timeout value for the server to determine
+that an idle client is gone to <B><I>seconds</I></B>.</LI>
+
+<LI>
+<B>-server </B>specifies the <B><I>hostname</I></B> of the server
+to connect to in the <A HREF="../../docs/api_c/env_set_server.html">DBENV->set_server</A>
+call.</LI>
+</UL>
+This command will invoke the <A HREF="../../docs/api_c/env_create.html">db_env_create</A>
+function.&nbsp; After it successfully gets a handle to an environment,
+we bind it to a new Tcl command of the form <B><I>envX</I></B>, where X
+is an integer starting at&nbsp; 0 (e.g. <B>env0, env1, </B>etc).&nbsp;
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level environment
+command function.&nbsp; It is through this handle that the user can access
+all the commands described in the <A HREF="#Environment Commands">Environment
+Commands</A> section.&nbsp; Internally, the handle we get back from DB
+will be stored as the <I>ClientData</I> portion of the new command set
+so that all future environment calls will have that handle readily available.&nbsp;
+Then we call the <A HREF="../../docs/api_c/env_open.html">DBENV->open</A>
+method call and possibly some number of setup calls as described above.
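+<P>A minimal Tcl sketch of the life cycle just described (the environment
+creation command spelling and the home directory are assumptions from the
+synopsis above; the handle name is simply whatever DB hands back, and the
+verbose command used here is described below):
+<PRE>
+# Create a transactional environment, creating region files as needed.
+set env [berkdb env -create -txn -home /tmp/dbhome]
+
+# Turn on deadlock debugging output through the handle's verbose command.
+$env verbose deadlock on
+
+# Close the handle; its Tcl command is deleted as part of the close.
+$env close
+</PRE>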
+<P>
+<HR WIDTH="100%">
+<BR><A NAME="> <env> verbose which on|off"></A><B>> &lt;env> verbose <I>which</I>
+on|off</B>
+<P>This command controls the use of debugging output for the environment.&nbsp;
+This command directly translates to a call to the <A HREF="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</A>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; The user specifies
+<B><I>which</I></B>
+subsystem to control, and indicates whether debug messages should be turned
+<B>on</B>
+or <B>off</B> for that subsystem.&nbsp; The value of <B><I>which</I></B>
+must be one of the following:
+<UL>
+<LI>
+<B>chkpt</B> - Chooses the checkpointing code by using the DB_VERB_CHKPOINT
+value</LI>
+
+<LI>
+<B>deadlock </B>- Chooses the deadlocking code by using the DB_VERB_DEADLOCK
+value</LI>
+
+<LI>
+<B>recovery </B>- Chooses the recovery code by using the DB_VERB_RECOVERY
+value</LI>
+
+<LI>
+<B>wait </B>- Chooses the waitsfor code by using the DB_VERB_WAITSFOR value</LI>
+</UL>
+
+<HR WIDTH="100%">
+<P><A NAME="> <env> close"></A><B>> &lt;env> close</B>
+<P>This command closes an environment and deletes the handle.&nbsp; This
+command directly translates to a call to the <A HREF="../../docs/api_c/env_close.html">DBENV->close</A>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<P>Additionally, since the handle is no longer valid, we will call <I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.
+<P>Also, the close command will automatically abort any <A HREF="txn.html">transactions</A>
+and close any <A HREF="mpool.html">mpool</A> memory files.&nbsp; As such
+we must maintain a list of open transaction and mpool handles so that we
+can call <I>Tcl_DeleteCommand</I> on those as well.
+<P>
+<HR WIDTH="100%">
+<BR><B>> berkdb envremove [-data_dir <I>directory</I>] [-force] [-home
+<I>directory</I>]
+[-log_dir <I>directory</I>] [-tmp_dir <I>directory</I>] [-use_environ] [-use_environ_root]</B>
+<P>This command removes the environment if it is not in use and deletes
+the handle.&nbsp; This command directly translates to a call to the <A HREF="../../docs/api_c/env_remove.html">DBENV->remove</A>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-force</B> selects the DB_FORCE flag to remove even if other processes
+have the environment open</LI>
+
+<LI>
+<B>-home <I>directory</I> </B>specifies the home directory of the environment</LI>
+
+<LI>
+<B>-data_dir <I>directory </I></B>selects the data file directory of the
+environment by calling <A HREF="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</A>.</LI>
+
+<LI>
+<B>-log_dir <I>directory </I></B>selects the log file directory of the
+environment&nbsp; by calling <A HREF="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</A>.</LI>
+
+<LI>
+<B>-tmp_dir <I>directory </I></B>selects the temporary file directory of
+the environment&nbsp; by calling <A HREF="../../docs/api_c/env_set_tmp_dir.html">DBENV->set_tmp_dir</A>.</LI>
+
+<LI>
+<B>-use_environ </B>selects the DB_USE_ENVIRON flag to affect file naming</LI>
+
+<LI>
+<B>-use_environ_root</B> selects the DB_USE_ENVIRON_ROOT flag to affect
+file naming</LI>
+</UL>
+
+</BODY>
+</HTML>
diff --git a/bdb/tcl/docs/historic.html b/bdb/tcl/docs/historic.html
new file mode 100644
index 00000000000..216dc456b72
--- /dev/null
+++ b/bdb/tcl/docs/historic.html
@@ -0,0 +1,168 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Compatibility Commands"></A>Compatibility Commands</H2>
+The compatibility commands for old Dbm and Ndbm are described in the <A HREF="../../docs/api_c/dbm.html">dbm</A>
+manpage.
+<P><B>> berkdb dbminit <I>filename</I></B>
+<P>This command will invoke the dbminit function.&nbsp;&nbsp; <B><I>Filename</I></B>
+is used as the name of the database.
+<P>
+<HR WIDTH="100%"><B>> berkdb dbmclose</B>
+<P>This command will invoke the dbmclose function.
+<P>
+<HR WIDTH="100%"><B>> berkdb fetch <I>key</I></B>
+<P>This command will invoke the fetch function.&nbsp;&nbsp; It will return
+the data associated with the given <B><I>key </I></B>or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb store <I>key data</I></B>
+<P>This command will invoke the store function.&nbsp;&nbsp; It will store
+the <B><I>key/data</I></B> pair.&nbsp; It will return a 0 on success or
+throw a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb delete <I>key</I></B>
+<P>This command will invoke the delete function.&nbsp;&nbsp; It will delete
+the <B><I>key</I></B> from the database.&nbsp; It will return a 0 on success
+or throw a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb firstkey</B>
+<P>This command will invoke the firstkey function.&nbsp;&nbsp; It will
+return the first key in the database or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb nextkey <I>key</I></B>
+<P>This command will invoke the nextkey function.&nbsp;&nbsp; It will return
+the next key after the given <B><I>key</I></B> or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb hcreate <I>nelem</I></B>
+<P>This command will invoke the hcreate function with <B><I>nelem</I></B>
+elements.&nbsp; It will return a 0 on success or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb hsearch <I>key data action</I></B>
+<P>This command will invoke the hsearch function with <B><I>key</I></B>
+and <B><I>data</I></B>.&nbsp; The <B><I>action</I></B> must be either <B>find</B>
+or <B>enter</B>.&nbsp; If it is <B>find</B>, it will return the resultant
+data.&nbsp; If it is <B>enter</B>, it will return a 0 on success or a Tcl
+error.
+<P>
+<HR WIDTH="100%"><B>> berkdb hdestroy</B>
+<P>This command will invoke the hdestroy function.&nbsp; It will return
+a 0.
+<HR WIDTH="100%"><B>> berkdb ndbm_open [-create] [-rdonly] [-truncate]
+[-mode
+<I>mode</I>] [--] <I>filename</I></B>
+<P>This command will invoke the dbm_open function.&nbsp;&nbsp;&nbsp; After
+it successfully gets a handle to a database, we bind it to a new Tcl command
+of the form <B><I>ndbmX, </I></B>where X is an integer starting at 0 (e.g.
+<B>ndbm0,
+ndbm1, </B>etc).&nbsp; We use the <I>Tcl_CreateObjCommand()&nbsp;</I> to
+create the top level database function.&nbsp; It is through this handle
+that the user can access all of the commands described below.&nbsp; Internally,
+the database handle is sent as the <I>ClientData</I> portion of the new
+command set so that all future database calls access the appropriate handle.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-- </B>- Terminate the list of options and use remaining arguments as
+the file or subdb names (thus allowing the use of filenames beginning with
+a dash '-')</LI>
+
+<LI>
+<B>-create</B> selects the O_CREAT flag&nbsp; to create underlying files</LI>
+
+<LI>
+<B>-rdonly</B> selects the O_RDONLY flag for opening in read-only mode</LI>
+
+<LI>
+<B>-truncate</B> selects the O_TRUNC flag to truncate the database</LI>
+
+<LI>
+<B>-mode<I> mode</I></B> specifies the mode for created files</LI>
+
+<LI>
+<B><I>filename</I></B> indicates the name of the database</LI>
+</UL>
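+<P>A short usage sketch of the ndbm commands above and below (the file name,
+key, and data are illustrative only):
+<PRE>
+# Open (creating if necessary) an ndbm database; a handle such as ndbm0 is returned.
+set db [berkdb ndbm_open -create -mode 0644 myfile]
+
+# Store a key/data pair, fetch it back, then close the handle.
+$db store mykey mydata insert
+puts [$db fetch mykey]
+$db close
+</PRE>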
+
+<P><BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> close</B>
+<P>This command closes the database and renders the handle invalid.&nbsp;&nbsp;
+This command directly translates to the dbm_close function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>Additionally, since the handle is no longer valid, we will call <I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> clearerr</B>
+<P>This command clears any error condition on the database.&nbsp;&nbsp; This command
+directly translates to the dbm_clearerr function call.&nbsp; It returns
+either a 0 (for success),&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> delete <I>key</I></B>
+<P>This command deletes the <B><I>key</I></B> from the database.&nbsp;&nbsp;
+This command directly translates to the dbm_delete function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> dirfno</B>
+<P>This command directly translates to the dbm_dirfno function call.&nbsp;
+It returns either the result,&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> error</B>
+<P>This command returns the last error.&nbsp;&nbsp; This command directly
+translates to the dbm_error function call.&nbsp; It returns an error string.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> fetch <I>key</I></B>
+<P>This command gets the given <B><I>key</I></B> from the database.&nbsp;&nbsp;
+This command directly translates to the dbm_fetch function call.&nbsp;
+It returns either the data,&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> firstkey</B>
+<P>This command returns the first key in the database.&nbsp;&nbsp; This
+command directly translates to the dbm_firstkey function call.&nbsp; It
+returns either the key,&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> nextkey</B>
+<P>This command returns the next key in the database.&nbsp;&nbsp; This
+command directly translates to the dbm_nextkey function call.&nbsp; It
+returns either the key,&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> pagfno</B>
+<P>This command directly translates to the dbm_pagfno function call.&nbsp;
+It returns either the result,&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> rdonly</B>
+<P>This command changes the database to readonly.&nbsp;&nbsp; This command
+directly translates to the dbm_rdonly function call.&nbsp; It returns either
+a 0 (for success),&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> store <I>key data </I>insert|replace</B>
+<P>This command puts the given <B><I>key</I></B> and <B><I>data</I></B>
+pair into the database.&nbsp;&nbsp; This command directly translates to
+the dbm_store function call.&nbsp; It will either <B>insert</B> or <B>replace</B>
+the data based on the action given in the third argument.&nbsp; It returns
+either a 0 (for success),&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%">
+</BODY>
+</HTML>
diff --git a/bdb/tcl/docs/index.html b/bdb/tcl/docs/index.html
new file mode 100644
index 00000000000..2866c1e23db
--- /dev/null
+++ b/bdb/tcl/docs/index.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<CENTER>
+<H1>
+Complete Tcl Interface for Berkeley DB</H1></CENTER>
+
+<UL type=disc>
+<LI>
+<A HREF="../../docs/api_tcl/tcl_index.html">General use Berkeley DB commands</A></LI>
+</UL>
+
+<UL type=disc>
+<LI>
+<A HREF="./env.html">Environment commands</A></LI>
+
+<LI>
+<A HREF="./lock.html">Locking commands</A></LI>
+
+<LI>
+<A HREF="./log.html">Logging commands</A></LI>
+
+<LI>
+<A HREF="./mpool.html">Memory Pool commands</A></LI>
+
+<LI>
+<A HREF="./txn.html">Transaction commands</A></LI>
+</UL>
+
+<UL>
+<LI>
+<A HREF="./db.html">Access Method commands</A></LI>
+
+<LI>
+<A HREF="./test.html">Debugging and Testing</A></LI>
+
+<LI>
+<A HREF="./historic.html">Compatibility commands</A></LI>
+
+<LI>
+<A HREF="./library.html">Convenience commands</A></LI>
+</UL>
diff --git a/bdb/tcl/docs/library.html b/bdb/tcl/docs/library.html
new file mode 100644
index 00000000000..abd656d8e5d
--- /dev/null
+++ b/bdb/tcl/docs/library.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+<HR WIDTH="100%">
+<H2>
+<A NAME="Convenience Commands"></A>Convenience Commands</H2>
+The convenience commands are provided for ease of use with the DB test
+suite.
+<P><B>> berkdb rand</B>
+<P>This command will invoke the rand function and return the random number.
+<P>
+<HR WIDTH="100%"><B>> berkdb random_int <I>low high</I></B>
+<P>This command will invoke the rand function and return a number between
+<B><I>low</I></B>
+and <B><I>high</I></B>.
+<P>
+<HR WIDTH="100%">
+<P><B>> berkdb srand <I>seed</I></B>
+<P>This command will invoke the srand function with the given <B><I>seed</I></B>
+and return 0.
+<P>
+<HR WIDTH="100%">
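+<P>A brief sketch using the three commands together (the seed and bounds are
+arbitrary):
+<PRE>
+# Seed the generator, then draw one unbounded and one bounded random number.
+berkdb srand 12345
+puts [berkdb rand]
+puts [berkdb random_int 1 10]
+</PRE>
+<HR WIDTH="100%">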
diff --git a/bdb/tcl/docs/lock.html b/bdb/tcl/docs/lock.html
new file mode 100644
index 00000000000..87a20e9a6bf
--- /dev/null
+++ b/bdb/tcl/docs/lock.html
@@ -0,0 +1,187 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Locking Commands"></A>Locking Commands</H2>
+Most locking commands work with the environment handle.&nbsp; However,
+when a user gets a lock we create a new lock handle that they then use,
+in a manner similar to all the other handles, to release the lock.&nbsp;
+We present the general locking functions first, and then those that manipulate
+locks.
+<P><B>> &lt;env> lock_detect [-lock_conflict] [default|oldest|youngest|random]</B>
+<P>This command runs the deadlock detector.&nbsp; It directly translates
+to the <A HREF="../../docs/api_c/lock_detect.html">lock_detect</A> DB call.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The first argument sets the policy
+for deadlock as follows:
+<UL>
+<LI>
+<B>default</B> selects the DB_LOCK_DEFAULT policy for default detection
+(default if not specified)</LI>
+
+<LI>
+<B>oldest </B>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</LI>
+
+<LI>
+<B>random</B> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</LI>
+
+<LI>
+<B>youngest</B> selects DB_LOCK_YOUNGEST to abort the youngest locker on
+a deadlock</LI>
+</UL>
+The second argument, <B>-lock_conflict</B>, selects the DB_LOCK_CONFLICT
+flag to only run the detector if a lock conflict has occurred since the
+last time the detector was run.
+<HR WIDTH="100%">
+<BR><B>> &lt;env> lock_stat</B>
+<P>This command returns a list of name/value pairs where the names correspond
+to the C-structure field names of DB_LOCK_STAT and the values are the data
+returned.&nbsp; This command is a direct translation of the <A HREF="../../docs/api_c/lock_stat.html">lock_stat</A>
+DB call.
+<HR WIDTH="100%">
+<BR><A NAME="> <env> lock_id"></A><B>> &lt;env> lock_id</B>
+<P>This command returns a unique locker ID value.&nbsp; It directly translates
+to the <A HREF="../../docs/api_c/lock_id.html">lock_id</A> DB call.
+<HR WIDTH="100%">
+<BR><A NAME="> <env> lock_get"></A><B>> &lt;env> lock_get [-nowait]<I>lockmode
+locker obj</I></B>
+<P>This command gets a lock. It will invoke the <A HREF="../../docs/api_c/lock_get.html">lock_get</A>
+function.&nbsp; After it successfully gets a handle to a lock, we bind
+it to a new Tcl command of the form <B><I>$env.lockX</I></B>, where X is
+an integer starting at&nbsp; 0 (e.g. <B>$env.lock0, $env.lock1, </B>etc).&nbsp;
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level locking
+command function.&nbsp; It is through this handle that the user can release
+the lock.&nbsp; Internally, the handle we get back from DB will be stored
+as the <I>ClientData</I> portion of the new command set so that future
+locking calls will have that handle readily available.
+<P>The arguments are:
+<UL>
+<LI>
+<B><I>locker</I></B> specifies the locker ID returned from the <A HREF="#> <env> lock_id">lock_id</A>
+command</LI>
+
+<LI>
+<B><I>obj</I></B> specifies an object to lock</LI>
+
+<LI>
+the <B><I>lock mode</I></B> is specified as one of the following:</LI>
+
+<UL>
+<LI>
+<B>ng </B>specifies DB_LOCK_NG for not granted (always 0)</LI>
+
+<LI>
+<B>read</B> specifies DB_LOCK_READ for a read (shared) lock</LI>
+
+<LI>
+<B>write</B> specifies DB_LOCK_WRITE for an exclusive write lock</LI>
+
+<LI>
+<B>iwrite </B>specifies DB_LOCK_IWRITE for intent for exclusive write lock</LI>
+
+<LI>
+<B>iread </B>specifies DB_LOCK_IREAD for intent for shared read lock</LI>
+
+<LI>
+<B>iwr </B>specifies DB_LOCK_IWR for intent for read and write lock</LI>
+</UL>
+
+<LI>
+<B>-nowait</B> selects the DB_LOCK_NOWAIT flag to indicate that we do not want
+to wait on the lock</LI>
+</UL>
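+<P>A minimal sketch of acquiring and releasing a single lock (the object name
+is arbitrary; <B>$env</B> is an environment handle opened as described in
+<A HREF="env.html">the environment commands</A>):
+<PRE>
+# Obtain a locker ID, take a write lock on an application-defined object,
+# then release it through the returned lock handle.
+set locker [$env lock_id]
+set lock [$env lock_get write $locker myobject]
+$lock put
+</PRE>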
+
+<HR WIDTH="100%">
+<BR><B>> &lt;lock> put</B>
+<P>This command releases the lock referenced by the command.&nbsp; It is
+a direct translation of the <A HREF="../../docs/api_c/lock_put.html">lock_put</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; Additionally, since
+the handle is no longer valid, we will call
+<I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.
+<BR>
+<HR WIDTH="100%">
+<BR><A NAME="> <env> lock_vec"></A><B>> &lt;env> lock_vec [-nowait] <I>locker
+</I>{get|put|put_all|put_obj
+[<I>obj</I>] [<I>lockmode</I>] [<I>lock</I>]} ...</B>
+<P>This command performs a series of lock calls.&nbsp; It is a direct translation
+of the <A HREF="../../docs/api_c/lock_vec.html">lock_vec</A> function.&nbsp;
+This command will return a list of the return values from each operation
+specified in the argument list.&nbsp; For the 'put' operations the entry
+in the return value list is either a 0 (for success) or an error.&nbsp;
+For the 'get' operation, the entry is the lock widget handle, <B>$env.lockN</B>
+(as described above in <A HREF="#> <env> lock_get">&lt;env> lock_get</A>)
+or an error.&nbsp; If an error occurs, the return list will contain the
+return values for all the successful operations up to the erroneous one and
+the error code for that operation.&nbsp; Subsequent operations will be
+ignored.
+<P>As for the other operations, if we are doing a 'get' we will create
+the commands and if we are doing a 'put' we will have to delete the commands.&nbsp;
+Additionally, we will have to do this after the call to the DB lock_vec
+and iterate over the results, creating and/or deleting Tcl commands.&nbsp;
+It is possible that we may return a lock widget from a get operation that
+is considered invalid, if, for instance, there was a <B>put_all</B> operation
+performed later in the vector of operations.&nbsp; The arguments are:
+<UL>
+<LI>
+<B><I>locker</I></B> specifies the locker ID returned from the <A HREF="#> <env> lock_id">lock_id</A>
+command</LI>
+
+<LI>
+<B>-nowait</B> selects the DB_LOCK_NOWAIT flag to indicate that we do not want
+to wait on the lock</LI>
+
+<LI>
+the lock vectors are tuples consisting of {an operation, lock object, lock
+mode, lock handle} where what is required is based on the operation desired:</LI>
+
+<UL>
+<LI>
+<B>get</B> specifies DB_LOCK_GET to get a lock.&nbsp; Requires a tuple <B>{get
+<I>obj</I>
+<I>mode</I>}
+</B>where
+<B><I>mode</I></B>
+is:</LI>
+
+<UL>
+<LI>
+<B>ng </B>specifies DB_LOCK_NG for not granted (always 0)</LI>
+
+<LI>
+<B>read</B> specifies DB_LOCK_READ for a read (shared) lock</LI>
+
+<LI>
+<B>write</B> specifies DB_LOCK_WRITE for an exclusive write lock</LI>
+
+<LI>
+<B>iwrite </B>specifies DB_LOCK_IWRITE for intent for exclusive write lock</LI>
+
+<LI>
+<B>iread </B>specifies DB_LOCK_IREAD for intent for shared read lock</LI>
+
+<LI>
+<B>iwr </B>specifies DB_LOCK_IWR for intent for read and write lock</LI>
+</UL>
+
+<LI>
+<B>put</B> specifies DB_LOCK_PUT to release a <B><I>lock</I></B>.&nbsp;
+Requires a tuple <B>{put <I>lock}</I></B></LI>
+
+<LI>
+<B>put_all </B>specifies DB_LOCK_PUT_ALL to release all locks held by <B><I>locker</I></B>.&nbsp;
+Requires a tuple <B>{put_all}</B></LI>
+
+<LI>
+<B>put_obj</B> specifies DB_LOCK_PUT_OBJ to release all locks held by <B><I>locker</I></B>
+associated with the given <B><I>obj</I></B>.&nbsp; Requires a tuple <B>{put_obj
+<I>obj</I>}</B></LI>
+</UL>
+</UL>
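+<P>A minimal sketch of a vector of operations (the object name is arbitrary):
+<PRE>
+# In one lock_vec call, get a read lock on an object and then release every
+# lock held by the locker; the result list holds a lock handle and a status.
+set locker [$env lock_id]
+set res [$env lock_vec $locker {get myobj read} {put_all}]
+</PRE>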
diff --git a/bdb/tcl/docs/log.html b/bdb/tcl/docs/log.html
new file mode 100644
index 00000000000..35ecfc2f5f5
--- /dev/null
+++ b/bdb/tcl/docs/log.html
@@ -0,0 +1,142 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Logging Commands"></A>Logging Commands</H2>
+Logging commands work from the environment handle to control the use of
+the log files.&nbsp; Log files are opened when the environment is opened
+and closed when the environment is closed.&nbsp; In all of the commands
+in the logging subsystem that take or return a log sequence number, it
+is of the form:
+<BR><B>{<I>fileid offset</I>}</B>
+<BR>where the <B><I>fileid</I></B> is an identifier of the log file, as
+returned from the <A HREF="#> <env> log_get">log_get</A> call.
+<P><B>> &lt;env> log_archive [-arch_abs] [-arch_data] [-arch_log]</B>
+<P>This command returns&nbsp; a list of log files that are no longer in
+use.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_archive.html">log_archive</A>
+function. The arguments are:
+<UL>
+<LI>
+<B>-arch_abs </B>selects DB_ARCH_ABS to return all pathnames as absolute
+pathnames</LI>
+
+<LI>
+<B>-arch_data </B>selects DB_ARCH_DATA to return a list of database files</LI>
+
+<LI>
+<B>-arch_log </B>selects DB_ARCH_LOG to return a list of log files</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_compare <I>lsn1 lsn2</I></B>
+<P>This command compares two log sequence numbers, given as <B><I>lsn1</I></B>
+and <B><I>lsn2</I></B>.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_compare.html">log_compare</A>
+function.&nbsp; It will return a -1, 0, 1 to indicate if <B><I>lsn1</I></B>
+is less than, equal to or greater than <B><I>lsn2</I></B> respectively.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_file <I>lsn</I></B>
+<P>This command returns&nbsp; the file name associated with the given <B><I>lsn</I></B>.&nbsp;
+It is a direct call to the <A HREF="../../docs/api_c/log_file.html">log_file</A>
+function.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_flush [<I>lsn</I>]</B>
+<P>This command&nbsp; flushes the log up to the specified <B><I>lsn</I></B>
+or flushes all records if none is given.&nbsp; It is a direct call to the
+<A HREF="../../docs/api_c/log_flush.html">log_flush</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<BR>
+<HR WIDTH="100%">
+<BR><A NAME="<env> log_get"></A><B>> &lt;env> log_get<I> </I>[-checkpoint]
+[-current] [-first] [-last] [-next] [-prev] [-set <I>lsn</I>]</B>
+<P>This command retrieves a record from the log according to the <B><I>lsn</I></B>
+given and returns it and the data.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_get.html">log_get</A>
+function.&nbsp; It is a way of implementing a manner of log iteration similar
+to <A HREF="../../docs/api_tcl/db_cursor.html">cursors</A>.&nbsp;&nbsp;
+The information we return is similar to database information.&nbsp; We
+return a list where the first item is the LSN (which is a list itself)
+and the second item is the data.&nbsp; So it looks like, fully expanded,
+<B>{{<I>fileid</I>
+<I>offset</I>}
+<I>data</I>}.</B>&nbsp;
+In the case where DB_NOTFOUND is returned, we return an empty list <B>{}</B>.&nbsp;
+All other errors return a Tcl error.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-checkpoint </B>selects the DB_CHECKPOINT flag to return the LSN/data
+pair of the last record written through <A HREF="#> <env> log_put">log_put</A>
+with DB_CHECKPOINT specified</LI>
+
+<LI>
+<B>-current</B> selects the DB_CURRENT flag to return the current record</LI>
+
+<LI>
+<B>-first</B> selects the DB_FIRST flag to return the first record in the
+log.</LI>
+
+<LI>
+<B>-last </B>selects the DB_LAST flag to return the last record in the
+log.</LI>
+
+<LI>
+<B>-next</B> selects the DB_NEXT flag to return the next record in the
+log.</LI>
+
+<LI>
+<B>-prev </B>selects the DB_PREV flag to return the&nbsp; previous record
+in the log.</LI>
+
+<LI>
+<B>-set</B> selects the DB_SET flag to return the record specified by the
+given <B><I>lsn</I></B></LI>
+</UL>
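+<P>A minimal sketch of iterating over the log with these flags (assumes an
+open environment handle with logging enabled):
+<PRE>
+# Walk the log from the first record forward; an empty list marks the end.
+for {set rec [$env log_get -first]} {[llength $rec] != 0} \
+    {set rec [$env log_get -next]} {
+    set lsn  [lindex $rec 0]
+    set data [lindex $rec 1]
+}
+</PRE>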
+
+<HR WIDTH="100%">
+<BR><A NAME="> <env> log_put"></A><B>> &lt;env> log_put<I> </I>[-checkpoint]
+[-curlsn] [-flush] <I>record</I></B>
+<P>This command stores a <B><I>record</I></B> into the log and returns
+the LSN of the log record.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_put.html">log_put</A>
+function.&nbsp; It returns either an LSN or it throws a Tcl error with
+a system message.&nbsp;<B> </B>The arguments are:
+<UL>
+<LI>
+<B>-checkpoint </B>selects the DB_CHECKPOINT flag</LI>
+
+<LI>
+<B>-curlsn</B> selects the DB_CURLSN flag to return the LSN of the next
+record</LI>
+
+<LI>
+<B>-flush </B>selects the DB_FLUSH flag to flush the log to disk.</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><A NAME="> <env> log_register"></A><B>> &lt;env> log_register <I>db</I>
+<I>file</I></B>
+<P>This command registers a <B><I>file</I></B> and <B><I>db</I></B> with
+the log manager.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_register.html">log_register</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<BR>
+<HR WIDTH="100%">
+<BR><A NAME="> <env> log_unregister"></A><B>> &lt;env> log_unregister <I>db</I></B>
+<P>This command unregisters the file specified by the database handle <B><I>db
+</I></B>from the log manager.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_unregister.html">log_unregister</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_stat</B>
+<P>This command returns&nbsp; the statistics associated with the logging
+subsystem.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_stat.html">log_stat</A>
+function.&nbsp; It returns a list of name/value pairs of the DB_LOG_STAT
+structure.
+</BODY>
+</HTML>
diff --git a/bdb/tcl/docs/mpool.html b/bdb/tcl/docs/mpool.html
new file mode 100644
index 00000000000..666219306ca
--- /dev/null
+++ b/bdb/tcl/docs/mpool.html
@@ -0,0 +1,189 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Memory Pool Commands"></A>Memory Pool Commands</H2>
+Memory pools are used in a manner similar to the other subsystems.&nbsp;
+We create a handle to the pool and&nbsp; then use it for a variety of operations.&nbsp;
+Some of the memory pool commands use the environment instead. Those are
+presented first.
+<P><B>> &lt;env> mpool_stat</B>
+<P>This command returns&nbsp; the statistics associated with the memory
+pool subsystem.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_stat.html">memp_stat</A>
+function.&nbsp; It returns a list of name/value pairs of the DB_MPOOL_STAT
+structure.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mpool_sync <I>lsn</I></B>
+<P>This command flushes the memory pool for all pages with a log sequence
+number less than <B><I>lsn</I></B>.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_sync.html">memp_sync&nbsp;</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mpool_trickle <I>percent</I></B>
+<P>This command tells DB to ensure that at least <B><I>percent</I></B>
+percent of the pages are clean by writing out enough dirty pages to
+achieve that percentage.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_trickle.html">memp_trickle</A>
+function.&nbsp; The command will return the number of pages actually written.&nbsp;
+It returns either the number of pages on success, or it throws a Tcl error
+with a system message.
+<BR>
+<HR WIDTH="100%">
+<P><B>> &lt;env> mpool [-create] [-nommap] [-rdonly] [-mode <I>mode</I>]
+-pagesize <I>size</I> [<I>file</I>]</B>
+<P>This command creates a new memory pool.&nbsp; It invokes the <A HREF="../../docs/api_c/memp_fopen.html">memp_fopen</A>
+function.&nbsp; After it successfully gets a handle to a memory pool, we
+bind it to a new Tcl command of the form <B><I>$env.mpX</I></B>, where
+X is an integer starting at&nbsp; 0 (e.g. <B>$env.mp0, $env.mp1, </B>etc).&nbsp;
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level memory
+pool functions.&nbsp; It is through this handle that the user can manipulate
+the pool.&nbsp; Internally, the handle we get back from DB will be stored
+as the <I>ClientData</I> portion of the new command set so that future
+memory pool calls will have that handle readily available.&nbsp; Additionally,
+we need to maintain this handle in relation to the environment so that
+if the user calls <A HREF="../../docs/api_tcl/env_close.html">&lt;env> close</A> without closing
+the memory pool we can properly clean up.&nbsp; The arguments are:
+<UL>
+<LI>
+<B><I>file</I></B> is the name of the file to open</LI>
+
+<LI>
+<B>-create </B>selects the DB_CREATE flag to create underlying file</LI>
+
+<LI>
+<B>-mode <I>mode </I></B>sets the permissions of created file to <B><I>mode</I></B></LI>
+
+<LI>
+<B>-nommap</B> selects the DB_NOMMAP flag to disallow using mmap'ed files</LI>
+
+<LI>
+<B>-pagesize</B> sets the underlying file page size to <B><I>size</I></B></LI>
+
+<LI>
+<B>-rdonly </B>selects the DB_RDONLY flag for read only access</LI>
+</UL>
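+<P>A minimal sketch of a pool's life cycle, using the page commands described
+below (the file name and page size are illustrative):
+<PRE>
+# Open a memory pool file, create page 0 if needed, initialize it, put it
+# back dirty, and close the pool.
+set mp [$env mpool -create -pagesize 512 mpfile]
+set pg [$mp get -create 0]
+$pg init "hello"
+$pg put -dirty
+$mp close
+</PRE>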
+
+<HR WIDTH="100%">
+<BR><B>> &lt;mp> close</B>
+<P>This command closes the memory pool.&nbsp; It is a direct call to the
+<A HREF="../../docs/api_c/memp_fclose.html">memp_close</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<P>Additionally, since the handle is no longer valid, we will call
+<I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+We must also remove the reference to this handle from the environment.&nbsp;
+We will go through the list of pinned pages that were acquired by the <A HREF="#> <mp> get">get</A>
+command and
+<A HREF="#> <pg> put">put</A> them back.
+<HR WIDTH="100%">
+<BR><B>> &lt;mp> fsync</B>
+<P>This command flushes all of the file's dirty pages to disk.&nbsp; It
+is a direct call to the <A HREF="../../docs/api_c/memp_fsync.html">memp_fsync</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<HR WIDTH="100%">
+<BR><A NAME="> <mp> get"></A><B>> &lt;mp> get [-create] [-last] [-new]
+[<I>pgno</I>]</B>
+<P>This command gets the&nbsp; <B><I>pgno </I></B>page from the memory
+pool.&nbsp; It invokes the <A HREF="../../docs/api_c/memp_fget.html">memp_fget</A>
+function and possibly the <A HREF="../../docs/api_c/memp_fset.html">memp_fset</A>
+function if any options are chosen to set the page characteristics.&nbsp;
+After it successfully gets a handle to a page,&nbsp; we bind it to and
+return a new Tcl command of the form <B><I>$env.mpN.pX</I></B>, where X
+is an integer starting at&nbsp; 0 (e.g. <B>$env.mp0.p0, $env.mp1.p0, </B>etc).&nbsp;
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level page functions.&nbsp;
+It is through this handle that the user can manipulate the page.&nbsp;
+Internally, the handle we get back from DB will be stored as the <I>ClientData</I>
+portion of the new command set.&nbsp; We need to store this handle in&nbsp;
+relation to the memory pool handle so that if the memory pool is closed,
+we will <A HREF="#> <pg> put">put</A> back the pages (setting the discard
+flag) and delete that set of commands.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-create </B>selects the DB_MPOOL_CREATE flag&nbsp; to create the page
+if it does not exist.</LI>
+
+<LI>
+<B>-last</B> selects the DB_MPOOL_LAST flag to return the last page in
+the file</LI>
+
+<LI>
+<B>-new</B> selects the DB_MPOOL_NEW flag to create a new page</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> pgnum</B>
+<P>This command returns the page number associated with this memory pool
+page.&nbsp; Primarily it will be used after an <A HREF="#> <mp> get">&lt;mp>
+get</A> call.
+<BR>
+<HR WIDTH="100%"><B>> &lt;pg> pgsize</B>
+<P>This command returns the page size associated with this memory pool
+page.&nbsp; Primarily it will be used after an <A HREF="#> <mp> get">&lt;mp>
+get</A> call.
+<BR>
+<HR WIDTH="100%"><B>> &lt;pg> set [-clean] [-dirty] [-discard]</B>
+<P>This command sets the characteristics of the page.&nbsp; It is a direct
+call to the <A HREF="../../docs/api_c/memp_fset.html">memp_fset</A> function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-clean</B> selects the DB_MPOOL_CLEAN flag to indicate this is a clean
+page</LI>
+
+<LI>
+<B>-dirty</B> selects the DB_MPOOL_DIRTY flag to indicate this page should
+be flushed before eviction</LI>
+
+<LI>
+<B>-discard</B> selects the DB_MPOOL_DISCARD flag to indicate this page
+is unimportant</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><A NAME="> <pg> put"></A><B>> &lt;pg> put [-clean] [-dirty] [-discard]</B>
+<P>This command will put back the page to the memory pool.&nbsp; It is
+a direct call to the <A HREF="../../docs/api_c/memp_fput.html">memp_fput</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message. Additionally, since the
+handle is no longer valid, we will call
+<I>Tcl_DeleteCommand()
+</I>so that
+further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+We must also remove the reference to this handle from the memory pool.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-clean</B> selects the DB_MPOOL_CLEAN flag to indicate this is a clean
+page</LI>
+
+<LI>
+<B>-dirty</B> selects the DB_MPOOL_DIRTY flag to indicate this page should
+be flushed before eviction</LI>
+
+<LI>
+<B>-discard</B> selects the DB_MPOOL_DISCARD flag to indicate this page
+is unimportant</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> init <I>val|string</I></B>
+<P>This command initializes the page to the <B><I>val</I></B> given or
+places the <B><I>string</I></B> given at the beginning of the page.&nbsp;
+It returns a 0 for success or it throws a Tcl error with an error message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> is_setto <I>val|string</I></B>
+<P>This command verifies the page contains the <B><I>val</I></B> given
+or checks that the <B>string</B> given is at the beginning of the page.&nbsp;
+It returns a 1 if the page is correctly set to the value and a 0 otherwise.
diff --git a/bdb/tcl/docs/test.html b/bdb/tcl/docs/test.html
new file mode 100644
index 00000000000..10cf09efba7
--- /dev/null
+++ b/bdb/tcl/docs/test.html
@@ -0,0 +1,149 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Debugging"></A>Debugging and Testing</H2>
+We have imported the debugging system from the old test suite into the
+new interface to aid in debugging problems.&nbsp; There are several variables
+that are available both in gdb as globals to the C code, and variables
+in Tcl that the user can set.&nbsp; These variables are linked together
+so that changes in one venue are reflected in the other.&nbsp; The names
+of the variables have been modified a bit to reduce the likelihood
+of namespace trampling.&nbsp; We have added a double underscore to
+all the names.
+<P>The variables are all initialized to zero (0) thus resulting in debugging
+being turned off.&nbsp; The purpose of the debugging, fundamentally, is
+to allow the user to set a breakpoint prior to making a DB call.&nbsp;
+This breakpoint is set in the <I>__db_loadme() </I>function.&nbsp; The
+user may selectively turn on various debugging areas each controlled by
+a separate variable (note they all have two (2) underscores prepended to
+the name):
+<UL>
+<LI>
+<B>__debug_on</B> - Turns on the debugging system.&nbsp; This must be on
+for any debugging to occur</LI>
+
+<LI>
+<B>__debug_print - </B>Turns on printing a debug count statement on each
+call</LI>
+
+<LI>
+<B>__debug_test -</B> Hits the breakpoint in <I>__db_loadme</I> on the
+specific iteration</LI>
+
+<LI>
+<B>__debug_stop </B>- Hits the breakpoint in <I>__db_loadme</I> on every
+(or the next) iteration</LI>
+</UL>
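+<P>For example, from a Tcl shell running under gdb one might enable the
+system and stop on the third DB call (a sketch; the iteration count is
+arbitrary):
+<PRE>
+# Turn the debugging system on and arrange for the breakpoint in
+# __db_loadme() to be hit on the third call into the DB library.
+set __debug_on 1
+set __debug_test 3
+</PRE>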
+<B>Note to developers:</B>&nbsp; Anyone extending this interface must place
+a call to <B>_debug_check()</B> (no arguments) before every call into the
+DB library.
+<P>There is also a command available that will force a call to the _debug_check
+function.
+<P><B>> berkdb debug_check</B>
+<P>
+<HR WIDTH="100%">
+<BR>For testing purposes we have added several hooks into the DB library
+and a small interface into the environment and/or database commands to
+manipulate the hooks.&nbsp; This command interface, the hooks, and everything
+that goes with them are only enabled when the test option is configured into
+DB.
+<P><B>> &lt;env> test copy <I>location</I></B>
+<BR><B>> &lt;db> test copy <I>location</I></B>
+<BR><B>> &lt;env> test abort <I>location</I></B>
+<BR><B>> &lt;db> test abort <I>location</I></B>
+<P>In order to test recovery we need to be able to abort the creation or
+deletion process at various points.&nbsp; We also want to invoke a copy
+function to copy the database file(s) at various points so
+that we can obtain before/after snapshots of the databases.&nbsp; The interface
+provides the test command to specify a <B><I>location</I></B> where we
+wish to invoke a <B>copy</B> or an <B>abort</B>.&nbsp; The command is available
+from either the environment or the database for convenience.&nbsp; The
+<B><I>location</I></B>
+can be one of the following:
+<UL>
+<LI>
+<B>none -</B> Clears the location</LI>
+
+<LI>
+<B>preopen -</B> Sets the location prior to the __os_open call in the creation
+process</LI>
+
+<LI>
+<B>postopen</B> - Sets the location to immediately following the __os_open
+call in creation</LI>
+
+<LI>
+<B>postlogmeta</B> - Sets the location to immediately following the __db_log_page
+call to log the meta data in creation.&nbsp; Only valid for Btree.</LI>
+
+<LI>
+<B>postlog</B> - Sets the location to immediately following the last (or
+only) __db_log_page call in creation.</LI>
+
+<LI>
+<B>postsync</B> - Sets the location to immediately following the sync of
+the log page in creation.</LI>
+
+<LI>
+<B>prerename</B> - Sets the location prior to the __os_rename call in the
+deletion process.</LI>
+
+<LI>
+<B>postrename</B> - Sets the location to immediately following the __os_rename
+call in deletion</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mutex <I>mode nitems</I></B>
+<P>This command creates a mutex region for testing.&nbsp; It sets the mode
+of the region to <B><I>mode</I></B> and sets up for <B><I>nitems</I></B>
+number of mutex entries.&nbsp; After we successfully get a handle to a
+mutex we create a command of the form <B><I>$env.mutexX</I></B>, where
+X is an integer starting at&nbsp; 0 (e.g. <B>$env.mutex0, $env.mutex1,
+</B>etc).&nbsp;&nbsp;
+We use the <I>Tcl_CreateObjCommand()&nbsp;</I> to create the top level
+mutex function.&nbsp; It is through this handle that the user can access
+all of the commands described below.&nbsp; Internally, the mutex handle
+is sent as the <I>ClientData</I> portion of the new command set so that
+all future mutex calls access the appropriate handle.
+<P>
+<HR WIDTH="100%"><B>> &lt;mutex> close</B>
+<P>This command closes the mutex and renders the handle invalid.&nbsp;&nbsp;
+This command directly translates to the __db_r_detach function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>Additionally, since the handle is no longer valid, we will call <I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+<HR WIDTH="100%"><B>> &lt;mutex> get <I>id</I></B>
+<P>This command locks the mutex identified by <B><I>id</I></B>.&nbsp; It
+returns either a 0 (for success),&nbsp; or it throws a Tcl error with a
+system message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> release <I>id</I></B>
+<P>This command releases the mutex identified by <B><I>id</I></B>.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> getval <I>id</I></B>
+<P>This command gets the value stored for the mutex identified by <B><I>id</I></B>.&nbsp;
+It returns either the value,&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> setval <I>id val</I></B>
+<P>This command sets the value stored for the mutex identified by <B><I>id
+</I></B>to
+<B><I>val</I></B>.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<BR>
+<HR WIDTH="100%">
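+<P>A short sketch tying the mutex commands above together (the mode and entry
+count are illustrative; the interface exists only in test builds):
+<PRE>
+# Create a small mutex region, lock entry 0, store and read back a value,
+# then unlock and close the region.
+set m [$env mutex 0644 10]
+$m get 0
+$m setval 0 99
+puts [$m getval 0]
+$m release 0
+$m close
+</PRE>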
+<BR>&nbsp;
+</BODY>
+</HTML>
diff --git a/bdb/tcl/docs/txn.html b/bdb/tcl/docs/txn.html
new file mode 100644
index 00000000000..863c9a875e6
--- /dev/null
+++ b/bdb/tcl/docs/txn.html
@@ -0,0 +1,56 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Transaction Commands"></A>Transaction Commands</H2>
+Transactions are used in a manner similar to the other subsystems.&nbsp;
+We create a handle to the transaction and&nbsp; then use it for a variety
+of operations.&nbsp; Some of the transaction commands use the environment
+instead.&nbsp; Those are presented first.&nbsp; The transaction command
+handle returned is the handle used by the various commands that can be
+transaction protected, such as <A HREF="../../docs/api_tcl/db_cursor.html">cursors</A>.<BR>
+
+<HR WIDTH="100%">
+<P><B>> &lt;env> txn_checkpoint [-kbyte <I>kb</I>] [-min <I>min</I>]</B>
+<P>This command causes a checkpoint of the transaction region.&nbsp; It
+is a direct translation of the <A HREF="../../docs/api_c/txn_checkpoint.html">txn_checkpoint
+</A>function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-kbyte </B>causes the checkpoint to occur only if <B><I>kb</I></B> kilobytes
+of log data has been written since the last checkpoint</LI>
+
+<LI>
+<B>-min</B> causes the checkpoint to occur only if <B><I>min</I></B> minutes
+have passed since the last checkpoint</LI>
+</UL>
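+<P>A minimal sketch (the thresholds are illustrative; txn_stat is described
+below):
+<PRE>
+# Checkpoint only if at least 64KB of log has been written or 5 minutes have
+# passed since the last checkpoint, then dump the transaction statistics.
+$env txn_checkpoint -kbyte 64 -min 5
+puts [$env txn_stat]
+</PRE>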
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> txn_stat</B>
+<P>This command returns transaction statistics.&nbsp; It is a direct translation
+of the <A HREF="../../docs/api_c/txn_stat.html">txn_stat</A> function.&nbsp;
+It will return a list of name/value pairs that correspond to the DB_TXN_STAT
+structure.
+<HR WIDTH="100%">
+<BR><B>>&nbsp; &lt;txn> id</B>
+<P>This command returns the transaction id.&nbsp; It is a direct call to
+the <A HREF="../../docs/api_c/txn_id.html">txn_id</A> function.&nbsp; The
+typical use of this identifier is as the <B><I>locker</I></B> value for
+the <A HREF="lock.html">lock_get</A> and <A HREF="lock.html">lock_vec</A>
+calls.
+<HR WIDTH="100%">
+<BR><B>> &lt;txn> prepare</B>
+<P>This command initiates a two-phase commit.&nbsp; It is a direct call
+to the <A HREF="../../docs/api_c/txn_prepare.html">txn_prepare</A> function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.
+<HR WIDTH="100%">
+</BODY>
+</HTML>
diff --git a/bdb/tcl/tcl_compat.c b/bdb/tcl/tcl_compat.c
new file mode 100644
index 00000000000..41caee95cc7
--- /dev/null
+++ b/bdb/tcl/tcl_compat.c
@@ -0,0 +1,1055 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_compat.c,v 11.22 2001/01/11 18:19:55 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int mutex_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+
+/*
+ * bdb_HCommand --
+ * Implements h* functions.
+ *
+ * PUBLIC: int bdb_HCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+bdb_HCommand(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *hcmds[] = {
+ "hcreate",
+ "hdestroy",
+ "hsearch",
+ NULL
+ };
+ enum hcmds {
+ HHCREATE,
+ HHDESTROY,
+ HHSEARCH
+ };
+ static char *srchacts[] = {
+ "enter",
+ "find",
+ NULL
+ };
+ enum srchacts {
+ ACT_ENTER,
+ ACT_FIND
+ };
+ ENTRY item, *hres;
+ ACTION action;
+ int actindex, cmdindex, nelem, result, ret;
+ Tcl_Obj *res;
+
+ result = TCL_OK;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], hcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum hcmds)cmdindex) {
+ case HHCREATE:
+ /*
+ * Must be 1 arg, nelem. Error if not.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "nelem");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &nelem);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = hcreate(nelem) == 0 ? 1: 0;
+ _ReturnSetup(interp, ret, "hcreate");
+ }
+ break;
+ case HHSEARCH:
+ /*
+ * 3 args for this. Error if different.
+ */
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data action");
+ return (TCL_ERROR);
+ }
+ item.key = Tcl_GetStringFromObj(objv[2], NULL);
+ item.data = Tcl_GetStringFromObj(objv[3], NULL);
+ action = 0;
+ if (Tcl_GetIndexFromObj(interp, objv[4], srchacts,
+ "action", TCL_EXACT, &actindex) != TCL_OK)
+ return (IS_HELP(objv[4]));
+ switch ((enum srchacts)actindex) {
+ case ACT_FIND:
+ action = FIND;
+ break;
+ case ACT_ENTER:
+ action = ENTER;
+ break;
+ }
+ _debug_check();
+ hres = hsearch(item, action);
+ if (hres == NULL)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else if (action == FIND)
+ Tcl_SetResult(interp, (char *)hres->data, TCL_STATIC);
+ else
+ /* action is ENTER */
+ Tcl_SetResult(interp, "0", TCL_STATIC);
+
+ break;
+ case HHDESTROY:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ (void)hdestroy();
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ *
+ * bdb_NdbmOpen --
+ * Opens an ndbm database.
+ *
+ * PUBLIC: #if DB_DBM_HSEARCH != 0
+ * PUBLIC: int bdb_NdbmOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBM **));
+ * PUBLIC: #endif
+ */
+int
+bdb_NdbmOpen(interp, objc, objv, dbpp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBM **dbpp; /* Dbm pointer */
+{
+ static char *ndbopen[] = {
+ "-create",
+ "-mode",
+ "-rdonly",
+ "-truncate",
+ "--",
+ NULL
+ };
+ enum ndbopen {
+ NDB_CREATE,
+ NDB_MODE,
+ NDB_RDONLY,
+ NDB_TRUNC,
+ NDB_ENDARG
+ };
+
+ u_int32_t open_flags;
+ int endarg, i, mode, optindex, read_only, result;
+ char *arg, *db;
+
+ result = TCL_OK;
+ open_flags = 0;
+ endarg = mode = 0;
+ read_only = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the option name index from the object based on the args
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], ndbopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum ndbopen)optindex) {
+ case NDB_CREATE:
+ open_flags |= O_CREAT;
+ break;
+ case NDB_RDONLY:
+ read_only = 1;
+ break;
+ case NDB_TRUNC:
+ open_flags |= O_TRUNC;
+ break;
+ case NDB_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case NDB_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
+
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ /*
+ * Any args we have left, (better be 0, or 1 left) is a
+ * file name. If we have 0, then an in-memory db. If
+ * there is 1, a db name.
+ */
+ db = NULL;
+ if (i != objc && i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?file?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ if (i != objc)
+ db = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+
+ /*
+ * When we get here, we have already parsed all of our args
+ * and made all our calls to set up the database. Everything
+ * is okay so far, no errors, if we get here.
+ *
+ * Now open the database.
+ */
+ if (read_only)
+ open_flags |= O_RDONLY;
+ else
+ open_flags |= O_RDWR;
+ _debug_check();
+ if ((*dbpp = dbm_open(db, open_flags, mode)) == NULL) {
+ result = _ReturnSetup(interp, Tcl_GetErrno(), "db open");
+ goto error;
+ }
+ return (TCL_OK);
+
+error:
+ *dbpp = NULL;
+ return (result);
+}
+
+/*
+ * bdb_DbmCommand --
+ * Implements "dbm" commands.
+ *
+ * PUBLIC: #if DB_DBM_HSEARCH != 0
+ * PUBLIC: int bdb_DbmCommand
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST*, int, DBM *));
+ * PUBLIC: #endif
+ */
+int
+bdb_DbmCommand(interp, objc, objv, flag, dbm)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ int flag; /* Which db interface */
+ DBM *dbm; /* DBM pointer */
+{
+ static char *dbmcmds[] = {
+ "dbmclose",
+ "dbminit",
+ "delete",
+ "fetch",
+ "firstkey",
+ "nextkey",
+ "store",
+ NULL
+ };
+ enum dbmcmds {
+ DBMCLOSE,
+ DBMINIT,
+ DBMDELETE,
+ DBMFETCH,
+ DBMFIRST,
+ DBMNEXT,
+ DBMSTORE
+ };
+ static char *stflag[] = {
+ "insert", "replace",
+ NULL
+ };
+ enum stflag {
+ STINSERT, STREPLACE
+ };
+ datum key, data;
+ int cmdindex, stindex, result, ret;
+ char *name, *t;
+
+ result = TCL_OK;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], dbmcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ switch ((enum dbmcmds)cmdindex) {
+ case DBMCLOSE:
+ /*
+ * No arg for this. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = dbmclose();
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, "dbmclose");
+ break;
+ case DBMINIT:
+ /*
+ * Must be 1 arg - file.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "file");
+ return (TCL_ERROR);
+ }
+ name = Tcl_GetStringFromObj(objv[2], NULL);
+ if (flag == DBTCL_DBM)
+ ret = dbminit(name);
+ else {
+ Tcl_SetResult(interp, "Bad interface flag for command",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, "dbminit");
+ break;
+ case DBMFETCH:
+ /*
+ * 1 arg for this. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key");
+ return (TCL_ERROR);
+ }
+ key.dptr = (char *)Tcl_GetByteArrayFromObj(objv[2], &key.dsize);
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ data = fetch(key);
+ else if (flag == DBTCL_NDBM)
+ data = dbm_fetch(dbm, key);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (data.dptr == NULL ||
+ (ret = __os_malloc(NULL, data.dsize + 1, NULL, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, data.dptr, data.dsize);
+ t[data.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(t, data.dsize + 1);
+ }
+ break;
+ case DBMSTORE:
+ /*
+ * 2 args for this. Error if different.
+ */
+ if (objc != 4 && flag == DBTCL_DBM) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data");
+ return (TCL_ERROR);
+ }
+ if (objc != 5 && flag == DBTCL_NDBM) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data action");
+ return (TCL_ERROR);
+ }
+ key.dptr = (char *)Tcl_GetByteArrayFromObj(objv[2], &key.dsize);
+ data.dptr =
+ (char *)Tcl_GetByteArrayFromObj(objv[3], &data.dsize);
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = store(key, data);
+ else if (flag == DBTCL_NDBM) {
+ if (Tcl_GetIndexFromObj(interp, objv[4], stflag,
+ "flag", TCL_EXACT, &stindex) != TCL_OK)
+ return (IS_HELP(objv[4]));
+ switch ((enum stflag)stindex) {
+ case STINSERT:
+ flag = DBM_INSERT;
+ break;
+ case STREPLACE:
+ flag = DBM_REPLACE;
+ break;
+ }
+ ret = dbm_store(dbm, key, data, flag);
+ } else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, "store");
+ break;
+ case DBMDELETE:
+ /*
+ * 1 arg for this. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key");
+ return (TCL_ERROR);
+ }
+ key.dptr = (char *)Tcl_GetByteArrayFromObj(objv[2], &key.dsize);
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = delete(key);
+ else if (flag == DBTCL_NDBM)
+ ret = dbm_delete(dbm, key);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, "delete");
+ break;
+ case DBMFIRST:
+ /*
+ * No arg for this. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ key = firstkey();
+ else if (flag == DBTCL_NDBM)
+ key = dbm_firstkey(dbm);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (key.dptr == NULL ||
+ (ret = __os_malloc(NULL, key.dsize + 1, NULL, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, key.dptr, key.dsize);
+ t[key.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(t, key.dsize + 1);
+ }
+ break;
+ case DBMNEXT:
+ /*
+ * 0 or 1 arg for this. Error if different.
+ */
+ _debug_check();
+ if (flag == DBTCL_DBM) {
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ key.dptr = (char *)
+ Tcl_GetByteArrayFromObj(objv[2], &key.dsize);
+ data = nextkey(key);
+ } else if (flag == DBTCL_NDBM) {
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ data = dbm_nextkey(dbm);
+ } else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (data.dptr == NULL ||
+ (ret = __os_malloc(NULL, data.dsize + 1, NULL, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, data.dptr, data.dsize);
+ t[data.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(t, data.dsize + 1);
+ }
+ break;
+ }
+ return (result);
+}
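+
+/*
+ * A minimal sketch of the two interface flavors the "flag" argument
+ * selects between: the historic dbm interface operates on a single,
+ * implicit database, while ndbm takes an explicit DBM handle and a
+ * store disposition.  The calls mirror those used in bdb_DbmCommand;
+ * the helper name and the key/data values are only illustrative and
+ * the block is not compiled.
+ */
+#if 0
+static void
+dbm_flavor_sketch(dbm)
+	DBM *dbm;
+{
+	datum key, data;
+
+	key.dptr = "fruit";
+	key.dsize = sizeof("fruit") - 1;
+	data.dptr = "apple";
+	data.dsize = sizeof("apple") - 1;
+
+	/* dbm flavor (DBTCL_DBM): implicit database opened by dbminit. */
+	(void)dbminit("testdbm");
+	(void)store(key, data);
+	data = fetch(key);
+	(void)dbmclose();
+
+	/* ndbm flavor (DBTCL_NDBM): explicit handle, insert or replace. */
+	(void)dbm_store(dbm, key, data, DBM_REPLACE);
+	data = dbm_fetch(dbm, key);
+	(void)dbm_delete(dbm, key);
+}
+#endif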
+
+/*
+ * ndbm_Cmd --
+ * Implements the "ndbm" widget.
+ *
+ * PUBLIC: int ndbm_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+ndbm_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* DB handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *ndbcmds[] = {
+ "clearerr",
+ "close",
+ "delete",
+ "dirfno",
+ "error",
+ "fetch",
+ "firstkey",
+ "nextkey",
+ "pagfno",
+ "rdonly",
+ "store",
+ NULL
+ };
+ enum ndbcmds {
+ NDBCLRERR,
+ NDBCLOSE,
+ NDBDELETE,
+ NDBDIRFNO,
+ NDBERR,
+ NDBFETCH,
+ NDBFIRST,
+ NDBNEXT,
+ NDBPAGFNO,
+ NDBRDONLY,
+ NDBSTORE
+ };
+ DBM *dbp;
+ DBTCL_INFO *dbip;
+ Tcl_Obj *res;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ dbp = (DBM *)clientData;
+ dbip = _PtrToInfo((void *)dbp);
+ result = TCL_OK;
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbp == NULL) {
+ Tcl_SetResult(interp, "NULL db pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "NULL db info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the command name index from the object based on the ndbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], ndbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum ndbcmds)cmdindex) {
+ case NDBCLOSE:
+ _debug_check();
+ dbm_close(dbp);
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+ res = Tcl_NewIntObj(0);
+ break;
+ case NDBDELETE:
+ case NDBFETCH:
+ case NDBFIRST:
+ case NDBNEXT:
+ case NDBSTORE:
+ result = bdb_DbmCommand(interp, objc, objv, DBTCL_NDBM, dbp);
+ break;
+ case NDBCLRERR:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_clearerr(dbp);
+ if (ret)
+ _ReturnSetup(interp, ret, "clearerr");
+ else
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBDIRFNO:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_dirfno(dbp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBPAGFNO:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_pagfno(dbp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBERR:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_error(dbp);
+ Tcl_SetErrno(ret);
+ Tcl_SetResult(interp, Tcl_PosixError(interp), TCL_STATIC);
+ break;
+ case NDBRDONLY:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_rdonly(dbp);
+ if (ret)
+ _ReturnSetup(interp, ret, "rdonly");
+ else
+ res = Tcl_NewIntObj(ret);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * bdb_RandCommand --
+ * Implements rand* functions.
+ *
+ * PUBLIC: int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+bdb_RandCommand(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *rcmds[] = {
+ "rand", "random_int", "srand",
+ NULL
+ };
+ enum rcmds {
+ RRAND, RRAND_INT, RSRAND
+ };
+ long t;
+ int cmdindex, hi, lo, result, ret;
+ Tcl_Obj *res;
+ char msg[MSG_SIZE];
+
+ result = TCL_OK;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], rcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum rcmds)cmdindex) {
+ case RRAND:
+ /*
+ * Must be 0 args. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ ret = rand();
+ res = Tcl_NewIntObj(ret);
+ break;
+ case RRAND_INT:
+ /*
+		 * Must be 2 args (lo hi).  Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lo hi");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &lo);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, objv[3], &hi);
+ if (result == TCL_OK) {
+#ifndef RAND_MAX
+#define RAND_MAX 0x7fffffff
+#endif
+ t = rand();
+ if (t > RAND_MAX) {
+ snprintf(msg, MSG_SIZE,
+ "Max random is higher than %ld\n",
+ (long)RAND_MAX);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ break;
+ }
+ _debug_check();
+ ret = (int)(((double)t / ((double)(RAND_MAX) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ res = Tcl_NewIntObj(ret);
+ }
+ break;
+ case RSRAND:
+ /*
+ * Must be 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "seed");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &lo);
+ if (result == TCL_OK) {
+ srand((u_int)lo);
+ res = Tcl_NewIntObj(0);
+ }
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
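+
+/*
+ * A minimal sketch of the "random_int" arithmetic above: rand()'s range
+ * [0, RAND_MAX] is scaled into [lo, hi].  Dividing by RAND_MAX + 1 keeps
+ * the scaled value strictly below 1.0, so after multiplying by the
+ * interval width and adding lo the result never exceeds hi; with lo = 1,
+ * hi = 6 and t = RAND_MAX the expression yields 6, and with t = 0 it
+ * yields 1.  The helper is only illustrative and the block is not
+ * compiled.
+ */
+#if 0
+static int
+random_int_sketch(lo, hi)
+	int lo, hi;
+{
+	long t;
+
+	t = rand();
+	return ((int)(((double)t / ((double)(RAND_MAX) + 1)) *
+	    (hi - lo + 1)) + lo);
+}
+#endif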
+
+/*
+ *
+ * tcl_Mutex --
+ * Opens an env mutex.
+ *
+ * PUBLIC: int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *,
+ * PUBLIC: DBTCL_INFO *));
+ */
+int
+tcl_Mutex(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ DBTCL_INFO *ip;
+ Tcl_Obj *res;
+ _MUTEX_DATA *md;
+ int i, mode, nitems, result, ret;
+ char newname[MSG_SIZE];
+
+ md = NULL;
+ result = TCL_OK;
+ mode = nitems = ret = 0;
+ memset(newname, 0, MSG_SIZE);
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "mode nitems");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &mode);
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+ result = Tcl_GetIntFromObj(interp, objv[3], &nitems);
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+
+ snprintf(newname, sizeof(newname),
+ "%s.mutex%d", envip->i_name, envip->i_envmutexid);
+ ip = _NewInfo(interp, NULL, newname, I_MUTEX);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ /*
+ * Set up mutex.
+ */
+ /*
+ * Map in the region.
+ *
+ * XXX
+	 * We don't bother doing this "right", i.e., using the shalloc
+	 * functions; we just grab some memory, knowing that it's
+	 * correctly aligned.
+ */
+ _debug_check();
+ if (__os_calloc(NULL, 1, sizeof(_MUTEX_DATA), &md) != 0)
+ goto posixout;
+ md->env = envp;
+ md->n_mutex = nitems;
+ md->size = sizeof(_MUTEX_ENTRY) * nitems;
+
+ md->reginfo.type = REGION_TYPE_MUTEX;
+ md->reginfo.id = INVALID_REGION_TYPE;
+ md->reginfo.mode = mode;
+ md->reginfo.flags = REGION_CREATE_OK | REGION_JOIN_OK;
+ if ((ret = __db_r_attach(envp, &md->reginfo, md->size)) != 0)
+ goto posixout;
+ md->marray = md->reginfo.addr;
+
+ /* Initialize a created region. */
+ if (F_ISSET(&md->reginfo, REGION_CREATE))
+ for (i = 0; i < nitems; i++) {
+ md->marray[i].val = 0;
+ if ((ret =
+ __db_mutex_init(envp, &md->marray[i].m, i, 0)) != 0)
+ goto posixout;
+ }
+ R_UNLOCK(envp, &md->reginfo);
+
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this mutex.
+ */
+ envip->i_envmutexid++;
+ ip->i_parent = envip;
+ _SetInfoData(ip, md);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)mutex_Cmd, (ClientData)md, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+
+ return (TCL_OK);
+
+posixout:
+ if (ret > 0)
+ Tcl_PosixError(interp);
+ result = _ReturnSetup(interp, ret, "mutex");
+ _DeleteInfo(ip);
+
+ if (md != NULL) {
+ if (md->reginfo.addr != NULL)
+ (void)__db_r_detach(md->env,
+ &md->reginfo, F_ISSET(&md->reginfo, REGION_CREATE));
+ __os_free(md, sizeof(*md));
+ }
+ return (result);
+}
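+
+/*
+ * The _MUTEX_DATA and _MUTEX_ENTRY definitions live in tcl_db.h and are
+ * not repeated here; the layout below is only a sketch inferred from the
+ * fields tcl_Mutex and mutex_Cmd use (env, n_mutex, size, reginfo,
+ * marray, and the per-entry m and val members).  The real declarations
+ * may differ, so the block is not compiled.
+ */
+#if 0
+typedef struct {
+	MUTEX		m;	/* Underlying DB mutex (get/release). */
+	u_int32_t	val;	/* User-visible value (getval/setval). */
+} _MUTEX_ENTRY_SKETCH;
+
+typedef struct {
+	DB_ENV		*env;		/* Environment owning the region. */
+	REGINFO		reginfo;	/* Shared region holding the array. */
+	_MUTEX_ENTRY_SKETCH *marray;	/* Entries, mapped at reginfo.addr. */
+	size_t		size;		/* n_mutex * sizeof(entry). */
+	int		n_mutex;	/* Number of entries. */
+} _MUTEX_DATA_SKETCH;
+#endif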
+
+/*
+ * mutex_Cmd --
+ * Implements the "mutex" widget.
+ */
+static int
+mutex_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Mutex handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *mxcmds[] = {
+ "close",
+ "get",
+ "getval",
+ "release",
+ "setval",
+ NULL
+ };
+ enum mxcmds {
+ MXCLOSE,
+ MXGET,
+ MXGETVAL,
+ MXRELE,
+ MXSETVAL
+ };
+ DB_ENV *dbenv;
+ DBTCL_INFO *envip, *mpip;
+ _MUTEX_DATA *mp;
+ Tcl_Obj *res;
+ int cmdindex, id, result, newval;
+
+ Tcl_ResetResult(interp);
+ mp = (_MUTEX_DATA *)clientData;
+ mpip = _PtrToInfo((void *)mp);
+ envip = mpip->i_parent;
+ dbenv = envip->i_envp;
+ result = TCL_OK;
+
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mpip == NULL) {
+ Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the command name index from the object based on the mxcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], mxcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum mxcmds)cmdindex) {
+ case MXCLOSE:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ (void)__db_r_detach(mp->env, &mp->reginfo, 0);
+ res = Tcl_NewIntObj(0);
+ (void)Tcl_DeleteCommand(interp, mpip->i_name);
+ _DeleteInfo(mpip);
+ __os_free(mp, sizeof(*mp));
+ break;
+ case MXRELE:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ MUTEX_UNLOCK(dbenv, &mp->marray[id].m);
+ res = Tcl_NewIntObj(0);
+ break;
+ case MXGET:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ MUTEX_LOCK(dbenv, &mp->marray[id].m, mp->env->lockfhp);
+ res = Tcl_NewIntObj(0);
+ break;
+ case MXGETVAL:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ res = Tcl_NewIntObj(mp->marray[id].val);
+ break;
+ case MXSETVAL:
+ /*
+ * Check for 2 args. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id val");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, objv[3], &newval);
+ if (result != TCL_OK)
+ break;
+ mp->marray[id].val = newval;
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
diff --git a/bdb/tcl/tcl_db.c b/bdb/tcl/tcl_db.c
new file mode 100644
index 00000000000..8e7215a272a
--- /dev/null
+++ b/bdb/tcl/tcl_db.c
@@ -0,0 +1,1771 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_db.c,v 11.55 2000/11/28 20:12:31 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int tcl_DbClose __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBTCL_INFO *));
+static int tcl_DbDelete __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbKeyRange __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbCursor __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBC **));
+static int tcl_DbJoin __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBC **));
+static int tcl_DbGetjoin __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbCount __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+
+/*
+ *
+ * PUBLIC: int db_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ *
+ * db_Cmd --
+ * Implements the "db" widget.
+ */
+int
+db_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* DB handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *dbcmds[] = {
+ "close",
+ "count",
+ "cursor",
+ "del",
+ "get",
+ "get_join",
+ "get_type",
+ "is_byteswapped",
+ "join",
+ "keyrange",
+ "put",
+ "stat",
+ "sync",
+#if CONFIG_TEST
+ "test",
+#endif
+ NULL
+ };
+ enum dbcmds {
+ DBCLOSE,
+ DBCOUNT,
+ DBCURSOR,
+ DBDELETE,
+ DBGET,
+ DBGETJOIN,
+ DBGETTYPE,
+ DBSWAPPED,
+ DBJOIN,
+ DBKEYRANGE,
+ DBPUT,
+ DBSTAT,
+ DBSYNC
+#if CONFIG_TEST
+ , DBTEST
+#endif
+ };
+ DB *dbp;
+ DBC *dbc;
+ DBTCL_INFO *dbip;
+ DBTCL_INFO *ip;
+ Tcl_Obj *res;
+ int cmdindex, result, ret;
+ char newname[MSG_SIZE];
+
+ Tcl_ResetResult(interp);
+ dbp = (DB *)clientData;
+ dbip = _PtrToInfo((void *)dbp);
+ memset(newname, 0, MSG_SIZE);
+ result = TCL_OK;
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbp == NULL) {
+ Tcl_SetResult(interp, "NULL db pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "NULL db info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], dbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum dbcmds)cmdindex) {
+ case DBCLOSE:
+ result = tcl_DbClose(interp, objc, objv, dbp, dbip);
+ break;
+ case DBDELETE:
+ result = tcl_DbDelete(interp, objc, objv, dbp);
+ break;
+ case DBGET:
+ result = tcl_DbGet(interp, objc, objv, dbp);
+ break;
+ case DBKEYRANGE:
+ result = tcl_DbKeyRange(interp, objc, objv, dbp);
+ break;
+ case DBPUT:
+ result = tcl_DbPut(interp, objc, objv, dbp);
+ break;
+ case DBCOUNT:
+ result = tcl_DbCount(interp, objc, objv, dbp);
+ break;
+ case DBSWAPPED:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->get_byteswapped(dbp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case DBGETTYPE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->get_type(dbp);
+ if (ret == DB_BTREE)
+ res = Tcl_NewStringObj("btree", strlen("btree"));
+ else if (ret == DB_HASH)
+ res = Tcl_NewStringObj("hash", strlen("hash"));
+ else if (ret == DB_RECNO)
+ res = Tcl_NewStringObj("recno", strlen("recno"));
+ else if (ret == DB_QUEUE)
+ res = Tcl_NewStringObj("queue", strlen("queue"));
+ else {
+ Tcl_SetResult(interp,
+ "db gettype: Returned unknown type\n", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBSTAT:
+ result = tcl_DbStat(interp, objc, objv, dbp);
+ break;
+ case DBSYNC:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->sync(dbp, 0);
+ res = Tcl_NewIntObj(ret);
+ if (ret != 0) {
+ Tcl_SetObjResult(interp, res);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBCURSOR:
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ ip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (ip != NULL) {
+ result = tcl_DbCursor(interp, objc, objv, dbp, &dbc);
+ if (result == TCL_OK) {
+ dbip->i_dbdbcid++;
+ ip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)dbc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, dbc);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBJOIN:
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ ip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (ip != NULL) {
+ result = tcl_DbJoin(interp, objc, objv, dbp, &dbc);
+ if (result == TCL_OK) {
+ dbip->i_dbdbcid++;
+ ip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)dbc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, dbc);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBGETJOIN:
+ result = tcl_DbGetjoin(interp, objc, objv, dbp);
+ break;
+#if CONFIG_TEST
+ case DBTEST:
+ result = tcl_EnvTest(interp, objc, objv, dbp->dbenv);
+ break;
+#endif
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * tcl_DbStat --
+ */
+static int
+tcl_DbStat(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ DB_BTREE_STAT *bsp;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ void *sp;
+ Tcl_Obj *res;
+ DBTYPE type;
+ u_int32_t flag;
+ int result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ flag = 0;
+
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-recordcount?");
+ return (TCL_ERROR);
+ }
+
+ if (objc == 3) {
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ if (strcmp(arg, "-recordcount") == 0)
+ flag = DB_RECORDCOUNT;
+ else if (strcmp(arg, "-cachedcounts") == 0)
+ flag = DB_CACHED_COUNTS;
+ else {
+ Tcl_SetResult(interp,
+ "db stat: unknown arg", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+
+ _debug_check();
+ ret = dbp->stat(dbp, &sp, NULL, flag);
+ result = _ReturnSetup(interp, ret, "db stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ type = dbp->get_type(dbp);
+ /*
+	 * We have our stats; now construct the name/value
+	 * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ if (type == DB_HASH) {
+ hsp = (DB_HASH_STAT *)sp;
+ MAKE_STAT_LIST("Magic", hsp->hash_magic);
+ MAKE_STAT_LIST("Version", hsp->hash_version);
+ MAKE_STAT_LIST("Page size", hsp->hash_pagesize);
+ MAKE_STAT_LIST("Number of keys", hsp->hash_nkeys);
+ MAKE_STAT_LIST("Number of records", hsp->hash_ndata);
+ MAKE_STAT_LIST("Estim. number of elements", hsp->hash_nelem);
+ MAKE_STAT_LIST("Fill factor", hsp->hash_ffactor);
+ MAKE_STAT_LIST("Buckets", hsp->hash_buckets);
+ MAKE_STAT_LIST("Free pages", hsp->hash_free);
+ MAKE_STAT_LIST("Bytes free", hsp->hash_bfree);
+ MAKE_STAT_LIST("Number of big pages", hsp->hash_bigpages);
+ MAKE_STAT_LIST("Big pages bytes free", hsp->hash_big_bfree);
+ MAKE_STAT_LIST("Overflow pages", hsp->hash_overflows);
+ MAKE_STAT_LIST("Overflow bytes free", hsp->hash_ovfl_free);
+ MAKE_STAT_LIST("Duplicate pages", hsp->hash_dup);
+ MAKE_STAT_LIST("Duplicate pages bytes free",
+ hsp->hash_dup_free);
+ } else if (type == DB_QUEUE) {
+ qsp = (DB_QUEUE_STAT *)sp;
+ MAKE_STAT_LIST("Magic", qsp->qs_magic);
+ MAKE_STAT_LIST("Version", qsp->qs_version);
+ MAKE_STAT_LIST("Page size", qsp->qs_pagesize);
+ MAKE_STAT_LIST("Number of records", qsp->qs_ndata);
+ MAKE_STAT_LIST("Number of pages", qsp->qs_pages);
+ MAKE_STAT_LIST("Bytes free", qsp->qs_pgfree);
+ MAKE_STAT_LIST("Record length", qsp->qs_re_len);
+ MAKE_STAT_LIST("Record pad", qsp->qs_re_pad);
+ MAKE_STAT_LIST("First record number", qsp->qs_first_recno);
+ MAKE_STAT_LIST("Last record number", qsp->qs_cur_recno);
+ } else { /* BTREE and RECNO are same stats */
+ bsp = (DB_BTREE_STAT *)sp;
+ MAKE_STAT_LIST("Number of keys", bsp->bt_nkeys);
+ MAKE_STAT_LIST("Number of records", bsp->bt_ndata);
+ if (flag != DB_RECORDCOUNT) {
+ MAKE_STAT_LIST("Magic", bsp->bt_magic);
+ MAKE_STAT_LIST("Version", bsp->bt_version);
+ MAKE_STAT_LIST("Flags", bsp->bt_metaflags);
+ MAKE_STAT_LIST("Minimum keys per page", bsp->bt_minkey);
+ MAKE_STAT_LIST("Fixed record length", bsp->bt_re_len);
+ MAKE_STAT_LIST("Record pad", bsp->bt_re_pad);
+ MAKE_STAT_LIST("Page size", bsp->bt_pagesize);
+ MAKE_STAT_LIST("Levels", bsp->bt_levels);
+ MAKE_STAT_LIST("Internal pages", bsp->bt_int_pg);
+ MAKE_STAT_LIST("Leaf pages", bsp->bt_leaf_pg);
+ MAKE_STAT_LIST("Duplicate pages", bsp->bt_dup_pg);
+ MAKE_STAT_LIST("Overflow pages", bsp->bt_over_pg);
+ MAKE_STAT_LIST("Pages on freelist", bsp->bt_free);
+ MAKE_STAT_LIST("Internal pages bytes free",
+ bsp->bt_int_pgfree);
+ MAKE_STAT_LIST("Leaf pages bytes free",
+ bsp->bt_leaf_pgfree);
+ MAKE_STAT_LIST("Duplicate pages bytes free",
+ bsp->bt_dup_pgfree);
+ MAKE_STAT_LIST("Bytes free in overflow pages",
+ bsp->bt_over_pgfree);
+ }
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ __os_free(sp, 0);
+ return (result);
+}
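+
+/*
+ * MAKE_STAT_LIST comes from tcl_db.h and is not shown in this file.  Given
+ * the note that it assumes the 'res' list and an 'error' label, it
+ * presumably appends a name/value pair to 'res' and jumps to 'error' on
+ * failure, roughly as sketched below using only core Tcl list calls (the
+ * real macro may use an internal helper instead).  'myobj' stands for a
+ * local Tcl_Obj pointer; the block is only illustrative and not compiled.
+ */
+#if 0
+#define	MAKE_STAT_LIST_SKETCH(s, v) do {				\
+	myobj = Tcl_NewStringObj((s), strlen(s));			\
+	if ((result = Tcl_ListObjAppendElement(interp,			\
+	    res, myobj)) != TCL_OK)					\
+		goto error;						\
+	myobj = Tcl_NewLongObj((long)(v));				\
+	if ((result = Tcl_ListObjAppendElement(interp,			\
+	    res, myobj)) != TCL_OK)					\
+		goto error;						\
+} while (0)
+#endif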
+
+/*
+ * tcl_DbClose --
+ */
+static int
+tcl_DbClose(interp, objc, objv, dbp, dbip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ DBTCL_INFO *dbip; /* Info pointer */
+{
+ DBTCL_INFO *p, *nextp;
+ u_int32_t flag;
+ int result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-nosync?");
+ return (TCL_ERROR);
+ }
+
+ if (objc == 3) {
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ if (strcmp(arg, "-nosync") == 0)
+ flag = DB_NOSYNC;
+ else {
+ Tcl_SetResult(interp,
+ "dbclose: unknown arg", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+
+ /*
+ * First we have to close any open cursors. Then we close
+ * our db.
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ nextp = LIST_NEXT(p, entries);
+ /*
+		 * Check if this is a cursor info structure and, if so,
+		 * whether it belongs to this DB.  If it does, remove
+		 * its command and info structure.
+ */
+ if (p->i_parent == dbip && p->i_type == I_DBC) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+ _debug_check();
+ ret = (dbp)->close(dbp, flag);
+ result = _ReturnSetup(interp, ret, "db close");
+ return (result);
+}
+
+/*
+ * tcl_DbPut --
+ */
+static int
+tcl_DbPut(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbputopts[] = {
+ "-append",
+ "-nodupdata",
+ "-nooverwrite",
+ "-partial",
+ "-txn",
+ NULL
+ };
+ enum dbputopts {
+ DBPUT_APPEND,
+ DBGET_NODUPDATA,
+ DBPUT_NOOVER,
+ DBPUT_PART,
+ DBPUT_TXN
+ };
+ static char *dbputapp[] = {
+ "-append", NULL
+ };
+ enum dbputapp { DBPUT_APPEND0 };
+ DBT key, data;
+ DBTYPE type;
+ DB_TXN *txn;
+ Tcl_Obj **elemv, *res;
+ db_recno_t recno;
+ u_int32_t flag;
+ int elemc, end, i, itmp, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ flag = 0;
+ if (objc <= 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key data");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * If it is a QUEUE or RECNO database, the key is a record number
+	 * and must be set up to contain a db_recno_t.  Otherwise the
+ * key is a "string".
+ */
+ type = dbp->get_type(dbp);
+
+ /*
+	 * We need to determine where the end of the required args is.  If
+	 * we are using a QUEUE/RECNO db with -append, then there is just
+	 * one required arg (data).  Otherwise there are two (key data).
+	 *
+	 * We preparse the list to determine this, since we need to know it
+	 * to properly check the number of args for the other options below.
+ */
+ end = objc - 2;
+ if (type == DB_QUEUE || type == DB_RECNO) {
+ i = 2;
+ while (i < objc - 1) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], dbputapp,
+ "option", TCL_EXACT, &optindex) != TCL_OK)
+ continue;
+ switch ((enum dbputapp)optindex) {
+ case DBPUT_APPEND0:
+ end = objc - 1;
+ break;
+ }
+ }
+ }
+ Tcl_ResetResult(interp);
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < end) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ dbputopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum dbputopts)optindex) {
+ case DBPUT_TXN:
+ if (i > (end - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBPUT_APPEND:
+ FLAG_CHECK(flag);
+ flag = DB_APPEND;
+ break;
+ case DBGET_NODUPDATA:
+ FLAG_CHECK(flag);
+ flag = DB_NODUPDATA;
+ break;
+ case DBPUT_NOOVER:
+ FLAG_CHECK(flag);
+ flag = DB_NOOVERWRITE;
+ break;
+ case DBPUT_PART:
+ if (i > (end - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags = DB_DBT_PARTIAL;
+ result = Tcl_GetIntFromObj(interp, elemv[0], &itmp);
+ data.doff = itmp;
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, elemv[1], &itmp);
+ data.dlen = itmp;
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+			 * for result.  (See the check for data.doff, a few
+			 * lines above, and copy that.)
+ */
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+	 * If we are a recno or queue db and we are NOT using append, then
+	 * the second-to-last arg is the key.
+ */
+ if (type == DB_QUEUE || type == DB_RECNO) {
+ key.data = &recno;
+ key.ulen = key.size = sizeof(db_recno_t);
+ key.flags = DB_DBT_USERMEM;
+ if (flag == DB_APPEND)
+ recno = 0;
+ else {
+ result = Tcl_GetIntFromObj(interp, objv[objc-2], &itmp);
+ recno = itmp;
+ if (result != TCL_OK)
+ return (result);
+ }
+ } else {
+ key.data = Tcl_GetByteArrayFromObj(objv[objc-2], &itmp);
+ key.size = itmp;
+ }
+ /*
+ * XXX
+ * Tcl 8.1 Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj bug.
+ *
+ * This line (and the line for key.data above) were moved from
+ * the beginning of the function to here.
+ *
+	 * There is a bug in Tcl 8.1 involving byte arrays: if an object
+	 * is used both as a byte array and as something else, such as an
+	 * int, and you call Tcl_GetByteArrayFromObj and then call
+	 * Tcl_GetIntFromObj, the byte-array memory is freed out from
+	 * under you.
+	 *
+	 * The workaround is to make sure all Tcl_GetByteArrayFromObj
+	 * calls are done last.
+ */
+ data.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
+ data.size = itmp;
+ _debug_check();
+ ret = dbp->put(dbp, txn, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, "db put");
+ if (ret == 0 &&
+ (type == DB_RECNO || type == DB_QUEUE) && flag == DB_APPEND) {
+ res = Tcl_NewIntObj(recno);
+ Tcl_SetObjResult(interp, res);
+ }
+ return (result);
+}
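+
+/*
+ * A minimal sketch of the ordering rule the Tcl 8.1 note above describes:
+ * an object's byte-array representation can be invalidated by a later
+ * Tcl_GetIntFromObj on a shared object, so integer conversions are done
+ * first and byte arrays are fetched last, immediately before the DB call.
+ * The helper and its arguments are only illustrative; the block is not
+ * compiled.
+ */
+#if 0
+static int
+byte_array_order_sketch(interp, intobj, keyobj)
+	Tcl_Interp *interp;
+	Tcl_Obj *intobj, *keyobj;
+{
+	DBT key;
+	int itmp, val;
+
+	memset(&key, 0, sizeof(key));
+	/* 1. Integer conversions first... */
+	if (Tcl_GetIntFromObj(interp, intobj, &val) != TCL_OK)
+		return (TCL_ERROR);
+	/* 2. ...byte arrays last, so key.data stays valid for the call. */
+	key.data = Tcl_GetByteArrayFromObj(keyobj, &itmp);
+	key.size = itmp;
+	return (TCL_OK);
+}
+#endif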
+
+/*
+ * tcl_DbGet --
+ */
+static int
+tcl_DbGet(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbgetopts[] = {
+ "-consume",
+ "-consume_wait",
+ "-get_both",
+ "-glob",
+ "-partial",
+ "-recno",
+ "-rmw",
+ "-txn",
+ NULL
+ };
+ enum dbgetopts {
+ DBGET_CONSUME,
+ DBGET_CONSUME_WAIT,
+ DBGET_BOTH,
+ DBGET_GLOB,
+ DBGET_PART,
+ DBGET_RECNO,
+ DBGET_RMW,
+ DBGET_TXN
+ };
+ DBC *dbc;
+ DBT key, data, save;
+ DBTYPE type;
+ DB_TXN *txn;
+ Tcl_Obj **elemv, *retlist;
+ db_recno_t recno;
+ u_int32_t flag, cflag, isdup, rmw;
+ int elemc, end, i, itmp, optindex, result, ret, useglob, userecno;
+ char *arg, *pattern, *prefix, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ cflag = flag = rmw = 0;
+ useglob = userecno = 0;
+ txn = NULL;
+ pattern = prefix = NULL;
+
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ memset(&save, 0, sizeof(save));
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ type = dbp->get_type(dbp);
+ end = objc;
+ while (i < end) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbgetopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbgetopts)optindex) {
+ case DBGET_BOTH:
+ /*
+ * Change 'end' and make sure we aren't already past
+ * the new end.
+ */
+ if (i > objc - 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-get_both key data?");
+ result = TCL_ERROR;
+ break;
+ }
+ end = objc - 2;
+ FLAG_CHECK(flag);
+ flag = DB_GET_BOTH;
+ break;
+ case DBGET_TXN:
+ if (i == end - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Get: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBGET_GLOB:
+ useglob = 1;
+ end = objc - 1;
+ break;
+ case DBGET_CONSUME:
+ FLAG_CHECK(flag);
+ flag = DB_CONSUME;
+ break;
+ case DBGET_CONSUME_WAIT:
+ FLAG_CHECK(flag);
+ flag = DB_CONSUME_WAIT;
+ break;
+ case DBGET_RECNO:
+ end = objc - 1;
+ userecno = 1;
+ if (type != DB_RECNO && type != DB_QUEUE) {
+ FLAG_CHECK(flag);
+ flag = DB_SET_RECNO;
+ }
+ break;
+ case DBGET_RMW:
+ rmw = DB_RMW;
+ break;
+ case DBGET_PART:
+ end = objc - 1;
+ if (i == end) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ save.flags = DB_DBT_PARTIAL;
+ result = Tcl_GetIntFromObj(interp, elemv[0], &itmp);
+ save.doff = itmp;
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, elemv[1], &itmp);
+ save.dlen = itmp;
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+ * for result. (See the check for save.doff, a few
+ * lines above and copy that.)
+ */
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ if (type == DB_RECNO || type == DB_QUEUE)
+ userecno = 1;
+ /*
+ * Check for illegal combos of options.
+ */
+ if (useglob && (userecno || flag == DB_SET_RECNO ||
+ type == DB_RECNO || type == DB_QUEUE)) {
+ Tcl_SetResult(interp,
+ "Cannot use -glob and record numbers.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (useglob && flag == DB_GET_BOTH) {
+ Tcl_SetResult(interp,
+ "Only one of -glob or -get_both can be specified.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+
+ if (useglob)
+ pattern = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+
+ /*
+ * This is the list we return
+ */
+ retlist = Tcl_NewListObj(0, NULL);
+ save.flags |= DB_DBT_MALLOC;
+
+ /*
+	 * isdup tells us whether the database supports duplicates.  If it
+	 * doesn't, we can just do a db->get call and avoid using cursors.
+	 * XXX
+	 * When there is a db->get_flags method, it should be used:
+	 * isdup = dbp->get_flags(dbp) & DB_DUP;
+	 * For now we illegally peek at the flags.
+ * XXX
+ */
+ isdup = dbp->flags & DB_AM_DUP;
+
+ /*
+ * If the database doesn't support duplicates or we're performing
+ * ops that don't require returning multiple items, use DB->get
+ * instead of a cursor operation.
+ */
+ if (pattern == NULL && (isdup == 0 ||
+ flag == DB_SET_RECNO || flag == DB_GET_BOTH ||
+ flag == DB_CONSUME || flag == DB_CONSUME_WAIT)) {
+ if (flag == DB_GET_BOTH) {
+ if (userecno) {
+ result = Tcl_GetIntFromObj(interp,
+ objv[(objc - 2)], &itmp);
+ recno = itmp;
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ key.data =
+ Tcl_GetByteArrayFromObj(objv[objc-2],
+ &itmp);
+ key.size = itmp;
+ }
+ /*
+ * Already checked args above. Fill in key and save.
+ * Save is used in the dbp->get call below to fill in
+ * data.
+ */
+ save.data =
+ Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
+ save.size = itmp;
+ } else if (flag != DB_CONSUME && flag != DB_CONSUME_WAIT) {
+ if (userecno) {
+ result = Tcl_GetIntFromObj(
+ interp, objv[(objc - 1)], &itmp);
+ recno = itmp;
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ key.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
+ key.size = itmp;
+ }
+ }
+
+ memset(&data, 0, sizeof(data));
+ data = save;
+
+ _debug_check();
+
+ ret = dbp->get(dbp, txn, &key, &data, flag | rmw);
+ result = _ReturnSetup(interp, ret, "db get");
+ if (ret == 0) {
+ /*
+			 * Success.  Return a list of the form {name value}.
+			 * If key.data holds a recno, we need to convert it
+			 * into a string/object representation of that recno.
+ */
+ if (type == DB_RECNO || type == DB_QUEUE)
+ result = _SetListRecnoElem(interp, retlist,
+ *(db_recno_t *)key.data, data.data,
+ data.size);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size, data.data, data.size);
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ __os_free(data.data, data.size);
+ }
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+ goto out;
+ }
+
+ if (userecno) {
+ result = Tcl_GetIntFromObj(interp, objv[(objc - 1)], &itmp);
+ recno = itmp;
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ key.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
+ key.size = itmp;
+ }
+ ret = dbp->cursor(dbp, txn, &dbc, 0);
+ result = _ReturnSetup(interp, ret, "db cursor");
+ if (result == TCL_ERROR)
+ goto out;
+
+ /*
+	 * At this point we have a cursor.  If we have a pattern, we
+	 * go to the nearest match and step forward until no more
+	 * keys match the pattern prefix.  If we have an exact key,
+	 * we go to that key position and step through all the
+	 * duplicates.  In either case we build up a list of the
+	 * form {{key data} {key data}...} along the way.
+ */
+ memset(&data, 0, sizeof(data));
+ /*
+ * Restore any "partial" info we have saved.
+ */
+ data = save;
+ if (pattern) {
+ /*
+ * Note, prefix is returned in new space. Must free it.
+ */
+ ret = _GetGlobPrefix(pattern, &prefix);
+ if (ret) {
+ result = TCL_ERROR;
+ Tcl_SetResult(interp,
+ "Unable to allocate pattern space", TCL_STATIC);
+ goto out1;
+ }
+ key.data = prefix;
+ key.size = strlen(prefix);
+ /*
+ * If they give us an empty pattern string
+		 * (i.e. -glob *), go through the entire DB.
+ */
+ if (strlen(prefix) == 0)
+ cflag = DB_FIRST;
+ else
+ cflag = DB_SET_RANGE;
+ } else
+ cflag = DB_SET;
+ _debug_check();
+ ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
+ result = _ReturnSetup(interp, ret, "db get (cursor)");
+ if (result == TCL_ERROR)
+ goto out1;
+ if (pattern)
+ cflag = DB_NEXT;
+ else
+ cflag = DB_NEXT_DUP;
+
+ while (ret == 0 && result == TCL_OK) {
+ /*
+ * Build up our {name value} sublist
+ */
+ result = _SetListElem(interp, retlist,
+ key.data, key.size,
+ data.data, data.size);
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ __os_free(data.data, data.size);
+ if (result != TCL_OK)
+ break;
+ /*
+ * Append {name value} to return list
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ /*
+ * Restore any "partial" info we have saved.
+ */
+ data = save;
+ ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
+ if (ret == 0 && pattern &&
+ memcmp(key.data, prefix, strlen(prefix)) != 0) {
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ __os_free(data.data, data.size);
+ break;
+ }
+ }
+ dbc->c_close(dbc);
+out1:
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ /*
+ * _GetGlobPrefix(), the function which allocates prefix, works
+ * by copying and condensing another string. Thus prefix may
+ * have multiple nuls at the end, so we free using __os_free().
+ */
+ if (prefix != NULL)
+ __os_free(prefix,0);
+ return (result);
+}
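+
+/*
+ * A minimal sketch of the -partial {offset length} convention handled
+ * above: DB_DBT_PARTIAL on the data DBT makes DB->get return at most
+ * 'dlen' bytes starting at offset 'doff' of the stored record, and
+ * DB_DBT_MALLOC makes the library allocate the returned buffer.  The
+ * helper and the key value are only illustrative; the block is not
+ * compiled.
+ */
+#if 0
+static int
+partial_get_sketch(dbp, doff, dlen)
+	DB *dbp;
+	u_int32_t doff, dlen;
+{
+	DBT key, data;
+	int ret;
+
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	key.data = "somekey";
+	key.size = sizeof("somekey") - 1;
+
+	data.flags = DB_DBT_PARTIAL | DB_DBT_MALLOC;
+	data.doff = doff;		/* Skip this many bytes... */
+	data.dlen = dlen;		/* ...and return at most this many. */
+
+	if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
+		__os_free(data.data, data.size);
+	return (ret);
+}
+#endif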
+
+/*
+ * tcl_DbDelete --
+ */
+static int
+tcl_DbDelete(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbdelopts[] = {
+ "-glob",
+ "-txn",
+ NULL
+ };
+ enum dbdelopts {
+ DBDEL_GLOB,
+ DBDEL_TXN
+ };
+ DBC *dbc;
+ DBT key, data;
+ DBTYPE type;
+ DB_TXN *txn;
+ db_recno_t recno;
+ int i, itmp, optindex, result, ret;
+ u_int32_t flag;
+ char *arg, *pattern, *prefix, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ pattern = prefix = NULL;
+ txn = NULL;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ /*
+ * The first arg must be -txn, -glob or a list of keys.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbdelopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * If we don't have a -glob or -txn, then the
+ * remaining args must be exact keys.
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbdelopts)optindex) {
+ case DBDEL_TXN:
+ if (i == objc) {
+ /*
+ * Someone could conceivably have a key of
+ * the same name. So just break and use it.
+ */
+ i--;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Delete: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBDEL_GLOB:
+ /*
+ * Get the pattern. Get the prefix and use cursors to
+ * get all the data items.
+ */
+ if (i == objc) {
+ /*
+ * Someone could conceivably have a key of
+ * the same name. So just break and use it.
+ */
+ i--;
+ break;
+ }
+ pattern = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * If we have a pattern AND more keys to process, then there
+ * is an error. Either we have some number of exact keys,
+ * or we have a pattern.
+ */
+ if (pattern != NULL && i != objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? -glob pattern | key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ /*
+ * XXX
+	 * For consistency with get, we have decided, for the moment, to
+	 * allow -glob or one key, not many.  The code was originally
+	 * written to take many keys and we'll leave it that way, because
+	 * tcl_DbGet may one day accept many disjoint keys to get, rather
+	 * than one, and at that time we'd make delete consistent.  In
+	 * any case, the code is already here and there is no need to
+	 * remove it; just check that we only have one arg left.
+ */
+ if (pattern == NULL && i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? -glob pattern | key");
+ result = TCL_ERROR;
+ goto out;
+ }
+
+ /*
+ * If we have remaining args, they are all exact keys. Call
+ * DB->del on each of those keys.
+ *
+	 * If it is a RECNO or QUEUE database, the key is a record number
+	 * and must be set up to contain a db_recno_t.  Otherwise the key
+	 * is a "string".
+ */
+ type = dbp->get_type(dbp);
+ ret = 0;
+ while (i < objc && ret == 0) {
+ memset(&key, 0, sizeof(key));
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = Tcl_GetIntFromObj(interp, objv[i++], &itmp);
+ recno = itmp;
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ key.data = Tcl_GetByteArrayFromObj(objv[i++], &itmp);
+ key.size = itmp;
+ }
+ _debug_check();
+ ret = dbp->del(dbp, txn, &key, 0);
+ /*
+ * If we have any error, set up return result and stop
+ * processing keys.
+ */
+ if (ret != 0)
+ break;
+ }
+ result = _ReturnSetup(interp, ret, "db del");
+
+ /*
+	 * At this point we've either finished, or we have a pattern:
+	 * go to the nearest match and step forward, deleting, until
+	 * no more keys match the pattern prefix.
+ */
+ if (pattern) {
+ ret = dbp->cursor(dbp, txn, &dbc, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, "db cursor");
+ goto out;
+ }
+ /*
+ * Note, prefix is returned in new space. Must free it.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ ret = _GetGlobPrefix(pattern, &prefix);
+ if (ret) {
+ result = TCL_ERROR;
+ Tcl_SetResult(interp,
+ "Unable to allocate pattern space", TCL_STATIC);
+ goto out;
+ }
+ key.data = prefix;
+ key.size = strlen(prefix);
+ if (strlen(prefix) == 0)
+ flag = DB_FIRST;
+ else
+ flag = DB_SET_RANGE;
+ ret = dbc->c_get(dbc, &key, &data, flag);
+ while (ret == 0 &&
+ memcmp(key.data, prefix, strlen(prefix)) == 0) {
+ /*
+ * Each time through here the cursor is pointing
+ * at the current valid item. Delete it and
+ * move ahead.
+ */
+ _debug_check();
+ ret = dbc->c_del(dbc, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, "db c_del");
+ break;
+ }
+ /*
+			 * We deleted the current item; now move to the next
+			 * item in the list and check whether it matches the
+			 * prefix pattern.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ ret = dbc->c_get(dbc, &key, &data, DB_NEXT);
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ /*
+ * _GetGlobPrefix(), the function which allocates prefix, works
+ * by copying and condensing another string. Thus prefix may
+ * have multiple nuls at the end, so we free using __os_free().
+ */
+ __os_free(prefix,0);
+ dbc->c_close(dbc);
+ result = _ReturnSetup(interp, ret, "db del");
+ }
+out:
+ return (result);
+}
+
+/*
+ * tcl_DbCursor --
+ */
+static int
+tcl_DbCursor(interp, objc, objv, dbp, dbcp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ DBC **dbcp; /* Return cursor pointer */
+{
+ static char *dbcuropts[] = {
+ "-txn", "-update",
+ NULL
+ };
+ enum dbcuropts {
+ DBCUR_TXN, DBCUR_UPDATE
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ txn = NULL;
+ /*
+	 * Parse the optional args: -txn must be followed by a
+	 * transaction id, and -update opens the cursor as a
+	 * write cursor (DB_WRITECURSOR).
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto out;
+ }
+ i++;
+ switch ((enum dbcuropts)optindex) {
+ case DBCUR_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Cursor: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBCUR_UPDATE:
+ flag = DB_WRITECURSOR;
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ _debug_check();
+ ret = dbp->cursor(dbp, txn, dbcp, flag);
+ if (ret != 0)
+ result = _ErrorSetup(interp, ret, "db cursor");
+out:
+ return (result);
+}
+
+/*
+ * tcl_DbJoin --
+ */
+static int
+tcl_DbJoin(interp, objc, objv, dbp, dbcp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ DBC **dbcp; /* Cursor pointer */
+{
+ static char *dbjopts[] = {
+ "-nosort",
+ NULL
+ };
+ enum dbjopts {
+ DBJ_NOSORT
+ };
+ DBC **listp;
+ u_int32_t flag;
+ int adj, i, j, optindex, size, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "curs1 curs2 ...");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ adj = i;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbjopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbjopts)optindex) {
+ case DBJ_NOSORT:
+ flag |= DB_JOIN_NOSORT;
+ adj++;
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+ /*
+ * Allocate one more for NULL ptr at end of list.
+ */
+ size = sizeof(DBC *) * ((objc - adj) + 1);
+ ret = __os_malloc(dbp->dbenv, size, NULL, &listp);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ memset(listp, 0, size);
+ for (j = 0, i = adj; i < objc; i++, j++) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ listp[j] = NAME_TO_DBC(arg);
+ if (listp[j] == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Join: Invalid cursor: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ goto out;
+ }
+ }
+ listp[j] = NULL;
+ _debug_check();
+ ret = dbp->join(dbp, listp, dbcp, flag);
+ result = _ReturnSetup(interp, ret, "db join");
+
+out:
+ __os_free(listp, size);
+ return (result);
+}
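+
+/*
+ * A minimal sketch of the list convention DB->join expects, which is why
+ * the code above allocates objc - adj + 1 slots: a NULL-terminated array
+ * of cursors, each already positioned (typically with DB_SET) on the
+ * secondary key of interest.  The helper is only illustrative; the block
+ * is not compiled.
+ */
+#if 0
+static int
+join_list_sketch(primary, curs1, curs2, join_cursp)
+	DB *primary;
+	DBC *curs1, *curs2, **join_cursp;
+{
+	DBC *list[3];
+
+	list[0] = curs1;	/* Positioned on secondary key 1. */
+	list[1] = curs2;	/* Positioned on secondary key 2. */
+	list[2] = NULL;		/* Terminator DB->join looks for. */
+	return (primary->join(primary, list, join_cursp, 0));
+}
+#endif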
+
+/*
+ * tcl_DbGetjoin --
+ */
+static int
+tcl_DbGetjoin(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbgetjopts[] = {
+ "-nosort",
+ "-txn",
+ NULL
+ };
+ enum dbgetjopts {
+ DBGETJ_NOSORT,
+ DBGETJ_TXN
+ };
+ DB_TXN *txn;
+ DB *elemdbp;
+ DBC **listp;
+ DBC *dbc;
+ DBT key, data;
+ Tcl_Obj **elemv, *retlist;
+ u_int32_t flag;
+ int adj, elemc, i, itmp, j, optindex, result, ret, size;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "{db1 key1} {db2 key2} ...");
+ return (TCL_ERROR);
+ }
+
+ txn = NULL;
+ i = 2;
+ adj = i;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbgetjopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbgetjopts)optindex) {
+ case DBGETJ_NOSORT:
+ flag |= DB_JOIN_NOSORT;
+ adj++;
+ break;
+ case DBGETJ_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ adj += 2;
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "GetJoin: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+ size = sizeof(DBC *) * ((objc - adj) + 1);
+ ret = __os_malloc(NULL, size, NULL, &listp);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ memset(listp, 0, size);
+ for (j = 0, i = adj; i < objc; i++, j++) {
+ /*
+ * Get each sublist as {db key}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp, "Lists must be {db key}",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ /*
+ * Get a pointer to that open db. Then, open a cursor in
+ * that db, and go to the "key" place.
+ */
+ elemdbp = NAME_TO_DB(Tcl_GetStringFromObj(elemv[0], NULL));
+ if (elemdbp == NULL) {
+ snprintf(msg, MSG_SIZE, "Get_join: Invalid db: %s\n",
+ Tcl_GetStringFromObj(elemv[0], NULL));
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ goto out;
+ }
+ ret = elemdbp->cursor(elemdbp, txn, &listp[j], 0);
+ if ((result = _ReturnSetup(interp, ret, "db cursor")) ==
+ TCL_ERROR)
+ goto out;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = Tcl_GetByteArrayFromObj(elemv[elemc-1], &itmp);
+ key.size = itmp;
+ ret = (listp[j])->c_get(listp[j], &key, &data, DB_SET);
+ if ((result = _ReturnSetup(interp, ret, "db cget")) ==
+ TCL_ERROR)
+ goto out;
+ }
+ listp[j] = NULL;
+ _debug_check();
+ ret = dbp->join(dbp, listp, &dbc, flag);
+ result = _ReturnSetup(interp, ret, "db join");
+ if (result == TCL_ERROR)
+ goto out;
+
+ retlist = Tcl_NewListObj(0, NULL);
+ while (ret == 0 && result == TCL_OK) {
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.flags |= DB_DBT_MALLOC;
+ data.flags |= DB_DBT_MALLOC;
+ ret = dbc->c_get(dbc, &key, &data, 0);
+ /*
+ * Build up our {name value} sublist
+ */
+ if (ret == 0) {
+ result = _SetListElem(interp, retlist,
+ key.data, key.size,
+ data.data, data.size);
+ __os_free(key.data, key.size);
+ __os_free(data.data, data.size);
+ }
+ }
+ dbc->c_close(dbc);
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ while (j) {
+ if (listp[j])
+ (listp[j])->c_close(listp[j]);
+ j--;
+ }
+ __os_free(listp, size);
+ return (result);
+}
+
+/*
+ * tcl_DbCount --
+ */
+static int
+tcl_DbCount(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ Tcl_Obj *res;
+ DBC *dbc;
+ DBT key, data;
+ db_recno_t count, recno;
+ int itmp, len, result, ret;
+
+ result = TCL_OK;
+ count = 0;
+ res = NULL;
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+
+ /*
+ * Get the count for our key.
+	 * We do this by getting a cursor for this DB, moving the cursor
+	 * to the key's location, and getting a count on that cursor.
+ */
+ ret = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ /*
+ * If it's a queue or recno database, we must make sure to
+ * treat the key as a recno rather than as a byte string.
+ */
+ if (dbp->type == DB_RECNO || dbp->type == DB_QUEUE) {
+ result = Tcl_GetIntFromObj(interp, objv[2], &itmp);
+ recno = itmp;
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ key.data = Tcl_GetByteArrayFromObj(objv[2], &len);
+ key.size = len;
+ }
+ _debug_check();
+ ret = dbp->cursor(dbp, NULL, &dbc, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, "db cursor");
+ goto out;
+ }
+ /*
+ * Move our cursor to the key.
+ */
+ ret = dbc->c_get(dbc, &key, &data, DB_SET);
+ if (ret == DB_NOTFOUND)
+ count = 0;
+ else {
+ ret = dbc->c_count(dbc, &count, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, "db cursor");
+ goto out;
+ }
+ }
+ res = Tcl_NewIntObj(count);
+ Tcl_SetObjResult(interp, res);
+out:
+ return (result);
+}
+
+/*
+ * tcl_DbKeyRange --
+ */
+static int
+tcl_DbKeyRange(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbkeyropts[] = {
+ "-txn",
+ NULL
+ };
+ enum dbkeyropts {
+ DBKEYR_TXN
+ };
+ DB_TXN *txn;
+ DB_KEY_RANGE range;
+ DBT key;
+ DBTYPE type;
+ Tcl_Obj *myobjv[3], *retlist;
+ db_recno_t recno;
+ u_int32_t flag;
+ int i, itmp, myobjc, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id? key");
+ return (TCL_ERROR);
+ }
+
+ txn = NULL;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbkeyropts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbkeyropts)optindex) {
+ case DBKEYR_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "KeyRange: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+ type = dbp->get_type(dbp);
+ ret = 0;
+ /*
+ * Make sure we have a key.
+ */
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ memset(&key, 0, sizeof(key));
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = Tcl_GetIntFromObj(interp, objv[i], &itmp);
+ recno = itmp;
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ key.data = Tcl_GetByteArrayFromObj(objv[i++], &itmp);
+ key.size = itmp;
+ }
+ _debug_check();
+ ret = dbp->key_range(dbp, txn, &key, &range, flag);
+ result = _ReturnSetup(interp, ret, "db join");
+ if (result == TCL_ERROR)
+ goto out;
+
+ /*
+ * If we succeeded, set up return list.
+ */
+ myobjc = 3;
+ myobjv[0] = Tcl_NewDoubleObj(range.less);
+ myobjv[1] = Tcl_NewDoubleObj(range.equal);
+ myobjv[2] = Tcl_NewDoubleObj(range.greater);
+ retlist = Tcl_NewListObj(myobjc, myobjv);
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ return (result);
+}
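The DB_KEY_RANGE structure filled in above carries three cumulative
proportions.  An editorial sketch of the direct C call (not part of this
diff; assumes an open btree DB handle, a populated key DBT and <stdio.h>):

	static void
	key_range_example(DB *dbp, DBT *key)
	{
		DB_KEY_RANGE range;

		/* Only meaningful for btree databases. */
		if (dbp->key_range(dbp, NULL, key, &range, 0) == 0)
			/* less + equal + greater sums to 1.0. */
			printf("less %f equal %f greater %f\n",
			    range.less, range.equal, range.greater);
	}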
diff --git a/bdb/tcl/tcl_db_pkg.c b/bdb/tcl/tcl_db_pkg.c
new file mode 100644
index 00000000000..f83b5a7d2a9
--- /dev/null
+++ b/bdb/tcl/tcl_db_pkg.c
@@ -0,0 +1,2246 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_db_pkg.c,v 11.76 2001/01/19 18:02:36 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int berkdb_Cmd __P((ClientData, Tcl_Interp *, int,
+ Tcl_Obj * CONST*));
+static int bdb_EnvOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DBTCL_INFO *, DB_ENV **));
+static int bdb_DbOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DBTCL_INFO *, DB **));
+static int bdb_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbUpgrade __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbVerify __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_Version __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_Handles __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+
+/*
+ * Db_tcl_Init --
+ *
+ * This is a package initialization procedure, which is called by Tcl when
+ * this package is to be added to an interpreter. The name is based on the
+ * name of the shared library, currently libdb_tcl-X.Y.so, which Tcl uses
+ * to determine the name of this function.
+ */
+int
+Db_tcl_Init(interp)
+ Tcl_Interp *interp; /* Interpreter in which the package is
+ * to be made available. */
+{
+ int code;
+
+ code = Tcl_PkgProvide(interp, "Db_tcl", "1.0");
+ if (code != TCL_OK)
+ return (code);
+
+ Tcl_CreateObjCommand(interp, "berkdb", (Tcl_ObjCmdProc *)berkdb_Cmd,
+ (ClientData)0, NULL);
+ /*
+ * Create shared global debugging variables
+ */
+ Tcl_LinkVar(interp, "__debug_on", (char *)&__debug_on, TCL_LINK_INT);
+ Tcl_LinkVar(interp, "__debug_print", (char *)&__debug_print,
+ TCL_LINK_INT);
+ Tcl_LinkVar(interp, "__debug_stop", (char *)&__debug_stop,
+ TCL_LINK_INT);
+ Tcl_LinkVar(interp, "__debug_test", (char *)&__debug_test,
+ TCL_LINK_INT);
+ LIST_INIT(&__db_infohead);
+ return (TCL_OK);
+}
+
+/*
+ * berkdb_cmd --
+ * Implements the "berkdb" command.
+ * The primary sub commands are:
+ * berkdb version - Returns a list {major minor patch}
+ * berkdb env - Creates a new DB_ENV and returns a binding
+ * to a new command of the form dbenvX, where X is an
+ * integer starting at 0 (dbenv0, dbenv1, ...)
+ * berkdb open - Creates a new DB (optionally within
+ * the given environment). Returns a binding to a new
+ * command of the form dbX, where X is an integer
+ * starting at 0 (db0, db1, ...)
+ */
+static int
+berkdb_Cmd(notused, interp, objc, objv)
+ ClientData notused; /* Not used. */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *berkdbcmds[] = {
+ "dbremove",
+ "dbrename",
+ "dbverify",
+ "env",
+ "envremove",
+ "handles",
+ "open",
+ "upgrade",
+ "version",
+ /* All below are compatibility functions */
+ "hcreate", "hsearch", "hdestroy",
+ "dbminit", "fetch", "store",
+ "delete", "firstkey", "nextkey",
+ "ndbm_open", "dbmclose",
+ /* All below are convenience functions */
+ "rand", "random_int", "srand",
+ "debug_check",
+ NULL
+ };
+ /*
+ * All command enums below ending in X are compatibility interfaces.
+ */
+ enum berkdbcmds {
+ BDB_DBREMOVE,
+ BDB_DBRENAME,
+ BDB_DBVERIFY,
+ BDB_ENV,
+ BDB_ENVREMOVE,
+ BDB_HANDLES,
+ BDB_OPEN,
+ BDB_UPGRADE,
+ BDB_VERSION,
+ BDB_HCREATEX, BDB_HSEARCHX, BDB_HDESTROYX,
+ BDB_DBMINITX, BDB_FETCHX, BDB_STOREX,
+ BDB_DELETEX, BDB_FIRSTKEYX, BDB_NEXTKEYX,
+ BDB_NDBMOPENX, BDB_DBMCLOSEX,
+ BDB_RANDX, BDB_RAND_INTX, BDB_SRANDX,
+ BDB_DBGCKX
+ };
+ static int env_id = 0;
+ static int db_id = 0;
+ static int ndbm_id = 0;
+
+ DB *dbp;
+ DBM *ndbmp;
+ DBTCL_INFO *ip;
+ DB_ENV *envp;
+ Tcl_Obj *res;
+ int cmdindex, result;
+ char newname[MSG_SIZE];
+
+ COMPQUIET(notused, NULL);
+
+ Tcl_ResetResult(interp);
+ memset(newname, 0, MSG_SIZE);
+ result = TCL_OK;
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the berkdbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], berkdbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ res = NULL;
+ switch ((enum berkdbcmds)cmdindex) {
+ case BDB_VERSION:
+ _debug_check();
+ result = bdb_Version(interp, objc, objv);
+ break;
+ case BDB_HANDLES:
+ result = bdb_Handles(interp, objc, objv);
+ break;
+ case BDB_ENV:
+ snprintf(newname, sizeof(newname), "env%d", env_id);
+ ip = _NewInfo(interp, NULL, newname, I_ENV);
+ if (ip != NULL) {
+ result = bdb_EnvOpen(interp, objc, objv, ip, &envp);
+ if (result == TCL_OK && envp != NULL) {
+ env_id++;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)env_Cmd,
+ (ClientData)envp, NULL);
+ /* Use ip->i_name - newname is overwritten */
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, envp);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case BDB_DBREMOVE:
+ result = bdb_DbRemove(interp, objc, objv);
+ break;
+ case BDB_DBRENAME:
+ result = bdb_DbRename(interp, objc, objv);
+ break;
+ case BDB_UPGRADE:
+ result = bdb_DbUpgrade(interp, objc, objv);
+ break;
+ case BDB_DBVERIFY:
+ result = bdb_DbVerify(interp, objc, objv);
+ break;
+ case BDB_ENVREMOVE:
+ result = tcl_EnvRemove(interp, objc, objv, NULL, NULL);
+ break;
+ case BDB_OPEN:
+ snprintf(newname, sizeof(newname), "db%d", db_id);
+ ip = _NewInfo(interp, NULL, newname, I_DB);
+ if (ip != NULL) {
+ result = bdb_DbOpen(interp, objc, objv, ip, &dbp);
+ if (result == TCL_OK && dbp != NULL) {
+ db_id++;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)db_Cmd,
+ (ClientData)dbp, NULL);
+ /* Use ip->i_name - newname is overwritten */
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, dbp);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case BDB_HCREATEX:
+ case BDB_HSEARCHX:
+ case BDB_HDESTROYX:
+ result = bdb_HCommand(interp, objc, objv);
+ break;
+ case BDB_DBMINITX:
+ case BDB_DBMCLOSEX:
+ case BDB_FETCHX:
+ case BDB_STOREX:
+ case BDB_DELETEX:
+ case BDB_FIRSTKEYX:
+ case BDB_NEXTKEYX:
+ result = bdb_DbmCommand(interp, objc, objv, DBTCL_DBM, NULL);
+ break;
+ case BDB_NDBMOPENX:
+ snprintf(newname, sizeof(newname), "ndbm%d", ndbm_id);
+ ip = _NewInfo(interp, NULL, newname, I_NDBM);
+ if (ip != NULL) {
+ result = bdb_NdbmOpen(interp, objc, objv, &ndbmp);
+ if (result == TCL_OK) {
+ ndbm_id++;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)ndbm_Cmd,
+ (ClientData)ndbmp, NULL);
+ /* Use ip->i_name - newname is overwritten */
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, ndbmp);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case BDB_RANDX:
+ case BDB_RAND_INTX:
+ case BDB_SRANDX:
+ result = bdb_RandCommand(interp, objc, objv);
+ break;
+ case BDB_DBGCKX:
+ _debug_check();
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+ * For each different arg call different function to create
+ * new commands (or if version, get/return it).
+ */
+ if (result == TCL_OK && res != NULL)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * bdb_EnvOpen -
+ * Implements the environment open command.
+ * There are many, many options to the open command.
+ * Here is the general flow:
+ *
+ * 1. Call db_env_create to create the env handle.
+ * 2. Parse args tracking options.
+ * 3. Make any pre-open setup calls necessary.
+ * 4. Call DBENV->open to open the env.
+ * 5. Return env widget handle to user.
+ */
+static int
+bdb_EnvOpen(interp, objc, objv, ip, env)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBTCL_INFO *ip; /* Our internal info */
+ DB_ENV **env; /* Environment pointer */
+{
+ static char *envopen[] = {
+ "-cachesize",
+ "-cdb",
+ "-cdb_alldb",
+ "-client_timeout",
+ "-create",
+ "-data_dir",
+ "-errfile",
+ "-errpfx",
+ "-home",
+ "-lock",
+ "-lock_conflict",
+ "-lock_detect",
+ "-lock_max",
+ "-lock_max_locks",
+ "-lock_max_lockers",
+ "-lock_max_objects",
+ "-log",
+ "-log_buffer",
+ "-log_dir",
+ "-log_max",
+ "-mmapsize",
+ "-mode",
+ "-nommap",
+ "-private",
+ "-recover",
+ "-recover_fatal",
+ "-region_init",
+ "-server",
+ "-server_timeout",
+ "-shm_key",
+ "-system_mem",
+ "-tmp_dir",
+ "-txn",
+ "-txn_max",
+ "-txn_timestamp",
+ "-use_environ",
+ "-use_environ_root",
+ "-verbose",
+ NULL
+ };
+ /*
+ * !!!
+ * These have to be in the same order as the above,
+ * which is close to but not quite alphabetical.
+ */
+ enum envopen {
+ ENV_CACHESIZE,
+ ENV_CDB,
+ ENV_CDB_ALLDB,
+ ENV_CLIENT_TO,
+ ENV_CREATE,
+ ENV_DATA_DIR,
+ ENV_ERRFILE,
+ ENV_ERRPFX,
+ ENV_HOME,
+ ENV_LOCK,
+ ENV_CONFLICT,
+ ENV_DETECT,
+ ENV_LOCK_MAX,
+ ENV_LOCK_MAX_LOCKS,
+ ENV_LOCK_MAX_LOCKERS,
+ ENV_LOCK_MAX_OBJECTS,
+ ENV_LOG,
+ ENV_LOG_BUFFER,
+ ENV_LOG_DIR,
+ ENV_LOG_MAX,
+ ENV_MMAPSIZE,
+ ENV_MODE,
+ ENV_NOMMAP,
+ ENV_PRIVATE,
+ ENV_RECOVER,
+ ENV_RECOVER_FATAL,
+ ENV_REGION_INIT,
+ ENV_SERVER,
+ ENV_SERVER_TO,
+ ENV_SHM_KEY,
+ ENV_SYSTEM_MEM,
+ ENV_TMP_DIR,
+ ENV_TXN,
+ ENV_TXN_MAX,
+ ENV_TXN_TIME,
+ ENV_USE_ENVIRON,
+ ENV_USE_ENVIRON_ROOT,
+ ENV_VERBOSE
+ };
+ Tcl_Obj **myobjv, **myobjv1;
+ time_t time;
+ u_int32_t detect, gbytes, bytes, ncaches, open_flags, set_flag, size;
+ u_int8_t *conflicts;
+ int i, intarg, itmp, j, logbufset, logmaxset;
+ int mode, myobjc, nmodes, optindex, result, ret, temp;
+ long client_to, server_to, shm;
+ char *arg, *home, *server;
+
+ result = TCL_OK;
+ mode = 0;
+ set_flag = 0;
+ home = NULL;
+ /*
+ * XXX
+ * If/when our Tcl interface becomes thread-safe, we should enable
+ * DB_THREAD here. Note that DB_THREAD currently does not work
+ * with log_get -next, -prev; if we wish to enable DB_THREAD,
+ * those must either be made thread-safe first or we must come up with
+ * a workaround. (We used to specify DB_THREAD if and only if
+ * logging was not configured.)
+ */
+ open_flags = DB_JOINENV;
+ logmaxset = logbufset = 0;
+
+ if (objc <= 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Server code must go before the call to db_env_create.
+ */
+ server = NULL;
+ server_to = client_to = 0;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], envopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ Tcl_ResetResult(interp);
+ continue;
+ }
+ switch ((enum envopen)optindex) {
+ case ENV_SERVER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server hostname");
+ result = TCL_ERROR;
+ break;
+ }
+ server = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENV_SERVER_TO:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server_to secs");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ &server_to);
+ break;
+ case ENV_CLIENT_TO:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-client_to secs");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ &client_to);
+ break;
+ default:
+ break;
+ }
+ }
+ if (server != NULL) {
+ ret = db_env_create(env, DB_CLIENT);
+ if (ret)
+ return (_ReturnSetup(interp, ret, "db_env_create"));
+ (*env)->set_errpfx((*env), ip->i_name);
+ (*env)->set_errcall((*env), _ErrorFunc);
+ if ((ret = (*env)->set_server((*env), server,
+ client_to, server_to, 0)) != 0) {
+ result = TCL_ERROR;
+ goto error;
+ }
+ } else {
+ /*
+ * Create the environment handle before parsing the args
+ * since we'll be modifying the environment as we parse.
+ */
+ ret = db_env_create(env, 0);
+ if (ret)
+ return (_ReturnSetup(interp, ret, "db_env_create"));
+ (*env)->set_errpfx((*env), ip->i_name);
+ (*env)->set_errcall((*env), _ErrorFunc);
+ }
+
+ /*
+ * Get the option name index from the object based on the
+ * envopen options defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto error;
+ }
+ i++;
+ switch ((enum envopen)optindex) {
+ case ENV_SERVER:
+ case ENV_SERVER_TO:
+ case ENV_CLIENT_TO:
+ /*
+ * Already handled these, skip them and their arg.
+ */
+ i++;
+ break;
+ case ENV_CDB:
+ FLD_SET(open_flags, DB_INIT_CDB | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_CDB_ALLDB:
+ FLD_SET(set_flag, DB_CDB_ALLDB);
+ break;
+ case ENV_LOCK:
+ FLD_SET(open_flags, DB_INIT_LOCK | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_LOG:
+ FLD_SET(open_flags, DB_INIT_LOG | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_TXN:
+ FLD_SET(open_flags, DB_INIT_LOCK |
+ DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN);
+ FLD_CLR(open_flags, DB_JOINENV);
+ /* Make sure we have an arg to check against! */
+ if (i < objc) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (strcmp(arg, "nosync") == 0) {
+ FLD_SET(set_flag, DB_TXN_NOSYNC);
+ i++;
+ }
+ }
+ break;
+ case ENV_CREATE:
+ FLD_SET(open_flags, DB_CREATE | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_HOME:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-home dir?");
+ result = TCL_ERROR;
+ break;
+ }
+ home = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENV_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case ENV_NOMMAP:
+ FLD_SET(set_flag, DB_NOMMAP);
+ break;
+ case ENV_PRIVATE:
+ FLD_SET(open_flags, DB_PRIVATE | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_RECOVER:
+ FLD_SET(open_flags, DB_RECOVER);
+ break;
+ case ENV_RECOVER_FATAL:
+ FLD_SET(open_flags, DB_RECOVER_FATAL);
+ break;
+ case ENV_SYSTEM_MEM:
+ FLD_SET(open_flags, DB_SYSTEM_MEM);
+ break;
+ case ENV_USE_ENVIRON_ROOT:
+ FLD_SET(open_flags, DB_USE_ENVIRON_ROOT);
+ break;
+ case ENV_USE_ENVIRON:
+ FLD_SET(open_flags, DB_USE_ENVIRON);
+ break;
+ case ENV_VERBOSE:
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-verbose {which on|off}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = tcl_EnvVerbose(interp, *env,
+ myobjv[0], myobjv[1]);
+ break;
+ case ENV_REGION_INIT:
+ _debug_check();
+ ret = db_env_set_region_init(1);
+ result = _ReturnSetup(interp, ret, "region_init");
+ break;
+ case ENV_CACHESIZE:
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ j = 0;
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-cachesize {gbytes bytes ncaches}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, myobjv[0], &itmp);
+ gbytes = itmp;
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, myobjv[1], &itmp);
+ bytes = itmp;
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, myobjv[2], &itmp);
+ ncaches = itmp;
+ if (result != TCL_OK)
+ break;
+ _debug_check();
+ ret = (*env)->set_cachesize(*env, gbytes, bytes,
+ ncaches);
+ result = _ReturnSetup(interp, ret, "set_cachesize");
+ break;
+ case ENV_MMAPSIZE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mmapsize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_mp_mmapsize(*env,
+ (size_t)intarg);
+ result = _ReturnSetup(interp, ret, "mmapsize");
+ }
+ break;
+ case ENV_SHM_KEY:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-shm_key key?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++], &shm);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_shm_key(*env, shm);
+ result = _ReturnSetup(interp, ret, "shm_key");
+ }
+ break;
+ case ENV_LOG_MAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK && logbufset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env,
+ (u_int32_t)intarg);
+ result = _ReturnSetup(interp, ret, "log_max");
+ logbufset = 0;
+ } else
+ logmaxset = intarg;
+ break;
+ case ENV_LOG_BUFFER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_buffer size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_lg_bsize(*env,
+ (u_int32_t)intarg);
+ result = _ReturnSetup(interp, ret, "log_bsize");
+ logbufset = 1;
+ if (logmaxset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env,
+ (u_int32_t)logmaxset);
+ result = _ReturnSetup(interp, ret,
+ "log_max");
+ logmaxset = 0;
+ logbufset = 0;
+ }
+ }
+ break;
+ case ENV_CONFLICT:
+ /*
+ * Get conflict list. List is:
+ * {nmodes {matrix}}
+ *
+ * Where matrix must be nmodes*nmodes big.
+ * Set up conflicts array to pass.
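+ * For example, a hypothetical two-mode (read/write) list would
+ * be {2 {0 1 1 1}}: only read/read is compatible.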
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_conflict {nmodes {matrix}}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, myobjv[0], &nmodes);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_ListObjGetElements(interp, myobjv[1],
+ &myobjc, &myobjv1);
+ if (myobjc != (nmodes * nmodes)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_conflict {nmodes {matrix}}?");
+ result = TCL_ERROR;
+ break;
+ }
+ size = sizeof(u_int8_t) * nmodes*nmodes;
+ ret = __os_malloc(*env, size, NULL, &conflicts);
+ if (ret != 0) {
+ result = TCL_ERROR;
+ break;
+ }
+ for (j = 0; j < myobjc; j++) {
+ result = Tcl_GetIntFromObj(interp, myobjv1[j],
+ &temp);
+ conflicts[j] = temp;
+ if (result != TCL_OK) {
+ __os_free(conflicts, size);
+ break;
+ }
+ }
+ _debug_check();
+ ret = (*env)->set_lk_conflicts(*env,
+ (u_int8_t *)conflicts, nmodes);
+ __os_free(conflicts, size);
+ result = _ReturnSetup(interp, ret, "set_lk_conflicts");
+ break;
+ case ENV_DETECT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_detect policy?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (strcmp(arg, "default") == 0)
+ detect = DB_LOCK_DEFAULT;
+ else if (strcmp(arg, "oldest") == 0)
+ detect = DB_LOCK_OLDEST;
+ else if (strcmp(arg, "youngest") == 0)
+ detect = DB_LOCK_YOUNGEST;
+ else if (strcmp(arg, "random") == 0)
+ detect = DB_LOCK_RANDOM;
+ else {
+ Tcl_AddErrorInfo(interp,
+ "lock_detect: illegal policy");
+ result = TCL_ERROR;
+ break;
+ }
+ _debug_check();
+ ret = (*env)->set_lk_detect(*env, detect);
+ result = _ReturnSetup(interp, ret, "lock_detect");
+ break;
+ case ENV_LOCK_MAX:
+ case ENV_LOCK_MAX_LOCKS:
+ case ENV_LOCK_MAX_LOCKERS:
+ case ENV_LOCK_MAX_OBJECTS:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ switch ((enum envopen)optindex) {
+ case ENV_LOCK_MAX:
+ ret = (*env)->set_lk_max(*env,
+ (u_int32_t)intarg);
+ break;
+ case ENV_LOCK_MAX_LOCKS:
+ ret = (*env)->set_lk_max_locks(*env,
+ (u_int32_t)intarg);
+ break;
+ case ENV_LOCK_MAX_LOCKERS:
+ ret = (*env)->set_lk_max_lockers(*env,
+ (u_int32_t)intarg);
+ break;
+ case ENV_LOCK_MAX_OBJECTS:
+ ret = (*env)->set_lk_max_objects(*env,
+ (u_int32_t)intarg);
+ break;
+ default:
+ break;
+ }
+ result = _ReturnSetup(interp, ret, "lock_max");
+ }
+ break;
+ case ENV_TXN_MAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_tx_max(*env,
+ (u_int32_t)intarg);
+ result = _ReturnSetup(interp, ret, "txn_max");
+ }
+ break;
+ case ENV_TXN_TIME:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_timestamp time?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ (long *)&time);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_tx_timestamp(*env, &time);
+ result = _ReturnSetup(interp, ret,
+ "txn_timestamp");
+ }
+ break;
+ case ENV_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (ip->i_err != NULL)
+ fclose(ip->i_err);
+ ip->i_err = fopen(arg, "a");
+ if (ip->i_err != NULL) {
+ _debug_check();
+ (*env)->set_errfile(*env, ip->i_err);
+ }
+ break;
+ case ENV_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (ip->i_errpfx != NULL)
+ __os_freestr(ip->i_errpfx);
+ if ((ret =
+ __os_strdup(*env, arg, &ip->i_errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ "__os_strdup");
+ break;
+ }
+ if (ip->i_errpfx != NULL) {
+ _debug_check();
+ (*env)->set_errpfx(*env, ip->i_errpfx);
+ }
+ break;
+ case ENV_DATA_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-data_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_data_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, "set_data_dir");
+ break;
+ case ENV_LOG_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-log_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_lg_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, "set_lg_dir");
+ break;
+ case ENV_TMP_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-tmp_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_tmp_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, "set_tmp_dir");
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ /*
+ * We have to check this here. We want the log buffer size to be
+ * set before the log max. If -log_buffer was given, any pending
+ * -log_max was handled above; if logmaxset is still non-zero here,
+ * -log_max was given with no -log_buffer after it, so make the
+ * deferred set_lg_max call now.
+ */
+ if (logmaxset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env, (u_int32_t)logmaxset);
+ result = _ReturnSetup(interp, ret, "log_max");
+ }
+
+ if (result != TCL_OK)
+ goto error;
+
+ if (set_flag) {
+ ret = (*env)->set_flags(*env, set_flag, 1);
+ result = _ReturnSetup(interp, ret, "set_flags");
+ if (result == TCL_ERROR)
+ goto error;
+ /*
+ * If we are successful, clear the result so that the
+ * return from set_flags isn't part of the result.
+ */
+ Tcl_ResetResult(interp);
+ }
+ /*
+ * When we get here, we have already parsed all of our args
+ * and made all our calls to set up the environment. Everything
+ * is okay so far, no errors, if we get here.
+ *
+ * Now open the environment.
+ */
+ _debug_check();
+ ret = (*env)->open(*env, home, open_flags, mode);
+ result = _ReturnSetup(interp, ret, "env open");
+
+error:
+ if (result == TCL_ERROR) {
+ if (ip->i_err) {
+ fclose(ip->i_err);
+ ip->i_err = NULL;
+ }
+ (void)(*env)->close(*env, 0);
+ *env = NULL;
+ }
+ return (result);
+}
+
+/*
+ * bdb_DbOpen --
+ * Implements the "db_create/db_open" command.
+ * There are many, many options to the open command.
+ * Here is the general flow:
+ *
+ * 0. Preparse args to determine if we have -env.
+ * 1. Call db_create to create the db handle.
+ * 2. Parse args tracking options.
+ * 3. Make any pre-open setup calls necessary.
+ * 4. Call DB->open to open the database.
+ * 5. Return db widget handle to user.
+ */
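An editorial sketch of that flow in direct C calls (not part of this diff;
the btree type, flags and mode shown are hypothetical choices):

	static int
	db_open_example(DB_ENV *env, DB **dbpp, const char *file)
	{
		DB *dbp;
		int ret;

		if ((ret = db_create(&dbp, env, 0)) != 0)	/* step 1 */
			return (ret);
		/* Steps 2-3: pre-open configuration from parsed options. */
		(void)dbp->set_flags(dbp, DB_DUP);
		(void)dbp->set_pagesize(dbp, 8 * 1024);
		/* Step 4: open (no subdatabase, create if missing). */
		if ((ret = dbp->open(dbp, file, NULL,
		    DB_BTREE, DB_CREATE, 0644)) != 0) {
			(void)dbp->close(dbp, 0);
			return (ret);
		}
		*dbpp = dbp;					/* step 5 */
		return (0);
	}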
+static int
+bdb_DbOpen(interp, objc, objv, ip, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBTCL_INFO *ip; /* Our internal info */
+ DB **dbp; /* DB handle */
+{
+ static char *bdbenvopen[] = {
+ "-env", NULL
+ };
+ enum bdbenvopen {
+ TCL_DB_ENV0
+ };
+ static char *bdbopen[] = {
+ "-btree",
+ "-cachesize",
+ "-create",
+ "-delim",
+ "-dup",
+ "-dupsort",
+ "-env",
+ "-errfile",
+ "-errpfx",
+ "-excl",
+ "-extent",
+ "-ffactor",
+ "-hash",
+ "-len",
+ "-lorder",
+ "-minkey",
+ "-mode",
+ "-nelem",
+ "-nommap",
+ "-pad",
+ "-pagesize",
+ "-queue",
+ "-rdonly",
+ "-recno",
+ "-recnum",
+ "-renumber",
+ "-revsplitoff",
+ "-snapshot",
+ "-source",
+ "-truncate",
+ "-test",
+ "-unknown",
+ "--",
+ NULL
+ };
+ enum bdbopen {
+ TCL_DB_BTREE,
+ TCL_DB_CACHESIZE,
+ TCL_DB_CREATE,
+ TCL_DB_DELIM,
+ TCL_DB_DUP,
+ TCL_DB_DUPSORT,
+ TCL_DB_ENV,
+ TCL_DB_ERRFILE,
+ TCL_DB_ERRPFX,
+ TCL_DB_EXCL,
+ TCL_DB_EXTENT,
+ TCL_DB_FFACTOR,
+ TCL_DB_HASH,
+ TCL_DB_LEN,
+ TCL_DB_LORDER,
+ TCL_DB_MINKEY,
+ TCL_DB_MODE,
+ TCL_DB_NELEM,
+ TCL_DB_NOMMAP,
+ TCL_DB_PAD,
+ TCL_DB_PAGESIZE,
+ TCL_DB_QUEUE,
+ TCL_DB_RDONLY,
+ TCL_DB_RECNO,
+ TCL_DB_RECNUM,
+ TCL_DB_RENUMBER,
+ TCL_DB_REVSPLIT,
+ TCL_DB_SNAPSHOT,
+ TCL_DB_SOURCE,
+ TCL_DB_TRUNCATE,
+ TCL_DB_TEST,
+ TCL_DB_UNKNOWN,
+ TCL_DB_ENDARG
+ };
+
+ DBTCL_INFO *envip, *errip;
+ DBTYPE type;
+ DB_ENV *envp;
+ Tcl_Obj **myobjv;
+ u_int32_t gbytes, bytes, ncaches, open_flags;
+ int endarg, i, intarg, itmp, j, mode, myobjc;
+ int optindex, result, ret, set_err, set_flag, set_pfx, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *subdb;
+ extern u_int32_t __ham_test __P((DB *, const void *, u_int32_t));
+
+ type = DB_UNKNOWN;
+ endarg = mode = set_err = set_flag = set_pfx = 0;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = subdb = NULL;
+
+ /*
+ * XXX
+ * If/when our Tcl interface becomes thread-safe, we should enable
+ * DB_THREAD here. See comment in bdb_EnvOpen().
+ */
+ open_flags = 0;
+ envp = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], bdbenvopen,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ Tcl_ResetResult(interp);
+ continue;
+ }
+ switch ((enum bdbenvopen)optindex) {
+ case TCL_DB_ENV0:
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db open: illegal environment", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+ break;
+ }
+
+ /*
+ * Create the db handle before parsing the args
+ * since we'll be modifying the database options as we parse.
+ */
+ ret = db_create(dbp, envp, 0);
+ if (ret)
+ return (_ReturnSetup(interp, ret, "db_create"));
+
+ /*
+ * XXX Remove restriction when err stuff is not tied to env.
+ *
+ * The DB->set_err* functions actually overwrite in the
+ * environment. So, if we are explicitly using an env,
+ * don't overwrite what we have already set up. If we are
+ * not using one, then we set up since we get a private
+ * default env.
+ */
+ /* XXX - remove this conditional if/when err is not tied to env */
+ if (envp == NULL) {
+ (*dbp)->set_errpfx((*dbp), ip->i_name);
+ (*dbp)->set_errcall((*dbp), _ErrorFunc);
+ }
+ envip = _PtrToInfo(envp); /* XXX */
+ /*
+ * If we are using an env, we keep track of err info in the env's ip.
+ * Otherwise use the DB's ip.
+ */
+ if (envip)
+ errip = envip;
+ else
+ errip = ip;
+ /*
+ * Get the option name index from the object based on the args
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbopen)optindex) {
+ case TCL_DB_ENV:
+ /*
+ * Already parsed this, skip it and the env pointer.
+ */
+ i++;
+ continue;
+ case TCL_DB_BTREE:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_BTREE;
+ break;
+ case TCL_DB_HASH:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_HASH;
+ break;
+ case TCL_DB_RECNO:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_RECNO;
+ break;
+ case TCL_DB_QUEUE:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_QUEUE;
+ break;
+ case TCL_DB_UNKNOWN:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ break;
+ case TCL_DB_CREATE:
+ open_flags |= DB_CREATE;
+ break;
+ case TCL_DB_EXCL:
+ open_flags |= DB_EXCL;
+ break;
+ case TCL_DB_RDONLY:
+ open_flags |= DB_RDONLY;
+ break;
+ case TCL_DB_TRUNCATE:
+ open_flags |= DB_TRUNCATE;
+ break;
+ case TCL_DB_TEST:
+ (*dbp)->set_h_hash(*dbp, __ham_test);
+ break;
+ case TCL_DB_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case TCL_DB_NOMMAP:
+ open_flags |= DB_NOMMAP;
+ break;
+ case TCL_DB_DUP:
+ set_flag |= DB_DUP;
+ break;
+ case TCL_DB_DUPSORT:
+ set_flag |= DB_DUPSORT;
+ break;
+ case TCL_DB_RECNUM:
+ set_flag |= DB_RECNUM;
+ break;
+ case TCL_DB_RENUMBER:
+ set_flag |= DB_RENUMBER;
+ break;
+ case TCL_DB_REVSPLIT:
+ set_flag |= DB_REVSPLITOFF;
+ break;
+ case TCL_DB_SNAPSHOT:
+ set_flag |= DB_SNAPSHOT;
+ break;
+ case TCL_DB_FFACTOR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-ffactor density");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_h_ffactor(*dbp,
+ (u_int32_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ "set_h_ffactor");
+ }
+ break;
+ case TCL_DB_NELEM:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-nelem nelem");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_h_nelem(*dbp,
+ (u_int32_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ "set_h_nelem");
+ }
+ break;
+ case TCL_DB_LORDER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-lorder 1234|4321");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_lorder(*dbp,
+ (u_int32_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ "set_lorder");
+ }
+ break;
+ case TCL_DB_DELIM:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-delim delim");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_delim(*dbp, intarg);
+ result = _ReturnSetup(interp, ret,
+ "set_re_delim");
+ }
+ break;
+ case TCL_DB_LEN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-len length");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_len(*dbp,
+ (u_int32_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ "set_re_len");
+ }
+ break;
+ case TCL_DB_PAD:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-pad pad");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_pad(*dbp, intarg);
+ result = _ReturnSetup(interp, ret,
+ "set_re_pad");
+ }
+ break;
+ case TCL_DB_SOURCE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-source file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_re_source(*dbp, arg);
+ result = _ReturnSetup(interp, ret, "set_re_source");
+ break;
+ case TCL_DB_EXTENT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-extent size");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_q_extentsize(*dbp,
+ (u_int32_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ "set_q_extentsize");
+ }
+ break;
+ case TCL_DB_MINKEY:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-minkey minkey");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_bt_minkey(*dbp, intarg);
+ result = _ReturnSetup(interp, ret,
+ "set_bt_minkey");
+ }
+ break;
+ case TCL_DB_CACHESIZE:
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &myobjc, &myobjv);
+ if (result != TCL_OK)
+ break;
+ j = 0;
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-cachesize {gbytes bytes ncaches}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, myobjv[0], &itmp);
+ gbytes = itmp;
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, myobjv[1], &itmp);
+ bytes = itmp;
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, myobjv[2], &itmp);
+ ncaches = itmp;
+ if (result != TCL_OK)
+ break;
+ _debug_check();
+ ret = (*dbp)->set_cachesize(*dbp, gbytes, bytes,
+ ncaches);
+ result = _ReturnSetup(interp, ret,
+ "set_cachesize");
+ break;
+ case TCL_DB_PAGESIZE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-pagesize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_pagesize(*dbp,
+ (size_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ "set pagesize");
+ }
+ break;
+ case TCL_DB_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (errip->i_err != NULL)
+ fclose(errip->i_err);
+ errip->i_err = fopen(arg, "a");
+ if (errip->i_err != NULL) {
+ _debug_check();
+ (*dbp)->set_errfile(*dbp, errip->i_err);
+ set_err = 1;
+ }
+ break;
+ case TCL_DB_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (errip->i_errpfx != NULL)
+ __os_freestr(errip->i_errpfx);
+ if ((ret = __os_strdup((*dbp)->dbenv,
+ arg, &errip->i_errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ "__os_strdup");
+ break;
+ }
+ if (errip->i_errpfx != NULL) {
+ _debug_check();
+ (*dbp)->set_errpfx(*dbp, errip->i_errpfx);
+ set_pfx = 1;
+ }
+ break;
+ case TCL_DB_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
+
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ /*
+ * Any args we have left (there had better be 0, 1 or 2) are
+ * file names. If we have 0, it is an in-memory db; if there
+ * is 1, a db name; if 2, a db and subdb name.
+ */
+ if (i != objc) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(envp,
+ subdblen + 1, NULL, &subdb)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret),
+ TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ }
+ if (set_flag) {
+ ret = (*dbp)->set_flags(*dbp, set_flag);
+ result = _ReturnSetup(interp, ret, "set_flags");
+ if (result == TCL_ERROR)
+ goto error;
+ /*
+ * If we are successful, clear the result so that the
+ * return from set_flags isn't part of the result.
+ */
+ Tcl_ResetResult(interp);
+ }
+
+ /*
+ * When we get here, we have already parsed all of our args and made
+ * all our calls to set up the database. Everything is okay so far,
+ * no errors, if we get here.
+ */
+ _debug_check();
+
+ /* Open the database. */
+ ret = (*dbp)->open(*dbp, db, subdb, type, open_flags, mode);
+ result = _ReturnSetup(interp, ret, "db open");
+
+error:
+ if (subdb)
+ __os_free(subdb, subdblen + 1);
+ if (result == TCL_ERROR) {
+ /*
+ * If we opened and set up the error file in the environment
+ * on this open, but we failed for some other reason, clean
+ * up and close the file.
+ *
+ * XXX when err stuff isn't tied to env, change to use ip,
+ * instead of envip. Also, set_err is irrelevant when that
+ * happens. It will just read:
+ * if (ip->i_err)
+ * fclose(ip->i_err);
+ */
+ if (set_err && errip && errip->i_err != NULL) {
+ fclose(errip->i_err);
+ errip->i_err = NULL;
+ }
+ if (set_pfx && errip && errip->i_errpfx != NULL) {
+ __os_freestr(errip->i_errpfx);
+ errip->i_errpfx = NULL;
+ }
+ (void)(*dbp)->close(*dbp, 0);
+ *dbp = NULL;
+ }
+ return (result);
+}
+
+/*
+ * bdb_DbRemove --
+ * Implements the DB->remove command.
+ */
+static int
+bdb_DbRemove(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbrem[] = {
+ "-env", "--", NULL
+ };
+ enum bdbrem {
+ TCL_DBREM_ENV,
+ TCL_DBREM_ENDARG
+ };
+ DB_ENV *envp;
+ DB *dbp;
+ int endarg, i, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *subdb;
+
+ envp = NULL;
+ dbp = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = subdb = NULL;
+ endarg = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbrem,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbrem)optindex) {
+ case TCL_DBREM_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db remove: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBREM_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left (there had better be 1 or 2) are
+ * file names. If there is 1, a db name; if 2, a db and subdb name.
+ */
+ if (i == (objc - 1) || i == (objc - 2)) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(envp, subdblen + 1,
+ NULL, &subdb)) != 0) { Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ } else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, "db_create");
+ goto error;
+ }
+ /*
+ * No matter what, we NULL out dbp after this call.
+ */
+ ret = dbp->remove(dbp, db, subdb, 0);
+ result = _ReturnSetup(interp, ret, "db remove");
+ dbp = NULL;
+error:
+ if (subdb)
+ __os_free(subdb, subdblen + 1);
+ if (result == TCL_ERROR && dbp)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+
+/*
+ * bdb_DbRename --
+ * Implements the DB->rename command.
+ */
+static int
+bdb_DbRename(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbmv[] = {
+ "-env", "--", NULL
+ };
+ enum bdbmv {
+ TCL_DBMV_ENV,
+ TCL_DBMV_ENDARG
+ };
+ DB_ENV *envp;
+ DB *dbp;
+ int endarg, i, newlen, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *newname, *subdb;
+
+ envp = NULL;
+ dbp = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = newname = subdb = NULL;
+ endarg = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp,
+ 3, objv, "?args? filename ?database? ?newname?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbmv,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbmv)optindex) {
+ case TCL_DBMV_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db rename: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBMV_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left (there had better be 2 or 3) are
+ * file names. If there are 2, a file name and a new name; if 3,
+ * a file name, a database name and a new name.
+ */
+ if (i == (objc - 2) || i == (objc - 3)) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i == objc - 2) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(envp, subdblen + 1,
+ NULL, &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &newlen);
+ if ((ret = __os_malloc(envp, newlen + 1,
+ NULL, &newname)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(newname, subdbtmp, newlen);
+ newname[newlen] = '\0';
+ } else {
+ Tcl_WrongNumArgs(interp, 3, objv, "?args? filename ?database? ?newname?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, "db_create");
+ goto error;
+ }
+ /*
+ * No matter what, we NULL out dbp after this call.
+ */
+ ret = dbp->rename(dbp, db, subdb, newname, 0);
+ result = _ReturnSetup(interp, ret, "db rename");
+ dbp = NULL;
+error:
+ if (subdb)
+ __os_free(subdb, subdblen + 1);
+ if (newname)
+ __os_free(newname, newlen + 1);
+ if (result == TCL_ERROR && dbp)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+
+/*
+ * bdb_DbVerify --
+ * Implements the DB->verify command.
+ */
+static int
+bdb_DbVerify(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbverify[] = {
+ "-env", "-errfile", "-errpfx", "--", NULL
+ };
+ enum bdbvrfy {
+ TCL_DBVRFY_ENV,
+ TCL_DBVRFY_ERRFILE,
+ TCL_DBVRFY_ERRPFX,
+ TCL_DBVRFY_ENDARG
+ };
+ DB_ENV *envp;
+ DB *dbp;
+ FILE *errf;
+ int endarg, i, optindex, result, ret, flags;
+ char *arg, *db, *errpfx;
+
+ envp = NULL;
+ dbp = NULL;
+ result = TCL_OK;
+ db = errpfx = NULL;
+ errf = NULL;
+ flags = endarg = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbverify,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbvrfy)optindex) {
+ case TCL_DBVRFY_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db verify: illegal environment",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ break;
+ case TCL_DBVRFY_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (errf != NULL)
+ fclose(errf);
+ errf = fopen(arg, "a");
+ break;
+ case TCL_DBVRFY_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (errpfx != NULL)
+ __os_freestr(errpfx);
+ if ((ret = __os_strdup(NULL, arg, &errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ "__os_strdup");
+ break;
+ }
+ break;
+ case TCL_DBVRFY_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * The remaining arg is the db filename.
+ */
+ if (i == (objc - 1))
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, "db_create");
+ goto error;
+ }
+
+ if (errf != NULL)
+ dbp->set_errfile(dbp, errf);
+ if (errpfx != NULL)
+ dbp->set_errpfx(dbp, errpfx);
+
+ ret = dbp->verify(dbp, db, NULL, NULL, flags);
+ result = _ReturnSetup(interp, ret, "db verify");
+error:
+ if (errf != NULL)
+ fclose(errf);
+ if (errpfx != NULL)
+ __os_freestr(errpfx);
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+
+/*
+ * bdb_Version --
+ * Implements the version command.
+ */
+static int
+bdb_Version(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbver[] = {
+ "-string", NULL
+ };
+ enum bdbver {
+ TCL_VERSTRING
+ };
+ int i, optindex, maj, min, patch, result, string, verobjc;
+ char *arg, *v;
+ Tcl_Obj *res, *verobjv[3];
+
+ result = TCL_OK;
+ string = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Parse the option flags; the only one is -string.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbver,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbver)optindex) {
+ case TCL_VERSTRING:
+ string = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ v = db_version(&maj, &min, &patch);
+ if (string)
+ res = Tcl_NewStringObj(v, strlen(v));
+ else {
+ verobjc = 3;
+ verobjv[0] = Tcl_NewIntObj(maj);
+ verobjv[1] = Tcl_NewIntObj(min);
+ verobjv[2] = Tcl_NewIntObj(patch);
+ res = Tcl_NewListObj(verobjc, verobjv);
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ return (result);
+}
+
+/*
+ * bdb_Handles --
+ * Implements the handles command.
+ */
+static int
+bdb_Handles(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ DBTCL_INFO *p;
+ Tcl_Obj *res, *handle;
+
+ /*
+ * No args for this command; error if we have any.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return (TCL_ERROR);
+ }
+ res = Tcl_NewListObj(0, NULL);
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries)) {
+ handle = Tcl_NewStringObj(p->i_name, strlen(p->i_name));
+ if (Tcl_ListObjAppendElement(interp, res, handle) != TCL_OK)
+ return (TCL_ERROR);
+ }
+ Tcl_SetObjResult(interp, res);
+ return (TCL_OK);
+}
+
+/*
+ * bdb_DbUpgrade --
+ * Implements the DB->upgrade command.
+ */
+static int
+bdb_DbUpgrade(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbupg[] = {
+ "-dupsort", "-env", "--", NULL
+ };
+ enum bdbupg {
+ TCL_DBUPG_DUPSORT,
+ TCL_DBUPG_ENV,
+ TCL_DBUPG_ENDARG
+ };
+ DB_ENV *envp;
+ DB *dbp;
+ int endarg, i, optindex, result, ret, flags;
+ char *arg, *db;
+
+ envp = NULL;
+ dbp = NULL;
+ result = TCL_OK;
+ db = NULL;
+ flags = endarg = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbupg,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbupg)optindex) {
+ case TCL_DBUPG_DUPSORT:
+ flags |= DB_DUPSORT;
+ break;
+ case TCL_DBUPG_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db upgrade: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBUPG_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * The remaining arg is the db filename.
+ */
+ if (i == (objc - 1))
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, "db_create");
+ goto error;
+ }
+
+ ret = dbp->upgrade(dbp, db, flags);
+ result = _ReturnSetup(interp, ret, "db upgrade");
+error:
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
diff --git a/bdb/tcl/tcl_dbcursor.c b/bdb/tcl/tcl_dbcursor.c
new file mode 100644
index 00000000000..26e7b58c64a
--- /dev/null
+++ b/bdb/tcl/tcl_dbcursor.c
@@ -0,0 +1,744 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_dbcursor.c,v 11.26 2001/01/11 18:19:55 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int tcl_DbcDup __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+static int tcl_DbcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+static int tcl_DbcPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+
+/*
+ * PUBLIC: int dbc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ *
+ * dbc_cmd --
+ * Implements the cursor command.
+ */
+int
+dbc_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Cursor handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *dbccmds[] = {
+ "close",
+ "del",
+ "dup",
+ "get",
+ "put",
+ NULL
+ };
+ enum dbccmds {
+ DBCCLOSE,
+ DBCDELETE,
+ DBCDUP,
+ DBCGET,
+ DBCPUT
+ };
+ DBC *dbc;
+ DBTCL_INFO *dbip;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ dbc = (DBC *)clientData;
+ dbip = _PtrToInfo((void *)dbc);
+ result = TCL_OK;
+
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbc == NULL) {
+ Tcl_SetResult(interp, "NULL dbc pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "NULL dbc info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbccmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[1], dbccmds, "command",
+ TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ switch ((enum dbccmds)cmdindex) {
+ case DBCCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbc->c_close(dbc);
+ result = _ReturnSetup(interp, ret, "dbc close");
+ if (result == TCL_OK) {
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+ }
+ break;
+ case DBCDELETE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbc->c_del(dbc, 0);
+ result = _ReturnSetup(interp, ret, "dbc delete");
+ break;
+ case DBCDUP:
+ result = tcl_DbcDup(interp, objc, objv, dbc);
+ break;
+ case DBCGET:
+ result = tcl_DbcGet(interp, objc, objv, dbc);
+ break;
+ case DBCPUT:
+ result = tcl_DbcPut(interp, objc, objv, dbc);
+ break;
+ }
+ return (result);
+}
+
+/*
+ * tcl_DbcPut --
+ */
+static int
+tcl_DbcPut(interp, objc, objv, dbc)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+{
+ static char *dbcutopts[] = {
+ "-after", "-before", "-current",
+ "-keyfirst", "-keylast", "-nodupdata",
+ "-partial",
+ NULL
+ };
+ enum dbcutopts {
+ DBCPUT_AFTER, DBCPUT_BEFORE, DBCPUT_CURRENT,
+ DBCPUT_KEYFIRST,DBCPUT_KEYLAST, DBCPUT_NODUPDATA,
+ DBCPUT_PART
+ };
+ DB *thisdbp;
+ DBT key, data;
+ DBTCL_INFO *dbcip, *dbip;
+ DBTYPE type;
+ Tcl_Obj **elemv, *res;
+ db_recno_t recno;
+ u_int32_t flag;
+ int elemc, i, itmp, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /*
+	 * Get the option index from the object based on the options
+	 * defined above.
+ */
+ i = 2;
+ while (i < (objc - 1)) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcutopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcutopts)optindex) {
+ case DBCPUT_AFTER:
+ FLAG_CHECK(flag);
+ flag = DB_AFTER;
+ break;
+ case DBCPUT_BEFORE:
+ FLAG_CHECK(flag);
+ flag = DB_BEFORE;
+ break;
+ case DBCPUT_CURRENT:
+ FLAG_CHECK(flag);
+ flag = DB_CURRENT;
+ break;
+ case DBCPUT_KEYFIRST:
+ FLAG_CHECK(flag);
+ flag = DB_KEYFIRST;
+ break;
+ case DBCPUT_KEYLAST:
+ FLAG_CHECK(flag);
+ flag = DB_KEYLAST;
+ break;
+ case DBCPUT_NODUPDATA:
+ FLAG_CHECK(flag);
+ flag = DB_NODUPDATA;
+ break;
+ case DBCPUT_PART:
+ if (i > (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags |= DB_DBT_PARTIAL;
+ result = Tcl_GetIntFromObj(interp, elemv[0], &itmp);
+ data.doff = itmp;
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, elemv[1], &itmp);
+ data.dlen = itmp;
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+			 * for result.  (See the check after setting data.doff,
+			 * a few lines above, and copy that.)
+ */
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * We need to determine if we are a recno database or not. If we are,
+ * then key.data is a recno, not a string.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL)
+ type = DB_UNKNOWN;
+ else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ return (result);
+ }
+ thisdbp = dbip->i_dbp;
+ type = thisdbp->get_type(thisdbp);
+ }
+ /*
+ * When we get here, we better have:
+ * 1 arg if -after, -before or -current
+ * 2 args in all other cases
+ */
+ if (flag == DB_AFTER || flag == DB_BEFORE || flag == DB_CURRENT) {
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? data");
+ result = TCL_ERROR;
+ goto out;
+ }
+ /*
+ * We want to get the key back, so we need to set
+ * up the location to get it back in.
+ */
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ recno = 0;
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ }
+ } else {
+ if (i != (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? key data");
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = Tcl_GetIntFromObj(interp, objv[objc-2], &itmp);
+ recno = itmp;
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ key.data = Tcl_GetByteArrayFromObj(objv[objc-2], &itmp);
+ key.size = itmp;
+ }
+ }
+ data.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
+ data.size = itmp;
+ _debug_check();
+ ret = dbc->c_put(dbc, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, "dbc put");
+ if (ret == 0 && (flag == DB_AFTER || flag == DB_BEFORE)
+ && type == DB_RECNO) {
+ res = Tcl_NewIntObj(*(db_recno_t *)key.data);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ return (result);
+}
+
+/*
+ * tcl_DbcGet --
+ */
+static int
+tcl_DbcGet(interp, objc, objv, dbc)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+{
+ static char *dbcgetopts[] = {
+ "-current",
+ "-first",
+ "-get_both",
+ "-get_recno",
+ "-join_item",
+ "-last",
+ "-next",
+ "-nextdup",
+ "-nextnodup",
+ "-partial",
+ "-prev",
+ "-prevnodup",
+ "-rmw",
+ "-set",
+ "-set_range",
+ "-set_recno",
+ NULL
+ };
+ enum dbcgetopts {
+ DBCGET_CURRENT,
+ DBCGET_FIRST,
+ DBCGET_BOTH,
+ DBCGET_RECNO,
+ DBCGET_JOIN,
+ DBCGET_LAST,
+ DBCGET_NEXT,
+ DBCGET_NEXTDUP,
+ DBCGET_NEXTNODUP,
+ DBCGET_PART,
+ DBCGET_PREV,
+ DBCGET_PREVNODUP,
+ DBCGET_RMW,
+ DBCGET_SET,
+ DBCGET_SETRANGE,
+ DBCGET_SETRECNO
+ };
+ DB *thisdbp;
+ DBT key, data;
+ DBTCL_INFO *dbcip, *dbip;
+ DBTYPE type;
+ Tcl_Obj **elemv, *myobj, *retlist;
+ db_recno_t recno;
+ u_int32_t flag;
+ int elemc, i, itmp, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ /*
+	 * Get the option index from the object based on the options
+	 * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcgetopts,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcgetopts)optindex) {
+ case DBCGET_RMW:
+ flag |= DB_RMW;
+ break;
+ case DBCGET_CURRENT:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_CURRENT;
+ break;
+ case DBCGET_FIRST:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_FIRST;
+ break;
+ case DBCGET_LAST:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_LAST;
+ break;
+ case DBCGET_NEXT:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_NEXT;
+ break;
+ case DBCGET_PREV:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_PREV;
+ break;
+ case DBCGET_PREVNODUP:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_PREV_NODUP;
+ break;
+ case DBCGET_NEXTNODUP:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_NEXT_NODUP;
+ break;
+ case DBCGET_NEXTDUP:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_NEXT_DUP;
+ break;
+ case DBCGET_BOTH:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_GET_BOTH;
+ break;
+ case DBCGET_RECNO:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_GET_RECNO;
+ break;
+ case DBCGET_JOIN:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_JOIN_ITEM;
+ break;
+ case DBCGET_SET:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_SET;
+ break;
+ case DBCGET_SETRANGE:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_SET_RANGE;
+ break;
+ case DBCGET_SETRECNO:
+ FLAG_CHECK2(flag, DB_RMW);
+ flag |= DB_SET_RECNO;
+ break;
+ case DBCGET_PART:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags |= DB_DBT_PARTIAL;
+ result = Tcl_GetIntFromObj(interp, elemv[0], &itmp);
+ data.doff = itmp;
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, elemv[1], &itmp);
+ data.dlen = itmp;
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+			 * for result.  (See the check after setting data.doff,
+			 * a few lines above, and copy that.)
+ */
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * We need to determine if we are a recno database
+ * or not. If we are, then key.data is a recno, not
+ * a string.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL)
+ type = DB_UNKNOWN;
+ else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ thisdbp = dbip->i_dbp;
+ type = thisdbp->get_type(thisdbp);
+ }
+ /*
+ * When we get here, we better have:
+ * 2 args, key and data if GET_BOTH was specified.
+ * 1 arg if -set, -set_range or -set_recno
+ * 0 in all other cases.
+ */
+ if ((flag & DB_OPFLAGS_MASK) == DB_GET_BOTH) {
+ if (i != (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? -get_both key data");
+ result = TCL_ERROR;
+ goto out;
+ } else {
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = Tcl_GetIntFromObj(
+ interp, objv[objc-2], &itmp);
+ recno = itmp;
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ key.data = Tcl_GetByteArrayFromObj(
+ objv[objc - 2], &itmp);
+ key.size = itmp;
+ }
+ data.data =
+ Tcl_GetByteArrayFromObj(objv[objc - 1], &itmp);
+ data.size = itmp;
+ }
+ } else if ((flag & DB_OPFLAGS_MASK) == DB_SET ||
+ (flag & DB_OPFLAGS_MASK) == DB_SET_RANGE ||
+ (flag & DB_OPFLAGS_MASK) == DB_SET_RECNO) {
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ data.flags |= DB_DBT_MALLOC;
+ if ((flag & DB_OPFLAGS_MASK) == DB_SET_RECNO ||
+ type == DB_RECNO || type == DB_QUEUE) {
+ result = Tcl_GetIntFromObj(interp,
+ objv[objc - 1], (int *)&recno);
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else {
+ key.data =
+ Tcl_GetByteArrayFromObj(objv[objc - 1], &itmp);
+ key.size = itmp;
+ }
+ } else {
+ if (i != objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args?");
+ result = TCL_ERROR;
+ goto out;
+ }
+ key.flags |= DB_DBT_MALLOC;
+ data.flags |= DB_DBT_MALLOC;
+ }
+
+ _debug_check();
+ ret = dbc->c_get(dbc, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, "dbc get");
+ if (result == TCL_ERROR)
+ goto out;
+
+ retlist = Tcl_NewListObj(0, NULL);
+ if (ret == DB_NOTFOUND)
+ goto out1;
+ if ((flag & DB_OPFLAGS_MASK) == DB_GET_RECNO) {
+ recno = *((db_recno_t *)data.data);
+ myobj = Tcl_NewIntObj((int)recno);
+ result = Tcl_ListObjAppendElement(interp, retlist, myobj);
+ } else {
+ if ((type == DB_RECNO || type == DB_QUEUE) && key.data != NULL)
+ result = _SetListRecnoElem(interp, retlist,
+ *(db_recno_t *)key.data, data.data, data.size);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size, data.data, data.size);
+ }
+ if (key.flags & DB_DBT_MALLOC)
+ __os_free(key.data, key.size);
+ if (data.flags & DB_DBT_MALLOC)
+ __os_free(data.data, data.size);
+out1:
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ return (result);
+
+}
+
+/*
+ * tcl_DbcDup --
+ */
+static int
+tcl_DbcDup(interp, objc, objv, dbc)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+{
+ static char *dbcdupopts[] = {
+ "-position",
+ NULL
+ };
+ enum dbcdupopts {
+ DBCDUP_POS
+ };
+ DB *thisdbp;
+ DBC *newdbc;
+ DBTCL_INFO *dbcip, *newdbcip, *dbip;
+ Tcl_Obj *res;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ res = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the option index from the object based on the options
+	 * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcdupopts,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcdupopts)optindex) {
+ case DBCDUP_POS:
+ flag = DB_POSITION;
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+	 * We need the cursor's parent database info so that we can
+	 * construct the new cursor's command name from it.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL) {
+ Tcl_SetResult(interp, "Cursor without info structure",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ } else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ thisdbp = dbip->i_dbp;
+ }
+ /*
+ * Now duplicate the cursor. If successful, we need to create
+ * a new cursor command.
+ */
+
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ newdbcip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (newdbcip != NULL) {
+ ret = dbc->c_dup(dbc, &newdbc, flag);
+ if (ret == 0) {
+ dbip->i_dbdbcid++;
+ newdbcip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)newdbc, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(newdbcip, newdbc);
+ Tcl_SetObjResult(interp, res);
+ } else {
+ result = _ReturnSetup(interp, ret, "db dup");
+ _DeleteInfo(newdbcip);
+ }
+ } else {
+ Tcl_SetResult(interp, "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+out:
+ return (result);
+
+}
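
A minimal C sketch of the cursor calls that tcl_DbcPut and tcl_DbcGet wrap, written against the 3.x-era API used above; the database file name and the key/data values are hypothetical, and error handling is abbreviated:

/*
 * Sketch of the c_put/c_get pattern behind the Tcl cursor commands.
 * "example.db" and the key/data values are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <db.h>

int
cursor_sketch(void)
{
	DB *dbp;
	DBC *dbc;
	DBT key, data;
	int ret, t_ret;

	dbc = NULL;
	if ((ret = db_create(&dbp, NULL, 0)) != 0)
		return (ret);
	if ((ret = dbp->open(dbp,
	    "example.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
		goto err;
	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		goto err;

	/* Store one pair through the cursor, as tcl_DbcPut does. */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "fruit";
	key.size = sizeof("fruit");
	data.data = "apple";
	data.size = sizeof("apple");
	if ((ret = dbc->c_put(dbc, &key, &data, DB_KEYFIRST)) != 0)
		goto err;

	/*
	 * Walk the database; DB manages the returned key/data memory
	 * until the next cursor call.
	 */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
		printf("%.*s -> %.*s\n", (int)key.size, (char *)key.data,
		    (int)data.size, (char *)data.data);
	if (ret == DB_NOTFOUND)
		ret = 0;

err:	if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}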
diff --git a/bdb/tcl/tcl_env.c b/bdb/tcl/tcl_env.c
new file mode 100644
index 00000000000..cb7b0d9744d
--- /dev/null
+++ b/bdb/tcl/tcl_env.c
@@ -0,0 +1,678 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_env.c,v 11.33 2001/01/11 18:19:55 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static void _EnvInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+
+/*
+ * PUBLIC: int env_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ *
+ * env_Cmd --
+ * Implements the "env" command.
+ */
+int
+env_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Env handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *envcmds[] = {
+ "close",
+ "lock_detect",
+ "lock_id",
+ "lock_get",
+ "lock_stat",
+ "lock_vec",
+ "log_archive",
+ "log_compare",
+ "log_file",
+ "log_flush",
+ "log_get",
+ "log_put",
+ "log_register",
+ "log_stat",
+ "log_unregister",
+ "mpool",
+ "mpool_stat",
+ "mpool_sync",
+ "mpool_trickle",
+ "mutex",
+#if CONFIG_TEST
+ "test",
+#endif
+ "txn",
+ "txn_checkpoint",
+ "txn_stat",
+ "verbose",
+ NULL
+ };
+ enum envcmds {
+ ENVCLOSE,
+ ENVLKDETECT,
+ ENVLKID,
+ ENVLKGET,
+ ENVLKSTAT,
+ ENVLKVEC,
+ ENVLOGARCH,
+ ENVLOGCMP,
+ ENVLOGFILE,
+ ENVLOGFLUSH,
+ ENVLOGGET,
+ ENVLOGPUT,
+ ENVLOGREG,
+ ENVLOGSTAT,
+ ENVLOGUNREG,
+ ENVMP,
+ ENVMPSTAT,
+ ENVMPSYNC,
+ ENVTRICKLE,
+ ENVMUTEX,
+#if CONFIG_TEST
+ ENVTEST,
+#endif
+ ENVTXN,
+ ENVTXNCKP,
+ ENVTXNSTAT,
+ ENVVERB
+ };
+ DBTCL_INFO *envip;
+ DB_ENV *envp;
+ Tcl_Obj *res;
+ u_int32_t newval;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ envp = (DB_ENV *)clientData;
+ envip = _PtrToInfo((void *)envp);
+ result = TCL_OK;
+
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (envp == NULL) {
+ Tcl_SetResult(interp, "NULL env pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (envip == NULL) {
+ Tcl_SetResult(interp, "NULL env info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the command name index from the object based on the envcmds
+	 * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[1], envcmds, "command",
+ TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ res = NULL;
+ switch ((enum envcmds)cmdindex) {
+ case ENVCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+		 * Any transactions will be aborted and any mpools
+		 * closed automatically.  We must also delete any txn
+		 * and mp widgets we have for this env.
+ * NOTE: envip is freed when we come back from
+ * this function. Set it to NULL to make sure no
+ * one tries to use it later.
+ */
+ _EnvInfoDelete(interp, envip);
+ envip = NULL;
+ _debug_check();
+ ret = envp->close(envp, 0);
+ result = _ReturnSetup(interp, ret, "env close");
+ break;
+ case ENVLKDETECT:
+ result = tcl_LockDetect(interp, objc, objv, envp);
+ break;
+ case ENVLKSTAT:
+ result = tcl_LockStat(interp, objc, objv, envp);
+ break;
+ case ENVLKID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = lock_id(envp, &newval);
+ result = _ReturnSetup(interp, ret, "lock_id");
+ if (result == TCL_OK)
+ res = Tcl_NewIntObj((int)newval);
+ break;
+ case ENVLKGET:
+ result = tcl_LockGet(interp, objc, objv, envp);
+ break;
+ case ENVLKVEC:
+ result = tcl_LockVec(interp, objc, objv, envp);
+ break;
+ case ENVLOGARCH:
+ result = tcl_LogArchive(interp, objc, objv, envp);
+ break;
+ case ENVLOGCMP:
+ result = tcl_LogCompare(interp, objc, objv);
+ break;
+ case ENVLOGFILE:
+ result = tcl_LogFile(interp, objc, objv, envp);
+ break;
+ case ENVLOGFLUSH:
+ result = tcl_LogFlush(interp, objc, objv, envp);
+ break;
+ case ENVLOGGET:
+ result = tcl_LogGet(interp, objc, objv, envp);
+ break;
+ case ENVLOGPUT:
+ result = tcl_LogPut(interp, objc, objv, envp);
+ break;
+ case ENVLOGREG:
+ result = tcl_LogRegister(interp, objc, objv, envp);
+ break;
+ case ENVLOGUNREG:
+ result = tcl_LogUnregister(interp, objc, objv, envp);
+ break;
+ case ENVLOGSTAT:
+ result = tcl_LogStat(interp, objc, objv, envp);
+ break;
+ case ENVMPSTAT:
+ result = tcl_MpStat(interp, objc, objv, envp);
+ break;
+ case ENVMPSYNC:
+ result = tcl_MpSync(interp, objc, objv, envp);
+ break;
+ case ENVTRICKLE:
+ result = tcl_MpTrickle(interp, objc, objv, envp);
+ break;
+ case ENVMP:
+ result = tcl_Mp(interp, objc, objv, envp, envip);
+ break;
+ case ENVTXNCKP:
+ result = tcl_TxnCheckpoint(interp, objc, objv, envp);
+ break;
+ case ENVTXNSTAT:
+ result = tcl_TxnStat(interp, objc, objv, envp);
+ break;
+ case ENVTXN:
+ result = tcl_Txn(interp, objc, objv, envp, envip);
+ break;
+ case ENVMUTEX:
+ result = tcl_Mutex(interp, objc, objv, envp, envip);
+ break;
+#if CONFIG_TEST
+ case ENVTEST:
+ result = tcl_EnvTest(interp, objc, objv, envp);
+ break;
+#endif
+ case ENVVERB:
+ /*
+ * Two args for this. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ result = tcl_EnvVerbose(interp, envp, objv[2], objv[3]);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * PUBLIC: int tcl_EnvRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ * PUBLIC: DB_ENV *, DBTCL_INFO *));
+ *
+ * tcl_EnvRemove --
+ */
+int
+tcl_EnvRemove(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Env pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ static char *envremopts[] = {
+ "-data_dir",
+ "-force",
+ "-home",
+ "-log_dir",
+ "-server",
+ "-tmp_dir",
+ "-use_environ",
+ "-use_environ_root",
+ NULL
+ };
+ enum envremopts {
+ ENVREM_DATADIR,
+ ENVREM_FORCE,
+ ENVREM_HOME,
+ ENVREM_LOGDIR,
+ ENVREM_SERVER,
+ ENVREM_TMPDIR,
+ ENVREM_USE_ENVIRON,
+ ENVREM_USE_ENVIRON_ROOT
+ };
+ DB_ENV *e;
+ u_int32_t cflag, flag, forceflag;
+ int i, optindex, result, ret;
+ char *datadir, *home, *logdir, *server, *tmpdir;
+
+ result = TCL_OK;
+ cflag = flag = forceflag = 0;
+ home = NULL;
+ datadir = logdir = tmpdir = NULL;
+ server = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envremopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto error;
+ }
+ i++;
+ switch ((enum envremopts)optindex) {
+ case ENVREM_FORCE:
+ forceflag |= DB_FORCE;
+ break;
+ case ENVREM_HOME:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-home dir?");
+ result = TCL_ERROR;
+ break;
+ }
+ home = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENVREM_SERVER:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server name?");
+ result = TCL_ERROR;
+ break;
+ }
+ server = Tcl_GetStringFromObj(objv[i++], NULL);
+ cflag = DB_CLIENT;
+ break;
+ case ENVREM_USE_ENVIRON:
+ flag |= DB_USE_ENVIRON;
+ break;
+ case ENVREM_USE_ENVIRON_ROOT:
+ flag |= DB_USE_ENVIRON_ROOT;
+ break;
+ case ENVREM_DATADIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-data_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ datadir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENVREM_LOGDIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-log_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ logdir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENVREM_TMPDIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-tmp_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ tmpdir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ }
+ /*
+		 * If we get an error at any point while parsing the args,
+		 * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ /*
+	 * If envp is NULL, we don't have an open env and we need to create
+	 * one for the user.  Don't bother with the info stuff.
+ */
+ if (envp == NULL) {
+ if ((ret = db_env_create(&e, cflag)) != 0) {
+ result = _ReturnSetup(interp, ret, "db_env_create");
+ goto error;
+ }
+ if (server != NULL) {
+ ret = e->set_server(e, server, 0, 0, 0);
+ result = _ReturnSetup(interp, ret, "set_server");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (datadir != NULL) {
+ _debug_check();
+ ret = e->set_data_dir(e, datadir);
+ result = _ReturnSetup(interp, ret, "set_data_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (logdir != NULL) {
+ _debug_check();
+ ret = e->set_lg_dir(e, logdir);
+ result = _ReturnSetup(interp, ret, "set_log_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (tmpdir != NULL) {
+ _debug_check();
+ ret = e->set_tmp_dir(e, tmpdir);
+ result = _ReturnSetup(interp, ret, "set_tmp_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ } else {
+ /*
+		 * We have to clean up any info associated with this env
+		 * regardless of the result of the remove, so do it first.
+ * NOTE: envip is freed when we come back from this function.
+ */
+ _EnvInfoDelete(interp, envip);
+ envip = NULL;
+ e = envp;
+ }
+
+ flag |= forceflag;
+ /*
+ * When we get here we have parsed all the args. Now remove
+ * the environment.
+ */
+ _debug_check();
+ ret = e->remove(e, home, flag);
+ result = _ReturnSetup(interp, ret, "env remove");
+error:
+ return (result);
+}
+
+static void
+_EnvInfoDelete(interp, envip)
+ Tcl_Interp *interp; /* Tcl Interpreter */
+ DBTCL_INFO *envip; /* Info for env */
+{
+ DBTCL_INFO *nextp, *p;
+
+ /*
+ * Before we can delete the environment info, we must close
+ * any open subsystems in this env. We will:
+ * 1. Abort any transactions (which aborts any nested txns).
+	 * 2.  Close any mpools (which will put back any pages they hold).
+ * 3. Put any locks.
+ * 4. Close the error file.
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * env. If so, remove its commands and info structure.
+ * We do not close/abort/whatever here, because we
+ * don't want to replicate DB behavior.
+ */
+ if (p->i_parent == envip) {
+ switch (p->i_type) {
+ case I_TXN:
+ _TxnInfoDelete(interp, p);
+ break;
+ case I_MP:
+ _MpInfoDelete(interp, p);
+ break;
+ default:
+ Tcl_SetResult(interp,
+ "_EnvInfoDelete: bad info type",
+ TCL_STATIC);
+ break;
+ }
+ nextp = LIST_NEXT(p, entries);
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ } else
+ nextp = LIST_NEXT(p, entries);
+ }
+ (void)Tcl_DeleteCommand(interp, envip->i_name);
+ _DeleteInfo(envip);
+}
+
+/*
+ * PUBLIC: int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *,
+ * PUBLIC: Tcl_Obj *));
+ *
+ * tcl_EnvVerbose --
+ */
+int
+tcl_EnvVerbose(interp, envp, which, onoff)
+ Tcl_Interp *interp; /* Interpreter */
+ DB_ENV *envp; /* Env pointer */
+ Tcl_Obj *which; /* Which subsystem */
+ Tcl_Obj *onoff; /* On or off */
+{
+ static char *verbwhich[] = {
+ "chkpt",
+ "deadlock",
+ "recovery",
+ "wait",
+ NULL
+ };
+ enum verbwhich {
+ ENVVERB_CHK,
+ ENVVERB_DEAD,
+ ENVVERB_REC,
+ ENVVERB_WAIT
+ };
+ static char *verbonoff[] = {
+ "off",
+ "on",
+ NULL
+ };
+ enum verbonoff {
+ ENVVERB_OFF,
+ ENVVERB_ON
+ };
+ int on, optindex, ret;
+ u_int32_t wh;
+
+ if (Tcl_GetIndexFromObj(interp, which, verbwhich, "option",
+ TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(which));
+
+ switch ((enum verbwhich)optindex) {
+ case ENVVERB_CHK:
+ wh = DB_VERB_CHKPOINT;
+ break;
+ case ENVVERB_DEAD:
+ wh = DB_VERB_DEADLOCK;
+ break;
+ case ENVVERB_REC:
+ wh = DB_VERB_RECOVERY;
+ break;
+ case ENVVERB_WAIT:
+ wh = DB_VERB_WAITSFOR;
+ break;
+ default:
+ return (TCL_ERROR);
+ }
+ if (Tcl_GetIndexFromObj(interp, onoff, verbonoff, "option",
+ TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(onoff));
+ switch ((enum verbonoff)optindex) {
+ case ENVVERB_OFF:
+ on = 0;
+ break;
+ case ENVVERB_ON:
+ on = 1;
+ break;
+ default:
+ return (TCL_ERROR);
+ }
+ ret = envp->set_verbose(envp, wh, on);
+ return (_ReturnSetup(interp, ret, "env set verbose"));
+}
+
+#if CONFIG_TEST
+/*
+ * PUBLIC: int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ *
+ * tcl_EnvTest --
+ */
+int
+tcl_EnvTest(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Env pointer */
+{
+ static char *envtestcmd[] = {
+ "abort",
+ "copy",
+ NULL
+ };
+ enum envtestcmd {
+ ENVTEST_ABORT,
+ ENVTEST_COPY
+ };
+ static char *envtestat[] = {
+ "none",
+ "preopen",
+ "prerename",
+ "postlog",
+ "postlogmeta",
+ "postopen",
+ "postrename",
+ "postsync",
+ NULL
+ };
+ enum envtestat {
+ ENVTEST_NONE,
+ ENVTEST_PREOPEN,
+ ENVTEST_PRERENAME,
+ ENVTEST_POSTLOG,
+ ENVTEST_POSTLOGMETA,
+ ENVTEST_POSTOPEN,
+ ENVTEST_POSTRENAME,
+ ENVTEST_POSTSYNC
+ };
+ int *loc, optindex, result, testval;
+
+ result = TCL_OK;
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "abort|copy location");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * This must be the "copy" or "abort" portion of the command.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[2], envtestcmd, "command",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[2]);
+ return (result);
+ }
+ switch ((enum envtestcmd)optindex) {
+ case ENVTEST_ABORT:
+ loc = &envp->test_abort;
+ break;
+ case ENVTEST_COPY:
+ loc = &envp->test_copy;
+ break;
+ default:
+ Tcl_SetResult(interp, "Illegal store location", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * This must be the location portion of the command.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[3], envtestat, "location",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[3]);
+ return (result);
+ }
+ switch ((enum envtestat)optindex) {
+ case ENVTEST_NONE:
+ testval = 0;
+ break;
+ case ENVTEST_PREOPEN:
+ testval = DB_TEST_PREOPEN;
+ break;
+ case ENVTEST_PRERENAME:
+ testval = DB_TEST_PRERENAME;
+ break;
+ case ENVTEST_POSTLOG:
+ testval = DB_TEST_POSTLOG;
+ break;
+ case ENVTEST_POSTLOGMETA:
+ testval = DB_TEST_POSTLOGMETA;
+ break;
+ case ENVTEST_POSTOPEN:
+ testval = DB_TEST_POSTOPEN;
+ break;
+ case ENVTEST_POSTRENAME:
+ testval = DB_TEST_POSTRENAME;
+ break;
+ case ENVTEST_POSTSYNC:
+ testval = DB_TEST_POSTSYNC;
+ break;
+ default:
+ Tcl_SetResult(interp, "Illegal test location", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ *loc = testval;
+ Tcl_SetResult(interp, "0", TCL_STATIC);
+ return (result);
+}
+#endif
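
A minimal sketch of the DB_ENV calls behind the verbose and close paths above, assuming a standard set of open flags; the home directory and the flag set are illustrative, not taken from this file:

/*
 * Sketch of env create/set_verbose/open/close, the C-level sequence the
 * "env" widget drives.  The open flags are an assumption.
 */
#include <db.h>

int
env_sketch(const char *home)
{
	DB_ENV *dbenv;
	int ret, t_ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	/* What tcl_EnvVerbose does for "verbose deadlock on". */
	if ((ret = dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1)) != 0)
		goto err;
	if ((ret = dbenv->open(dbenv, home,
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
	    DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
		goto err;
	/* Fall through: a successful run closes the handle as well. */
err:	if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}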
diff --git a/bdb/tcl/tcl_internal.c b/bdb/tcl/tcl_internal.c
new file mode 100644
index 00000000000..bdab60f4ad6
--- /dev/null
+++ b/bdb/tcl/tcl_internal.c
@@ -0,0 +1,440 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_internal.c,v 11.27 2000/05/22 18:36:51 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "tcl_db.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "db_ext.h"
+
+/*
+ *
+ * internal.c --
+ *
+ *	This file contains the internal functions needed to maintain
+ * state for our Tcl interface.
+ *
+ *	NOTE: This all uses a linear linked list.  If we end up with
+ * enough info structs that this becomes a performance hit, it
+ * should be redone using hashes or a list per type.  The assumption
+ * is that the user won't have more than a few dozen info structs
+ * in operation at any given time.  Even a complicated application
+ * with a few environments, nested transactions, locking, and several
+ * databases open and using cursors should not see a noticeable
+ * performance impact from searching the list to get or manipulate
+ * an info structure.
+ */
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+
+#define GLOB_CHAR(c) ((c) == '*' || (c) == '?')
+
+/*
+ * PUBLIC: DBTCL_INFO *_NewInfo __P((Tcl_Interp *,
+ * PUBLIC: void *, char *, enum INFOTYPE));
+ *
+ * _NewInfo --
+ *
+ * This function will create a new info structure and fill it in
+ * with the name and pointer, id and type.
+ */
+DBTCL_INFO *
+_NewInfo(interp, anyp, name, type)
+ Tcl_Interp *interp;
+ void *anyp;
+ char *name;
+ enum INFOTYPE type;
+{
+ DBTCL_INFO *p;
+ int i, ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(DBTCL_INFO), NULL, &p)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (NULL);
+ }
+
+ if ((ret = __os_strdup(NULL, name, &p->i_name)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ __os_free(p, sizeof(DBTCL_INFO));
+ return (NULL);
+ }
+ p->i_interp = interp;
+ p->i_anyp = anyp;
+ p->i_data = 0;
+ p->i_data2 = 0;
+ p->i_type = type;
+ p->i_parent = NULL;
+ p->i_err = NULL;
+ p->i_errpfx = NULL;
+ p->i_lockobj.data = NULL;
+ for (i = 0; i < MAX_ID; i++)
+ p->i_otherid[i] = 0;
+
+ LIST_INSERT_HEAD(&__db_infohead, p, entries);
+ return (p);
+}
+
+/*
+ * PUBLIC: void *_NameToPtr __P((CONST char *));
+ */
+void *
+_NameToPtr(name)
+ CONST char *name;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (strcmp(name, p->i_name) == 0)
+ return (p->i_anyp);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: char *_PtrToName __P((CONST void *));
+ */
+char *
+_PtrToName(ptr)
+ CONST void *ptr;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (p->i_anyp == ptr)
+ return (p->i_name);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: DBTCL_INFO *_PtrToInfo __P((CONST void *));
+ */
+DBTCL_INFO *
+_PtrToInfo(ptr)
+ CONST void *ptr;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (p->i_anyp == ptr)
+ return (p);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: DBTCL_INFO *_NameToInfo __P((CONST char *));
+ */
+DBTCL_INFO *
+_NameToInfo(name)
+ CONST char *name;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (strcmp(name, p->i_name) == 0)
+ return (p);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: void _SetInfoData __P((DBTCL_INFO *, void *));
+ */
+void
+_SetInfoData(p, data)
+ DBTCL_INFO *p;
+ void *data;
+{
+ if (p == NULL)
+ return;
+ p->i_anyp = data;
+ return;
+}
+
+/*
+ * PUBLIC: void _DeleteInfo __P((DBTCL_INFO *));
+ */
+void
+_DeleteInfo(p)
+ DBTCL_INFO *p;
+{
+ if (p == NULL)
+ return;
+ LIST_REMOVE(p, entries);
+ if (p->i_lockobj.data != NULL)
+ __os_free(p->i_lockobj.data, p->i_lockobj.size);
+ if (p->i_err != NULL) {
+ fclose(p->i_err);
+ p->i_err = NULL;
+ }
+ if (p->i_errpfx != NULL)
+ __os_freestr(p->i_errpfx);
+ __os_freestr(p->i_name);
+ __os_free(p, sizeof(DBTCL_INFO));
+
+ return;
+}
+
+/*
+ * PUBLIC: int _SetListElem __P((Tcl_Interp *,
+ * PUBLIC: Tcl_Obj *, void *, int, void *, int));
+ */
+int
+_SetListElem(interp, list, elem1, e1cnt, elem2, e2cnt)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ void *elem1, *elem2;
+ int e1cnt, e2cnt;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, e1cnt);
+ myobjv[1] = Tcl_NewByteArrayObj((u_char *)elem2, e2cnt);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+
+}
+
+/*
+ * PUBLIC: int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, int));
+ */
+int
+_SetListElemInt(interp, list, elem1, elem2)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ void *elem1;
+ int elem2;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, strlen((char *)elem1));
+ myobjv[1] = Tcl_NewIntObj(elem2);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+}
+
+/*
+ * PUBLIC: int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *,
+ * PUBLIC: db_recno_t, u_char *, int));
+ */
+int
+_SetListRecnoElem(interp, list, elem1, elem2, e2size)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ db_recno_t elem1;
+ u_char *elem2;
+ int e2size;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewIntObj(elem1);
+ myobjv[1] = Tcl_NewByteArrayObj(elem2, e2size);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+
+}
+
+/*
+ * PUBLIC: int _GetGlobPrefix __P((char *, char **));
+ */
+int
+_GetGlobPrefix(pattern, prefix)
+ char *pattern;
+ char **prefix;
+{
+ int i, j;
+ char *p;
+
+ /*
+	 * Duplicate it; that gives us enough space and does most of the work.
+ */
+ if (__os_strdup(NULL, pattern, prefix) != 0)
+ return (1);
+
+ p = *prefix;
+ for (i = 0, j = 0; p[i] && !GLOB_CHAR(p[i]); i++, j++)
+ /*
+ * Check for an escaped character and adjust
+ */
+ if (p[i] == '\\' && p[i+1]) {
+ p[j] = p[i+1];
+ i++;
+ } else
+ p[j] = p[i];
+ p[j] = 0;
+ return (0);
+}
+
+/*
+ * PUBLIC: int _ReturnSetup __P((Tcl_Interp *, int, char *));
+ */
+int
+_ReturnSetup(interp, ret, errmsg)
+ Tcl_Interp *interp;
+ int ret;
+ char *errmsg;
+{
+ char *msg;
+
+ if (ret > 0)
+ return (_ErrorSetup(interp, ret, errmsg));
+
+ /*
+ * We either have success or a DB error. If a DB error, set up the
+ * string. We return an error if not one of the errors we catch.
+ * If anyone wants to reset the result to return anything different,
+ * then the calling function is responsible for doing so via
+ * Tcl_ResetResult or another Tcl_SetObjResult.
+ */
+ if (ret == 0) {
+ Tcl_SetResult(interp, "0", TCL_STATIC);
+ return (TCL_OK);
+ }
+
+ msg = db_strerror(ret);
+ Tcl_AppendResult(interp, msg, NULL);
+
+ switch (ret) {
+ case DB_NOTFOUND:
+ case DB_KEYEXIST:
+ case DB_KEYEMPTY:
+ return (TCL_OK);
+ default:
+ Tcl_SetErrorCode(interp, "BerkeleyDB", msg, NULL);
+ return (TCL_ERROR);
+ }
+}
+
+/*
+ * PUBLIC: int _ErrorSetup __P((Tcl_Interp *, int, char *));
+ */
+int
+_ErrorSetup(interp, ret, errmsg)
+ Tcl_Interp *interp;
+ int ret;
+ char *errmsg;
+{
+ Tcl_SetErrno(ret);
+ Tcl_AppendResult(interp, errmsg, ":", Tcl_PosixError(interp), NULL);
+ return (TCL_ERROR);
+}
+
+/*
+ * PUBLIC: void _ErrorFunc __P((CONST char *, char *));
+ */
+void
+_ErrorFunc(pfx, msg)
+ CONST char *pfx;
+ char *msg;
+{
+ DBTCL_INFO *p;
+ Tcl_Interp *interp;
+ int size;
+ char *err;
+
+ p = _NameToInfo(pfx);
+ if (p == NULL)
+ return;
+ interp = p->i_interp;
+
+ size = strlen(pfx) + strlen(msg) + 4;
+ /*
+	 * If we cannot allocate enough to put together the prefix
+	 * and message, then give them just the message.
+ */
+ if (__os_malloc(NULL, size, NULL, &err) != 0) {
+ Tcl_AddErrorInfo(interp, msg);
+ Tcl_AppendResult(interp, msg, "\n", NULL);
+ return;
+ }
+ snprintf(err, size, "%s: %s", pfx, msg);
+ Tcl_AddErrorInfo(interp, err);
+ Tcl_AppendResult(interp, err, "\n", NULL);
+ __os_free(err, size);
+ return;
+}
+
+#define INVALID_LSNMSG "Invalid LSN with %d parts. Should have 2.\n"
+
+/*
+ * PUBLIC: int _GetLsn __P((Tcl_Interp *, Tcl_Obj *, DB_LSN *));
+ */
+int
+_GetLsn(interp, obj, lsn)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ DB_LSN *lsn;
+{
+ Tcl_Obj **myobjv;
+ int itmp, myobjc, result;
+ char msg[MSG_SIZE];
+
+ result = Tcl_ListObjGetElements(interp, obj, &myobjc, &myobjv);
+ if (result == TCL_ERROR)
+ return (result);
+ if (myobjc != 2) {
+ result = TCL_ERROR;
+ snprintf(msg, MSG_SIZE, INVALID_LSNMSG, myobjc);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (result);
+ }
+ result = Tcl_GetIntFromObj(interp, myobjv[0], &itmp);
+ if (result == TCL_ERROR)
+ return (result);
+ lsn->file = itmp;
+ result = Tcl_GetIntFromObj(interp, myobjv[1], &itmp);
+ lsn->offset = itmp;
+ return (result);
+}
+
+int __debug_stop, __debug_on, __debug_print, __debug_test;
+
+/*
+ * PUBLIC: void _debug_check __P((void));
+ */
+void
+_debug_check()
+{
+ if (__debug_on == 0)
+ return;
+
+ if (__debug_print != 0) {
+ printf("\r%6d:", __debug_on);
+ fflush(stdout);
+ }
+ if (__debug_on++ == __debug_test || __debug_stop)
+ __db_loadme();
+}
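
The helpers above are easiest to read as a lifecycle: _NewInfo creates a record for a freshly named widget, _SetInfoData attaches the handle once it exists, _PtrToInfo and _NameToInfo recover the record later, and _DeleteInfo tears it down. A hedged sketch follows, assuming the internal db_int.h and tcl_db.h headers are on the include path; register_cursor is a made-up caller, not a function in this commit:

/*
 * Sketch of the info-list lifecycle used throughout the Tcl glue.
 */
#include <tcl.h>

#include "db_int.h"
#include "tcl_db.h"

int
register_cursor(Tcl_Interp *interp, DBTCL_INFO *dbip, DBC *dbc, char *name)
{
	DBTCL_INFO *ip;

	if ((ip = _NewInfo(interp, NULL, name, I_DBC)) == NULL)
		return (TCL_ERROR);
	ip->i_parent = dbip;		/* Remember the owning database. */
	_SetInfoData(ip, dbc);		/* The handle now exists; attach it. */

	/* Later, a command procedure recovers the info from the handle. */
	if (_PtrToInfo(dbc) != ip)
		return (TCL_ERROR);

	/* On close, both the Tcl command and the info record are removed. */
	(void)Tcl_DeleteCommand(interp, ip->i_name);
	_DeleteInfo(ip);
	return (TCL_OK);
}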
diff --git a/bdb/tcl/tcl_lock.c b/bdb/tcl/tcl_lock.c
new file mode 100644
index 00000000000..89f6eeb2b39
--- /dev/null
+++ b/bdb/tcl/tcl_lock.c
@@ -0,0 +1,655 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_lock.c,v 11.21 2001/01/11 18:19:55 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int lock_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int _LockMode __P((Tcl_Interp *, Tcl_Obj *, db_lockmode_t *));
+static int _GetThisLock __P((Tcl_Interp *, DB_ENV *, u_int32_t,
+ u_int32_t, DBT *, db_lockmode_t, char *));
+static void _LockPutInfo __P((Tcl_Interp *, db_lockop_t, DB_LOCK *,
+ u_int32_t, DBT *));
+
+static char *lkmode[] = {
+ "ng", "read", "write",
+ "iwrite", "iread", "iwr",
+ NULL
+};
+enum lkmode {
+ LK_NG, LK_READ, LK_WRITE,
+ LK_IWRITE, LK_IREAD, LK_IWR
+};
+
+/*
+ * tcl_LockDetect --
+ *
+ * PUBLIC: int tcl_LockDetect __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockDetect(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *ldopts[] = {
+ "-lock_conflict",
+ "default",
+ "oldest",
+ "random",
+ "youngest",
+ NULL
+ };
+ enum ldopts {
+ LD_CONFLICT,
+ LD_DEFAULT,
+ LD_OLDEST,
+ LD_RANDOM,
+ LD_YOUNGEST
+ };
+ u_int32_t flag, policy;
+ int i, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = policy = 0;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ ldopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum ldopts)optindex) {
+ case LD_DEFAULT:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_DEFAULT;
+ break;
+ case LD_OLDEST:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_OLDEST;
+ break;
+ case LD_YOUNGEST:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_YOUNGEST;
+ break;
+ case LD_RANDOM:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_RANDOM;
+ break;
+ case LD_CONFLICT:
+ flag |= DB_LOCK_CONFLICT;
+ break;
+ }
+ }
+
+ _debug_check();
+ ret = lock_detect(envp, flag, policy, NULL);
+ result = _ReturnSetup(interp, ret, "lock detect");
+ return (result);
+}
+
+/*
+ * tcl_LockGet --
+ *
+ * PUBLIC: int tcl_LockGet __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockGet(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *lgopts[] = {
+ "-nowait",
+ NULL
+ };
+ enum lgopts {
+ LGNOWAIT
+ };
+ DBT obj;
+ Tcl_Obj *res;
+ db_lockmode_t mode;
+ u_int32_t flag, lockid;
+ int itmp, optindex, result;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+ if (objc != 5 && objc != 6) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-nowait? mode id obj");
+ return (TCL_ERROR);
+ }
+ /*
+ * Work back from required args.
+ * Last arg is obj.
+ * Second last is lock id.
+ * Third last is lock mode.
+ */
+ memset(&obj, 0, sizeof(obj));
+
+ if ((result =
+ Tcl_GetIntFromObj(interp, objv[objc-2], &itmp)) != TCL_OK)
+ return (result);
+ lockid = itmp;
+
+ /*
+ * XXX
+ * Tcl 8.1 Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj bug.
+ *
+ * The line below was originally before the Tcl_GetIntFromObj.
+ *
+	 * There is a bug in Tcl 8.1's byte arrays: if an object is used
+	 * both as a byte array and as something else (such as an int),
+	 * doing a Tcl_GetByteArrayFromObj and then a Tcl_GetIntFromObj
+	 * frees the byte-array memory out from under you.
+ *
+ * Workaround is to make sure all Tcl_GetByteArrayFromObj calls
+ * are done last.
+ */
+ obj.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
+ obj.size = itmp;
+ if ((result = _LockMode(interp, objv[(objc - 3)], &mode)) != TCL_OK)
+ return (result);
+
+ /*
+ * Any left over arg is the flag.
+ */
+ flag = 0;
+ if (objc == 6) {
+ if (Tcl_GetIndexFromObj(interp, objv[(objc - 4)],
+ lgopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[(objc - 4)]));
+ switch ((enum lgopts)optindex) {
+ case LGNOWAIT:
+ flag |= DB_LOCK_NOWAIT;
+ break;
+ }
+ }
+
+ result = _GetThisLock(interp, envp, lockid, flag, &obj, mode, newname);
+ if (result == TCL_OK) {
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ }
+ return (result);
+}
+
+/*
+ * tcl_LockStat --
+ *
+ * PUBLIC: int tcl_LockStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LOCK_STAT *sp;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = lock_stat(envp, &sp, NULL);
+ result = _ReturnSetup(interp, ret, "lock stat");
+ if (result == TCL_ERROR)
+ return (result);
+ /*
+	 * We have our stats; now construct the name/value list
+	 * pairs and free the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+	 * MAKE_STAT_LIST assumes a 'res' object and an 'error' label.
+ */
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Max locks", sp->st_maxlocks);
+ MAKE_STAT_LIST("Max lockers", sp->st_maxlockers);
+ MAKE_STAT_LIST("Max objects", sp->st_maxobjects);
+ MAKE_STAT_LIST("Lock modes", sp->st_nmodes);
+ MAKE_STAT_LIST("Current number of locks", sp->st_nlocks);
+ MAKE_STAT_LIST("Maximum number of locks so far", sp->st_maxnlocks);
+ MAKE_STAT_LIST("Current number of lockers", sp->st_nlockers);
+ MAKE_STAT_LIST("Maximum number of lockers so far", sp->st_maxnlockers);
+ MAKE_STAT_LIST("Current number of objects", sp->st_nobjects);
+ MAKE_STAT_LIST("Maximum number of objects so far", sp->st_maxnobjects);
+ MAKE_STAT_LIST("Number of conflicts", sp->st_nconflicts);
+ MAKE_STAT_LIST("Lock requests", sp->st_nrequests);
+ MAKE_STAT_LIST("Lock releases", sp->st_nreleases);
+ MAKE_STAT_LIST("Deadlocks detected", sp->st_ndeadlocks);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ Tcl_SetObjResult(interp, res);
+error:
+ __os_free(sp, sizeof(*sp));
+ return (result);
+}
+
+/*
+ * lock_Cmd --
+ * Implements the "lock" widget.
+ */
+static int
+lock_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Lock handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *lkcmds[] = {
+ "put",
+ NULL
+ };
+ enum lkcmds {
+ LKPUT
+ };
+ DB_ENV *env;
+ DB_LOCK *lock;
+ DBTCL_INFO *lkip;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ lock = (DB_LOCK *)clientData;
+ lkip = _PtrToInfo((void *)lock);
+ result = TCL_OK;
+
+ if (lock == NULL) {
+ Tcl_SetResult(interp, "NULL lock", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (lkip == NULL) {
+ Tcl_SetResult(interp, "NULL lock info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ env = NAME_TO_ENV(lkip->i_parent->i_name);
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+	 * Get the command name index from the object based on the lkcmds
+	 * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], lkcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ switch ((enum lkcmds)cmdindex) {
+ case LKPUT:
+ _debug_check();
+ ret = lock_put(env, lock);
+ result = _ReturnSetup(interp, ret, "lock put");
+ (void)Tcl_DeleteCommand(interp, lkip->i_name);
+ _DeleteInfo(lkip);
+ __os_free(lock, sizeof(DB_LOCK));
+ break;
+ }
+ return (result);
+}
+
+/*
+ * tcl_LockVec --
+ *
+ * PUBLIC: int tcl_LockVec __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockVec(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* environment pointer */
+{
+ static char *lvopts[] = {
+ "-nowait",
+ NULL
+ };
+ enum lvopts {
+ LVNOWAIT
+ };
+ static char *lkops[] = {
+ "get", "put", "put_all", "put_obj",
+ NULL
+ };
+ enum lkops {
+ LKGET, LKPUT, LKPUTALL, LKPUTOBJ
+ };
+ DB_LOCK *lock;
+ DB_LOCKREQ list;
+ DBT obj;
+ Tcl_Obj **myobjv, *res, *thisop;
+ db_lockmode_t mode;
+ u_int32_t flag, lockid;
+ int i, itmp, myobjc, optindex, result, ret;
+ char *lockname, msg[MSG_SIZE], newname[MSG_SIZE];
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+ flag = 0;
+ mode = 0;
+ /*
+	 * If -nowait is given, it MUST be the first arg.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[2],
+ lvopts, "option", TCL_EXACT, &optindex) == TCL_OK) {
+ switch ((enum lvopts)optindex) {
+ case LVNOWAIT:
+ flag |= DB_LOCK_NOWAIT;
+ break;
+ }
+ i = 3;
+ } else {
+ if (IS_HELP(objv[2]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ i = 2;
+ }
+
+ /*
+ * Our next arg MUST be the locker ID.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &itmp);
+ if (result != TCL_OK)
+ return (result);
+ lockid = itmp;
+
+ /*
+ * All other remaining args are operation tuples.
+ * Go through sequentially to decode, execute and build
+ * up list of return values.
+ */
+ res = Tcl_NewListObj(0, NULL);
+ while (i < objc) {
+ /*
+ * Get the list of the tuple.
+ */
+ lock = NULL;
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ /*
+ * First we will set up the list of requests.
+ * We will make a "second pass" after we get back
+ * the results from the lock_vec call to create
+ * the return list.
+ */
+ if (Tcl_GetIndexFromObj(interp, myobjv[0],
+ lkops, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(myobjv[0]);
+ goto error;
+ }
+ switch ((enum lkops)optindex) {
+ case LKGET:
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{get obj mode}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ result = _LockMode(interp, myobjv[2], &list.mode);
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * XXX
+ * Tcl 8.1 Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj
+ * bug.
+ *
+ * There is a bug in Tcl 8.1 and byte arrays in that if
+			 * There is a bug in Tcl 8.1's byte arrays: if an
+			 * object is used both as a byte array and as
+			 * something else (such as an int), doing a
+			 * Tcl_GetByteArrayFromObj and then a
+			 * Tcl_GetIntFromObj frees the byte-array memory
+			 * out from under you.
+ * Workaround is to make sure all
+ * Tcl_GetByteArrayFromObj calls are done last.
+ */
+ obj.data = Tcl_GetByteArrayFromObj(myobjv[1], &itmp);
+ obj.size = itmp;
+ ret = _GetThisLock(interp, envp, lockid, flag,
+ &obj, list.mode, newname);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, "lock vec");
+ thisop = Tcl_NewIntObj(ret);
+ (void)Tcl_ListObjAppendElement(interp, res,
+ thisop);
+ goto error;
+ }
+ thisop = Tcl_NewStringObj(newname, strlen(newname));
+ (void)Tcl_ListObjAppendElement(interp, res, thisop);
+ continue;
+ case LKPUT:
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put lock}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT;
+ lockname = Tcl_GetStringFromObj(myobjv[1], NULL);
+ lock = NAME_TO_LOCK(lockname);
+ if (lock == NULL) {
+ snprintf(msg, MSG_SIZE, "Invalid lock: %s\n",
+ lockname);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.lock = *lock;
+ break;
+ case LKPUTALL:
+ if (myobjc != 1) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put_all}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT_ALL;
+ break;
+ case LKPUTOBJ:
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put_obj obj}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT_OBJ;
+ obj.data = Tcl_GetByteArrayFromObj(myobjv[1], &itmp);
+ obj.size = itmp;
+ list.obj = &obj;
+ break;
+ }
+ /*
+		 * When we get here, we have set up our request; now call
+		 * lock_vec.
+ */
+ _debug_check();
+ ret = lock_vec(envp, lockid, flag, &list, 1, NULL);
+ /*
+		 * Now deal with whether or not the operation succeeded.
+		 * Gets were handled above; all of these are puts.
+ */
+ thisop = Tcl_NewIntObj(ret);
+ result = Tcl_ListObjAppendElement(interp, res, thisop);
+ if (ret != 0 && result == TCL_OK)
+ result = _ReturnSetup(interp, ret, "lock put");
+ /*
+ * We did a put of some kind. Since we did that,
+ * we have to delete the commands associated with
+ * any of the locks we just put.
+ */
+ _LockPutInfo(interp, list.op, lock, lockid, &obj);
+ }
+
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+error:
+ return (result);
+}
+
+static int
+_LockMode(interp, obj, mode)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ db_lockmode_t *mode;
+{
+ int optindex;
+
+ if (Tcl_GetIndexFromObj(interp, obj, lkmode, "option",
+ TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(obj));
+ switch ((enum lkmode)optindex) {
+ case LK_NG:
+ *mode = DB_LOCK_NG;
+ break;
+ case LK_READ:
+ *mode = DB_LOCK_READ;
+ break;
+ case LK_WRITE:
+ *mode = DB_LOCK_WRITE;
+ break;
+ case LK_IREAD:
+ *mode = DB_LOCK_IREAD;
+ break;
+ case LK_IWRITE:
+ *mode = DB_LOCK_IWRITE;
+ break;
+ case LK_IWR:
+ *mode = DB_LOCK_IWR;
+ break;
+ }
+ return (TCL_OK);
+}
+
+static void
+_LockPutInfo(interp, op, lock, lockid, objp)
+ Tcl_Interp *interp;
+ db_lockop_t op;
+ DB_LOCK *lock;
+ u_int32_t lockid;
+ DBT *objp;
+{
+ DBTCL_INFO *p, *nextp;
+ int found;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ found = 0;
+ nextp = LIST_NEXT(p, entries);
+ if ((op == DB_LOCK_PUT && (p->i_lock == lock)) ||
+ (op == DB_LOCK_PUT_ALL && p->i_locker == lockid) ||
+ (op == DB_LOCK_PUT_OBJ && p->i_lockobj.data &&
+ memcmp(p->i_lockobj.data, objp->data, objp->size) == 0))
+ found = 1;
+ if (found) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ __os_free(p->i_lock, sizeof(DB_LOCK));
+ _DeleteInfo(p);
+ }
+ }
+}
+
+static int
+_GetThisLock(interp, envp, lockid, flag, objp, mode, newname)
+ Tcl_Interp *interp; /* Interpreter */
+ DB_ENV *envp; /* Env handle */
+ u_int32_t lockid; /* Locker ID */
+ u_int32_t flag; /* Lock flag */
+ DBT *objp; /* Object to lock */
+ db_lockmode_t mode; /* Lock mode */
+ char *newname; /* New command name */
+{
+ DB_LOCK *lock;
+ DBTCL_INFO *envip, *ip;
+ int result, ret;
+
+ result = TCL_OK;
+ envip = _PtrToInfo((void *)envp);
+ if (envip == NULL) {
+ Tcl_SetResult(interp, "Could not find env info\n", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ snprintf(newname, MSG_SIZE, "%s.lock%d",
+ envip->i_name, envip->i_envlockid);
+ ip = _NewInfo(interp, NULL, newname, I_LOCK);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ ret = __os_malloc(envp, sizeof(DB_LOCK), NULL, &lock);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = lock_get(envp, lockid, flag, objp, mode, lock);
+ result = _ReturnSetup(interp, ret, "lock get");
+ if (result == TCL_ERROR) {
+ __os_free(lock, sizeof(DB_LOCK));
+ _DeleteInfo(ip);
+ return (result);
+ }
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this lock.
+ */
+ ret = __os_malloc(envp, objp->size, NULL, &ip->i_lockobj.data);
+ if (ret != 0) {
+ Tcl_SetResult(interp, "Could not duplicate obj",
+ TCL_STATIC);
+ (void)lock_put(envp, lock);
+ __os_free(lock, sizeof(DB_LOCK));
+ _DeleteInfo(ip);
+ result = TCL_ERROR;
+ goto error;
+ }
+ memcpy(ip->i_lockobj.data, objp->data, objp->size);
+ ip->i_lockobj.size = objp->size;
+ envip->i_envlockid++;
+ ip->i_parent = envip;
+ ip->i_locker = lockid;
+ _SetInfoData(ip, lock);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)lock_Cmd, (ClientData)lock, NULL);
+error:
+ return (result);
+}
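
A minimal sketch of the lock_id/lock_get/lock_put sequence that tcl_LockGet and the lock widget's put command wrap; the locked object name is a placeholder chosen for illustration:

/*
 * Sketch of the locking calls used above.  The locked object is an
 * arbitrary application-chosen byte string; "my-object" is a placeholder.
 */
#include <string.h>
#include <db.h>

int
lock_sketch(DB_ENV *dbenv)
{
	DB_LOCK lock;
	DBT obj;
	u_int32_t locker;
	int ret;

	/* Get a locker ID, as "$env lock_id" does. */
	if ((ret = lock_id(dbenv, &locker)) != 0)
		return (ret);

	/*
	 * Lock the object for writing, then release the lock; this is the
	 * pair behind tcl_LockGet and the lock widget's "put" command.
	 */
	memset(&obj, 0, sizeof(obj));
	obj.data = "my-object";
	obj.size = sizeof("my-object");
	if ((ret = lock_get(dbenv, locker, 0, &obj, DB_LOCK_WRITE, &lock)) != 0)
		return (ret);
	return (lock_put(dbenv, &lock));
}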
diff --git a/bdb/tcl/tcl_log.c b/bdb/tcl/tcl_log.c
new file mode 100644
index 00000000000..20f8e8c0277
--- /dev/null
+++ b/bdb/tcl/tcl_log.c
@@ -0,0 +1,581 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_log.c,v 11.21 2000/11/30 00:58:45 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * tcl_LogArchive --
+ *
+ * PUBLIC: int tcl_LogArchive __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogArchive(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *archopts[] = {
+ "-arch_abs", "-arch_data", "-arch_log",
+ NULL
+ };
+ enum archopts {
+ ARCH_ABS, ARCH_DATA, ARCH_LOG
+ };
+ Tcl_Obj *fileobj, *res;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char **file, **list;
+
+ result = TCL_OK;
+ flag = 0;
+ /*
+ * Get the flag index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ archopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum archopts)optindex) {
+ case ARCH_ABS:
+ flag |= DB_ARCH_ABS;
+ break;
+ case ARCH_DATA:
+ flag |= DB_ARCH_DATA;
+ break;
+ case ARCH_LOG:
+ flag |= DB_ARCH_LOG;
+ break;
+ }
+ }
+ _debug_check();
+ list = NULL;
+ ret = log_archive(envp, &list, flag, NULL);
+ result = _ReturnSetup(interp, ret, "log archive");
+ if (result == TCL_OK) {
+ res = Tcl_NewListObj(0, NULL);
+ for (file = list; file != NULL && *file != NULL; file++) {
+ fileobj = Tcl_NewStringObj(*file, strlen(*file));
+ result = Tcl_ListObjAppendElement(interp, res, fileobj);
+ if (result != TCL_OK)
+ break;
+ }
+ Tcl_SetObjResult(interp, res);
+ }
+ if (list != NULL)
+ __os_free(list, 0);
+ return (result);
+}
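A minimal Tcl usage sketch; the log_archive subcommand name is an assumption based on the function name (only log_put, log_get, txn and txn_checkpoint appear verbatim in the test scripts later in this diff), and the option spellings come from the archopts table above:

    set dbenv [berkdb env -create -txn -home $testdir]
    # With no -arch_* flag the call lists log files that are no longer
    # in use; -arch_abs asks for absolute pathnames.
    foreach f [$dbenv log_archive -arch_abs] {
        puts "removable log: $f"
    }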
+
+/*
+ * tcl_LogCompare --
+ *
+ * PUBLIC: int tcl_LogCompare __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*));
+ */
+int
+tcl_LogCompare(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ DB_LSN lsn0, lsn1;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 4 args.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn1 lsn2");
+ return (TCL_ERROR);
+ }
+
+ result = _GetLsn(interp, objv[2], &lsn0);
+ if (result == TCL_ERROR)
+ return (result);
+ result = _GetLsn(interp, objv[3], &lsn1);
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ ret = log_compare(&lsn0, &lsn1);
+ res = Tcl_NewIntObj(ret);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
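A script-level sketch of the comparison, assuming a log_compare subcommand and that _GetLsn accepts the same {file offset} pairs that log_put and log_get return:

    set lsn1 [$dbenv log_put "first record"]
    set lsn2 [$dbenv log_put "second record"]
    # Negative, zero or positive, mirroring the underlying log_compare().
    puts [$dbenv log_compare $lsn1 $lsn2]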
+
+/*
+ * tcl_LogFile --
+ *
+ * PUBLIC: int tcl_LogFile __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogFile(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LSN lsn;
+ Tcl_Obj *res;
+ size_t len;
+ int result, ret;
+ char *name;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 3 args.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn");
+ return (TCL_ERROR);
+ }
+
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+
+ len = MSG_SIZE;
+ ret = ENOMEM;
+ name = NULL;
+ while (ret == ENOMEM) {
+ if (name != NULL)
+ __os_free(name, len/2);
+ ret = __os_malloc(envp, len, NULL, &name);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ break;
+ }
+ _debug_check();
+ ret = log_file(envp, &lsn, name, len);
+ len *= 2;
+ }
+ result = _ReturnSetup(interp, ret, "log_file");
+ if (ret == 0) {
+ res = Tcl_NewStringObj(name, strlen(name));
+ Tcl_SetObjResult(interp, res);
+ }
+
+ if (name != NULL)
+ __os_free(name, len/2);
+
+ return (result);
+}
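The ENOMEM loop above simply doubles the name buffer until log_file() has room for the path. A script-level sketch, assuming a log_file subcommand:

    set lsn [$dbenv log_put "some record"]
    # Report which physical log file holds that LSN.
    puts [$dbenv log_file $lsn]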
+
+/*
+ * tcl_LogFlush --
+ *
+ * PUBLIC: int tcl_LogFlush __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogFlush(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LSN lsn, *lsnp;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 2 or 3 args.
+ */
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?lsn?");
+ return (TCL_ERROR);
+ }
+
+ if (objc == 3) {
+ lsnp = &lsn;
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+ } else
+ lsnp = NULL;
+
+ _debug_check();
+ ret = log_flush(envp, lsnp);
+ result = _ReturnSetup(interp, ret, "log_flush");
+ return (result);
+}
+
+/*
+ * tcl_LogGet --
+ *
+ * PUBLIC: int tcl_LogGet __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogGet(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *loggetopts[] = {
+ "-checkpoint", "-current", "-first",
+ "-last", "-next", "-prev",
+ "-set",
+ NULL
+ };
+ enum loggetopts {
+ LOGGET_CKP, LOGGET_CUR, LOGGET_FIRST,
+ LOGGET_LAST, LOGGET_NEXT, LOGGET_PREV,
+ LOGGET_SET
+ };
+ DB_LSN lsn;
+ DBT data;
+ Tcl_Obj *dataobj, *lsnlist, *myobjv[2], *res;
+ u_int32_t flag;
+ int i, myobjc, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? lsn");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ loggetopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum loggetopts)optindex) {
+ case LOGGET_CKP:
+ FLAG_CHECK(flag);
+ flag |= DB_CHECKPOINT;
+ break;
+ case LOGGET_CUR:
+ FLAG_CHECK(flag);
+ flag |= DB_CURRENT;
+ break;
+ case LOGGET_FIRST:
+ FLAG_CHECK(flag);
+ flag |= DB_FIRST;
+ break;
+ case LOGGET_LAST:
+ FLAG_CHECK(flag);
+ flag |= DB_LAST;
+ break;
+ case LOGGET_NEXT:
+ FLAG_CHECK(flag);
+ flag |= DB_NEXT;
+ break;
+ case LOGGET_PREV:
+ FLAG_CHECK(flag);
+ flag |= DB_PREV;
+ break;
+ case LOGGET_SET:
+ FLAG_CHECK(flag);
+ flag |= DB_SET;
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-set lsn?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetLsn(interp, objv[i++], &lsn);
+ break;
+ }
+ }
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ memset(&data, 0, sizeof(data));
+ data.flags |= DB_DBT_MALLOC;
+ _debug_check();
+ ret = log_get(envp, &lsn, &data, flag);
+ res = Tcl_NewListObj(0, NULL);
+ result = _ReturnSetup(interp, ret, "log_get");
+ if (ret == 0) {
+ /*
+ * Success. Set up return list as {LSN data} where LSN
+ * is a sublist {file offset}.
+ */
+ myobjc = 2;
+ myobjv[0] = Tcl_NewIntObj(lsn.file);
+ myobjv[1] = Tcl_NewIntObj(lsn.offset);
+ lsnlist = Tcl_NewListObj(myobjc, myobjv);
+ if (lsnlist == NULL) {
+ if (data.data != NULL)
+ __os_free(data.data, data.size);
+ return (TCL_ERROR);
+ }
+ result = Tcl_ListObjAppendElement(interp, res, lsnlist);
+ dataobj = Tcl_NewStringObj(data.data, data.size);
+ result = Tcl_ListObjAppendElement(interp, res, dataobj);
+ }
+ if (data.data != NULL)
+ __os_free(data.data, data.size);
+
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
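As the test scripts later in this diff rely on, the result here is a two-element list {{file offset} data}; a sketch of taking it apart:

    set r    [$dbenv log_get -last]
    set lsn  [lindex $r 0]          ;# the {file offset} sublist
    set file [lindex $lsn 0]
    set data [lindex $r 1]
    puts "last record is in log file $file"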
+
+/*
+ * tcl_LogPut --
+ *
+ * PUBLIC: int tcl_LogPut __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogPut(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *logputopts[] = {
+ "-checkpoint", "-curlsn", "-flush",
+ NULL
+ };
+ enum logputopts {
+ LOGPUT_CKP, LOGPUT_CUR, LOGPUT_FLUSH
+ };
+ DB_LSN lsn;
+ DBT data;
+ Tcl_Obj *intobj, *res;
+ u_int32_t flag;
+ int itmp, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? record");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Data/record must be the last arg.
+ */
+ memset(&data, 0, sizeof(data));
+ data.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
+ data.size = itmp;
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ if (objc == 4) {
+ if (Tcl_GetIndexFromObj(interp, objv[2],
+ logputopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ return (IS_HELP(objv[2]));
+ }
+ switch ((enum logputopts)optindex) {
+ case LOGPUT_CKP:
+ flag = DB_CHECKPOINT;
+ break;
+ case LOGPUT_CUR:
+ flag = DB_CURLSN;
+ break;
+ case LOGPUT_FLUSH:
+ flag = DB_FLUSH;
+ break;
+ }
+ }
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ ret = log_put(envp, &lsn, &data, flag);
+ result = _ReturnSetup(interp, ret, "log_put");
+ if (result == TCL_ERROR)
+ return (result);
+ res = Tcl_NewListObj(0, NULL);
+ intobj = Tcl_NewIntObj(lsn.file);
+ result = Tcl_ListObjAppendElement(interp, res, intobj);
+ intobj = Tcl_NewIntObj(lsn.offset);
+ result = Tcl_ListObjAppendElement(interp, res, intobj);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
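This is the subcommand archive.tcl below exercises directly; the return value is the {file offset} LSN assigned to the new record:

    set lsn [$dbenv log_put "application record"]
    # -flush (DB_FLUSH above) also forces the record to stable storage.
    set lsn [$dbenv log_put -flush "must be durable"]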
+
+/*
+ * tcl_LogRegister --
+ *
+ * PUBLIC: int tcl_LogRegister __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogRegister(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB *dbp;
+ Tcl_Obj *res;
+ int result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "db filename");
+ return (TCL_ERROR);
+ }
+ /*
+ * First comes the DB.
+ */
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ dbp = NAME_TO_DB(arg);
+ if (dbp == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "LogRegister: Invalid db: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Next is the filename.
+ */
+ arg = Tcl_GetStringFromObj(objv[3], NULL);
+
+ _debug_check();
+ ret = log_register(envp, dbp, arg);
+ result = _ReturnSetup(interp, ret, "log_register");
+ if (result == TCL_OK) {
+ res = Tcl_NewIntObj((int)dbp->log_fileid);
+ Tcl_SetObjResult(interp, res);
+ }
+ return (result);
+}
+
+/*
+ * tcl_LogStat --
+ *
+ * PUBLIC: int tcl_LogStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LOG_STAT *sp;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = log_stat(envp, &sp, NULL);
+ result = _ReturnSetup(interp, ret, "log stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Magic", sp->st_magic);
+ MAKE_STAT_LIST("Log file Version", sp->st_version);
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Log file mode", sp->st_mode);
+ MAKE_STAT_LIST("Log record cache size", sp->st_lg_bsize);
+ MAKE_STAT_LIST("Maximum log file size", sp->st_lg_max);
+ MAKE_STAT_LIST("Mbytes written", sp->st_w_mbytes);
+ MAKE_STAT_LIST("Bytes written (over Mb)", sp->st_w_bytes);
+ MAKE_STAT_LIST("Mbytes written since checkpoint", sp->st_wc_mbytes);
+ MAKE_STAT_LIST("Bytes written (over Mb) since checkpoint",
+ sp->st_wc_bytes);
+ MAKE_STAT_LIST("Times log written", sp->st_wcount);
+ MAKE_STAT_LIST("Times log written because cache filled up",
+ sp->st_wcount_fill);
+ MAKE_STAT_LIST("Times log flushed", sp->st_scount);
+ MAKE_STAT_LIST("Current log file number", sp->st_cur_file);
+ MAKE_STAT_LIST("Current log file offset", sp->st_cur_offset);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ Tcl_SetObjResult(interp, res);
+error:
+ __os_free(sp, sizeof(*sp));
+ return (result);
+}
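The stat commands in this file all build their result as a list of {name value} pairs; a sketch of picking one field out, assuming a log_stat subcommand:

    foreach pair [$dbenv log_stat] {
        if { [lindex $pair 0] == "Current log file number" } {
            puts "writing log file [lindex $pair 1]"
        }
    }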
+
+/*
+ * tcl_LogUnregister --
+ *
+ * PUBLIC: int tcl_LogUnregister __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogUnregister(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB *dbp;
+ char *arg, msg[MSG_SIZE];
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * 1 arg for this. Error if more or less.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ dbp = NAME_TO_DB(arg);
+ if (dbp == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "log_unregister: Invalid db identifier: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = log_unregister(envp, dbp);
+ result = _ReturnSetup(interp, ret, "log_unregister");
+
+ return (result);
+}
diff --git a/bdb/tcl/tcl_mp.c b/bdb/tcl/tcl_mp.c
new file mode 100644
index 00000000000..b424deea242
--- /dev/null
+++ b/bdb/tcl/tcl_mp.c
@@ -0,0 +1,822 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_mp.c,v 11.24 2001/01/09 16:13:59 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int mp_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int pg_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int tcl_MpGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DB_MPOOLFILE *, DBTCL_INFO *));
+static int tcl_Pg __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DB_MPOOLFILE *, DBTCL_INFO *, int));
+static int tcl_PgInit __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DBTCL_INFO *));
+static int tcl_PgIsset __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DBTCL_INFO *));
+
+/*
+ * _MpInfoDelete --
+ * Removes "sub" mp page info structures that are children
+ * of this mp.
+ *
+ * PUBLIC: void _MpInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_MpInfoDelete(interp, mpip)
+ Tcl_Interp *interp; /* Interpreter */
+ DBTCL_INFO *mpip; /* Info for mp */
+{
+ DBTCL_INFO *nextp, *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * mp. Remove its commands and info structure.
+ */
+ nextp = LIST_NEXT(p, entries);
+ if (p->i_parent == mpip && p->i_type == I_PG) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+}
+
+/*
+ * tcl_MpSync --
+ *
+ * PUBLIC: int tcl_MpSync __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpSync(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+
+ DB_LSN lsn;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 3 args.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn");
+ return (TCL_ERROR);
+ }
+
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ ret = memp_sync(envp, &lsn);
+ result = _ReturnSetup(interp, ret, "memp sync");
+ return (result);
+}
+
+/*
+ * tcl_MpTrickle --
+ *
+ * PUBLIC: int tcl_MpTrickle __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpTrickle(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+
+ int pages;
+ int percent;
+ int result;
+ int ret;
+ Tcl_Obj *res;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 3 args.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "percent");
+ return (TCL_ERROR);
+ }
+
+ result = Tcl_GetIntFromObj(interp, objv[2], &percent);
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ ret = memp_trickle(envp, percent, &pages);
+ result = _ReturnSetup(interp, ret, "memp trickle");
+ if (result == TCL_ERROR)
+ return (result);
+
+ res = Tcl_NewIntObj(pages);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+
+}
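A sketch, with the subcommand name guessed from the function name:

    # Write dirty pages until at least 20% of the cache is clean;
    # the return value is the number of pages written.
    set n [$dbenv mpool_trickle 20]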
+
+/*
+ * tcl_Mp --
+ *
+ * PUBLIC: int tcl_Mp __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_Mp(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ static char *mpopts[] = {
+ "-create",
+ "-mode",
+ "-nommap",
+ "-pagesize",
+ "-rdonly",
+ NULL
+ };
+ enum mpopts {
+ MPCREATE,
+ MPMODE,
+ MPNOMMAP,
+ MPPAGE,
+ MPRDONLY
+ };
+ DBTCL_INFO *ip;
+ DB_MPOOLFILE *mpf;
+ Tcl_Obj *res;
+ u_int32_t flag;
+ int i, pgsize, mode, optindex, result, ret;
+ char *file, newname[MSG_SIZE];
+
+ result = TCL_OK;
+ i = 2;
+ flag = 0;
+ mode = 0;
+ pgsize = 0;
+ memset(newname, 0, MSG_SIZE);
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ mpopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get an errant
+ * error message if there is another error.
+ * This arg is the file name.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum mpopts)optindex) {
+ case MPCREATE:
+ flag |= DB_CREATE;
+ break;
+ case MPNOMMAP:
+ flag |= DB_NOMMAP;
+ break;
+ case MPPAGE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-pagesize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &pgsize);
+ break;
+ case MPRDONLY:
+ flag |= DB_RDONLY;
+ break;
+ case MPMODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ }
+ /*
+ * Any left over arg is a file name. It better be the last arg.
+ */
+ file = NULL;
+ if (i != objc) {
+ if (i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?file?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ file = Tcl_GetStringFromObj(objv[i++], NULL);
+ }
+
+ snprintf(newname, sizeof(newname), "%s.mp%d",
+ envip->i_name, envip->i_envmpid);
+ ip = _NewInfo(interp, NULL, newname, I_MP);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ /*
+ * XXX finfop is NULL here. Interface currently doesn't
+ * have all the stuff. Should expand interface.
+ */
+ _debug_check();
+ ret = memp_fopen(envp, file, flag, mode, (size_t)pgsize, NULL, &mpf);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, "mpool");
+ _DeleteInfo(ip);
+ } else {
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this mpool.
+ */
+ envip->i_envmpid++;
+ ip->i_parent = envip;
+ ip->i_pgsz = pgsize;
+ _SetInfoData(ip, mpf);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)mp_Cmd, (ClientData)mpf, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ }
+error:
+ return (result);
+}
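A sketch of opening a pool file through this interface; the mpool subcommand name is an assumption, the option names come from mpopts above, and the handle follows the %s.mp%d naming pattern:

    set mp [$dbenv mpool -create -pagesize 1024 -mode 0644 mpfile]
    # $mp now names a widget such as env0.mp0.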
+
+/*
+ * tcl_MpStat --
+ *
+ * PUBLIC: int tcl_MpStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_MPOOL_STAT *sp;
+ DB_MPOOL_FSTAT **fsp, **savefsp;
+ int result;
+ int ret;
+ Tcl_Obj *res;
+ Tcl_Obj *res1;
+
+ result = TCL_OK;
+ savefsp = NULL;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = memp_stat(envp, &sp, &fsp, NULL);
+ result = _ReturnSetup(interp, ret, "memp stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Cache size (gbytes)", sp->st_gbytes);
+ MAKE_STAT_LIST("Cache size (bytes)", sp->st_bytes);
+ MAKE_STAT_LIST("Cache hits", sp->st_cache_hit);
+ MAKE_STAT_LIST("Cache misses", sp->st_cache_miss);
+ MAKE_STAT_LIST("Number of caches", sp->st_ncache);
+ MAKE_STAT_LIST("Pages mapped into address space", sp->st_map);
+ MAKE_STAT_LIST("Pages created", sp->st_page_create);
+ MAKE_STAT_LIST("Pages read in", sp->st_page_in);
+ MAKE_STAT_LIST("Pages written", sp->st_page_out);
+ MAKE_STAT_LIST("Clean page evictions", sp->st_ro_evict);
+ MAKE_STAT_LIST("Dirty page evictions", sp->st_rw_evict);
+ MAKE_STAT_LIST("Hash buckets", sp->st_hash_buckets);
+ MAKE_STAT_LIST("Hash lookups", sp->st_hash_searches);
+ MAKE_STAT_LIST("Longest hash chain found", sp->st_hash_longest);
+ MAKE_STAT_LIST("Hash elements examined", sp->st_hash_examined);
+ MAKE_STAT_LIST("Cached clean pages", sp->st_page_clean);
+ MAKE_STAT_LIST("Cached dirty pages", sp->st_page_dirty);
+ MAKE_STAT_LIST("Dirty pages trickled", sp->st_page_trickle);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ /*
+ * Save global stat list as res1. The MAKE_STAT_LIST
+ * macro assumes 'res' so we'll use that to build up
+ * our per-file sublist.
+ */
+ res1 = res;
+ savefsp = fsp;
+ for (; fsp != NULL && *fsp != NULL; fsp++) {
+ res = Tcl_NewObj();
+ result = _SetListElem(interp, res, "File Name",
+ strlen("File Name"), (*fsp)->file_name,
+ strlen((*fsp)->file_name));
+ if (result != TCL_OK)
+ goto error;
+ MAKE_STAT_LIST("Page size", (*fsp)->st_pagesize);
+ MAKE_STAT_LIST("Cache Hits", (*fsp)->st_cache_hit);
+ MAKE_STAT_LIST("Cache Misses", (*fsp)->st_cache_miss);
+ MAKE_STAT_LIST("Pages mapped into address space",
+ (*fsp)->st_map);
+ MAKE_STAT_LIST("Pages created", (*fsp)->st_page_create);
+ MAKE_STAT_LIST("Pages read in", (*fsp)->st_page_in);
+ MAKE_STAT_LIST("Pages written", (*fsp)->st_page_out);
+ /*
+ * Now that we have a complete "per-file" stat
+ * list, append that to the other list.
+ */
+ result = Tcl_ListObjAppendElement(interp, res1, res);
+ if (result != TCL_OK)
+ goto error;
+ }
+ Tcl_SetObjResult(interp, res1);
+error:
+ __os_free(sp, sizeof(*sp));
+ if (savefsp != NULL)
+ __os_free(savefsp, 0);
+ return (result);
+}
+
+/*
+ * mp_Cmd --
+ * Implements the "mp" widget.
+ */
+static int
+mp_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Mp handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *mpcmds[] = {
+ "close", "fsync", "get",
+ NULL
+ };
+ enum mpcmds {
+ MPCLOSE, MPFSYNC, MPGET
+ };
+ DB_MPOOLFILE *mp;
+ int cmdindex, length, result, ret;
+ DBTCL_INFO *mpip;
+ Tcl_Obj *res;
+ char *obj_name;
+
+ Tcl_ResetResult(interp);
+ mp = (DB_MPOOLFILE *)clientData;
+ obj_name = Tcl_GetStringFromObj(objv[0], &length);
+ mpip = _NameToInfo(obj_name);
+ result = TCL_OK;
+
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mpip == NULL) {
+ Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], mpcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum mpcmds)cmdindex) {
+ case MPCLOSE:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = memp_fclose(mp);
+ result = _ReturnSetup(interp, ret, "mp close");
+ _MpInfoDelete(interp, mpip);
+ (void)Tcl_DeleteCommand(interp, mpip->i_name);
+ _DeleteInfo(mpip);
+ break;
+ case MPFSYNC:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = memp_fsync(mp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case MPGET:
+ result = tcl_MpGet(interp, objc, objv, mp, mpip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * tcl_MpGet --
+ */
+static int
+tcl_MpGet(interp, objc, objv, mp, mpip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_MPOOLFILE *mp; /* mp pointer */
+ DBTCL_INFO *mpip; /* mp info pointer */
+{
+ static char *mpget[] = {
+ "-create", "-last", "-new",
+ NULL
+ };
+ enum mpget {
+ MPGET_CREATE, MPGET_LAST, MPGET_NEW
+ };
+
+ DBTCL_INFO *ip;
+ Tcl_Obj *res;
+ db_pgno_t pgno;
+ u_int32_t flag;
+ int i, ipgno, optindex, result, ret;
+ char newname[MSG_SIZE];
+ void *page;
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+ i = 2;
+ flag = 0;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ mpget, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get an errant
+ * error message if there is another error.
+ * This arg is the page number.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum mpget)optindex) {
+ case MPGET_CREATE:
+ flag |= DB_MPOOL_CREATE;
+ break;
+ case MPGET_LAST:
+ flag |= DB_MPOOL_LAST;
+ break;
+ case MPGET_NEW:
+ flag |= DB_MPOOL_NEW;
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ }
+ /*
+ * Any left over arg is a page number. It better be the last arg.
+ */
+ ipgno = 0;
+ if (i != objc) {
+ if (i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?pgno?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &ipgno);
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ snprintf(newname, sizeof(newname), "%s.pg%d",
+ mpip->i_name, mpip->i_mppgid);
+ ip = _NewInfo(interp, NULL, newname, I_PG);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ pgno = ipgno;
+ ret = memp_fget(mp, &pgno, flag, &page);
+ result = _ReturnSetup(interp, ret, "mpool get");
+ if (result == TCL_ERROR)
+ _DeleteInfo(ip);
+ else {
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this mpool.
+ */
+ mpip->i_mppgid++;
+ ip->i_parent = mpip;
+ ip->i_pgno = pgno;
+ ip->i_pgsz = mpip->i_pgsz;
+ _SetInfoData(ip, page);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)pg_Cmd, (ClientData)page, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ }
+error:
+ return (result);
+}
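Fetching a page returns yet another widget; the flag spellings come from the mpget table and the optional trailing argument is the page number:

    set pg [$mp get -create 1]    ;# page 1, created if it does not yet exist
    puts "page [$pg pgnum], size [$pg pgsize]"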
+
+/*
+ * pg_Cmd --
+ * Implements the "pg" widget.
+ */
+static int
+pg_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Page handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *pgcmds[] = {
+ "init",
+ "is_setto",
+ "pgnum",
+ "pgsize",
+ "put",
+ "set",
+ NULL
+ };
+ enum pgcmds {
+ PGINIT,
+ PGISSET,
+ PGNUM,
+ PGSIZE,
+ PGPUT,
+ PGSET
+ };
+ DB_MPOOLFILE *mp;
+ int cmdindex, length, result;
+ char *obj_name;
+ void *page;
+ DBTCL_INFO *pgip;
+ Tcl_Obj *res;
+
+ Tcl_ResetResult(interp);
+ page = (void *)clientData;
+ obj_name = Tcl_GetStringFromObj(objv[0], &length);
+ pgip = _NameToInfo(obj_name);
+ mp = NAME_TO_MP(pgip->i_parent->i_name);
+ result = TCL_OK;
+
+ if (page == NULL) {
+ Tcl_SetResult(interp, "NULL page pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (pgip == NULL) {
+ Tcl_SetResult(interp, "NULL page info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], pgcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum pgcmds)cmdindex) {
+ case PGNUM:
+ res = Tcl_NewIntObj(pgip->i_pgno);
+ break;
+ case PGSIZE:
+ res = Tcl_NewLongObj(pgip->i_pgsz);
+ break;
+ case PGSET:
+ case PGPUT:
+ result = tcl_Pg(interp, objc, objv, page, mp, pgip,
+ cmdindex == PGSET ? 0 : 1);
+ break;
+ case PGINIT:
+ result = tcl_PgInit(interp, objc, objv, page, pgip);
+ break;
+ case PGISSET:
+ result = tcl_PgIsset(interp, objc, objv, page, pgip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_Pg(interp, objc, objv, page, mp, pgip, putop)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DB_MPOOLFILE *mp; /* Mpool pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+ int putop; /* Operation */
+{
+ static char *pgopt[] = {
+ "-clean", "-dirty", "-discard",
+ NULL
+ };
+ enum pgopt {
+ PGCLEAN, PGDIRTY, PGDISCARD
+ };
+ u_int32_t flag;
+ int i, optindex, result, ret;
+
+ result = TCL_OK;
+ i = 2;
+ flag = 0;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ pgopt, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum pgopt)optindex) {
+ case PGCLEAN:
+ flag |= DB_MPOOL_CLEAN;
+ break;
+ case PGDIRTY:
+ flag |= DB_MPOOL_DIRTY;
+ break;
+ case PGDISCARD:
+ flag |= DB_MPOOL_DISCARD;
+ break;
+ }
+ }
+
+ _debug_check();
+ if (putop)
+ ret = memp_fput(mp, page, flag);
+ else
+ ret = memp_fset(mp, page, flag);
+
+ result = _ReturnSetup(interp, ret, "page");
+
+ if (putop) {
+ (void)Tcl_DeleteCommand(interp, pgip->i_name);
+ _DeleteInfo(pgip);
+ }
+ return (result);
+}
+
+static int
+tcl_PgInit(interp, objc, objv, page, pgip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+{
+ Tcl_Obj *res;
+ size_t pgsz;
+ long *p, *endp, newval;
+ int length, result;
+ u_char *s;
+
+ result = TCL_OK;
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "val");
+ return (TCL_ERROR);
+ }
+
+ pgsz = pgip->i_pgsz;
+ result = Tcl_GetLongFromObj(interp, objv[2], &newval);
+ if (result != TCL_OK) {
+ s = Tcl_GetByteArrayFromObj(objv[2], &length);
+ if (s == NULL)
+ return (TCL_ERROR);
+ memcpy(page, s, ((size_t)length < pgsz) ? length : pgsz);
+ result = TCL_OK;
+ } else {
+ p = (long *)page;
+ for (endp = p + (pgsz / sizeof(long)); p < endp; p++)
+ *p = newval;
+ }
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_PgIsset(interp, objc, objv, page, pgip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+{
+ Tcl_Obj *res;
+ size_t pgsz;
+ long *p, *endp, newval;
+ int length, result;
+ u_char *s;
+
+ result = TCL_OK;
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "val");
+ return (TCL_ERROR);
+ }
+
+ pgsz = pgip->i_pgsz;
+ result = Tcl_GetLongFromObj(interp, objv[2], &newval);
+ if (result != TCL_OK) {
+ if ((s = Tcl_GetByteArrayFromObj(objv[2], &length)) == NULL)
+ return (TCL_ERROR);
+ result = TCL_OK;
+
+ if (memcmp(page,
+ s, ((size_t)length < pgsz) ? length : pgsz ) != 0) {
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+ }
+ } else {
+ p = (long *)page;
+ /*
+ * If any value is not the same, return 0 (is not set to
+ * this value). Otherwise, if we finish the loop, we return 1
+ * (is set to this value).
+ */
+ for (endp = p + (pgsz/sizeof(long)); p < endp; p++)
+ if (*p != newval) {
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+ }
+ }
+
+ res = Tcl_NewIntObj(1);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
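Putting the page commands together, a short sketch of the init/is_setto round trip these last two helpers implement:

    $pg init 42              ;# fill every long word on the page with 42
    puts [$pg is_setto 42]   ;# 1
    puts [$pg is_setto 7]    ;# 0
    $pg put -discard         ;# release the page and drop it from the cache
    $mp close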
diff --git a/bdb/tcl/tcl_txn.c b/bdb/tcl/tcl_txn.c
new file mode 100644
index 00000000000..dfe6b6cf60f
--- /dev/null
+++ b/bdb/tcl/tcl_txn.c
@@ -0,0 +1,473 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_txn.c,v 11.24 2000/12/31 19:26:23 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int tcl_TxnCommit __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DB_TXN *, DBTCL_INFO *));
+
+/*
+ * _TxnInfoDelete --
+ * Removes nested txn info structures that are children
+ * of this txn.
+ * RECURSIVE: Transactions can be arbitrarily nested, so we
+ * must recurse down until we get them all.
+ *
+ * PUBLIC: void _TxnInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_TxnInfoDelete(interp, txnip)
+ Tcl_Interp *interp; /* Interpreter */
+ DBTCL_INFO *txnip; /* Info for txn */
+{
+ DBTCL_INFO *nextp, *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * txn. Remove its commands and info structure.
+ */
+ nextp = LIST_NEXT(p, entries);
+ if (p->i_parent == txnip && p->i_type == I_TXN) {
+ _TxnInfoDelete(interp, p);
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+}
+
+/*
+ * tcl_TxnCheckpoint --
+ *
+ * PUBLIC: int tcl_TxnCheckpoint __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_TxnCheckpoint(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *txnckpopts[] = {
+ "-kbyte", "-min",
+ NULL
+ };
+ enum txnckpopts {
+ TXNCKP_KB, TXNCKP_MIN
+ };
+ int i, kb, min, optindex, result, ret;
+
+ result = TCL_OK;
+ kb = min = 0;
+
+ /*
+ * Get the flag index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ txnckpopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ return (IS_HELP(objv[i]));
+ }
+ i++;
+ switch ((enum txnckpopts)optindex) {
+ case TXNCKP_KB:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-kbyte kb?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &kb);
+ break;
+ case TXNCKP_MIN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-min min?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &min);
+ break;
+ }
+ }
+ _debug_check();
+ ret = txn_checkpoint(envp, (u_int32_t)kb, (u_int32_t)min, 0);
+ result = _ReturnSetup(interp, ret, "txn checkpoint");
+ return (result);
+}
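archive.tcl below calls this with no options; with the options handled above it would look like:

    $dbenv txn_checkpoint               ;# unconditional checkpoint
    $dbenv txn_checkpoint -kbyte 64     ;# only if enough log has accumulated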
+
+/*
+ * tcl_Txn --
+ *
+ * PUBLIC: int tcl_Txn __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_Txn(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ static char *txnopts[] = {
+ "-nosync",
+ "-nowait",
+ "-parent",
+ "-sync",
+ NULL
+ };
+ enum txnopts {
+ TXN_NOSYNC,
+ TXN_NOWAIT,
+ TXN_PARENT,
+ TXN_SYNC
+ };
+ DBTCL_INFO *ip;
+ DB_TXN *parent;
+ DB_TXN *txn;
+ Tcl_Obj *res;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE], newname[MSG_SIZE];
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+
+ parent = NULL;
+ flag = 0;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ txnopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ return (IS_HELP(objv[i]));
+ }
+ i++;
+ switch ((enum txnopts)optindex) {
+ case TXN_PARENT:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-parent txn?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ parent = NAME_TO_TXN(arg);
+ if (parent == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Invalid parent txn: %s\n",
+ arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TXN_NOWAIT:
+ FLAG_CHECK(flag);
+ flag |= DB_TXN_NOWAIT;
+ break;
+ case TXN_SYNC:
+ FLAG_CHECK(flag);
+ flag |= DB_TXN_SYNC;
+ break;
+ case TXN_NOSYNC:
+ FLAG_CHECK(flag);
+ flag |= DB_TXN_NOSYNC;
+ break;
+ }
+ }
+ snprintf(newname, sizeof(newname), "%s.txn%d",
+ envip->i_name, envip->i_envtxnid);
+ ip = _NewInfo(interp, NULL, newname, I_TXN);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txn_begin(envp, parent, &txn, flag);
+ result = _ReturnSetup(interp, ret, "txn");
+ if (result == TCL_ERROR)
+ _DeleteInfo(ip);
+ else {
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this txn.
+ */
+ envip->i_envtxnid++;
+ if (parent)
+ ip->i_parent = _PtrToInfo(parent);
+ else
+ ip->i_parent = envip;
+ _SetInfoData(ip, txn);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)txn, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ }
+ return (result);
+}
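The txn subcommand is used directly by archive.tcl below; a sketch that also shows the -parent nesting handled above:

    set parent [$dbenv txn]
    set child  [$dbenv txn -parent $parent]
    # Handles follow the %s.txn%d pattern, e.g. env0.txn0, env0.txn1.
    $child commit
    $parent commit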
+
+/*
+ * tcl_TxnStat --
+ *
+ * PUBLIC: int tcl_TxnStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_TxnStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+#define MAKE_STAT_LSN(s, lsn) \
+do { \
+ myobjc = 2; \
+ myobjv[0] = Tcl_NewIntObj((lsn)->file); \
+ myobjv[1] = Tcl_NewIntObj((lsn)->offset); \
+ lsnlist = Tcl_NewListObj(myobjc, myobjv); \
+ myobjc = 2; \
+ myobjv[0] = Tcl_NewStringObj((s), strlen(s)); \
+ myobjv[1] = lsnlist; \
+ thislist = Tcl_NewListObj(myobjc, myobjv); \
+ result = Tcl_ListObjAppendElement(interp, res, thislist); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0);
+
+ DBTCL_INFO *ip;
+ DB_TXN_ACTIVE *p;
+ DB_TXN_STAT *sp;
+ Tcl_Obj *myobjv[2], *res, *thislist, *lsnlist;
+ u_int32_t i;
+ int myobjc, result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txn_stat(envp, &sp, NULL);
+ result = _ReturnSetup(interp, ret, "txn stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LSN("LSN of last checkpoint", &sp->st_last_ckp);
+ MAKE_STAT_LSN("LSN of pending checkpoint", &sp->st_pending_ckp);
+ MAKE_STAT_LIST("Time of last checkpoint", sp->st_time_ckp);
+ MAKE_STAT_LIST("Last txn ID allocated", sp->st_last_txnid);
+ MAKE_STAT_LIST("Max Txns", sp->st_maxtxns);
+ MAKE_STAT_LIST("Number aborted txns", sp->st_naborts);
+ MAKE_STAT_LIST("Number active txns", sp->st_nactive);
+ MAKE_STAT_LIST("Number txns begun", sp->st_nbegins);
+ MAKE_STAT_LIST("Number committed txns", sp->st_ncommits);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ for (i = 0, p = sp->st_txnarray; i < sp->st_nactive; i++, p++)
+ for (ip = LIST_FIRST(&__db_infohead); ip != NULL;
+ ip = LIST_NEXT(ip, entries)) {
+ if (ip->i_type != I_TXN)
+ continue;
+ if (ip->i_type == I_TXN &&
+ (txn_id(ip->i_txnp) == p->txnid)) {
+ MAKE_STAT_LSN(ip->i_name, &p->lsn);
+ if (p->parentid != 0)
+ MAKE_STAT_STRLIST("Parent",
+ ip->i_parent->i_name);
+ else
+ MAKE_STAT_LIST("Parent", 0);
+ break;
+ }
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ __os_free(sp, sizeof(*sp));
+ return (result);
+}
+
+/*
+ * txn_Cmd --
+ * Implements the "txn" widget.
+ *
+ * PUBLIC: int txn_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+txn_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Txn handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *txncmds[] = {
+ "abort",
+ "commit",
+ "id",
+ "prepare",
+ NULL
+ };
+ enum txncmds {
+ TXNABORT,
+ TXNCOMMIT,
+ TXNID,
+ TXNPREPARE
+ };
+ DBTCL_INFO *txnip;
+ DB_TXN *txnp;
+ Tcl_Obj *res;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ txnp = (DB_TXN *)clientData;
+ txnip = _PtrToInfo((void *)txnp);
+ result = TCL_OK;
+ if (txnp == NULL) {
+ Tcl_SetResult(interp, "NULL txn pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (txnip == NULL) {
+ Tcl_SetResult(interp, "NULL txn info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], txncmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum txncmds)cmdindex) {
+ case TXNID:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txn_id(txnp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case TXNPREPARE:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txn_prepare(txnp);
+ result = _ReturnSetup(interp, ret, "txn prepare");
+ break;
+ case TXNCOMMIT:
+ result = tcl_TxnCommit(interp, objc, objv, txnp, txnip);
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ case TXNABORT:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txn_abort(txnp);
+ result = _ReturnSetup(interp, ret, "txn abort");
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_TxnCommit(interp, objc, objv, txnp, txnip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_TXN *txnp; /* Transaction pointer */
+ DBTCL_INFO *txnip; /* Info pointer */
+{
+ static char *commitopt[] = {
+ "-nosync",
+ "-sync",
+ NULL
+ };
+	enum commitopt {
+		COMNOSYNC,
+		COMSYNC
+	};
+ u_int32_t flag;
+ int optindex, result, ret;
+
+ COMPQUIET(txnip, NULL);
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc != 2 && objc != 3) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ if (objc == 3) {
+ if (Tcl_GetIndexFromObj(interp, objv[2], commitopt,
+ "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[2]));
+ switch ((enum commitopt)optindex) {
+ case COMSYNC:
+ FLAG_CHECK(flag);
+ flag = DB_TXN_SYNC;
+ break;
+ case COMNOSYNC:
+ FLAG_CHECK(flag);
+ flag = DB_TXN_NOSYNC;
+ break;
+ }
+ }
+
+ _debug_check();
+ ret = txn_commit(txnp, flag);
+ result = _ReturnSetup(interp, ret, "txn commit");
+ return (result);
+}
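A short sketch of the commit path dispatched through txn_Cmd, with the -nosync spelling taken from commitopt above:

    set t [$dbenv txn]
    puts "transaction id [$t id]"
    $t commit -nosync        ;# commit without forcing the log to disk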
diff --git a/bdb/test/TESTS b/bdb/test/TESTS
new file mode 100644
index 00000000000..a585bdddcde
--- /dev/null
+++ b/bdb/test/TESTS
@@ -0,0 +1,448 @@
+# $Id: TESTS,v 11.34 2000/11/06 19:31:56 sue Exp $
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Access method tests
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test001 Small keys/data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+test002 Small keys/medium data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+test003 Small keys/large data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+test004 Small keys/medium data
+ Put/get per key
+ Sequential (cursor) get/delete
+
+test005 Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Sequential (cursor) get/delete
+
+test006 Small keys/medium data
+ Put/get per key
+ Keyed delete and verify
+
+test007 Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Keyed delete
+
+test008 Small keys/large data
+ Put/get per key
+ Loop through keys by steps (which change)
+ ... delete each key at step
+ ... add each key back
+ ... change step
+ Confirm that overflow pages are getting reused
+
+test009 Small keys/large data
+ Same as test008; close and reopen database
+
+test010 Duplicate test
+ Small key/data pairs.
+
+test011 Duplicate test
+ Small key/data pairs.
+ Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+ To test off-page duplicates, run with small pagesize.
+
+test012 Large keys/small data
+ Same as test003 except use big keys (source files and
+ executables) and small data (the file/executable names).
+
+test013 Partial put test
+ Overwrite entire records using partial puts. Make sure
+ that NOOVERWRITE flag works.
+
+test014 Exercise partial puts on short data
+ Run 5 combinations of numbers of characters to replace,
+ and number of times to increase the size by.
+
+test015 Partial put test
+ Partial put test where the key does not initially exist.
+
+test016 Partial put test
+ Partial put where the datum gets shorter as a result of
+ the put.
+
+test017 Basic offpage duplicate test.
+
+test018 Offpage duplicate test
+ Key_{first,last,before,after} offpage duplicates.
+
+test019 Partial get test.
+
+test020 In-Memory database tests.
+
+test021 Btree range tests.
+
+test022 Test of DB->getbyteswapped().
+
+test023 Duplicate test
+ Exercise deletes and cursor operations within a
+ duplicate set.
+
+test024 Record number retrieval test.
+
+test025 DB_APPEND flag test.
+
+test026 Small keys/medium data w/duplicates
+ Put/get per key.
+ Loop through keys -- delete each key
+ ... test that cursors delete duplicates correctly
+
+test027 Off-page duplicate test
+ Test026 with parameters to force off-page duplicates.
+
+test028 Cursor delete test
+ Test put operations after deleting through a cursor.
+
+test029 Record renumbering
+
+test030 DB_NEXT_DUP functionality
+
+test031 Duplicate sorting functionality
+ Make sure DB_NODUPDATA works.
+
+test032 DB_GET_BOTH
+
+test033 DB_GET_BOTH without comparison function
+
+test034 Test032 with off-page duplicates
+
+test035 Test033 with off-page duplicates
+
+test036 Test KEYFIRST and KEYLAST when the key doesn't exist
+
+test037 Test DB_RMW
+
+test038 DB_GET_BOTH on deleted items
+
+test039 DB_GET_BOTH on deleted items without comparison function
+
+test040 Test038 with off-page duplicates
+
+test041 Test039 with off-page duplicates
+
+test042 Concurrent Data Store test
+
+test043 Recno renumbering and implicit creation test
+
+test044 Small system integration tests
+ Test proper functioning of the checkpoint daemon,
+ recovery, transactions, etc.
+
+test045 Small random tester
+ Runs a number of random add/delete/retrieve operations.
+ Tests both successful conditions and error conditions.
+
+test046 Overwrite test of small/big key/data with cursor checks.
+
+test047 Cursor get test with SET_RANGE option.
+
+test048 Cursor stability across Btree splits.
+
+test049	Cursor operations on uninitialized cursors.
+
+test050 Cursor overwrite test for Recno.
+
+test051 Fixed-length record Recno test.
+
+test052 Renumbering record Recno test.
+
+test053 DB_REVSPLITOFF flag test
+
+test054 Cursor maintenance during key/data deletion.
+
+test054 Basic cursor operations.
+
+test055 Cursor maintenance during key deletes.
+
+test056 Cursor maintenance during deletes.
+
+test057 Cursor maintenance during key deletes.
+
+test058 Verify that deleting and reading duplicates results in
+ correct ordering.
+
+test059 Cursor ops work with a partial length of 0.
+
+test060 Test of the DB_EXCL flag to DB->open().
+
+test061 Test of txn abort and commit for in-memory databases.
+
+test062 Test of partial puts (using DB_CURRENT) onto duplicate pages.
+
+test063 Test of the DB_RDONLY flag to DB->open
+
+test064 Test of DB->get_type
+
+test065 Test of DB->stat(DB_RECORDCOUNT)
+
+test066 Test of cursor overwrites of DB_CURRENT w/ duplicates.
+
+test067 Test of DB_CURRENT partial puts onto almost empty duplicate
+ pages, with and without DB_DUP_SORT.
+
+test068 Test of DB_BEFORE and DB_AFTER with partial puts.
+
+test069 Test of DB_CURRENT partial puts without duplicates--
+ test067 w/ small ndups.
+
+test070 Test of DB_CONSUME (Four consumers, 1000 items.)
+
+test071 Test of DB_CONSUME (One consumer, 10000 items.)
+
+test072 Cursor stability test when dups are moved off-page
+
+test073 Test of cursor stability on duplicate pages.
+
+test074 Test of DB_NEXT_NODUP.
+
+test075 Test of DB->rename().
+ (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+
+test076 Test creation of many small databases in a single environment.
+ [#1528].
+
+test077 Test of DB_GET_RECNO [#1206].
+
+test078 Test of DBC->c_count().
+
+test079 Test of deletes in large trees. (test006 w/ sm. pagesize).
+
+test080 Test of DB->remove()
+
+test081 Test off-page duplicates and overflow pages together with
+ very large keys (key/data as file contents).
+
+test082 Test of DB_PREV_NODUP (uses test074).
+
+test083 Test of DB->key_range.
+
+test084 Sanity test of large (64K) pages.
+
+test085 Test of cursor behavior when a cursor is pointing to a deleted
+ btree key which then has duplicates added. [#2473]
+
+test086 Test of cursor stability across btree splits/rsplits with
+ subtransaction aborts (a variant of test048). [#2373]
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Cursor Join.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+jointest Test duplicate assisted joins.
+ Executes 1, 2, 3 and 4-way joins with differing
+ index orders and selectivity.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Deadlock detection.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead001 Use two different configurations to test deadlock
+ detection among a variable number of processes. One
+ configuration has the processes deadlocked in a ring.
+ The other has the processes all deadlocked on a single
+ resource.
+
+dead002 Same test as dead001, but use "detect on every collision"
+ instead of separate deadlock detector.
+
+dead003 Same test as dead002, but explicitly specify oldest or
+ youngest. Verify the correct lock was aborted/granted.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Lock tests
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock001 Basic lock test, gets/puts. Contention without waiting.
+
+lock002 Multi-process lock tests.
+
+lock003 Multiprocess random lock test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Logging test
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log001 Read/write log records.
+
+log002 Tests multiple logs
+ Log truncation
+ lsn comparison and file functionality.
+
+log003 Verify that log_flush is flushing records correctly.
+
+log004 Prev on log when beginning of log has been truncated.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Mpool test
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp001 Randomly updates pages.
+
+memp002 Tests multiple processes accessing and modifying the same
+ files.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Recovery
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd001 Per-operation recovery tests for non-duplicate, non-split
+ messages. Makes sure that we exercise redo, undo, and
+ do-nothing condition. Any test that appears with the
+ message (change state) indicates that we've already run
+ the particular test, but we are running it again so that
+ we can change the state of the data base to prepare for
+ the next test (this applies to all other recovery tests
+ as well).
+
+recd002 Split recovery tests. For every known split log message,
+ makes sure that we exercise redo, undo, and do-nothing
+ condition.
+
+recd003 Duplicate recovery tests. For every known duplicate log
+ message, makes sure that we exercise redo, undo, and
+ do-nothing condition.
+
+recd004 Big key test where big key gets elevated to internal page.
+
+recd005 Verify reuse of file ids works on catastrophic recovery.
+
+recd006 Nested transactions.
+
+recd007 File create/delete tests.
+
+recd008 Test deeply nested transactions.
+
+recd009 Verify record numbering across split/reverse splits
+ and recovery.
+
+recd010 Verify duplicates across split/reverse splits
+ and recovery.
+
+recd011 Verify that recovery to a specific timestamp works.
+
+recd012 Test of log file ID management. [#2288]
+
+recd013 Test of cursor adjustment on child transaction aborts. [#2373]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Subdatabase tests
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb001 Tests mixing db and subdb operations
+ Create a db, add data, try to create a subdb.
+ Test naming db and subdb with a leading - for
+ correct parsing
+ Existence check -- test use of -excl with subdbs
+
+subdb002 Tests basic subdb functionality
+ Small keys, small data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+subdb003 Tests many subdbs
+ Creates many subdbs and puts a small amount of
+ data in each (many defaults to 2000)
+
+subdb004 Tests large subdb names
+ subdb name = filecontents,
+ key = filename, data = filecontents
+ Put/get per key
+ Dump file
+ Dump subdbs, verify data and subdb name match
+
+subdb005 Tests cursor operations in subdbs
+ Put/get per key
+ Verify cursor operations work within subdb
+ Verify cursor operations do not work across subdbs
+
+subdb006 Tests intra-subdb join
+
+subdb007 Tests page size differences between subdbs
+ Open several subdbs, each with a different pagesize
+ Small keys, small data
+ Put/get per key per subdb
+ Dump file, verify per subdb
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+subdb008 Tests lorder differences between subdbs
+ Open several subdbs, each with a different/random lorder
+ Small keys, small data
+ Put/get per key per subdb
+ Dump file, verify per subdb
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+subdb009 Test DB->rename() method for subdbs
+
+subdb010 Test DB->remove() method for subdbs
+
+subdbtest001 Tests multiple access methods in one subdb
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Dump file, verify per subdb
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+subdbtest002 Tests multiple access methods in one subdb access by
+ multiple processes
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Fork off several child procs to each delete selected
+ data from their subdb and then exit
+ Dump file, verify contents of each subdb is correct
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Transaction tests
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn001 Begin, commit, abort testing.
+
+txn002 Verify that read-only transactions do not write log records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Environment tests
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env001 Test of env remove interface (formerly env_remove).
+
+env002 Test of DB_LOG_DIR and env name resolution.
+
+env003 Test of DB_TMP_DIR and env name resolution.
+
+env004 Multiple data directories test.
+
+env005 Test for using subsystems without initializing them correctly.
+
+env006 Smoke test that the utilities all run.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+RPC tests
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+[RPC tests also include running all Access Method tests for all methods
+via an RPC server]
+
+rpc001 Test RPC server timeouts for cursor, txn and env handles.
+
+rpc002 Test unsupported functions
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Recno backing file tests
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc001 Basic backing file test (put/get)
+
+rsrc002 Test of set_re_delim
diff --git a/bdb/test/archive.tcl b/bdb/test/archive.tcl
new file mode 100644
index 00000000000..9fdbe82d137
--- /dev/null
+++ b/bdb/test/archive.tcl
@@ -0,0 +1,232 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: archive.tcl,v 11.14 2000/10/27 13:23:55 sue Exp $
+#
+# Options are:
+# -checkrec <checkpoint frequency>
+# -dir <dbhome directory>
+# -maxfilesize <maxsize of log file>
+# -stat
+proc archive_usage {} {
+ puts "archive -checkrec <checkpt freq> -dir <directory> \
+ -maxfilesize <max size of log files>"
+}
+proc archive_command { args } {
+ source ./include.tcl
+
+ # Catch a list of files output by db_archive.
+ catch { eval exec $util_path/db_archive $args } output
+
+ if { $is_windows_test == 1 || 1 } {
+ # On Windows, convert all filenames to use forward slashes.
+ regsub -all {[\\]} $output / output
+ }
+
+ # Output the [possibly-transformed] list.
+ return $output
+}
+proc archive { args } {
+ global alphabet
+ source ./include.tcl
+
+ # Set defaults
+ set maxbsize [expr 8 * 1024]
+ set maxfile [expr 32 * 1024]
+ set dostat 0
+ set checkrec 500
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -c.* { incr i; set checkrec [lindex $args $i] }
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -m.* { incr i; set maxfile [lindex $args $i] }
+ -s.* { set dostat 1 }
+ default {
+ puts -nonewline "FAIL:[timestamp] Usage: "
+ archive_usage
+ return
+ }
+
+ }
+ }
+
+ # Clean out the old log if it exists
+ puts "Unlinking log: error message OK"
+ env_cleanup $testdir
+
+ # Now run the various functionality tests
+ set eflags "-create -txn -home $testdir \
+ -log_buffer $maxbsize -log_max $maxfile"
+ set dbenv [eval {berkdb env} $eflags]
+ error_check_bad dbenv $dbenv NULL
+ error_check_good dbenv [is_substr $dbenv env] 1
+
+ # The basic test structure here is that we write a lot of log
+ # records (enough to fill up 100 log files; each log file is
+ # small). We take periodic checkpoints. Between each pair
+ # of checkpoints, we refer to 2 files, overlapping them each
+ # checkpoint. We also start transactions and let them overlap
+ # checkpoints as well. The pattern that we try to create is:
+ # ---- write log records----|||||--- write log records ---
+ # -T1 T2 T3 --- D1 D2 ------CHECK--- CT1 --- D2 D3 CD1 ----CHECK
+ # where Tx is begin transaction, CTx is commit transaction, Dx is
+ # open data file and CDx is close data file.
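+ # (For instance, between the first two checkpoints above: transaction
+ # 1 commits (CT1), data file 3 is opened (D3) and data file 1 is
+ # closed (CD1), so files 2 and 3 are the ones open at the second
+ # checkpoint.)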
+
+ set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
+ puts "Archive.a: Writing log records; checkpoint every $checkrec records"
+ set nrecs $maxfile
+ set rec 0:$baserec
+
+ # Begin transaction and write a log record
+ set t1 [$dbenv txn]
+ error_check_good t1:txn_begin [is_substr $t1 "txn"] 1
+
+ set l1 [$dbenv log_put $rec]
+ error_check_bad l1:log_put [llength $l1] 0
+
+ set lsnlist [list [lindex $l1 0]]
+
+ set t2 [$dbenv txn]
+ error_check_good t2:txn_begin [is_substr $t2 "txn"] 1
+
+ set l1 [$dbenv log_put $rec]
+ lappend lsnlist [lindex $l1 0]
+
+ set t3 [$dbenv txn]
+ set l1 [$dbenv log_put $rec]
+ lappend lsnlist [lindex $l1 0]
+
+ set txnlist [list $t1 $t2 $t3]
+ set db1 [eval {berkdb_open} "-create -mode 0644 -hash -env $dbenv ar1"]
+ set db2 [eval {berkdb_open} "-create -mode 0644 -btree -env $dbenv ar2"]
+ set dbcount 3
+ set dblist [list $db1 $db2]
+
+ for { set i 1 } { $i <= $nrecs } { incr i } {
+ set rec $i:$baserec
+ set lsn [$dbenv log_put $rec]
+ error_check_bad log_put [llength $lsn] 0
+ if { [expr $i % $checkrec] == 0 } {
+ # Take a checkpoint
+ $dbenv txn_checkpoint
+ set ckp_file [lindex [lindex [$dbenv log_get -last] 0] 0]
+ catch { archive_command -h $testdir -a } res_log_full
+ if { [string first db_archive $res_log_full] == 0 } {
+ set res_log_full ""
+ }
+ catch { archive_command -h $testdir } res_log
+ if { [string first db_archive $res_log] == 0 } {
+ set res_log ""
+ }
+ catch { archive_command -h $testdir -l } res_alllog
+ catch { archive_command -h $testdir -a -s } \
+ res_data_full
+ catch { archive_command -h $testdir -s } res_data
+ error_check_good nlogfiles [llength $res_alllog] \
+ [lindex [lindex [$dbenv log_get -last] 0] 0]
+ error_check_good logs_match [llength $res_log_full] \
+ [llength $res_log]
+ error_check_good data_match [llength $res_data_full] \
+ [llength $res_data]
+
+ # Check right number of log files
+ error_check_good nlogs [llength $res_log] \
+ [expr [lindex $lsnlist 0] - 1]
+
+ # Check that the relative names are a subset of the
+ # full names
+ set n 0
+ foreach x $res_log {
+ error_check_bad log_name_match:$res_log \
+ [string first $x \
+ [lindex $res_log_full $n]] -1
+ incr n
+ }
+
+ set n 0
+ foreach x $res_data {
+ error_check_bad log_name_match:$res_data \
+ [string first $x \
+ [lindex $res_data_full $n]] -1
+ incr n
+ }
+
+ # Commit the next transaction on the list; when the list
+ # empties, begin three new transactions.
+ set t [lindex $txnlist 0]
+ if { [string length $t] != 0 } {
+ error_check_good txn_commit:$t [$t commit] 0
+ set txnlist [lrange $txnlist 1 end]
+ }
+ set lsnlist [lrange $lsnlist 1 end]
+
+ if { [llength $txnlist] == 0 } {
+ set t1 [$dbenv txn]
+ error_check_bad tx_begin $t1 NULL
+ error_check_good \
+ tx_begin [is_substr $t1 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set t2 [$dbenv txn]
+ error_check_bad tx_begin $t2 NULL
+ error_check_good \
+ tx_begin [is_substr $t2 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set t3 [$dbenv txn]
+ error_check_bad tx_begin $t3 NULL
+ error_check_good \
+ tx_begin [is_substr $t3 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set txnlist [list $t1 $t2 $t3]
+ }
+
+ # Open/close some DB files
+ if { [expr $dbcount % 2] == 0 } {
+ set type "-hash"
+ } else {
+ set type "-btree"
+ }
+ set db [eval {berkdb_open} \
+ "-create -mode 0644 $type -env $dbenv ar$dbcount"]
+ error_check_bad db_open:$dbcount $db NULL
+ error_check_good db_open:$dbcount [is_substr $db db] 1
+ incr dbcount
+
+ lappend dblist $db
+ set db [lindex $dblist 0]
+ error_check_good db_close:$db [$db close] 0
+ set dblist [lrange $dblist 1 end]
+
+ }
+ }
+ # Commit any transactions still running.
+ puts "Archive: Commit any transactions still running."
+ foreach t $txnlist {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Close any files that are still open.
+ puts "Archive: Close open files."
+ foreach d $dblist {
+ error_check_good db_close:$d [$d close] 0
+ }
+
+ # Close and unlink the file
+ reset_env $dbenv
+
+ puts "Archive: Complete."
+}
+
+proc min { a b } {
+ if {$a < $b} {
+ return $a
+ } else {
+ return $b
+ }
+}
diff --git a/bdb/test/byteorder.tcl b/bdb/test/byteorder.tcl
new file mode 100644
index 00000000000..d9e44e1d27d
--- /dev/null
+++ b/bdb/test/byteorder.tcl
@@ -0,0 +1,23 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: byteorder.tcl,v 11.7 2000/11/16 23:56:18 ubell Exp $
+#
+# Byte Order Test
+# Use existing tests and run with both byte orders.
+proc byteorder { method {nentries 1000} } {
+ puts "Byteorder: $method $nentries"
+
+ eval {test001 $method $nentries 0 "01" -lorder 1234}
+ eval {test001 $method $nentries 0 "01" -lorder 4321}
+ eval {test003 $method -lorder 1234}
+ eval {test003 $method -lorder 4321}
+ eval {test010 $method $nentries 5 10 -lorder 1234}
+ eval {test010 $method $nentries 5 10 -lorder 4321}
+ eval {test011 $method $nentries 5 11 -lorder 1234}
+ eval {test011 $method $nentries 5 11 -lorder 4321}
+ eval {test018 $method $nentries -lorder 1234}
+ eval {test018 $method $nentries -lorder 4321}
+}
diff --git a/bdb/test/conscript.tcl b/bdb/test/conscript.tcl
new file mode 100644
index 00000000000..11d0eb58e7d
--- /dev/null
+++ b/bdb/test/conscript.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: conscript.tcl,v 11.12 2000/12/01 04:28:36 ubell Exp $
+#
+# Script for DB_CONSUME test (test070.tcl).
+# Usage: conscript dir file runtype nitems outputfile tnum args
+# dir: DBHOME directory
+# file: db file on which to operate
+# runtype: PRODUCE, CONSUME or WAIT--which am I?
+# nitems: number of items to put or get
+# outputfile: where to log consumer results
+# tnum: test number
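+# args: any extra open flags (e.g. " -len 20 -pad 0") passed through
+#	to berkdb_open
+#
+# Illustrative invocation only (test070.tcl normally drives this script
+# through wrap.tcl): conscript.tcl TESTDIR test070.db PRODUCE 1000 "" 70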
+
+proc consumescript_produce { db_cmd nitems tnum args } {
+ source ./include.tcl
+ global mydata
+
+ set pid [pid]
+ puts "\tTest0$tnum: Producer $pid starting, producing $nitems items."
+
+ set db [eval $db_cmd]
+ error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+ set oret -1
+ set ret 0
+ for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
+ set oret $ret
+ set ret [$db put -append [chop_data q $mydata]]
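+ # A put with -append returns the newly allocated record number,
+ # which should increase with every successful put.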
+ error_check_good db_put \
+ [expr $ret > 0 ? $oret < $ret : \
+ $oret < 0 ? $oret < $ret : $oret > $ret] 1
+
+ }
+ # XXX: We permit incomplete syncs because they seem to
+ # be unavoidable and not damaging.
+ set ret [catch {$db close} res]
+ error_check_good db_close:$pid [expr ($ret == 0) ||\
+ ([is_substr $res DB_INCOMPLETE] == 1)] 1
+ puts "\t\tTest0$tnum: Producer $pid finished."
+}
+
+proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
+ source ./include.tcl
+ global mydata
+ set pid [pid]
+ puts "\tTest0$tnum: Consumer $pid starting, seeking $nitems items."
+
+ set db [eval $db_cmd]
+ error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+ set oid [open $outputfile w]
+
+ for { set ndx 0 } { $ndx < $nitems } { } {
+ set ret [$db get $mode]
+ if { [llength $ret] > 0 } {
+ error_check_good correct_data:$pid \
+ [lindex [lindex $ret 0] 1] [pad_data q $mydata]
+ set rno [lindex [lindex $ret 0] 0]
+ puts $oid $rno
+ incr ndx
+ } else {
+ # No data to consume yet; loop and retry.
+ }
+ }
+
+ error_check_good output_close:$pid [close $oid] ""
+ # XXX: see above note.
+ set ret [catch {$db close} res]
+ error_check_good db_close:$pid [expr ($ret == 0) ||\
+ ([is_substr $res DB_INCOMPLETE] == 1)] 1
+ puts "\t\tTest0$tnum: Consumer $pid finished."
+}
+
+source ./include.tcl
+source $test_path/test.tcl
+
+# Verify usage
+set usage "conscript.tcl dir file runtype nitems outputfile tnum"
+
+if { $argc < 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set file [lindex $argv 1]
+set runtype [lindex $argv 2]
+set nitems [lindex $argv 3]
+set outputfile [lindex $argv 4]
+set tnum [lindex $argv 5]
+# args is the string "{ -len 20 -pad 0}", so we need to extract the
+# " -len 20 -pad 0" part.
+set args [lindex [lrange $argv 6 end] 0]
+
+set mydata "consumer data"
+
+# Open env
+set dbenv [berkdb env -home $dir ]
+error_check_good db_env_create [is_valid_env $dbenv] TRUE
+
+# Figure out db opening command.
+set db_cmd [concat {berkdb_open -create -mode 0644 -queue -env}\
+ $dbenv $args $file]
+
+# Invoke consumescript_produce or consumescript_consume based on $runtype
+if { $runtype == "PRODUCE" } {
+ # Producers have nothing to log; make sure outputfile is null.
+ error_check_good no_producer_outputfile $outputfile ""
+ consumescript_produce $db_cmd $nitems $tnum $args
+} elseif { $runtype == "CONSUME" } {
+ consumescript_consume $db_cmd $nitems $tnum $outputfile -consume $args
+} elseif { $runtype == "WAIT" } {
+ consumescript_consume $db_cmd $nitems $tnum $outputfile -consume_wait \
+ $args
+} else {
+ error_check_good bad_args $runtype "either PRODUCE, CONSUME or WAIT"
+}
+error_check_good env_close [$dbenv close] 0
+exit
diff --git a/bdb/test/dbm.tcl b/bdb/test/dbm.tcl
new file mode 100644
index 00000000000..41a5da1f13a
--- /dev/null
+++ b/bdb/test/dbm.tcl
@@ -0,0 +1,128 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dbm.tcl,v 11.12 2000/08/25 14:21:50 sue Exp $
+#
+# Historic DBM interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
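+# The historic interface is exercised through the berkdb dbminit, store,
+# fetch, firstkey, nextkey, delete and dbmclose wrappers used below.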
+proc dbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "DBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/dbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good dbminit [berkdb dbminit $testfile] 0
+ set did [open $dict]
+
+ set flags ""
+ set txn ""
+ set count 0
+ set skippednullkey 0
+
+ puts "\tDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # DBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [berkdb store $str $str]
+ error_check_good dbm_store $ret 0
+
+ set d [berkdb fetch $str]
+ error_check_good dbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tDBM.b: dump file"
+ set oid [open $t1 w]
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set d [berkdb fetch $key]
+ error_check_good dbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tDBM.c: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good dbminit2 [berkdb dbminit $testfile] 0
+ set oid [open $t1 w]
+
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set d [berkdb fetch $key]
+ error_check_good dbm_refetch $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tDBM.d: sequential scan and delete"
+
+ error_check_good dbminit3 [berkdb dbminit $testfile] 0
+ set oid [open $t1 w]
+
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set ret [berkdb delete $key]
+ error_check_good dbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ error_check_good "dbm_close" [berkdb dbmclose] 0
+}
diff --git a/bdb/test/dbscript.tcl b/bdb/test/dbscript.tcl
new file mode 100644
index 00000000000..3a51b4363d4
--- /dev/null
+++ b/bdb/test/dbscript.tcl
@@ -0,0 +1,357 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dbscript.tcl,v 11.10 2000/04/21 18:36:21 krinsky Exp $
+#
+# Random db tester.
+# Usage: dbscript file numops ncurs min_del max_add key_avg data_avg dups errpct
+# file: db file on which to operate
+# numops: number of operations to do
+# ncurs: number of cursors
+# min_del: minimum number of keys before you disable deletes.
+# max_add: maximum number of keys before you disable adds.
+# key_avg: average key size
+# data_avg: average data size
+# dups: 1 indicates dups allowed, 0 indicates no dups
+# errpct: What percent of operations should generate errors
+# seed: Random number generator seed (-1 means use pid); note that this
+#	script currently seeds from $rand_init rather than reading a seed
+#	from argv
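+#
+# Illustrative invocation (values are examples only):
+#   dbscript.tcl foo.db 1000 5 100 5000 10 20 0 10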
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set alphabet "abcdefghijklmnopqrstuvwxyz"
+
+set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt"
+
+# Verify usage
+if { $argc != 9 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set file [lindex $argv 0]
+set numops [ lindex $argv 1 ]
+set ncurs [ lindex $argv 2 ]
+set min_del [ lindex $argv 3 ]
+set max_add [ lindex $argv 4 ]
+set key_avg [ lindex $argv 5 ]
+set data_avg [ lindex $argv 6 ]
+set dups [ lindex $argv 7 ]
+set errpct [ lindex $argv 8 ]
+
+berkdb srand $rand_init
+
+puts "Beginning execution for [pid]"
+puts "$file database"
+puts "$numops Operations"
+puts "$ncurs cursors"
+puts "$min_del keys before deletes allowed"
+puts "$max_add or fewer keys to add"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+if { $dups != 1 } {
+ puts "No dups"
+} else {
+ puts "Dups allowed"
+}
+puts "$errpct % Errors"
+
+flush stdout
+
+set db [berkdb_open $file]
+set cerr [catch {error_check_good dbopen [is_substr $db db] 1} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+set method [$db get_type]
+set record_based [is_record_based $method]
+
+# Initialize globals including data
+global nkeys
+global l_keys
+global a_keys
+
+set nkeys [db_init $db 1]
+puts "Initial number of keys: $nkeys"
+
+set pflags ""
+set gflags ""
+set txn ""
+
+# Open the cursors
+set curslist {}
+for { set i 0 } { $i < $ncurs } { incr i } {
+ set dbc [$db cursor]
+ set cerr [catch {error_check_good dbopen [is_substr $dbc $db.c] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ set cerr [catch {error_check_bad cursor_create $dbc NULL} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ lappend curslist $dbc
+
+}
+
+# On each iteration we generate random keys and data and select a
+# get, put, or delete operation.  Delete is not an option when we have
+# fewer than min_del keys, and add is not an option when we have more
+# than max_add keys.  The Tcl global array a_keys maps each key to its
+# data, and the global list l_keys holds the keys so that one can be
+# picked by integer index.
+set adds 0
+set puts 0
+set gets 0
+set dels 0
+set bad_adds 0
+set bad_puts 0
+set bad_gets 0
+set bad_dels 0
+
+for { set iter 0 } { $iter < $numops } { incr iter } {
+ set op [pick_op $min_del $max_add $nkeys]
+ set err [is_err $errpct]
+
+ # A 0 in the dups position of the case name means duplicates are not
+ # allowed, so we exercise the regular (non-cursor) operations.  If
+ # dups is 1, we use cursor ops.
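+ # For example, case "add01" is an add with no duplicates where an
+ # error is expected: it re-puts an existing key with -nooverwrite.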
+ switch $op$dups$err {
+ add00 {
+ incr adds
+
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ newpair $k [pad_data $method $data]
+ }
+ add01 {
+ incr bad_adds
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ add10 {
+ incr adds
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ if { [berkdb random_int 1 2] == 1 } {
+ # Add a new key
+ set k [random_data $key_avg 1 a_keys \
+ $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn \
+ {-keyfirst $k $data}]
+ newpair $k [pad_data $method $data]
+ } else {
+ # Add a new duplicate
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+
+ set op [pick_cursput]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn {$op $k $data}]
+ adddup $k [lindex $dbcinfo 2] $data
+ }
+ }
+ add11 {
+ # TODO
+ incr bad_adds
+ set ret 1
+ }
+ put00 {
+ incr puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn {$k $data}]
+ changepair $k [pad_data $method $data]
+ }
+ put01 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ put10 {
+ incr puts
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+
+ set ret [eval {$dbc put} $txn {-current $data}]
+ changedup $k [lindex $dbcinfo 2] $data
+ }
+ put11 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set dbc [$db cursor]
+ set ret [eval {$dbc put} $txn {-current $data}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ get00 {
+ incr gets
+ set k [random_key]
+ set val [eval {$db get} $txn {$k}]
+ set data [pad_data $method [lindex [lindex $val 0] 1]]
+ if { $data == $a_keys($k) } {
+ set ret 0
+ } else {
+ set ret "FAIL: Error got |$data| expected |$a_keys($k)|"
+ }
+ # Get command requires no state change
+ }
+ get01 {
+ incr bad_gets
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db get} $txn {$k}]
+ # Error case so no change to data state
+ }
+ get10 {
+ incr gets
+ set dbcinfo [random_cursor $curslist]
+ if { [llength $dbcinfo] == 3 } {
+ set ret 0
+ } else {
+ set ret 0
+ }
+ # Get command requires no state change
+ }
+ get11 {
+ incr bad_gets
+ set k [random_key]
+ set dbc [$db cursor]
+ if { [berkdb random_int 1 2] == 1 } {
+ set dir -next
+ } else {
+ set dir -prev
+ }
+ set ret [eval {$dbc get} $txn {$dir $k}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error and get case so no change to data state
+ }
+ del00 {
+ incr dels
+ set k [random_key]
+ set ret [eval {$db del} $txn {$k}]
+ rempair $k
+ }
+ del01 {
+ incr bad_dels
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db del} $txn {$k}]
+ # Error case so no change to data state
+ }
+ del10 {
+ incr dels
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set ret [eval {$dbc del} $txn]
+ remdup [lindex $dbcinfo 1] [lindex $dbcinfo 2]
+ }
+ del11 {
+ incr bad_dels
+ set c [$db cursor]
+ set ret [eval {$c del} $txn]
+ set cerr [catch {error_check_good curs_close \
+ [$c close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ }
+ if { $err == 1 } {
+ # Verify failure.
+ set cerr [catch {error_check_good $op$dups$err:$k \
+ [is_substr Error $ret] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ } else {
+ # Verify success
+ set cerr [catch {error_check_good $op$dups$err:$k $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ }
+
+ flush stdout
+}
+
+# Close cursors and file
+foreach i $curslist {
+ set r [$i close]
+ set cerr [catch {error_check_good cursor_close:$i $r 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+}
+
+set r [$db close]
+set cerr [catch {error_check_good db_close:$db $r 0} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: $adds adds $gets gets $puts puts $dels dels"
+puts "Error ops: $bad_adds adds $bad_gets gets $bad_puts puts $bad_dels dels"
+flush stdout
+
+filecheck $file $txn
+
+exit
diff --git a/bdb/test/ddscript.tcl b/bdb/test/ddscript.tcl
new file mode 100644
index 00000000000..9b139a4cbc6
--- /dev/null
+++ b/bdb/test/ddscript.tcl
@@ -0,0 +1,43 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ddscript.tcl,v 11.7 2000/05/08 19:26:37 sue Exp $
+#
+# Deadlock detector script tester.
+# Usage: ddscript dir test lockerid objid numprocs
+# dir: DBHOME directory
+# test: Which test to run
+# lockerid: Lock id for this locker
+# objid: Object id to lock.
+# numprocs: Total number of processes running
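+#
+# Illustrative invocation (dead001.tcl normally drives this script
+# through wrap.tcl): ddscript.tcl TESTDIR ring 0 0 2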
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddscript dir test lockerid objid numprocs"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set tnum [ lindex $argv 1 ]
+set lockerid [ lindex $argv 2 ]
+set objid [ lindex $argv 3 ]
+set numprocs [ lindex $argv 4 ]
+
+set myenv [berkdb env -lock -home $dir -create -mode 0644]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+puts [eval $tnum $myenv $lockerid $objid $numprocs]
+
+error_check_good envclose [$myenv close] 0
+
+exit
diff --git a/bdb/test/dead001.tcl b/bdb/test/dead001.tcl
new file mode 100644
index 00000000000..9e7c71f6a58
--- /dev/null
+++ b/bdb/test/dead001.tcl
@@ -0,0 +1,76 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead001.tcl,v 11.17 2000/11/05 14:23:55 dda Exp $
+#
+# Deadlock Test 1.
+# We create various deadlock scenarios for different numbers of lockers
+# and see if we can get the world cleaned up suitably.
+proc dead001 { { procs "2 4 10" } {tests "ring clump" } } {
+ source ./include.tcl
+
+ puts "Dead001: Deadlock detector tests"
+
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead001.a: creating environment"
+ set env [berkdb env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ error_check_good lock_env:close [$env close] 0
+
+ set dpid [exec $util_path/db_deadlock -vw -h $testdir \
+ >& $testdir/dd.out &]
+
+ foreach t $tests {
+ set pidlist ""
+ foreach n $procs {
+
+ sentinel_init
+
+ # Fire off the tests
+ puts "\tDead001: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead001.log.$i \
+ ddscript.tcl $testdir $t $i $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead001.log.$i \
+ $testdir $t $i $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead001.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ puts "dead check..."
+ dead_check $t $n $dead $clean $other
+ }
+ }
+
+ exec $KILL $dpid
+ # Windows needs files closed before deleting files, so pause a little
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead001.log.$i
+ }
+}
diff --git a/bdb/test/dead002.tcl b/bdb/test/dead002.tcl
new file mode 100644
index 00000000000..83cc6c7d59b
--- /dev/null
+++ b/bdb/test/dead002.tcl
@@ -0,0 +1,68 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead002.tcl,v 11.15 2000/08/25 14:21:50 sue Exp $
+#
+# Deadlock Test 2.
+# Identical to Test 1 except that instead of running a standalone deadlock
+# detector, we create the region with "detect on every wait"
+proc dead002 { { procs "2 4 10" } {tests "ring clump" } } {
+ source ./include.tcl
+
+ puts "Dead002: Deadlock detector tests"
+
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead002.a: creating environment"
+ set env [berkdb env \
+ -create -mode 0644 -home $testdir -lock -lock_detect default]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+ error_check_good lock_env:close [$env close] 0
+
+ foreach t $tests {
+ set pidlist ""
+ foreach n $procs {
+ sentinel_init
+
+ # Fire off the tests
+ puts "\tDead002: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead002.log.$i \
+ ddscript.tcl $testdir $t $i $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead002.log.$i \
+ $testdir $t $i $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead002.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ dead_check $t $n $dead $clean $other
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead002.log.$i
+ }
+}
diff --git a/bdb/test/dead003.tcl b/bdb/test/dead003.tcl
new file mode 100644
index 00000000000..4075eb44f86
--- /dev/null
+++ b/bdb/test/dead003.tcl
@@ -0,0 +1,92 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead003.tcl,v 1.8 2000/08/25 14:21:50 sue Exp $
+#
+# Deadlock Test 3.
+# Test DB_LOCK_OLDEST and DB_LOCK_YOUNGEST
+# Identical to Test 2 except that we create the region with "detect on
+# every wait" with first the "oldest" and then "youngest".
+proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
+ source ./include.tcl
+
+ set detects { oldest youngest }
+ puts "Dead003: Deadlock detector tests: $detects"
+
+ # Create the environment.
+ foreach d $detects {
+ env_cleanup $testdir
+ puts "\tDead003.a: creating environment for $d"
+ set env [berkdb env \
+ -create -mode 0644 -home $testdir -lock -lock_detect $d]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+ error_check_good lock_env:close [$env close] 0
+
+ foreach t $tests {
+ set pidlist ""
+ foreach n $procs {
+ sentinel_init
+
+ # Fire off the tests
+ puts "\tDead003: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ puts "$tclsh_path\
+ test_path/ddscript.tcl $testdir \
+ $t $i $i $n >& \
+ $testdir/dead003.log.$i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl \
+ $testdir/dead003.log.$i $testdir \
+ $t $i $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead003.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ dead_check $t $n $dead $clean $other
+ #
+ # If we get here we know we have the
+ # correct number of dead/clean procs, as
+ # checked by dead_check above. Now verify
+ # that the right process was the one.
+ puts "\tDead003: Verify $d locks were aborted"
+ set l ""
+ if { $d == "oldest" } {
+ set l [expr $n - 1]
+ }
+ if { $d == "youngest" } {
+ set l 0
+ }
+ set did [open $testdir/dead003.log.$l]
+ while { [gets $did val] != -1 } {
+ error_check_good check_abort \
+ $val 1
+ }
+ close $did
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead003.log.$i
+ }
+ }
+}
diff --git a/bdb/test/env001.tcl b/bdb/test/env001.tcl
new file mode 100644
index 00000000000..00837330193
--- /dev/null
+++ b/bdb/test/env001.tcl
@@ -0,0 +1,147 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env001.tcl,v 11.21 2000/11/09 19:24:08 sue Exp $
+#
+# Test of env remove interface.
+proc env001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile $testdir/env.db
+ set t1 $testdir/t1
+
+ puts "Env001: Test of environment remove interface."
+ env_cleanup $testdir
+
+ # Trying to open without the create flag should fail.
+ puts "\tEnv001.a: Open without create (should fail)."
+ catch {set env [berkdb env -home $testdir]} ret
+ error_check_good env:fail [is_substr $ret "no such file"] 1
+
+ # Now try opening with create
+ puts "\tEnv001.b: Open with create."
+ set env [berkdb env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # Make sure that close works.
+ puts "\tEnv001.c: Verify close."
+ error_check_good env:close:$env [$env close] 0
+
+ # Make sure we can reopen -- this doesn't work on Windows
+ # because if there is only one opener, the region disappears
+ # when it is closed. We can't do a second opener, because
+ # that will fail on HP-UX.
+ puts "\tEnv001.d: Remove on closed environments."
+ if { $is_windows_test != 1 } {
+ puts "\t\tEnv001.d.1: Verify re-open."
+ set env [berkdb env -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # remove environment
+ puts "\t\tEnv001.d.2: Close environment."
+ error_check_good env:close [$env close] 0
+ puts "\t\tEnv001.d.3: Try remove with force (should succeed)."
+ error_check_good \
+ envremove [berkdb envremove -force -home $testdir] 0
+ }
+
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ puts "\tEnv001.e: Remove on open environments."
+ puts "\t\tEnv001.e.1: Env is open by single proc,\
+ remove no force."
+ set env [berkdb env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -home $testdir} ret]
+ error_check_good env:remove $stat 1
+ error_check_good env:close [$env close] 0
+ }
+
+ puts \
+ "\t\tEnv001.e.2: Env is open by single proc, remove with force."
+ # Now that envremove doesn't do a close, this won't work on Windows.
+ if { $is_windows_test != 1 && $is_hp_test != 1} {
+ set env [berkdb env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -force -home $testdir} ret]
+ error_check_good env:remove(force) $ret 0
+ #
+ # Even though the underlying env is gone, we need to close
+ # the handle.
+ #
+ catch {$env close}
+ }
+
+ puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
+ # should fail
+ set env [berkdb env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb env -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+ # First close our env, but leave remote open
+ error_check_good env:close [$env close] 0
+ catch {berkdb envremove -home $testdir} ret
+ error_check_good envremove:2procs:noforce [is_substr $errorCode EBUSY] 1
+ #
+ # Even though the remove failed, we are finished with this
+ # environment; close the handle that is still open in the remote
+ # process.
+ set remote_close [send_cmd $f1 "$remote_env close"]
+ error_check_good remote_close $remote_close 0
+
+ # exit remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+
+ puts "\t\tEnv001.e.4: Env is open by 2 procs, remove with force."
+ # You cannot do this on Windows because you can't remove files
+ # that are open, so we skip this test for Windows (and HP-UX).
+ # On UNIX, it should succeed.
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ set env [berkdb env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb env -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ catch {berkdb envremove -force -home $testdir} ret
+ error_check_good envremove:2procs:force $ret 0
+ #
+ # We still need to close our handle.
+ #
+ catch {$env close} ret
+
+ # Close down remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+ }
+
+ # Try opening in a different dir
+ puts "\tEnv001.f: Try opening env in another directory."
+ if { [file exists $testdir/NEWDIR] != 1 } {
+ file mkdir $testdir/NEWDIR
+ }
+ set eflags "-create -home $testdir/NEWDIR -mode 0644"
+ set env [eval {berkdb env} $eflags]
+ error_check_bad env:open $env NULL
+ error_check_good env:close [$env close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/NEWDIR] 0
+
+ puts "\tEnv001 complete."
+}
diff --git a/bdb/test/env002.tcl b/bdb/test/env002.tcl
new file mode 100644
index 00000000000..a37ddea17a9
--- /dev/null
+++ b/bdb/test/env002.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env002.tcl,v 11.11 2000/08/25 14:21:50 sue Exp $
+#
+# Env Test 002
+# Test set_lg_dir and env name resolution
+# With an environment path specified using -home, and then again
+# with it specified by the environment variable DB_HOME:
+# 1) Make sure that the set_lg_dir option is respected
+# a) as a relative pathname.
+# b) as an absolute pathname.
+# 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+# again as relative and absolute pathnames.
+# 3) Make sure that if -both- db_config and a file are present,
+# only the file is respected (see doc/env/naming.html).
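+#
+# (For reference, the DB_CONFIG file written by env002_make_config below
+# contains lines of the form:
+#   set_data_dir .
+#   set_lg_dir <logdir>
+# where <logdir> is the relative or absolute log directory under test.)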
+proc env002 { } {
+ # env002 is essentially just a small driver that runs
+ # env002_body--formerly the entire test--twice; once, it
+ # supplies a "home" argument to use with environment opens,
+ # and the second time it sets DB_HOME instead.
+ # Note that env002_body itself calls env002_run_test to run
+ # the body of the actual test and check for the presence
+ # of logs. The nesting, I hope, makes this test's structure simpler.
+
+ global env
+ source ./include.tcl
+
+ puts "Env002: set_lg_dir test."
+
+ puts "\tEnv002: Running with -home argument to berkdb env."
+ env002_body "-home $testdir"
+
+ puts "\tEnv002: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env002_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv002: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env002_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+
+}
+
+proc env002_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set logdir "logs_in_here"
+
+ file mkdir $testdir/$logdir
+
+ # Set up full path to $logdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$logdir
+ set fulllogdir [pwd]
+ cd $curdir
+
+ env002_make_config $logdir
+
+ # Run the meat of the test.
+ env002_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # Run the test again
+ env002_run_test a 2 "absolute path, config file" $home_arg \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ file mkdir $testdir/$logdir
+ env002_run_test b 1 "relative path, db_config" "$home_arg \
+ -log_dir $logdir -data_dir ." \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ # absolute
+ file mkdir $fulllogdir
+ env002_run_test b 2 "absolute path, db_config" "$home_arg \
+ -log_dir $fulllogdir -data_dir ." \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+ # Now, set db_config -and- have a DB_CONFIG file, and make
+ # sure only the latter is honored.
+
+ file mkdir $testdir/$logdir
+ env002_make_config $logdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+ env002_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -log_dir $testdir/bogus \
+ -data_dir ." $testdir/$logdir
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+ env002_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -log_dir $fulllogdir/bogus \
+ -data_dir ." $fulllogdir
+}
+
+proc env002_run_test { major minor msg env_args log_path} {
+ global testdir
+ set testfile "env002.db"
+
+ puts "\t\tEnv002.$major.$minor: $msg"
+
+ # Create an environment, with logging, and scribble some
+ # stuff in a [btree] database in it.
+ # puts [concat {berkdb env -create -log -private} $env_args]
+ set dbenv [eval {berkdb env -create -log -private} $env_args]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+ set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set key "some_key"
+ set data "some_data"
+
+ error_check_good db_put \
+ [$db put $key [chop_data btree $data]] 0
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now make sure the log file is where we want it to be.
+ error_check_good db_exists [file exists $testdir/$testfile] 1
+ error_check_good log_exists \
+ [file exists $log_path/log.0000000001] 1
+}
+
+proc env002_make_config { logdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_lg_dir $logdir"
+ close $cid
+}
diff --git a/bdb/test/env003.tcl b/bdb/test/env003.tcl
new file mode 100644
index 00000000000..01e0b6188fc
--- /dev/null
+++ b/bdb/test/env003.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env003.tcl,v 11.12 2000/08/25 14:21:50 sue Exp $
+#
+# Env Test 003
+# Test DB_TMP_DIR and env name resolution
+# With an environment path specified using -home, and then again
+# with it specified by the environment variable DB_HOME:
+# 1) Make sure that the DB_TMP_DIR config file option is respected
+# a) as a relative pathname.
+# b) as an absolute pathname.
+# 2) Make sure that the DB_TMP_DIR db_config argument is respected,
+# again as relative and absolute pathnames.
+# 3) Make sure that if -both- db_config and a file are present,
+# only the file is respected (see doc/env/naming.html).
+proc env003 { } {
+ # env003 is essentially just a small driver that runs
+ # env003_body twice. First, it supplies a "home" argument
+ # to use with environment opens, and the second time it sets
+ # DB_HOME instead.
+ # Note that env003_body itself calls env003_run_test to run
+ # the body of the actual test.
+
+ global env
+ source ./include.tcl
+
+ puts "Env003: DB_TMP_DIR test."
+
+ puts "\tEnv003: Running with -home argument to berkdb env."
+ env003_body "-home $testdir"
+
+ puts "\tEnv003: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env003_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv003: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env003_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+
+}
+
+proc env003_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set tmpdir "tmpfiles_in_here"
+
+ file mkdir $testdir/$tmpdir
+
+ # Set up full path to $tmpdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$tmpdir
+ set fulltmpdir [pwd]
+ cd $curdir
+
+ # Run the test with the temp dir nonexistent--it checks for failure.
+ env_cleanup $testdir
+
+ env003_make_config $tmpdir
+
+ # Run the meat of the test.
+ env003_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$tmpdir
+
+ env_cleanup $testdir
+
+ env003_make_config $fulltmpdir
+
+ # Run the test again
+ env003_run_test a 2 "absolute path, config file" $home_arg \
+ $fulltmpdir
+
+ env_cleanup $testdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ env003_run_test b 1 "relative path, db_config" "$home_arg \
+ -tmp_dir $tmpdir -data_dir ." \
+ $testdir/$tmpdir
+
+ env_cleanup $testdir
+
+ # absolute
+ env003_run_test b 2 "absolute path, db_config" "$home_arg \
+ -tmp_dir $fulltmpdir -data_dir ." \
+ $fulltmpdir
+
+ env_cleanup $testdir
+
+ # Now, set db_config -and- have a DB_CONFIG file, and make
+ # sure only the latter is honored.
+
+ # Make a temp directory that actually does exist to supply
+ # as a bogus argument--the test checks for a -nonexistent- temp
+ # dir, since success is harder to detect.
+ file mkdir $testdir/bogus
+ env003_make_config $tmpdir
+
+ # note that we supply an -existent- tmp dir to db_config as
+ # a red herring
+ env003_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -tmp_dir $testdir/bogus -data_dir ." \
+ $testdir/$tmpdir
+ env_cleanup $testdir
+
+ file mkdir $fulltmpdir
+ file mkdir $fulltmpdir/bogus
+ env003_make_config $fulltmpdir/nonexistent
+
+ # note that we supply an -existent- tmp dir to db_config as
+ # a red herring
+ env003_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
+ $fulltmpdir
+}
+
+proc env003_run_test { major minor msg env_args tmp_path} {
+ global testdir
+ global alphabet
+ global errorCode
+
+ puts "\t\tEnv003.$major.$minor: $msg"
+
+ # Create an environment and small-cached in-memory database to
+ # use.
+ set dbenv [eval {berkdb env -create -home $testdir} $env_args \
+ {-cachesize {0 40960 1}}]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+ set db [berkdb_open_noerr -env $dbenv -create -btree]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Fill the database with more than its cache can fit.
+ # !!!
+ # This is actually trickier than it sounds. The tempfile
+ # gets unlinked as soon as it's created, so there's no straightforward
+ # way to check for its existence. Instead, we make sure
+ # DB_TMP_DIR points somewhere bogus, and make sure that the temp
+ # dir. does -not- exist. But to do this, we have to know
+ # which call to DB->put is going to fail--the temp file is
+ # created lazily, so the failure only occurs when the cache finally
+ # overflows.
+ # The data we've conjured up will fit nicely once, but the second
+ # call will overflow the cache. Thus we check for success once,
+ # then failure.
+ #
+ set key1 "key1"
+ set key2 "key2"
+ set data [repeat $alphabet 1000]
+
+ # First put should succeed.
+ error_check_good db_put_1 [$db put $key1 $data] 0
+
+ # Second one should return ENOENT.
+ set errorCode NONE
+ catch {$db put $key2 $data} res
+ error_check_good db_put_2 [is_substr $errorCode ENOENT] 1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+}
+
+proc env003_make_config { tmpdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_tmp_dir $tmpdir"
+ close $cid
+}
diff --git a/bdb/test/env004.tcl b/bdb/test/env004.tcl
new file mode 100644
index 00000000000..82cc8dd25c7
--- /dev/null
+++ b/bdb/test/env004.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env004.tcl,v 11.14 2000/08/25 14:21:50 sue Exp $
+#
+# Env Test 4
+# Test multiple data directories. Do a bunch of different opens
+# to make sure that the files are detected in different directories.
+proc env004 { } {
+ source ./include.tcl
+
+ set method "hash"
+ set omethod [convert_method $method]
+ set args [convert_args $method ""]
+
+ puts "Env004: Multiple data directory test."
+
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ puts "\tEnv004.a: Multiple data directories in DB_CONFIG file"
+
+ # Create a config file
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_data_dir data1"
+ puts $cid "set_data_dir data2"
+ puts $cid "set_data_dir data3"
+ close $cid
+
+ # Now get pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+
+ set e [berkdb env -create -private -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $fulldir $method $e $args
+ error_check_good env_close [$e close] 0
+
+ puts "\tEnv004.b: Multiple data directories in berkdb env call."
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ # Now call dbenv with config specified
+ set e [berkdb env -create -private \
+ -data_dir . -data_dir data1 -data_dir data2 \
+ -data_dir data3 -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $fulldir $method $e $args
+ error_check_good env_close [$e close] 0
+
+ env_cleanup $testdir
+}
+
+proc ddir_test { fulldir method e args } {
+ source ./include.tcl
+
+ set args [convert_args $args]
+ set omethod [convert_method $method]
+
+ # Now create one file in each directory
+ set db1 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data1/datafile1.db}]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data2/datafile2.db}]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data3/datafile3.db}]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Close the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+
+ # Now, reopen the files without complete pathnames and make
+ # sure that we find them.
+
+ set db1 [berkdb_open -env $e $fulldir/data1/datafile1.db]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [berkdb_open -env $e $fulldir/data2/datafile2.db]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [berkdb_open -env $e $fulldir/data3/datafile3.db]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Finally close all the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+}
diff --git a/bdb/test/env005.tcl b/bdb/test/env005.tcl
new file mode 100644
index 00000000000..4ad9419936f
--- /dev/null
+++ b/bdb/test/env005.tcl
@@ -0,0 +1,53 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env005.tcl,v 11.8 2000/08/25 14:21:50 sue Exp $
+#
+# Env Test 5
+# Test that using subsystems without initializing them correctly
+# returns an error. Cannot test mpool, because it is assumed
+# in the Tcl code.
+proc env005 { } {
+ source ./include.tcl
+
+ puts "Env005: Uninitialized env subsystems test."
+
+ env_cleanup $testdir
+ puts "\tEnv005.a: Creating env with no subsystems."
+
+ set e [berkdb env -create -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [berkdb_open -create -btree $testdir/env005.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rlist {
+ { "lock_detect" "Env005.b0"}
+ { "lock_get read 1 1" "Env005.b1"}
+ { "lock_id" "Env005.b2"}
+ { "lock_stat" "Env005.b3"}
+ { "log_archive" "Env005.c0"}
+ { "log_file {1 1}" "Env005.c1"}
+ { "log_flush" "Env005.c2"}
+ { "log_get -first" "Env005.c3"}
+ { "log_put record" "Env005.c4"}
+ { "log_register $db xxx" "Env005.c5"}
+ { "log_stat" "Env005.c6"}
+ { "log_unregister $db" "Env005.c7"}
+ { "txn" "Env005.d0"}
+ { "txn_checkpoint" "Env005.d1"}
+ { "txn_stat" "Env005.d2"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err [is_substr $ret invalid] 1
+ }
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/bdb/test/env006.tcl b/bdb/test/env006.tcl
new file mode 100644
index 00000000000..1a39886cafa
--- /dev/null
+++ b/bdb/test/env006.tcl
@@ -0,0 +1,42 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env006.tcl,v 11.5 2000/10/27 13:23:55 sue Exp $
+#
+# Env Test 6
+# DB Utility Check
+# Make sure that all the utilities exist and run.
+#
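+# Each utility is invoked here with the -? flag; the test expects it to
+# exit with an error and print a usage message.
+#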
+proc env006 { } {
+ source ./include.tcl
+
+ puts "Env006: Run underlying utilities."
+
+ set rlist {
+ { "db_archive" "Env006.a"}
+ { "db_checkpoint" "Env006.b"}
+ { "db_deadlock" "Env006.c"}
+ { "db_dump" "Env006.d"}
+ { "db_load" "Env006.e"}
+ { "db_printlog" "Env006.f"}
+ { "db_recover" "Env006.g"}
+ { "db_stat" "Env006.h"}
+ }
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+
+ puts "\t$msg: $cmd"
+
+ set stat [catch {exec $util_path/$cmd -?} ret]
+ error_check_good $cmd $stat 1
+
+ #
+ # Check for "usage", but only check "sage" so that
+ # we can handle either Usage or usage.
+ #
+ error_check_good $cmd.err [is_substr $ret sage] 1
+ }
+}
diff --git a/bdb/test/env007.tcl b/bdb/test/env007.tcl
new file mode 100644
index 00000000000..b8ddea75c91
--- /dev/null
+++ b/bdb/test/env007.tcl
@@ -0,0 +1,100 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env007.tcl,v 11.5 2000/08/25 14:21:50 sue Exp $
+#
+# Env Test 007
+# Test various config file options.
+# 1) Make sure command line option is respected
+# 2) Make sure that config file option is respected
+# 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+# method is used, only the file is respected.
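+#
+# (For reference, env007_make_config below writes a one-line DB_CONFIG
+# file such as "set_tx_max 31", and the test reads the value back
+# through the corresponding stat call.)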
+proc env007 { } {
+ # env007 is essentially just a small driver that runs
+ # env007_body twice. First, it supplies a "set" argument
+ # to use with environment opens, and the second time it sets
+ # DB_CONFIG instead.
+ # Note that env007_body itself calls env007_run_test to run
+ # the body of the actual test.
+
+ source ./include.tcl
+
+ puts "Env007: DB_CONFIG test."
+
+ #
+ # Test only those options we can easily check via stat
+ #
+ set rlist {
+ { " -txn_max " "set_tx_max" "19" "31" "Env007.a: Txn Max"
+ "txn_stat" "Max Txns"}
+ { " -lock_max " "set_lk_max" "19" "31" "Env007.b: Lock Max"
+ "lock_stat" "Max locks"}
+ { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.c: Log Bsize"
+ "log_stat" "Log record cache size"}
+ { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.d: Log Max"
+ "log_stat" "Maximum log file size"}
+ }
+
+ set e "berkdb env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $rlist {
+ set envarg [lindex $item 0]
+ set configarg [lindex $item 1]
+ set envval [lindex $item 2]
+ set configval [lindex $item 3]
+ set msg [lindex $item 4]
+ set statcmd [lindex $item 5]
+ set statstr [lindex $item 6]
+
+ env_cleanup $testdir
+ # First verify using just env args
+ puts "\t$msg Environment argument only"
+ set env [eval $e $envarg $envval]
+ error_check_good envopen:0 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $envval
+ error_check_good envclose:0 [$env close] 0
+
+ env_cleanup $testdir
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t$msg Config file only"
+ set env [eval $e]
+ error_check_good envopen:1 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $configval
+ error_check_good envclose:1 [$env close] 0
+
+ # Now verify using both the env arg and the config file; the
+ # config file value should take precedence
+ puts "\t$msg Environment arg and config file"
+ set env [eval $e $envarg $envval]
+ error_check_good envopen:2 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $configval
+ error_check_good envclose:2 [$env close] 0
+ }
+}
+
+proc env007_check { env statcmd statstr testval } {
+ set stat [$env $statcmd]
+ set checked 0
+ foreach statpair $stat {
+ if {$checked == 1} {
+ break
+ }
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $statstr] != 0} {
+ set checked 1
+ error_check_good $statstr:ck $statval $testval
+ }
+ }
+ error_check_good $statstr:test $checked 1
+}
+
+proc env007_make_config { carg cval } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "$carg $cval"
+ close $cid
+}
diff --git a/bdb/test/env008.tcl b/bdb/test/env008.tcl
new file mode 100644
index 00000000000..645f07f63d6
--- /dev/null
+++ b/bdb/test/env008.tcl
@@ -0,0 +1,73 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env008.tcl,v 11.2 2000/10/30 19:00:38 sue Exp $
+#
+# Test of env and subdirs.
+proc env008 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ env_cleanup $testdir
+
+ set subdir 1/1
+ set subdir1 1/2
+ file mkdir $testdir/$subdir $testdir/$subdir1
+ set testfile $subdir/env.db
+
+ puts "Env008: Test of environments and subdirectories."
+
+ # Create the environment.
+ puts "\tEnv008.a: Create env and db."
+ set env [berkdb env -create -mode 0644 -home $testdir -txn]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tEnv008.b: Remove db in subdir."
+ env008_db $env $testfile
+ error_check_good dbremove:$testfile \
+ [berkdb dbremove -env $env $testfile] 0
+
+ #
+ # Rather than remaking the db for every rename, just keep
+ # renaming the same file to a new name each time.
+ #
+ puts "\tEnv008.c: Rename db in subdir."
+ env008_db $env $testfile
+ set newfile $subdir/new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.d: Rename db to parent dir."
+ set newfile $subdir/../new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.e: Rename db to child dir."
+ set newfile $subdir/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.f: Rename db to another dir."
+ set newfile $subdir1/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+
+ error_check_good envclose [$env close] 0
+ puts "\tEnv008 complete."
+}
+
+proc env008_db { env testfile } {
+ set db [berkdb_open -env $env -create -btree $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db put key data]
+ error_check_good dbput $ret 0
+ error_check_good dbclose [$db close] 0
+}
diff --git a/bdb/test/hsearch.tcl b/bdb/test/hsearch.tcl
new file mode 100644
index 00000000000..0afee7fb2de
--- /dev/null
+++ b/bdb/test/hsearch.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: hsearch.tcl,v 11.7 2000/08/25 14:21:50 sue Exp $
+#
+# Historic Hsearch interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve each again; compare output to original.
+# Finally, destroy the table.
+proc hsearch { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "HSEARCH interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good hcreate [berkdb hcreate $nentries] 0
+ set did [open $dict]
+ set count 0
+
+ puts "\tHSEARCH.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ret [berkdb hsearch $str $str enter]
+ error_check_good hsearch:enter $ret 0
+
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+
+ puts "\tHSEARCH.b: re-get loop"
+	set did [open $dict]
+	set count 0
+	# Here is the loop where we re-retrieve each key
+	while { [gets $did str] != -1 && $count < $nentries } {
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+ error_check_good hdestroy [berkdb hdestroy] 0
+}
diff --git a/bdb/test/include.tcl b/bdb/test/include.tcl
new file mode 100644
index 00000000000..e5084d6507c
--- /dev/null
+++ b/bdb/test/include.tcl
@@ -0,0 +1,19 @@
+set tclsh_path @TCL_TCLSH@
+set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@SOSUFFIX@
+set rpc_server localhost
+set rpc_path .
+set test_path @srcdir@/../test
+
+set KILL "@db_cv_path_kill@"
+
+# DO NOT EDIT BELOW THIS LINE: automatically built by dist/s_tcl.
+
+global dict
+global testdir
+global util_path
+set testdir ./TESTDIR
+set rpc_testdir $rpc_path/TESTDIR
+
+global is_hp_test
+global is_qnx_test
+global is_windows_test
diff --git a/bdb/test/join.tcl b/bdb/test/join.tcl
new file mode 100644
index 00000000000..ebf33b8cdf3
--- /dev/null
+++ b/bdb/test/join.tcl
@@ -0,0 +1,451 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: join.tcl,v 11.17 2000/08/25 14:21:51 sue Exp $
+#
+# We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# everything else does as well. We'll create test databases called
+# join1.db, join2.db, join3.db, and join4.db. The number on the database
+# describes the duplication -- duplicates are of the form 0, N, 2N, 3N, ...
+# where N is the number of the database. Primary.db is the primary database,
+# and null.db is the database that has no matching duplicates.
+#
+# We test these joins on all-btree, all-hash, and a mixed combination of
+# the two access methods.
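+#
+# A minimal illustrative sketch (not called by the tests; the proc name is
+# ours) of the duplicate values db_build generates for database N:
+# 0, N, 2N, ... for ndups entries, formatted the same way db_build does.
+proc join_dup_values_sketch { n ndups } {
+	set vals {}
+	for { set i 0 } { $i < $ndups } { incr i } {
+		# Same %04d formatting db_build uses for duplicate data.
+		lappend vals [format "%04d" [expr $i * $n]]
+	}
+	return $vals
+}
+# e.g. [join_dup_values_sketch 3 4] returns {0000 0003 0006 0009}.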
+proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
+ global testdir
+ global rand_init
+ source ./include.tcl
+
+ env_cleanup $testdir
+ berkdb srand $rand_init
+
+ # Use one environment for all database opens so we don't
+ # need oodles of regions.
+ set env [berkdb env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # With the new offpage duplicate code, we don't support
+ # duplicate duplicates in sorted dup sets. Thus, if with_dup_dups
+ # is greater than one, run only with "-dup".
+ if { $with_dup_dups > 1 } {
+ set doptarray {"-dup"}
+ } else {
+ set doptarray {"-dup -dupsort" "-dup" RANDOMMIX RANDOMMIX }
+ }
+
+ # NB: these flags are internal only, ok
+ foreach m "DB_BTREE DB_HASH DB_BOTH" {
+ # run with two different random mixes.
+ foreach dopt $doptarray {
+ set opt [list "-env" $env $dopt]
+
+ puts "Join test: ($m $dopt) psize $psize,\
+ $with_dup_dups dup\
+ dups, flags $flags."
+
+ build_all $m $psize $opt oa $with_dup_dups
+
+			# null.db is built fifth (by db_build) but is
+			# referenced by index zero; set up the option
+			# array appropriately.
+ set oa(0) $oa(5)
+
+ # Build the primary
+ puts "\tBuilding the primary database $m"
+ set oflags "-create -truncate -mode 0644 -env $env\
+ [conv $m [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ set key [format "%04d" $i]
+ set ret [$db put $key stub]
+ error_check_good "primary put" $ret 0
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join primary.db "1 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "0 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 0 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 2 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups "3" $str2
+
+ # You really don't want to run this section
+ # with $with_dup_dups > 2.
+ if { $with_dup_dups <= 2 } {
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2 3" $str\
+ oa $flags $with_dup_dups "3 3 1" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "4 0 2" $str\
+ oa $flags $with_dup_dups "4 3 3" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "3 2 1" $str\
+ oa $flags $with_dup_dups "0 2" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "1 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "0 0 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "2 4 4" $str
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "0 0 4 4" $str
+ }
+ close $did
+ }
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc build_all { method psize opt oaname with_dup_dups {nentries 100} } {
+ global testdir
+ db_build join1.db $nentries 50 1 [conv $method 1]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join2.db $nentries 25 2 [conv $method 2]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join3.db $nentries 16 3 [conv $method 3]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join4.db $nentries 12 4 [conv $method 4]\
+ $psize $opt $oaname $with_dup_dups
+ db_build null.db $nentries 0 5 [conv $method 5]\
+ $psize $opt $oaname $with_dup_dups
+}
+
+proc conv { m i } {
+ switch -- $m {
+ DB_HASH { return "-hash"}
+ "-hash" { return "-hash"}
+ DB_BTREE { return "-btree"}
+ "-btree" { return "-btree"}
+ DB_BOTH {
+ if { [expr $i % 2] == 0 } {
+ return "-hash";
+ } else {
+ return "-btree";
+ }
+ }
+ }
+}
+
+proc random_opts { } {
+ set j [berkdb random_int 0 1]
+ if { $j == 0 } {
+ return " -dup"
+ } else {
+ return " -dup -dupsort"
+ }
+}
+
+proc db_build { name nkeys ndups dup_interval method psize lopt oaname \
+ with_dup_dups } {
+ source ./include.tcl
+
+ # Get array of arg names (from two levels up the call stack)
+ upvar 2 $oaname oa
+
+ # Search for "RANDOMMIX" in $opt, and if present, replace
+ # with " -dup" or " -dup -dupsort" at random.
+ set i [lsearch $lopt RANDOMMIX]
+ if { $i != -1 } {
+ set lopt [lreplace $lopt $i $i [random_opts]]
+ }
+
+ # Save off db_open arguments for this database.
+ set opt [eval concat $lopt]
+ set oa($dup_interval) $opt
+
+ # Create the database and open the dictionary
+ set oflags "-create -truncate -mode 0644 $method\
+ -pagesize $psize"
+ set db [eval {berkdb_open} $oflags $opt $name]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ puts -nonewline "\tBuilding $name: $nkeys keys "
+ puts -nonewline "with $ndups duplicates at interval of $dup_interval"
+ if { $with_dup_dups > 0 } {
+ puts ""
+ puts "\t\tand $with_dup_dups duplicate duplicates."
+ } else {
+ puts "."
+ }
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ set str $str$name
+		# We need to make sure that the dups are inserted in a
+		# random, or near-random, order. Do this by generating
+		# them and putting each in a list, then shuffling the
+		# list (an illustrative shuffle sketch follows this proc).
+ set duplist {}
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ lappend duplist $data
+ }
+ # randomize the list
+ for { set i 0 } { $i < $ndups } {incr i } {
+ # set j [berkdb random_int $i [expr $ndups - 1]]
+ set j [expr ($i % 2) + $i]
+ if { $j >= $ndups } { set j $i }
+ set dupi [lindex $duplist $i]
+ set dupj [lindex $duplist $j]
+ set duplist [lreplace $duplist $i $i $dupj]
+ set duplist [lreplace $duplist $j $j $dupi]
+ }
+ foreach data $duplist {
+ if { $with_dup_dups != 0 } {
+ for { set j 0 }\
+ { $j < $with_dup_dups }\
+ {incr j} {
+ set ret [$db put $str $data]
+ error_check_good put$j $ret 0
+ }
+ } else {
+ set ret [$db put $str $data]
+ error_check_good put $ret 0
+ }
+ }
+
+ if { $ndups == 0 } {
+ set ret [$db put $str NODUP]
+ error_check_good put $ret 0
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
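+
+# A minimal illustrative sketch (not called by db_build; the proc name is
+# ours) of the random shuffle described in the comment inside db_build,
+# using the suite's berkdb random_int to pick a swap partner per position.
+proc shuffle_list_sketch { l } {
+	set n [llength $l]
+	for { set i 0 } { $i < [expr $n - 1] } { incr i } {
+		set j [berkdb random_int $i [expr $n - 1]]
+		set tmp [lindex $l $i]
+		set l [lreplace $l $i $i [lindex $l $j]]
+		set l [lreplace $l $j $j $tmp]
+	}
+	return $l
+}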
+
+proc do_join { primary dbs key oanm flags with_dup_dups {dbs2 ""} {key2 ""} } {
+ global testdir
+ source ./include.tcl
+
+ upvar $oanm oa
+
+ puts -nonewline "\tJoining: $dbs on $key"
+ if { $dbs2 == "" } {
+ puts ""
+ } else {
+ puts " with $dbs2 on $key2"
+ }
+
+ # Open all the databases
+ set p [berkdb_open -unknown $testdir/$primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ set ndx [llength $dbs]
+
+ foreach i [concat $dbs $dbs2] {
+ set opt $oa($i)
+ set db [eval {berkdb_open -unknown} $opt [n_to_name $i]]
+ error_check_good "[n_to_name $i] open" [is_valid_db $db] TRUE
+ set curs [$db cursor]
+ error_check_good "$db cursor" \
+ [is_substr $curs "$db.c"] 1
+ lappend dblist $db
+ lappend curslist $curs
+
+ if { $ndx > 0 } {
+ set realkey [concat $key[n_to_name $i]]
+ } else {
+ set realkey [concat $key2[n_to_name $i]]
+ }
+
+ set pair [$curs get -set $realkey]
+ error_check_good cursor_set:$realkey:$pair \
+ [llength [lindex $pair 0]] 2
+
+ incr ndx -1
+ }
+
+ set join_curs [eval {$p join} $curslist]
+ error_check_good join_cursor \
+ [is_substr $join_curs "$p.c"] 1
+
+ # Calculate how many dups we expect.
+ # We go through the list of indices. If we find a 0, then we
+	# expect 0 dups. For everything else, we look at pairs of numbers:
+	# if they are relatively prime, multiply them and figure out how
+	# many times that goes into 50. If they aren't relatively prime,
+	# take the number of times the larger goes into 50. (An
+	# illustrative sketch follows the gcd proc at the end of this file.)
+ set expected 50
+ set last 1
+ foreach n [concat $dbs $dbs2] {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
+
+ # If $with_dup_dups is greater than zero, each datum has
+ # been inserted $with_dup_dups times. So we expect the number
+ # of dups to go up by a factor of ($with_dup_dups)^(number of databases)
+
+ if { $with_dup_dups > 0 } {
+ foreach n [concat $dbs $dbs2] {
+ set expected [expr $expected * $with_dup_dups]
+ }
+ }
+
+ set ndups 0
+ if { $flags == " -join_item"} {
+ set l 1
+ } else {
+ set flags ""
+ set l 2
+ }
+ for { set pair [eval {$join_curs get} $flags] } { \
+ [llength [lindex $pair 0]] == $l } {
+ set pair [eval {$join_curs get} $flags] } {
+ set k [lindex [lindex $pair 0] 0]
+ foreach i $dbs {
+ error_check_bad valid_dup:$i:$dbs $i 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good valid_dup:$i:$dbs [expr $kval % $i] 0
+ }
+ incr ndups
+ }
+ error_check_good number_of_dups:$dbs $ndups $expected
+
+ error_check_good close_primary [$p close] 0
+ foreach i $curslist {
+ error_check_good close_cursor:$i [$i close] 0
+ }
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_name { n } {
+	global testdir
+ if { $n == 0 } {
+ return null.db;
+ } else {
+ return join$n.db;
+ }
+}
+
+proc gcd { a b } {
+ set g 1
+
+ for { set i 2 } { $i <= $a } { incr i } {
+ if { [expr $a % $i] == 0 && [expr $b % $i] == 0 } {
+ set g $i
+ }
+ }
+ return $g
+}
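+
+# A minimal illustrative sketch (not called by do_join; the proc name is
+# ours) of the expected-count rule described in do_join: for indices > 0,
+# the number of matches is 50 divided by the lcm of the indices.
+proc join_expected_sketch { indices } {
+	set l 1
+	foreach n $indices {
+		if { $n == 0 } {
+			return 0
+		}
+		# lcm(n, l) computed via the gcd proc above.
+		set l [expr $n * $l / [gcd $n $l]]
+	}
+	return [expr 50 / $l]
+}
+# e.g. [join_expected_sketch {2 3}] returns 8: lcm(2,3) is 6, and 50/6 is 8.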
diff --git a/bdb/test/lock001.tcl b/bdb/test/lock001.tcl
new file mode 100644
index 00000000000..d571a987240
--- /dev/null
+++ b/bdb/test/lock001.tcl
@@ -0,0 +1,170 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock001.tcl,v 11.11 2000/08/25 14:21:51 sue Exp $
+#
+# Test driver for lock tests.
+# General Multi Random
+# Options are:
+# -dir <directory in which to store mpool> Y Y Y
+# -iterations <iterations> Y N Y
+# -ldegree <number of locks per iteration> N N Y
+# -maxlocks <locks in table> Y Y Y
+# -objs <number of objects> N N Y
+# -procs <number of processes to run> N N Y
+# -reads <read ratio> N N Y
+# -seeds <list of seed values for processes> N N Y
+# -wait <wait interval after getting locks> N N Y
+# -conflicts <conflict matrix; a list of lists> Y Y Y
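+#
+# For example (an illustrative invocation only; the values are arbitrary):
+#	locktest -iterations 100 -maxlocks 500 -procs 3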
+proc lock_usage {} {
+ puts stderr "randomlock\n\t-dir <dir>\n\t-iterations <iterations>"
+ puts stderr "\t-conflicts <conflict matrix>"
+ puts stderr "\t-ldegree <locks per iteration>\n\t-maxlocks <n>"
+ puts stderr "\t-objs <objects>\n\t-procs <nprocs>\n\t-reads <%reads>"
+ puts stderr "\t-seeds <list of seeds>\n\t-wait <max wait interval>"
+ return
+}
+
+proc locktest { args } {
+ source ./include.tcl
+
+ # Set defaults
+	# Adjusted so the matrix length is a perfect square (isqrt is exact)
+ #set conflicts { 3 0 0 0 0 0 1 0 1 1}
+ #set conflicts { 3 0 0 0 0 1 0 1 1}
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set iterations 1000
+ set ldegree 5
+ set maxlocks 1000
+ set objs 75
+ set procs 5
+ set reads 65
+ set seeds {}
+ set wait 5
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+			-c.* { incr i; set conflicts [lindex $args $i] }
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -i.* { incr i; set iterations [lindex $args $i] }
+ -l.* { incr i; set ldegree [lindex $args $i] }
+ -m.* { incr i; set maxlocks [lindex $args $i] }
+ -o.* { incr i; set objs [lindex $args $i] }
+ -p.* { incr i; set procs [lindex $args $i] }
+ -r.* { incr i; set reads [lindex $args $i] }
+ -s.* { incr i; set seeds [lindex $args $i] }
+ -w.* { incr i; set wait [lindex $args $i] }
+ default {
+ puts -nonewline "FAIL:[timestamp] Usage: "
+ lock_usage
+ return
+ }
+ }
+ }
+ set nmodes [isqrt [llength $conflicts]]
+
+ # Cleanup
+ env_cleanup $testdir
+
+ # Open the region we'll use for testing.
+ set eflags "-create -lock -home $testdir -mode 0644 \
+ -lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}"
+ set env [eval {berkdb env} $eflags]
+ lock001 $env $iterations $nmodes
+ reset_env $env
+ env_cleanup $testdir
+
+ lock002 $maxlocks $conflicts
+
+ lock003 $testdir $iterations \
+ $maxlocks $procs $ldegree $objs $reads $wait $conflicts $seeds
+}
+
+# Make sure that the basic lock tests work. Do some simple gets and puts for
+# a single locker.
+proc lock001 {env iter nmodes} {
+ source ./include.tcl
+
+ puts "Lock001: test basic lock operations"
+ set locker 999
+ # Get and release each type of lock
+ puts "Lock001.a: get and release each type of lock"
+ foreach m {ng write read} {
+ set obj obj$m
+ set lockp [$env lock_get $m $locker $obj]
+ error_check_good lock_get:a [is_blocked $lockp] 0
+ error_check_good lock_get:a [is_substr $lockp $env] 1
+ set ret [ $lockp put ]
+ error_check_good lock_put $ret 0
+ }
+
+ # Get a bunch of locks for the same locker; these should work
+ set obj OBJECT
+ puts "Lock001.b: Get a bunch of locks for the same locker"
+ foreach m {ng write read} {
+ set lockp [$env lock_get $m $locker $obj ]
+ lappend locklist $lockp
+ error_check_good lock_get:b [is_blocked $lockp] 0
+ error_check_good lock_get:b [is_substr $lockp $env] 1
+ }
+ release_list $locklist
+
+ set locklist {}
+ # Check that reference counted locks work
+ puts "Lock001.c: reference counted locks."
+ for {set i 0} { $i < 10 } {incr i} {
+ set lockp [$env lock_get -nowait write $locker $obj]
+ error_check_good lock_get:c [is_blocked $lockp] 0
+ error_check_good lock_get:c [is_substr $lockp $env] 1
+ lappend locklist $lockp
+ }
+ release_list $locklist
+
+	# Finally, acquire locks that will make a second locker's requests fail
+ set locklist {}
+ foreach i {ng write read} {
+ set lockp [$env lock_get $i $locker $obj]
+ lappend locklist $lockp
+ error_check_good lock_get:d [is_blocked $lockp] 0
+ error_check_good lock_get:d [is_substr $lockp $env] 1
+ }
+
+ # Change the locker
+ set locker [incr locker]
+ set blocklist {}
+ # Skip NO_LOCK lock.
+ puts "Lock001.e: Change the locker, acquire read and write."
+ foreach i {write read} {
+ catch {$env lock_get -nowait $i $locker $obj} ret
+ error_check_good lock_get:e [is_substr $ret "not granted"] 1
+ #error_check_good lock_get:e [is_substr $lockp $env] 1
+ #error_check_good lock_get:e [is_blocked $lockp] 0
+ }
+ # Now release original locks
+ release_list $locklist
+
+ # Now re-acquire blocking locks
+ set locklist {}
+ puts "Lock001.f: Re-acquire blocking locks."
+ foreach i {write read} {
+ set lockp [$env lock_get -nowait $i $locker $obj ]
+ error_check_good lock_get:f [is_substr $lockp $env] 1
+ error_check_good lock_get:f [is_blocked $lockp] 0
+ lappend locklist $lockp
+ }
+
+ # Now release new locks
+ release_list $locklist
+
+ puts "Lock001 Complete."
+}
+
+# Blocked locks appear as lockmgrN.lockM\nBLOCKED
+proc is_blocked { l } {
+ if { [string compare $l BLOCKED ] == 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
diff --git a/bdb/test/lock002.tcl b/bdb/test/lock002.tcl
new file mode 100644
index 00000000000..b433730b1e6
--- /dev/null
+++ b/bdb/test/lock002.tcl
@@ -0,0 +1,151 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock002.tcl,v 11.10 2000/08/25 14:21:51 sue Exp $
+#
+# Exercise basic multi-process aspects of lock.
+proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
+ source ./include.tcl
+
+ puts "Lock002: Basic multi-process lock tests."
+
+ env_cleanup $testdir
+
+ set nmodes [isqrt [llength $conflicts]]
+
+ # Open the lock
+ mlock_open $maxlocks $nmodes $conflicts
+ mlock_wait
+}
+
+# Make sure that we can create a region; destroy it, attach to it,
+# detach from it, etc.
+proc mlock_open { maxl nmodes conflicts } {
+ source ./include.tcl
+
+	puts "Lock002.a: multi-process open/close test"
+
+ # Open/Create region here. Then close it and try to open from
+ # other test process.
+ set env_cmd [concat "berkdb env -create -mode 0644 \
+ -lock -lock_max $maxl -lock_conflict" \
+ [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Open from other test process
+ set env_cmd "berkdb env -mode 0644 -home $testdir"
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Now make sure that we can reopen the region.
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Try closing the remote region
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:lock_close $ret 0
+
+ # Try opening for create. Will succeed because region exists.
+ set env_cmd [concat "berkdb env -create -mode 0644 \
+ -lock -lock_max $maxl -lock_conflict" \
+ [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ error_check_good remote:env_open [is_valid_env $local_env] TRUE
+
+ # close locally
+ reset_env $local_env
+
+ # Close and exit remote
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ catch { close $f1 } result
+}
+
+proc mlock_wait { } {
+ source ./include.tcl
+
+	puts "Lock002.b: multi-process get/put wait test"
+
+ # Open region locally
+ set env_cmd "berkdb env -lock -home $testdir"
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ # Open region remotely
+ set f1 [open |$tclsh_path r+]
+
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Get a write lock locally; try for the read lock
+ # remotely. We hold the locks for several seconds
+ # so that we can use timestamps to figure out if the
+ # other process waited.
+ set locker 1
+ set local_lock [$local_env lock_get write $locker object1]
+ error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now request a lock that we expect to hang; generate
+ # timestamps so we can tell if it actually hangs.
+ set locker 2
+ set remote_lock [send_timed_cmd $f1 1 \
+ "set lock \[$remote_env lock_get write $locker object1\]"]
+
+ # Now sleep before releasing lock
+ tclsleep 5
+ set result [$local_lock put]
+ error_check_good lock_put $result 0
+
+ # Now get the result from the other script
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Now get the remote lock
+ set remote_lock [send_cmd $f1 "puts \$lock"]
+ error_check_good remote:lock_get \
+ [is_valid_lock $remote_lock $remote_env] TRUE
+
+	# Now make the other process wait 5 seconds and then release its
+	# lock while we try to get a write lock on it.
+ set start [timestamp -r]
+
+ set ret [send_cmd $f1 "tclsleep 5"]
+
+ set ret [send_cmd $f1 "$remote_lock put"]
+
+ set locker 1
+ set local_lock [$local_env lock_get write $locker object1]
+ error_check_good lock_get:time \
+ [expr [expr [timestamp -r] - $start] > 2] 1
+ error_check_good lock_get:local \
+ [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now check remote's result
+ set result [rcv_result $f1]
+ error_check_good lock_put:remote $result 0
+
+ # Clean up remote
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ close $f1
+
+ # Now close up locally
+ set ret [$local_lock put]
+ error_check_good lock_put $ret 0
+
+ reset_env $local_env
+}
diff --git a/bdb/test/lock003.tcl b/bdb/test/lock003.tcl
new file mode 100644
index 00000000000..539b6d0ff66
--- /dev/null
+++ b/bdb/test/lock003.tcl
@@ -0,0 +1,48 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock003.tcl,v 11.16 2000/08/25 14:21:51 sue Exp $
+#
+# Exercise multi-process aspects of lock. Generate a bunch of parallel
+# testers that try to randomly obtain locks.
+proc lock003 { dir {iter 500} {max 1000} {procs 5} {ldegree 5} {objs 75} \
+ {reads 65} {wait 1} {conflicts { 3 0 0 0 0 0 1 0 1 1}} {seeds {}} } {
+ source ./include.tcl
+
+ puts "Lock003: Multi-process random lock test"
+
+ # Clean up after previous runs
+ env_cleanup $dir
+
+ # Open/create the lock region
+ set e [berkdb env -create -lock -home $dir]
+ error_check_good env_open [is_substr $e env] 1
+
+ set ret [$e close]
+ error_check_good env_close $ret 0
+
+ # Now spawn off processes
+ set pidlist {}
+ for { set i 0 } {$i < $procs} {incr i} {
+ if { [llength $seeds] == $procs } {
+ set s [lindex $seeds $i]
+ }
+		puts "$tclsh_path\
+		    $test_path/wrap.tcl \
+		    lockscript.tcl $testdir/lock003.$i.out\
+		    $dir $iter $objs $wait $ldegree $reads &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ lockscript.tcl $testdir/lock003.$i.out \
+ $dir $iter $objs $wait $ldegree $reads &]
+ lappend pidlist $p
+ }
+
+ puts "Lock003: $procs independent processes now running"
+ watch_procs 30 10800
+	# Remove the per-process output files
+	for { set i 0 } {$i < $procs} {incr i} {
+		fileremove -f $testdir/lock003.$i.out
+ }
+}
diff --git a/bdb/test/lockscript.tcl b/bdb/test/lockscript.tcl
new file mode 100644
index 00000000000..bd07d80b54b
--- /dev/null
+++ b/bdb/test/lockscript.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lockscript.tcl,v 11.11 2000/03/24 19:53:39 krinsky Exp $
+#
+# Random lock tester.
+# Usage: lockscript dir numiters numobjs sleepint degree readratio
+# dir: lock directory.
+# numiters: Total number of iterations.
+# numobjs: Number of objects on which to lock.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
+# readratio: Percent of locks that should be reads.
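+#
+# For example (illustrative values only; the suite normally runs this
+# script through wrap.tcl):
+#	tclsh lockscript.tcl TESTDIR 500 75 1 5 65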
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "lockscript dir numiters numobjs sleepint degree readratio"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set numobjs [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set readratio [ lindex $argv 5 ]
+set locker [pid]
+
+# Initialize random number generator
+global rand_init
+berkdb srand $rand_init
+
+puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
+puts "$sleepint $degree $readratio"
+flush stdout
+
+set e [berkdb env -create -lock -home $dir]
+error_check_good env_open [is_substr $e env] 1
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set locklist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj $numobjs]
+ set lastobj [expr $obj + 1]
+ set x [berkdb random_int 1 100 ]
+ if { $x <= $readratio } {
+ set rw read
+ } else {
+ set rw write
+ }
+ puts "[timestamp] $locker $lnum: $rw $obj"
+
+ # Do get; add to list
+ set lockp [$e lock_get $rw $locker $obj]
+ lappend locklist $lockp
+ if {$lastobj > $numobjs} {
+ break
+ }
+ }
+ # Pick sleep interval
+ tclsleep [berkdb random_int 1 $sleepint]
+
+ # Now release locks
+ puts "[timestamp] $locker released locks"
+ release_list $locklist
+ flush stdout
+}
+
+set ret [$e close]
+error_check_good env_close $ret 0
+
+puts "[timestamp] $locker Complete"
+flush stdout
+
+exit
diff --git a/bdb/test/log.tcl b/bdb/test/log.tcl
new file mode 100644
index 00000000000..c3802d0f971
--- /dev/null
+++ b/bdb/test/log.tcl
@@ -0,0 +1,337 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log.tcl,v 11.17 2000/11/30 20:09:19 dda Exp $
+#
+# Options are:
+# -dir <directory in which to store memp>
+# -maxfilesize <maxsize of log file>
+# -iterations <iterations>
+# -stat
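+#
+# For example (an illustrative invocation only; the values are arbitrary):
+#	logtest -iterations 200 -maxfilesize 65536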
+proc log_usage {} {
+ puts "log -dir <directory> -iterations <number of ops> \
+ -maxfilesize <max size of log files> -stat"
+}
+proc logtest { args } {
+ source ./include.tcl
+ global rand_init
+
+ # Set defaults
+ set iterations 1000
+ set maxfile [expr 1024 * 128]
+ set dostat 0
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -i.* { incr i; set iterations [lindex $args $i] }
+ -m.* { incr i; set maxfile [lindex $args $i] }
+ -s.* { set dostat 1 }
+ default {
+ puts -nonewline "FAIL:[timestamp] Usage: "
+ log_usage
+ return
+ }
+ }
+ }
+ set multi_log [expr 3 * $iterations]
+
+ # Clean out old log if it existed
+ puts "Unlinking log: error message OK"
+ env_cleanup $testdir
+
+ # Now run the various functionality tests
+ berkdb srand $rand_init
+
+ log001 $testdir $maxfile $iterations
+ log001 $testdir $maxfile $multi_log
+ log002 $testdir $maxfile
+ log003 $testdir $maxfile
+ log004 $testdir
+}
+
+proc log001 { dir max nrecs } {
+ source ./include.tcl
+
+ puts "Log001: Basic put/get test"
+
+ env_cleanup $dir
+
+ set env [berkdb env -log -create -home $dir \
+ -mode 0644 -log_max $max]
+ error_check_bad log_env:$dir $env NULL
+ error_check_good log:$dir [is_substr $env "env"] 1
+
+ # We will write records to the log and make sure we can
+ # read them back correctly. We'll use a standard pattern
+ # repeated some number of times for each record.
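+	# For example, record number 2 is the pattern "2:logrec:2"
+	# repeated three times: 2:logrec:22:logrec:22:logrec:2.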
+
+ set lsn_list {}
+ set rec_list {}
+ puts "Log001.a: Writing $nrecs log records"
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set rec ""
+ for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
+ set rec $rec$i:logrec:$i
+ }
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [is_substr $lsn log_cmd] 1
+ lappend lsn_list $lsn
+ lappend rec_list $rec
+ }
+ puts "Log001.b: Retrieving log records sequentially (forward)"
+ set i 0
+ for { set grec [$env log_get -first] } { [llength $grec] != 0 } {
+ set grec [$env log_get -next]} {
+ error_check_good log_get:seq [lindex $grec 1] \
+ [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "Log001.c: Retrieving log records sequentially (backward)"
+ set i [llength $rec_list]
+ for { set grec [$env log_get -last] } { [llength $grec] != 0 } {
+ set grec [$env log_get -prev] } {
+ incr i -1
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ }
+
+ puts "Log001.d: Retrieving log records sequentially by LSN"
+ set i 0
+ foreach lsn $lsn_list {
+ set grec [$env log_get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "Log001.e: Retrieving log records randomly by LSN"
+ set m [expr [llength $lsn_list] - 1]
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set recno [berkdb random_int 0 $m ]
+ set lsn [lindex $lsn_list $recno]
+ set grec [$env log_get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
+ }
+
+ # Close and unlink the file
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove:$dir [berkdb envremove -home $dir] 0
+
+ puts "Log001 Complete"
+}
+
+proc log002 { dir {max 32768} } {
+ source ./include.tcl
+
+ puts "Log002: Multiple log test w/trunc, file, compare functionality"
+
+ env_cleanup $dir
+
+ set env [berkdb env -create -home $dir -mode 0644 -log -log_max $max]
+ error_check_bad log_env:$dir $env NULL
+ error_check_good log:$dir [is_substr $env "env"] 1
+
+	# We'll record every hundredth record for later use
+ set info_list {}
+
+ set i 0
+ puts "Log002.a: Writing log records"
+
+ for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
+ set rec [random_data 120 0 0]
+ set len [string length $rec]
+ set lsn [$env log_put $rec]
+
+ if { [expr $i % 100 ] == 0 } {
+ lappend info_list [list $lsn $rec]
+ }
+ incr i
+ }
+
+ puts "Log002.b: Checking log_compare"
+ set last {0 0}
+ foreach p $info_list {
+ set l [lindex $p 0]
+ if { [llength $last] != 0 } {
+ error_check_good \
+ log_compare [$env log_compare $l $last] 1
+ error_check_good \
+ log_compare [$env log_compare $last $l] -1
+ error_check_good \
+ log_compare [$env log_compare $l $l] 0
+ }
+ set last $l
+ }
+
+ puts "Log002.c: Checking log_file"
+ set flist [glob $dir/log*]
+ foreach p $info_list {
+
+ set lsn [lindex $p 0]
+ set f [$env log_file $lsn]
+
+ # Change all backslash separators on Windows to forward slash
+ # separators, which is what the rest of the test suite expects.
+ regsub -all {\\} $f {/} f
+
+ error_check_bad log_file:$f [lsearch $flist $f] -1
+ }
+
+ puts "Log002.d: Verifying records"
+ for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
+ set p [lindex $info_list $i]
+ set grec [$env log_get -set [lindex $p 0]]
+ error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
+ }
+
+ # Close and unlink the file
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove:$dir [berkdb envremove -home $dir] 0
+
+ puts "Log002 Complete"
+}
+
+proc log003 { dir {max 32768} } {
+ source ./include.tcl
+
+ puts "Log003: Verify log_flush behavior"
+
+ env_cleanup $dir
+ set short_rec "abcdefghijklmnopqrstuvwxyz"
+ set long_rec [repeat $short_rec 200]
+ set very_long_rec [repeat $long_rec 4]
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "Log003.a: Verify flush on [string length $rec] byte rec"
+
+ set env [berkdb env -log -home $dir \
+ -create -mode 0644 -log_max $max]
+ error_check_bad log_env:$dir $env NULL
+ error_check_good log:$dir [is_substr $env "env"] 1
+
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ set ret [$env log_flush $lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+ set ret [$env close]
+ error_check_good log_env:close $ret 0
+
+ # Now, remove the log region
+ #set ret [berkdb envremove -home $dir]
+ #error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb env -create -home $dir \
+ -log -mode 0644 -log_max $max]
+ error_check_bad log_env:$dir $env NULL
+ error_check_good log:$dir [is_substr $env "env"] 1
+
+ set gotrec [$env log_get -first]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove:$dir [berkdb envremove -home $dir] 0
+ log_cleanup $dir
+ }
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "Log003.b: \
+ Verify flush on non-last record [string length $rec]"
+ set env [berkdb env \
+ -create -log -home $dir -mode 0644 -log_max $max]
+ error_check_bad log_env:$dir $env NULL
+ error_check_good log:$dir [is_substr $env "env"] 1
+
+ # Put 10 random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Put the record we are interested in
+ set save_lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
+
+ # Put 10 more random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Now check the flush
+ set ret [$env log_flush $save_lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+
+ #
+ # Now, close and remove the log region
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $dir]
+ error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb env \
+ -home $dir -create -log -mode 0644 -log_max $max]
+ error_check_bad log_env:$dir $env NULL
+ error_check_good log:$dir [is_substr $env "env"] 1
+
+ set gotrec [$env log_get -set $save_lsn]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove:$dir [berkdb envremove -home $dir] 0
+ log_cleanup $dir
+ }
+
+ puts "Log003 Complete"
+}
+
+# Make sure that if we do PREVs on a log, but the beginning of the
+# log has been truncated, we do the right thing.
+proc log004 { dir } {
+ source ./include.tcl
+
+ puts "Log004: Prev on log when beginning of log has been truncated."
+ # Use archive test to populate log
+ env_cleanup $dir
+ puts "Log004.a: Call archive to populate log."
+ archive
+
+ # Delete all log files under 100
+ puts "Log004.b: Delete all log files under 100."
+ set ret [catch { glob $dir/log.00000000* } result]
+ if { $ret == 0 } {
+ eval fileremove -f $result
+ }
+
+ # Now open the log and get the first record and try a prev
+ puts "Log004.c: Open truncated log, attempt to access missing portion."
+ set myenv [berkdb env -create -log -home $dir]
+ error_check_good log_open [is_substr $myenv "env"] 1
+
+ set ret [$myenv log_get -first]
+ error_check_bad log_get [llength $ret] 0
+
+ # This should give DB_NOTFOUND which is a ret of length 0
+ catch {$myenv log_get -prev} ret
+ error_check_good log_get_prev [string length $ret] 0
+
+ puts "Log004.d: Close log and environment."
+ error_check_good log_close [$myenv close] 0
+ puts "Log004 complete."
+}
diff --git a/bdb/test/logtrack.list b/bdb/test/logtrack.list
new file mode 100644
index 00000000000..ba7f34a6d13
--- /dev/null
+++ b/bdb/test/logtrack.list
@@ -0,0 +1,68 @@
+PREFIX crdel
+BEGIN fileopen 141
+BEGIN metasub 142
+BEGIN metapage 143
+DEPRECATED old_delete 144
+BEGIN rename 145
+BEGIN delete 146
+PREFIX db
+BEGIN addrem 41
+DEPRECATED split 42
+BEGIN big 43
+BEGIN ovref 44
+BEGIN relink 45
+DEPRECATED addpage 46
+BEGIN debug 47
+BEGIN noop 48
+PREFIX bam
+BEGIN pg_alloc 51
+DEPRECATED pg_alloc1 60
+BEGIN pg_free 52
+DEPRECATED pg_free1 61
+DEPRECATED split1 53
+BEGIN split 62
+DEPRECATED rsplit1 54
+BEGIN rsplit 63
+BEGIN adj 55
+BEGIN cadjust 56
+BEGIN cdel 57
+BEGIN repl 58
+BEGIN root 59
+BEGIN curadj 64
+BEGIN rcuradj 65
+PREFIX ham
+BEGIN insdel 21
+BEGIN newpage 22
+DEPRECATED splitmeta 23
+BEGIN splitdata 24
+BEGIN replace 25
+DEPRECATED newpgno 26
+DEPRECATED ovfl 27
+BEGIN copypage 28
+BEGIN metagroup 29
+DEPRECATED groupalloc1 30
+DEPRECATED groupalloc2 31
+BEGIN groupalloc 32
+BEGIN curadj 33
+BEGIN chgpg 34
+PREFIX log
+DEPRECATED register1 1
+BEGIN register 2
+PREFIX qam
+BEGIN inc 76
+BEGIN incfirst 77
+BEGIN mvptr 78
+BEGIN del 79
+BEGIN add 80
+BEGIN delete 81
+BEGIN rename 82
+BEGIN delext 83
+PREFIX txn
+DEPRECATED old_regop 6
+BEGIN regop 10
+DEPRECATED old_ckp 7
+BEGIN ckp 11
+DEPRECATED xa_regop_old 8
+BEGIN xa_regop 13
+DEPRECATED child_old 9
+BEGIN child 12
diff --git a/bdb/test/logtrack.tcl b/bdb/test/logtrack.tcl
new file mode 100644
index 00000000000..cea4912e627
--- /dev/null
+++ b/bdb/test/logtrack.tcl
@@ -0,0 +1,130 @@
+# See the file LICENSE for redistribution information
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: logtrack.tcl,v 11.6 2000/10/27 15:30:39 krinsky Exp $
+#
+# logtrack.tcl: A collection of routines, formerly implemented in Perl
+# as log.pl, to track which log record types the test suite hits.
+
+set ltsname "logtrack_seen.db"
+set ltlist $test_path/logtrack.list
+set tmpname "logtrack_tmp"
+
+proc logtrack_clean { } {
+ global ltsname
+
+ file delete -force $ltsname
+
+ return
+}
+
+proc logtrack_init { } {
+ global ltsname
+
+ logtrack_clean
+
+ # Create an empty tracking database.
+ [berkdb_open -create -truncate -btree $ltsname] close
+
+ return
+}
+
+# Dump the logs for directory dirname and record which log
+# records were seen.
+proc logtrack_read { dirname } {
+ global ltsname tmpname util_path
+
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+
+ file delete -force $tmpname
+ set ret [catch {exec $util_path/db_printlog -N \
+ -h "$dirname" > $tmpname} res]
+ error_check_good printlog $ret 0
+ error_check_good tmpfile_exists [file exists $tmpname] 1
+
+ set f [open $tmpname r]
+ while { [gets $f record] >= 0 } {
+ regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name
+ error_check_good seendb_put [$seendb put $name ""] 0
+ }
+ close $f
+ file delete -force $tmpname
+
+ error_check_good seendb_close [$seendb close] 0
+}
+
+# Print the log record types that were seen but should not have been
+# seen and the log record types that were not seen but should have been seen.
+proc logtrack_summary { } {
+ global ltsname ltlist testdir
+
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+ set existdb [berkdb_open -create -btree]
+ error_check_good existdb_open [is_valid_db $existdb] TRUE
+ set deprecdb [berkdb_open -create -btree]
+ error_check_good deprecdb_open [is_valid_db $deprecdb] TRUE
+
+ error_check_good ltlist_exists [file exists $ltlist] 1
+ set f [open $ltlist r]
+ set pref ""
+ while { [gets $f line] >= 0 } {
+ # Get the keyword, the first thing on the line:
+ # BEGIN/DEPRECATED/PREFIX
+ set keyword [lindex $line 0]
+
+ if { [string compare $keyword PREFIX] == 0 } {
+ # New prefix.
+ set pref [lindex $line 1]
+ } elseif { [string compare $keyword BEGIN] == 0 } {
+ # A log type we care about; put it on our list.
+
+ # Skip noop and debug.
+ if { [string compare [lindex $line 1] noop] == 0 } {
+ continue
+ }
+ if { [string compare [lindex $line 1] debug] == 0 } {
+ continue
+ }
+
+ error_check_good exist_put [$existdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ } elseif { [string compare $keyword DEPRECATED] == 0 } {
+ error_check_good deprec_put [$deprecdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ }
+ }
+
+ error_check_good exist_curs \
+ [is_valid_cursor [set ec [$existdb cursor]] $existdb] TRUE
+ while { [llength [set dbt [$ec get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$seendb count $rec] == 0 } {
+ puts "FAIL: log record type $rec not seen"
+ }
+ }
+ error_check_good exist_curs_close [$ec close] 0
+
+	error_check_good seen_curs \
+	    [is_valid_cursor [set sc [$seendb cursor]] $seendb] TRUE
+ while { [llength [set dbt [$sc get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$existdb count $rec] == 0 } {
+ if { [$deprecdb count $rec] == 0 } {
+ puts "FAIL: unknown log record type $rec seen"
+ } else {
+ puts "FAIL: deprecated log record type $rec seen"
+ }
+ }
+ }
+ error_check_good seen_curs_close [$sc close] 0
+
+ error_check_good seendb_close [$seendb close] 0
+ error_check_good existdb_close [$existdb close] 0
+ error_check_good deprecdb_close [$deprecdb close] 0
+
+ logtrack_clean
+}
diff --git a/bdb/test/mdbscript.tcl b/bdb/test/mdbscript.tcl
new file mode 100644
index 00000000000..368aad371b2
--- /dev/null
+++ b/bdb/test/mdbscript.tcl
@@ -0,0 +1,381 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mdbscript.tcl,v 11.23 2000/10/09 02:26:11 krinsky Exp $
+#
+# Process script for the multi-process db tester.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+global dbenv
+global klock
+global l_keys
+global procid
+global alphabet
+
+# In Tcl, when there are multiple catch handlers, *all* handlers
+# are called, so we have to resort to this hack.
+#
+global exception_handled
+
+set exception_handled 0
+
+set datastr $alphabet$alphabet
+
+# Usage: mdbscript method dir file nentries iter procid procs
+# method: access method
+# dir: DBHOME directory
+# file: db file on which to operate
+# nentries: number of entries taken from dictionary
+# iter: number of operations to run
+# procid: this process's id number
+# procs: total number of processes running
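+#
+# For example (illustrative values only; the file name is arbitrary):
+#	mdbscript btree TESTDIR mdbtest.db 1000 50 0 5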
+set usage "mdbscript method dir file nentries iter procid procs"
+
+# Verify usage
+if { $argc != 7 } {
+ puts "FAIL:[timestamp] test042: Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set dir [lindex $argv 1]
+set file [lindex $argv 2]
+set nentries [ lindex $argv 3 ]
+set iter [ lindex $argv 4 ]
+set procid [ lindex $argv 5 ]
+set procs [ lindex $argv 6 ]
+
+set pflags ""
+set gflags ""
+set txn ""
+
+set renum [is_rrecno $method]
+set omethod [convert_method $method]
+
+if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+}
+
+# Initialize seed
+global rand_init
+
+# We want repeatable results, but we also want each instance of mdbscript
+# to do something different. So we add the procid to the fixed seed.
+# (Note that this is a serial number given by the caller, not a pid.)
+berkdb srand [expr $rand_init + $procid]
+
+puts "Beginning execution for [pid] $method"
+puts "$dir db_home"
+puts "$file database"
+puts "$nentries data elements"
+puts "$iter iterations"
+puts "$procid process id"
+puts "$procs processes"
+
+set klock NOLOCK
+flush stdout
+
+set dbenv [berkdb env -create -cdb -home $dir]
+#set dbenv [berkdb env -create -cdb -log -home $dir]
+error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+# Init globals (no data)
+set nkeys [db_init $db 0]
+puts "Initial number of keys: $nkeys"
+error_check_good db_init $nkeys $nentries
+tclsleep 5
+
+proc get_lock { k } {
+ global dbenv
+ global procid
+ global klock
+ global DB_LOCK_WRITE
+ global DB_LOCK_NOWAIT
+ global errorInfo
+ global exception_handled
+ # Make sure that the key isn't in the middle of
+ # a delete operation
+ if {[catch {$dbenv lock_get -nowait write $procid $k} klock] != 0 } {
+ set exception_handled 1
+
+ error_check_good \
+ get_lock [is_substr $errorInfo "DB_LOCK_NOTGRANTED"] 1
+ puts "Warning: key $k locked"
+ set klock NOLOCK
+ return 1
+ } else {
+ error_check_good get_lock [is_valid_lock $klock $dbenv] TRUE
+ }
+ return 0
+}
+
+# On each iteration we're going to randomly pick a key.
+# 1. We'll either get it (verifying that its contents are reasonable).
+# 2. Put it (using an overwrite to make the data be datastr:ID).
+# 3. Get it and do a put through the cursor, tacking our ID on to the
+#    end of the data.
+# 4. Get it, read forward some random number of keys.
+# 5. Get it, read forward some random number of keys and do a put (replace).
+# 6. Get it, read forward some random number of keys and do a del. And then
+# do a put of the key.
+set gets 0
+set getput 0
+set overwrite 0
+set seqread 0
+set seqput 0
+set seqdel 0
+set dlen [string length $datastr]
+
+for { set i 0 } { $i < $iter } { incr i } {
+ set op [berkdb random_int 0 5]
+ puts "iteration $i operation $op"
+ flush stdout
+ if {[catch {
+ switch $op {
+ 0 {
+ incr gets
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ continue;
+ }
+
+ set rec [eval {$db get} $txn $gflags {$key}]
+ error_check_bad "$db get $key" [llength $rec] 0
+ set partial [string range \
+ [lindex [lindex $rec 0] 1] 0 [expr $dlen - 1]]
+ error_check_good \
+ "$db get $key" $partial [pad_data $method $datastr]
+ }
+ 1 {
+ incr overwrite
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ set data $datastr:$procid
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good "$db put $key" $ret 0
+ }
+ 2 {
+ incr getput
+ set dbc [$db cursor -update]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 [expr $dlen - 1]]
+ error_check_good \
+ "$dbc get $key" $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put \
+ -current [chop_data $method $rec]]
+ error_check_good "$dbc put $key" $ret 0
+ error_check_good "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ 3 -
+ 4 -
+ 5 {
+ if { $op == 3 } {
+ set flags ""
+ } else {
+ set flags -update
+ }
+ set dbc [eval {$db cursor} $flags]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+
+ # Now read a few keys sequentially
+ set nloop [berkdb random_int 0 10]
+ if { [berkdb random_int 0 1] == 0 } {
+ set flags -next
+ } else {
+ set flags -prev
+ }
+ while { $nloop > 0 } {
+ set lastret $ret
+ set ret [eval {$dbc get} $flags]
+ # Might read beginning/end of file
+ if { [llength $ret] == 0} {
+ set ret $lastret
+ break
+ }
+ incr nloop -1
+ }
+ switch $op {
+ 3 {
+ incr seqread
+ }
+ 4 {
+ incr seqput
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 \
+ [expr $dlen - 1]]
+ error_check_good "$dbc get $key" \
+ $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put -current \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $key" $ret 0
+ }
+ 5 {
+ incr seqdel
+ set k [lindex [lindex $ret 0] 0]
+ # We need to lock the item we're
+ # deleting so that someone else can't
+ # try to do a get while we're
+ # deleting
+ error_check_good "$klock put" \
+ [$klock put] 0
+ set klock NOLOCK
+ set cur [$dbc get -current]
+ error_check_bad get_current \
+ [llength $cur] 0
+ set key [lindex [lindex $cur 0] 0]
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue
+ }
+ set ret [$dbc del]
+ error_check_good "$dbc del" $ret 0
+ set rec $datastr
+ append rec ":$procid"
+ if { $renum == 1 } {
+ set ret [$dbc put -before \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret $k
+ } elseif { \
+ [is_record_based $method] == 1 } {
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ set ret [$db put $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$db put $k" $ret 0
+ } else {
+ set ret [$dbc put -keylast $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret 0
+ }
+ }
+ }
+ if { $close_cursor == 1 } {
+ error_check_good \
+ "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ }
+ }
+ } res] != 0} {
+ global errorInfo;
+ global exception_handled;
+
+ puts $errorInfo
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+
+ flush stdout
+ if { [string compare $klock NOLOCK] != 0 } {
+ catch {$klock put}
+ }
+ if {$close_cursor == 1} {
+ catch {$dbc close}
+ set close_cursor 0
+ }
+
+ if {[string first FAIL $theError] == 0 && \
+ $exception_handled != 1} {
+ error "FAIL:[timestamp] test042: key $k: $theError"
+ }
+ set exception_handled 0
+ } else {
+ flush stdout
+ if { [string compare $klock NOLOCK] != 0 } {
+ error_check_good "$klock put" [$klock put] 0
+ set klock NOLOCK
+ }
+ }
+}
+
+if {[catch {$db close} ret] != 0 } {
+ error_check_good close [is_substr $errorInfo "DB_INCOMPLETE"] 1
+ puts "Warning: sync incomplete on close ([pid])"
+} else {
+ error_check_good close $ret 0
+}
+$dbenv close
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: "
+puts "\t$gets gets"
+puts "\t$overwrite overwrites"
+puts "\t$getput getputs"
+puts "\t$seqread seqread"
+puts "\t$seqput seqput"
+puts "\t$seqdel seqdel"
+flush stdout
+
+exit
diff --git a/bdb/test/mpool.tcl b/bdb/test/mpool.tcl
new file mode 100644
index 00000000000..b2eb2252037
--- /dev/null
+++ b/bdb/test/mpool.tcl
@@ -0,0 +1,420 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mpool.tcl,v 11.34 2001/01/18 04:58:07 krinsky Exp $
+#
+# Options are:
+# -cachesize {gbytes bytes ncache}
+# -nfiles <files>
+# -iterations <iterations>
+# -pagesize <page size in bytes>
+# -dir <directory in which to store memp>
+# -stat
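+#
+# For example (an illustrative invocation only; the values are arbitrary):
+#	mpool -nfiles 3 -pagesize 512 -iterations 100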
+proc memp_usage {} {
+ puts "memp -cachesize {gbytes bytes ncache}"
+ puts "\t-nfiles <files>"
+ puts "\t-iterations <iterations>"
+ puts "\t-pagesize <page size in bytes>"
+ puts "\t-dir <memp directory>"
+ puts "\t-mem {private system}"
+ return
+}
+
+proc mpool { args } {
+ source ./include.tcl
+ global errorCode
+
+ puts "mpool {$args} running"
+ # Set defaults
+ set cachearg " -cachesize {0 200000 3}"
+ set nfiles 5
+ set iterations 500
+ set pagesize "512 1024 2048 4096 8192"
+ set npages 100
+ set procs 4
+ set seeds ""
+ set shm_key 1
+ set dostat 0
+ set flags ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -c.* {
+ incr i
+ set cachesize [lindex $args $i]
+ set cachearg " -cachesize $cachesize"
+ }
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -i.* { incr i; set iterations [lindex $args $i] }
+ -me.* {
+ incr i
+ if { [string \
+ compare [lindex $args $i] private] == 0 } {
+ set flags -private
+ } elseif { [string \
+ compare [lindex $args $i] system] == 0 } {
+ #
+ # We need to use a shm id. Use one
+ # that is the same each time so that
+ # we do not grow segments infinitely.
+ set flags "-system_mem -shm_key $shm_key"
+ } else {
+ puts -nonewline \
+ "FAIL:[timestamp] Usage: "
+ memp_usage
+ return
+ }
+ }
+ -nf.* { incr i; set nfiles [lindex $args $i] }
+ -np.* { incr i; set npages [lindex $args $i] }
+ -pa.* { incr i; set pagesize [lindex $args $i] }
+ -pr.* { incr i; set procs [lindex $args $i] }
+ -se.* { incr i; set seeds [lindex $args $i] }
+ -st.* { set dostat 1 }
+ default {
+ puts -nonewline "FAIL:[timestamp] Usage: "
+ memp_usage
+ return
+ }
+ }
+ }
+
+ # Clean out old directory
+ env_cleanup $testdir
+
+ # Open the memp with region init specified
+ set ret [catch {eval {berkdb env -create -mode 0644}\
+ $cachearg {-region_init -home $testdir} $flags} res]
+ if { $ret == 0 } {
+ set env $res
+ } else {
+ # If the env open failed, it may be because we're on a platform
+ # such as HP-UX 10 that won't support mutexes in shmget memory.
+ # Or QNX, which doesn't support system memory at all.
+ # Verify that the return value was EINVAL or EOPNOTSUPP
+ # and bail gracefully.
+ error_check_good is_shm_test [is_substr $flags -system_mem] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning:\
+ platform does not support mutexes in shmget memory."
+ puts "Skipping shared memory mpool test."
+ return
+ }
+ error_check_good env_open [is_substr $env env] 1
+
+ reset_env $env
+ env_cleanup $testdir
+
+ # Now open without region init
+ set env [eval {berkdb env -create -mode 0644}\
+ $cachearg {-home $testdir} $flags]
+	error_check_good env_open [is_substr $env env] 1
+
+ memp001 $env \
+ $testdir $nfiles $iterations [lindex $pagesize 0] $dostat $flags
+ reset_env $env
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env_remove $ret 0
+ env_cleanup $testdir
+
+ memp002 $testdir \
+ $procs $pagesize $iterations $npages $seeds $dostat $flags
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env_remove $ret 0
+ env_cleanup $testdir
+
+ memp003 $testdir $iterations $flags
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env_remove $ret 0
+
+ env_cleanup $testdir
+}
+
+proc memp001 {env dir n iter psize dostat flags} {
+ source ./include.tcl
+ global rand_init
+
+ puts "Memp001: {$flags} random update $iter iterations on $n files."
+
+ # Open N memp files
+ for {set i 1} {$i <= $n} {incr i} {
+ set fname "data_file.$i"
+ file_create $dir/$fname 50 $psize
+
+ set mpools($i) \
+ [$env mpool -create -pagesize $psize -mode 0644 $fname]
+ error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
+ }
+
+ # Now, loop, picking files at random
+ berkdb srand $rand_init
+ for {set i 0} {$i < $iter} {incr i} {
+ set mpool $mpools([berkdb random_int 1 $n])
+ set p1 [get_range $mpool 10]
+ set p2 [get_range $mpool 10]
+ set p3 [get_range $mpool 10]
+ set p1 [replace $mpool $p1]
+ set p3 [replace $mpool $p3]
+ set p4 [get_range $mpool 20]
+ set p4 [replace $mpool $p4]
+ set p5 [get_range $mpool 10]
+ set p6 [get_range $mpool 20]
+ set p7 [get_range $mpool 10]
+ set p8 [get_range $mpool 20]
+ set p5 [replace $mpool $p5]
+ set p6 [replace $mpool $p6]
+ set p9 [get_range $mpool 40]
+ set p9 [replace $mpool $p9]
+ set p10 [get_range $mpool 40]
+ set p7 [replace $mpool $p7]
+ set p8 [replace $mpool $p8]
+ set p9 [replace $mpool $p9]
+ set p10 [replace $mpool $p10]
+ }
+
+ if { $dostat == 1 } {
+ puts [$env mpool_stat]
+ for {set i 1} {$i <= $n} {incr i} {
+ error_check_good mp_sync [$mpools($i) fsync] 0
+ }
+ }
+
+ # Close N memp files
+ for {set i 1} {$i <= $n} {incr i} {
+ error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
+ fileremove -f $dir/data_file.$i
+ }
+}
+
+proc file_create { fname nblocks blocksize } {
+ set fid [open $fname w]
+ for {set i 0} {$i < $nblocks} {incr i} {
+ seek $fid [expr $i * $blocksize] start
+ puts -nonewline $fid $i
+ }
+ seek $fid [expr $nblocks * $blocksize - 1]
+
+ # We don't end the file with a newline, because some platforms (like
+ # Windows) emit CR/NL. There does not appear to be a BINARY open flag
+ # that prevents this.
+ puts -nonewline $fid "Z"
+ close $fid
+
+ # Make sure it worked
+ if { [file size $fname] != $nblocks * $blocksize } {
+ error "FAIL: file_create could not create correct file size"
+ }
+}
+
+proc get_range { mpool max } {
+ set pno [berkdb random_int 0 $max]
+ set p [$mpool get $pno]
+ error_check_good page [is_valid_page $p $mpool] TRUE
+ set got [$p pgnum]
+ if { $got != $pno } {
+ puts "Get_range: Page mismatch page |$pno| val |$got|"
+ }
+ set ret [$p init "Page is pinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ return $p
+}
+
+proc replace { mpool p } {
+ set pgno [$p pgnum]
+
+ set ret [$p init "Page is unpinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ set ret [$p put -dirty]
+ error_check_good page_put $ret 0
+
+ set p2 [$mpool get $pgno]
+ error_check_good page [is_valid_page $p2 $mpool] TRUE
+
+ return $p2
+}
+
+proc memp002 { dir procs psizes iterations npages seeds dostat flags } {
+ source ./include.tcl
+
+ puts "Memp002: {$flags} Multiprocess mpool tester"
+
+ if { [is_substr $flags -private] != 0 } {
+		puts "Memp002 skipping:\
+		    multiple processes not supported by private memory"
+ return
+ }
+ set iter [expr $iterations / $procs]
+
+ # Clean up old stuff and create new.
+ env_cleanup $dir
+
+ for { set i 0 } { $i < [llength $psizes] } { incr i } {
+ fileremove -f $dir/file$i
+ }
+ set e [eval {berkdb env -create -lock -home $dir} $flags]
+ error_check_good dbenv [is_valid_widget $e env] TRUE
+
+ set pidlist {}
+ for { set i 0 } { $i < $procs } {incr i} {
+ if { [llength $seeds] == $procs } {
+ set seed [lindex $seeds $i]
+ } else {
+ set seed -1
+ }
+
+ puts "$tclsh_path\
+ $test_path/mpoolscript.tcl $dir $i $procs \
+ $iter $psizes $npages 3 $flags > \
+ $dir/memp002.$i.out &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mpoolscript.tcl $dir/memp002.$i.out $dir $i $procs \
+ $iter $psizes $npages 3 $flags &]
+ lappend pidlist $p
+ }
+ puts "Memp002: $procs independent processes now running"
+ watch_procs
+
+ reset_env $e
+}
+
+# Test reader-only/writer process combinations; we use the access methods
+# for testing.
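+#
+# An illustrative direct invocation (the values are arbitrary; mpool
+# above calls this as "memp003 $testdir $iterations $flags", supplying
+# all three arguments since flags follows the defaulted nentries):
+#
+#	memp003 TESTDIR 1000 ""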
+proc memp003 { dir {nentries 10000} flags } {
+ global alphabet
+ source ./include.tcl
+
+ puts "Memp003: {$flags} Reader/Writer tests"
+
+ if { [is_substr $flags -private] != 0 } {
+		puts "Memp003 skipping:\
+		    multiple processes not supported by private memory"
+ return
+ }
+
+ env_cleanup $dir
+ set psize 1024
+ set testfile mpool.db
+ set t1 $dir/t1
+
+ # Create an environment that the two processes can share
+ set c [list 0 [expr $psize * 10] 3]
+ set dbenv [eval {berkdb env \
+ -create -lock -home $dir -cachesize $c} $flags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # First open and create the file.
+
+ set db [berkdb_open -env $dbenv -create -truncate \
+ -mode 0644 -pagesize $psize -btree $testfile]
+ error_check_good dbopen/RW [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set txn ""
+ set count 0
+
+ puts "\tMemp003.a: create database"
+ set keys ""
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys $str
+
+ set ret [eval {$db put} $txn {$str $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn {$str}]
+ error_check_good get $ret [list [list $str $str]]
+
+ incr count
+ }
+ close $did
+ error_check_good close [$db close] 0
+
+ # Now open the file for read-only
+ set db [berkdb_open -env $dbenv -rdonly $testfile]
+ error_check_good dbopen/RO [is_substr $db db] 1
+
+ puts "\tMemp003.b: verify a few keys"
+ # Read and verify a couple of keys; saving them to check later
+ set testset ""
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ndx [berkdb random_int 0 [expr $nentries - 1]]
+ set key [lindex $keys $ndx]
+ if { [lsearch $testset $key] != -1 } {
+ incr i -1
+ continue;
+ }
+
+ # The remote process stuff is unhappy with
+ # zero-length keys; make sure we don't pick one.
+ if { [llength $key] == 0 } {
+ incr i -1
+ continue
+ }
+
+ lappend testset $key
+
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get/RO $ret [list [list $key $key]]
+ }
+
+ puts "\tMemp003.c: retrieve and modify keys in remote process"
+ # Now open remote process where we will open the file RW
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ puts $f1 "flush stdout"
+ flush $f1
+
+ set c [concat "{" [list 0 [expr $psize * 10] 3] "}" ]
+ set remote_env [send_cmd $f1 \
+ "berkdb env -create -lock -home $dir -cachesize $c $flags"]
+ error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
+
+ set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
+ error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
+
+ foreach k $testset {
+ # Get the key
+ set ret [send_cmd $f1 "$remote_db get $k"]
+ error_check_good remote_get $ret [list [list $k $k]]
+
+ # Now replace the key
+ set ret [send_cmd $f1 "$remote_db put $k $k$k"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.d: verify changes in local process"
+ foreach k $testset {
+		set ret [eval {$db get} $txn {$k}]
+		error_check_good get_verify/RO $ret [list [list $k $k$k]]
+ }
+
+ puts "\tMemp003.e: Fill up the cache with dirty buffers"
+ foreach k $testset {
+ # Now rewrite the keys with BIG data
+ set data [replicate $alphabet 32]
+ set ret [send_cmd $f1 "$remote_db put $k $data"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.f: Get more pages for the read-only file"
+ dump_file $db $txn $t1 nop
+
+ puts "\tMemp003.g: Sync from the read-only file"
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_close [$db close] 0
+
+ set ret [send_cmd $f1 "$remote_db close"]
+ error_check_good remote_get $ret 0
+
+ # Close the environment both remotely and locally.
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+ close $f1
+
+ reset_env $dbenv
+}
diff --git a/bdb/test/mpoolscript.tcl b/bdb/test/mpoolscript.tcl
new file mode 100644
index 00000000000..8695254c257
--- /dev/null
+++ b/bdb/test/mpoolscript.tcl
@@ -0,0 +1,170 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mpoolscript.tcl,v 11.12 2000/05/05 15:23:47 sue Exp $
+#
+# Random multiple process mpool tester.
+# Usage: mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags
+# dir: lock directory.
+# id: Unique identifier for this process.
+# maxprocs: Number of procs in this test.
+# numiters: Total number of iterations.
+# pgsizes: Pagesizes for the different files. Length of this item indicates
+# how many files to use.
+# numpages: Number of pages per file.
+# sleepint: Maximum sleep interval.
+# flags: Flags for env open
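+#
+# An illustrative argument vector (values are arbitrary; memp002
+# normally launches this script through wrap.tcl with an equivalent
+# set of arguments):
+#
+#	mpoolscript.tcl TESTDIR 0 4 125 {512 1024} 100 3 ""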
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage \
+ "mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags"
+
+# Verify usage
+if { $argc != 8 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ puts $argc
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set id [lindex $argv 1]
+set maxprocs [lindex $argv 2]
+set numiters [ lindex $argv 3 ]
+set pgsizes [ lindex $argv 4 ]
+set numpages [ lindex $argv 5 ]
+set sleepint [ lindex $argv 6 ]
+set flags [ lindex $argv 7]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+# Give time for all processes to start up.
+tclsleep 10
+
+puts -nonewline "Beginning execution for $id: $maxprocs $dir $numiters"
+puts " $pgsizes $numpages $sleepint"
+flush stdout
+
+# Figure out how small/large to make the cache
+set max 0
+foreach i $pgsizes {
+ if { $i > $max } {
+ set max $i
+ }
+}
+
+set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
+set env_cmd {berkdb env -lock -cachesize $cache -home $dir}
+set e [eval $env_cmd $flags]
+error_check_good env_open [is_valid_env $e] TRUE
+
+# Now open files
+set mpools {}
+set nfiles 0
+foreach psize $pgsizes {
+ set mp [$e mpool -create -mode 0644 -pagesize $psize file$nfiles]
+ error_check_good memp_fopen:$nfiles [is_valid_mpool $mp $e] TRUE
+ lappend mpools $mp
+ incr nfiles
+}
+
+puts "Establishing long-term pin on file 0 page $id for process $id"
+
+# Set up the long-pin page
+set lock [$e lock_get write $id 0:$id]
+error_check_good lock_get [is_valid_lock $lock $e] TRUE
+
+set mp [lindex $mpools 0]
+set master_page [$mp get -create $id]
+error_check_good mp_get:$master_page [is_valid_page $master_page $mp] TRUE
+
+set r [$master_page init MASTER$id]
+error_check_good page_init $r 0
+
+# Release the lock but keep the page pinned
+set r [$lock put]
+error_check_good lock_put $r 0
+
+# Main loop. On each iteration, we'll check every page in each of
+# the files. On any file, if we see the appropriate tag in the
+# field, we'll rewrite the page, else we won't. Keep track of
+# how many pages we actually process.
+set pages 0
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ puts "[timestamp]: iteration $iter, $pages pages set so far"
+ flush stdout
+ for { set fnum 1 } { $fnum < $nfiles } { incr fnum } {
+ if { [expr $fnum % 2 ] == 0 } {
+ set pred [expr ($id + $maxprocs - 1) % $maxprocs]
+ } else {
+ set pred [expr ($id + $maxprocs + 1) % $maxprocs]
+ }
+
+ set mpf [lindex $mpools $fnum]
+ for { set p 0 } { $p < $numpages } { incr p } {
+ set lock [$e lock_get write $id $fnum:$p]
+ error_check_good lock_get:$fnum:$p \
+ [is_valid_lock $lock $e] TRUE
+
+ # Now, get the page
+ set pp [$mpf get -create $p]
+ error_check_good page_get:$fnum:$p \
+ [is_valid_page $pp $mpf] TRUE
+
+ if { [$pp is_setto $pred] == 0 || [$pp is_setto 0] == 0 } {
+ # Set page to self.
+ set r [$pp init $id]
+ error_check_good page_init:$fnum:$p $r 0
+ incr pages
+ set r [$pp put -dirty]
+ error_check_good page_put:$fnum:$p $r 0
+ } else {
+ error_check_good page_put:$fnum:$p [$pp put] 0
+ }
+ error_check_good lock_put:$fnum:$p [$lock put] 0
+ }
+ }
+ tclsleep [berkdb random_int 1 $sleepint]
+}
+
+# Now verify our master page, release its pin, then verify everyone else's.
+puts "$id: End of run verification of master page"
+set r [$master_page is_setto MASTER$id]
+error_check_good page_check $r 1
+set r [$master_page put -dirty]
+error_check_good page_put $r 0
+
+set i [expr ($id + 1) % $maxprocs]
+set mpf [lindex $mpools 0]
+
+while { $i != $id } {
+ set p [$mpf get -create $i]
+ error_check_good mp_get [is_valid_page $p $mpf] TRUE
+
+ if { [$p is_setto MASTER$i] != 1 } {
+ puts "Warning: Master page $i not set."
+ }
+ error_check_good page_put:$p [$p put] 0
+
+ set i [expr ($i + 1) % $maxprocs]
+}
+
+# Close files
+foreach i $mpools {
+ set r [$i close]
+ error_check_good mpf_close $r 0
+}
+
+# Close environment system
+set r [$e close]
+error_check_good env_close $r 0
+
+puts "[timestamp] $id Complete"
+flush stdout
diff --git a/bdb/test/mutex.tcl b/bdb/test/mutex.tcl
new file mode 100644
index 00000000000..5300fb0c4a3
--- /dev/null
+++ b/bdb/test/mutex.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex.tcl,v 11.18 2000/09/01 19:24:59 krinsky Exp $
+#
+# Exercise mutex functionality.
+# Options are:
+# -dir <directory in which to store mpool>
+# -iter <iterations>
+# -mdegree <number of mutexes per iteration>
+# -nmutex <number of mutexes>
+# -procs <number of processes to run>
+# -wait <wait interval after getting locks>
+proc mutex_usage {} {
+ puts stderr "mutex\n\t-dir <dir>\n\t-iter <iterations>"
+ puts stderr "\t-mdegree <locks per iteration>\n\t-nmutex <n>"
+ puts stderr "\t-procs <nprocs>"
+ puts stderr "\n\t-wait <max wait interval>"
+ return
+}
+
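+# An illustrative invocation (the option values below are arbitrary;
+# any option may be omitted to take the defaults set in the mutex
+# proc below):
+#
+#	mutex -iter 100 -nmutex 20 -procs 3 -mdegree 2 -wait 2
+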
+proc mutex { args } {
+ source ./include.tcl
+
+ set dir db
+ set iter 500
+ set mdegree 3
+ set nmutex 20
+ set procs 5
+ set wait 2
+
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -i.* { incr i; set iter [lindex $args $i] }
+ -m.* { incr i; set mdegree [lindex $args $i] }
+ -n.* { incr i; set nmutex [lindex $args $i] }
+ -p.* { incr i; set procs [lindex $args $i] }
+ -w.* { incr i; set wait [lindex $args $i] }
+ default {
+ puts -nonewline "FAIL:[timestamp] Usage: "
+ mutex_usage
+ return
+ }
+ }
+ }
+
+ if { [file exists $testdir/$dir] != 1 } {
+ file mkdir $testdir/$dir
+ } elseif { [file isdirectory $testdir/$dir ] != 1 } {
+ error "$testdir/$dir is not a directory"
+ }
+
+ # Basic sanity tests
+ mutex001 $testdir $nmutex
+
+ # Basic synchronization tests
+ mutex002 $testdir $nmutex
+
+ # Multiprocess tests
+ mutex003 $testdir $iter $nmutex $procs $mdegree $wait
+}
+
+proc mutex001 { dir nlocks } {
+ source ./include.tcl
+
+ puts "Mutex001: Basic functionality"
+ env_cleanup $dir
+
+ # Test open w/out create; should fail
+ error_check_bad \
+ env_open [catch {berkdb env -lock -home $dir} env] 0
+
+ # Now open for real
+ set env [berkdb env -create -mode 0644 -lock -home $dir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set m [$env mutex 0644 $nlocks]
+ error_check_good mutex_init [is_valid_mutex $m $env] TRUE
+
+	# Get and set each mutex; sleep, then check each value and release.
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m get $i ]
+ error_check_good mutex_get $r 0
+
+ set r [$m setval $i $i]
+ error_check_good mutex_setval $r 0
+ }
+ tclsleep 5
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m getval $i]
+ error_check_good mutex_getval $r $i
+
+ set r [$m release $i ]
+ error_check_good mutex_get $r 0
+ }
+
+ error_check_good mutex_close [$m close] 0
+ error_check_good env_close [$env close] 0
+ puts "Mutex001: completed successfully."
+}
+
+# Test basic synchronization
+proc mutex002 { dir nlocks } {
+ source ./include.tcl
+
+ puts "Mutex002: Basic synchronization"
+ env_cleanup $dir
+
+ # Fork off child before we open any files.
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ flush $f1
+
+ # Open the environment and the mutex locally
+ set local_env [berkdb env -create -mode 0644 -lock -home $dir]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set local_mutex [$local_env mutex 0644 $nlocks]
+ error_check_good \
+ mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
+
+ # Open the environment and the mutex remotely
+ set remote_env [send_cmd $f1 "berkdb env -lock -home $dir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
+ error_check_good \
+ mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
+
+ # Do a get here, then set the value to be pid.
+ # On the remote side fire off a get and getval.
+ set r [$local_mutex get 1]
+ error_check_good lock_get $r 0
+
+ set r [$local_mutex setval 1 [pid]]
+	error_check_good mutex_setval $r 0
+
+ # Now have the remote side request the lock and check its
+ # value. Then wait 5 seconds, release the mutex and see
+ # what the remote side returned.
+ send_timed_cmd $f1 1 "$remote_mutex get 1"
+ send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
+
+ # Now sleep before resetting and releasing lock
+ tclsleep 5
+ set newv [expr [pid] - 1]
+ set r [$local_mutex setval 1 $newv]
+ error_check_good mutex_setval $r 0
+
+ set r [$local_mutex release 1]
+ error_check_good mutex_release $r 0
+
+ # Now get the result from the other script
+ # Timestamp
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Timestamp
+ set result [rcv_result $f1]
+
+ # Mutex value
+ set result [send_cmd $f1 "puts \$ret"]
+ error_check_good lock_get:remote_getval $result $newv
+
+ # Close down the remote
+ set ret [send_cmd $f1 "$remote_mutex close" 5]
+ # Not sure why we need this, but we do... an extra blank line
+	# gets output somewhere.
+ gets $f1 ret
+ error_check_good remote:mutex_close $ret 0
+
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+
+ catch { close $f1 } result
+
+ set ret [$local_mutex close]
+ error_check_good local:mutex_close $ret 0
+
+ set ret [$local_env close]
+ error_check_good local:env_close $ret 0
+
+ puts "Mutex002: completed successfully."
+}
+
+# Generate a bunch of parallel
+# testers that try to randomly obtain locks.
+proc mutex003 { dir iter nmutex procs mdegree wait } {
+ source ./include.tcl
+
+ puts "Mutex003: Multi-process random mutex test ($procs processes)"
+
+ env_cleanup $dir
+
+ # Now open the region we'll use for multiprocess testing.
+ set env [berkdb env -create -mode 0644 -lock -home $dir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set mutex [$env mutex 0644 $nmutex]
+ error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
+
+ error_check_good mutex_close [$mutex close] 0
+
+ # Now spawn off processes
+ set proclist {}
+ for { set i 0 } {$i < $procs} {incr i} {
+ puts "$tclsh_path\
+ $test_path/mutexscript.tcl $dir\
+ $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mutexscript.tcl $testdir/$i.mutexout $dir\
+ $iter $nmutex $wait $mdegree &]
+ lappend proclist $p
+ }
+ puts "Mutex003: $procs independent processes now running"
+ watch_procs
+ error_check_good env_close [$env close] 0
+ # Remove output files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $dir/$i.mutexout
+ }
+}
diff --git a/bdb/test/mutexscript.tcl b/bdb/test/mutexscript.tcl
new file mode 100644
index 00000000000..9a49e471186
--- /dev/null
+++ b/bdb/test/mutexscript.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutexscript.tcl,v 11.12 2000/11/21 22:14:56 dda Exp $
+#
+# Random mutex tester.
+# Usage: mutexscript dir numiters nmutex sleepint degree
+# dir: dir in which all the mutexes live.
+# numiters: Total number of iterations.
+# nmutex: Total number of mutexes.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
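+#
+# An illustrative argument vector (values are arbitrary; mutex003
+# normally launches this script through wrap.tcl with an equivalent
+# set of arguments):
+#
+#	mutexscript.tcl TESTDIR 500 20 2 3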
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "mutexscript dir numiters nmutex sleepint degree"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set nmutex [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set locker [pid]
+set mypid [sanitized_pid]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts -nonewline "Mutexscript: Beginning execution for $locker:"
+puts " $numiters $nmutex $sleepint $degree"
+flush stdout
+
+# Open the environment and the mutex
+set e [berkdb env -create -mode 0644 -lock -home $dir]
+error_check_good env_open [is_valid_env $e] TRUE
+
+set mutex [$e mutex 0644 $nmutex]
+error_check_good mutex_init [is_valid_mutex $mutex $e] TRUE
+
+# Sleep for awhile to make sure that everyone has gotten in
+tclsleep 5
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set mlist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj [expr $nmutex - 1]]
+ set lastobj [expr $obj + 1]
+ puts "[timestamp] $locker $lnum: $obj"
+
+ # Do get, set its val to own pid, and then add to list
+ error_check_good mutex_get:$obj [$mutex get $obj] 0
+ error_check_good mutex_setval:$obj [$mutex setval $obj $mypid] 0
+ lappend mlist $obj
+ if {$lastobj >= $nmutex} {
+ break
+ }
+ }
+
+ # Pick sleep interval
+ tclsleep [ berkdb random_int 1 $sleepint ]
+
+ # Now release locks
+ foreach i $mlist {
+ error_check_good mutex_getval:$i [$mutex getval $i] $mypid
+ error_check_good mutex_setval:$i \
+ [$mutex setval $i [expr 0 - $mypid]] 0
+ error_check_good mutex_release:$i [$mutex release $i] 0
+ }
+ puts "[timestamp] $locker released mutexes"
+ flush stdout
+}
+
+puts "[timestamp] $locker Complete"
+flush stdout
diff --git a/bdb/test/ndbm.tcl b/bdb/test/ndbm.tcl
new file mode 100644
index 00000000000..a6286de0266
--- /dev/null
+++ b/bdb/test/ndbm.tcl
@@ -0,0 +1,141 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ndbm.tcl,v 11.13 2000/08/25 14:21:51 sue Exp $
+#
+# Historic NDBM interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
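+#
+# An illustrative invocation (the entry count is arbitrary and defaults
+# to 1000 if omitted):
+#
+#	ndbm 500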
+proc ndbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "NDBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/ndbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ set db [berkdb ndbm_open -create -truncate -mode 0644 $testfile]
+ error_check_good ndbm_open [is_substr $db ndbm] 1
+ set did [open $dict]
+
+ error_check_good rdonly_false [$db rdonly] 0
+
+ set flags 0
+ set txn 0
+ set count 0
+ set skippednullkey 0
+
+ puts "\tNDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # NDBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [$db store $str $str insert]
+ error_check_good ndbm_store $ret 0
+
+ set d [$db fetch $str]
+ error_check_good ndbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tNDBM.b: dump file"
+ set oid [open $t1 w]
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tNDBM.c: pagf/dirf test"
+ set fd [$db pagfno]
+ error_check_bad pagf $fd -1
+ set fd [$db dirfno]
+ error_check_bad dirf $fd -1
+
+ puts "\tNDBM.d: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open -rdonly $testfile]
+ error_check_good ndbm_open2 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ error_check_good rdonly_true [$db rdonly] "rdonly:not owner"
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch2 $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tNDBM.e: sequential scan and delete"
+
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open $testfile]
+ error_check_good ndbm_open3 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set ret [$db delete $key]
+ error_check_good ndbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+ error_check_good ndbm_close [$db close] 0
+}
diff --git a/bdb/test/recd001.tcl b/bdb/test/recd001.tcl
new file mode 100644
index 00000000000..bbf5159011b
--- /dev/null
+++ b/bdb/test/recd001.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd001.tcl,v 11.28 2000/12/07 19:13:46 sue Exp $
+#
+# Recovery Test 1.
+# These are the most basic recovery tests. We do individual recovery
+# tests for each operation in the access method interface. First we
+# create a file and capture the state of the database (i.e., we copy
+# it). Then we run a transaction containing a single operation. In
+# one test, we abort the transaction and compare the outcome to the
+# original copy of the file. In the second test, we restore the
+# original copy of the database and then run recovery and compare
+# this against the actual database.
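+#
+# Illustrative invocations (the method name is an arbitrary example;
+# the optional select list limits the run to the named subtests):
+#
+#	recd001 btree
+#	recd001 btree {b c}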
+proc recd001 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd001: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+# op. If all goes well, this allows each command to depend on
+ # exactly one successful iteration of the previous command.
+ set testfile recd001.db
+ set testfile2 recd001-2.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd001.a.0: creating environment"
+ set env_cmd "berkdb env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+	# Note that we cannot specify db truncate in a txn-protected env.
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd001.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd001.b: put"}
+ { {DB del -txn TXNID $key} "Recd001.c: delete"}
+ { {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"}
+ { {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"}
+ { {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"}
+ { {DB del -txn TXNID $key} "Recd001.g: big data delete"}
+ { {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"}
+ { {DB put -txn TXNID -partial {$off $len} $key $partial_grow}
+ "Recd001.j: partial put growing"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"}
+ { {DB put -txn TXNID -partial {$off $len} $key $partial_shrink}
+ "Recd001.l: partial put shrinking"}
+ { {DB put -txn TXNID -append $data} "Recd001.m: put -append"}
+ { {DB get -txn TXNID -consume} "Recd001.n: db get -consume"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd001_key
+ }
+ set data recd001_data
+ set newdata NEWrecd001_dataNEW
+ set off 3
+ set len 12
+ set partial_grow replacement_record_grow
+ set partial_shrink xxx
+ if { [is_fixed_length $method] == 1 } {
+ set len [string length $partial_grow]
+ set partial_shrink $partial_grow
+ }
+ set bigdata [replicate $key $fixed_len]
+ if { [is_record_based $method] == 1 } {
+ set bigkey $fixed_len
+ } else {
+ set bigkey [replicate $key $fixed_len]
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ op_recover prepare-abort $testdir $env_cmd $testfile2 $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+
+ puts "\tRecd001.o: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/bdb/test/recd002.tcl b/bdb/test/recd002.tcl
new file mode 100644
index 00000000000..ffcec6527e8
--- /dev/null
+++ b/bdb/test/recd002.tcl
@@ -0,0 +1,96 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd002.tcl,v 11.22 2000/12/11 17:24:54 sue Exp $
+#
+# Recovery Test #2. Verify that splits can be recovered.
+proc recd002 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd002: skipping for specific pagesizes"
+ return
+ }
+ berkdb srand $rand_init
+
+	# Queues don't do splits, so we don't really need the small page
+	# size; moreover, the small page size is smaller than the record,
+	# which would be a problem.
+ if { [string compare $omethod "-queue"] == 0 } {
+ set pagesize 4096
+ } else {
+ set pagesize 512
+ }
+ puts "Recd002: $method split recovery tests"
+
+ env_cleanup $testdir
+ set testfile recd002.db
+ set testfile2 recd002-2.db
+ set eflags \
+ "-create -txn -lock_max 2000 -home $testdir"
+
+ puts "\tRecd002.a: creating environment"
+ set env_cmd "berkdb env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that splits
+ # happen fairly quickly.
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {populate DB $omethod TXNID $n 0 0} "Recd002.b: splits"}
+ { {unpopulate DB TXNID $r} "Recd002.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ set r [expr $n / 2 ]
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd002.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/bdb/test/recd003.tcl b/bdb/test/recd003.tcl
new file mode 100644
index 00000000000..af7097c8909
--- /dev/null
+++ b/bdb/test/recd003.tcl
@@ -0,0 +1,111 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd003.tcl,v 11.22 2000/12/07 19:13:46 sue Exp $
+#
+# Recovery Test 3.
+# Test all the duplicate log messages and recovery operations. We make
+# sure that we exercise all possible recovery actions: redo, undo, undo
+# but no fix necessary, and redo but no fix necessary.
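+#
+# Illustrative invocations (the method name is an arbitrary example;
+# the optional select list limits the run to the named subtests):
+#
+#	recd003 btree
+#	recd003 btree {b c}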
+proc recd003 { method {select 0} args } {
+ source ./include.tcl
+ global rand_init
+
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd003 skipping for method $method"
+ return
+ }
+ puts "Recd003: $method duplicate recovery tests"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ # See comment in recd001.tcl for why there are two database files...
+ set testfile recd003.db
+ set testfile2 recd003-2.db
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd003.a: creating environment"
+ set env_cmd "berkdb env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases.
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+ set n 10
+ set dupn 2000
+ set bign 500
+
+ # List of recovery tests: {CMD MSG} pairs
+ set dlist {
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.b: add dups"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.c: remove dups all at once"}
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.d: add dups (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.e: remove dups 1 at a time"}
+ { {populate DB $omethod TXNID $dupn 1 0}
+ "Recd003.f: dup split"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.g: remove dups (change state)"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.h: add big dup"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.i: remove big dup all at once"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.j: add big dup (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.k: remove big dup 1 at a time"}
+ { {populate DB $omethod TXNID $bign 1 1}
+ "Recd003.l: split big dup"}
+ }
+
+ foreach pair $dlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd003.m: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/bdb/test/recd004.tcl b/bdb/test/recd004.tcl
new file mode 100644
index 00000000000..012dd80f6e5
--- /dev/null
+++ b/bdb/test/recd004.tcl
@@ -0,0 +1,90 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd004.tcl,v 11.21 2000/12/11 17:24:55 sue Exp $
+#
+# Recovery Test #4.
+# Verify that we work correctly when big keys get elevated.
+proc recd004 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd004: skipping for specific pagesizes"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ puts "Recd004 skipping for method $method"
+ return
+ }
+ puts "Recd004: $method big-key on internal page recovery tests"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ set testfile recd004.db
+ set testfile2 recd004-2.db
+ set eflags "-create -txn -home $testdir"
+ puts "\tRecd004.a: creating environment"
+ set env_cmd "berkdb env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that we
+ # elevate quickly
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {big_populate DB TXNID $n} "Recd004.b: big key elevation"}
+ { {unpopulate DB TXNID 0} "Recd004.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd004.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/bdb/test/recd005.tcl b/bdb/test/recd005.tcl
new file mode 100644
index 00000000000..06a346f4484
--- /dev/null
+++ b/bdb/test/recd005.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd005.tcl,v 11.27 2000/12/15 21:41:38 ubell Exp $
+#
+# Recovery Test 5.
+# Make sure that we can do catastrophic recovery even if we open
+# files using the same log file id.
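+#
+# An illustrative invocation (the method name is an arbitrary example):
+#
+#	recd005 btree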
+proc recd005 { method args} {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+	puts "Recd005: $method catastrophic recovery"
+
+ berkdb srand $rand_init
+
+ set testfile1 recd005.1.db
+ set testfile2 recd005.2.db
+ set eflags \
+ "-create -txn -lock_max 2000 -lock_max_objects 2000 -home $testdir"
+
+ set tnum 0
+ foreach sizes "{1000 10} {10 1000}" {
+ foreach ops "{abort abort} {abort commit} {commit abort} \
+ {commit commit}" {
+ env_cleanup $testdir
+ incr tnum
+
+ set s1 [lindex $sizes 0]
+ set s2 [lindex $sizes 1]
+ set op1 [lindex $ops 0]
+ set op2 [lindex $ops 1]
+ puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
+
+ puts "\tRecd005.$tnum.a: creating environment"
+ set env_cmd "berkdb env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the two databases.
+ set oflags \
+ "-create -mode 0644 -env $dbenv $args $omethod"
+ set db1 [eval {berkdb_open} $oflags $testfile1]
+ error_check_bad db_open $db1 NULL
+ error_check_good db_open [is_substr $db1 db] 1
+ error_check_good db_close [$db1 close] 0
+
+ set db2 [eval {berkdb_open} $oflags $testfile2]
+ error_check_bad db_open $db2 NULL
+ error_check_good db_open [is_substr $db2 db] 1
+ error_check_good db_close [$db2 close] 0
+ $dbenv close
+
+ set dbenv [eval $env_cmd]
+ puts "\tRecd005.$tnum.b: Populating databases"
+ do_one_file \
+ $testdir $method $dbenv $env_cmd $testfile1 $s1 $op1
+ do_one_file \
+ $testdir $method $dbenv $env_cmd $testfile2 $s2 $op2
+
+ puts "\tRecd005.$tnum.c: Verifying initial population"
+ check_file $testdir $env_cmd $testfile1 $op1
+ check_file $testdir $env_cmd $testfile2 $op2
+
+ # Now, close the environment (so that recovery will work
+ # on NT which won't allow delete of an open file).
+ reset_env $dbenv
+
+ berkdb debug_check
+ puts -nonewline \
+ "\tRecd005.$tnum.d: About to run recovery ... "
+ flush stdout
+
+ set stat [catch \
+ {exec $util_path/db_recover -h $testdir -c} \
+ result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ # Substitute a file that will need recovery and try
+ # running recovery again.
+ if { $op1 == "abort" } {
+ file copy -force $testdir/$testfile1.afterop \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile1.init \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 init copy
+ }
+ if { $op2 == "abort" } {
+ file copy -force $testdir/$testfile2.afterop \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile2.init \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 init copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\tRecd005.$tnum.e:\
+ About to run recovery on pre-op database ... "
+ flush stdout
+
+ set stat \
+ [catch {exec $util_path/db_recover \
+ -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ set dbenv [eval $env_cmd]
+ check_file $testdir $env_cmd $testfile1 $op1
+ check_file $testdir $env_cmd $testfile2 $op2
+ reset_env $dbenv
+
+ puts "\tRecd005.$tnum.f:\
+ Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch \
+ {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+ }
+ }
+}
+
+proc do_one_file { dir method env env_cmd filename num op } {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ # Save the initial file and open the environment and the first file
+ file copy -force $dir/$filename $dir/$filename.init
+ copy_extent_file $dir $filename init
+ set oflags "-unknown -env $env"
+ set db [eval {berkdb_open} $oflags $filename]
+
+ # Dump out file contents for initial case
+ set tflags ""
+ open_and_dump_file $filename $env $tflags $init_file nop \
+ dump_file_direction "-first" "-next"
+
+ set txn [$env txn]
+ error_check_bad txn_begin $txn NULL
+ error_check_good txn_begin [is_substr $txn $env] 1
+
+ # Now fill in the db and the txnid in the command
+ populate $db $method $txn $num 0 0
+
+ # Sync the file so that we can capture a snapshot to test
+ # recovery.
+ error_check_good sync:$db [$db sync] 0
+ file copy -force $dir/$filename $dir/$filename.afterop
+ copy_extent_file $dir $filename afterop
+ open_and_dump_file $testdir/$filename.afterop NULL $tflags \
+ $afterop_file nop dump_file_direction "-first" "-next"
+ error_check_good txn_$op:$txn [$txn $op] 0
+
+ if { $op == "commit" } {
+ puts "\t\tFile $filename executed and committed."
+ } else {
+ puts "\t\tFile $filename executed and aborted."
+ }
+
+ # Dump out file and save a copy.
+ error_check_good sync:$db [$db sync] 0
+ open_and_dump_file $testdir/$filename NULL $tflags $final_file nop \
+ dump_file_direction "-first" "-next"
+ file copy -force $dir/$filename $dir/$filename.final
+ copy_extent_file $dir $filename final
+
+ # If this is an abort, it should match the original file.
+ # If this was a commit, then this file should match the
+ # afterop file.
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+
+ error_check_good close:$db [$db close] 0
+}
+
+proc check_file { dir env_cmd filename op } {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ set tflags ""
+ open_and_dump_file $testdir/$filename NULL $tflags $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+
+}
diff --git a/bdb/test/recd006.tcl b/bdb/test/recd006.tcl
new file mode 100644
index 00000000000..14f01cc0b8f
--- /dev/null
+++ b/bdb/test/recd006.tcl
@@ -0,0 +1,262 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd006.tcl,v 11.21 2000/12/07 19:13:46 sue Exp $
+#
+# Recovery Test 6.
+# Test nested transactions.
+proc recd006 { method {select 0} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd006 skipping for method $method"
+ return
+ }
+ puts "Recd006: $method nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd006.db
+ set testfile $testdir/$dbfile
+
+ puts "\tRecd006.a: create database"
+ set oflags "-create $args $omethod $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set ret [$db put -nooverwrite $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+
+ # Variables used below:
+ # p1: a pair of keys that are likely to be on the same page.
+ # p2: a pair of keys that are likely to be on the same page,
+ # but on a page different than those in p1.
+ set dbc [$db cursor]
+ error_check_good dbc [is_substr $dbc $db] 1
+
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:DB_FIRST [llength $ret] 0
+ set p1 [lindex [lindex $ret 0] 0]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:DB_NEXT [llength $ret] 0
+ lappend p1 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -last]
+ error_check_bad dbc_get:DB_LAST [llength $ret] 0
+ set p2 [lindex [lindex $ret 0] 0]
+ set kvals($p2) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:DB_PREV [llength $ret] 0
+ lappend p2 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ # Now create the full transaction environment.
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd006.b: creating environment"
+ set env_cmd "berkdb env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Reset the environment.
+ reset_env $dbenv
+
+ set p1 [list $p1]
+ set p2 [list $p2]
+
+ # List of recovery tests: {CMD MSG} pairs
+ set rlist {
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit commit}
+ "Recd006.c: children (commit commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit commit}
+ "Recd006.d: children (commit commit)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit abort}
+ "Recd006.e: children (commit abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit abort}
+ "Recd006.f: children (commit abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort abort}
+ "Recd006.g: children (abort abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort abort}
+ "Recd006.h: children (abort abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort commit}
+ "Recd006.i: children (abort commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort commit}
+ "Recd006.j: children (abort commit)"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg
+ }
+
+ puts "\tRecd006.k: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+# Do the nested transaction test.
+# We want to make sure that children inherit properly from their
+# parents, that locks are properly handed back to parents, and that
+# the right thing happens on commit/abort.
+# In particular:
+# Write lock on parent, properly acquired by child.
+# Committed operation on child gives lock to parent so that
+# other child can also get the lock.
+# Aborted op by child releases lock so other child can get it.
+# Correct database state if child commits
+# Correct database state if child aborts
+proc nesttest { db parent env do p1 p2 child1 child2} {
+ global kvals
+ source ./include.tcl
+
+ if { $do == 1 } {
+ set func toupper
+ } else {
+ set func tolower
+ }
+
+ # Do an RMW on the parent to get a write lock.
+ set p10 [lindex $p1 0]
+ set p11 [lindex $p1 1]
+ set p20 [lindex $p2 0]
+ set p21 [lindex $p2 1]
+
+ set ret [$db get -rmw -txn $parent $p10]
+ set res $ret
+ set Dret [lindex [lindex $ret 0] 1]
+ if { [string compare $Dret $kvals($p10)] == 0 ||
+ [string compare $Dret [string toupper $kvals($p10)]] == 0 } {
+ set val 0
+ } else {
+ set val $Dret
+ }
+ error_check_good get_parent_RMW $val 0
+
+ # OK, do child 1
+ set kid1 [$env txn -parent $parent]
+ error_check_good kid1 [is_valid_widget $kid1 $env.txn] TRUE
+
+ # Reading write-locked parent object should be OK
+ #puts "\tRead write-locked parent object for kid1."
+ set ret [$db get -txn $kid1 $p10]
+ error_check_good kid1_get10 $ret $res
+
+ # Now update this child
+ set data [lindex [lindex [string $func $ret] 0] 1]
+ set ret [$db put -txn $kid1 $p10 $data]
+ error_check_good kid1_put10 $ret 0
+
+ #puts "\tKid1 successful put."
+
+ # Now start child2
+ #puts "\tBegin txn for kid2."
+ set kid2 [$env txn -parent $parent]
+ error_check_good kid2 [is_valid_widget $kid2 $env.txn] TRUE
+
+ # Getting anything in the p1 set should deadlock, so let's
+ # work on the p2 set.
+ set data [string $func $kvals($p20)]
+ #puts "\tPut data for kid2."
+ set ret [$db put -txn $kid2 $p20 $data]
+ error_check_good kid2_put20 $ret 0
+
+ #puts "\tKid2 data put successful."
+
+ # Now let's do the right thing to kid1
+ puts -nonewline "\tKid1 $child1..."
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good kid1_commit [$kid1 commit] 0
+ } else {
+ error_check_good kid1_abort [$kid1 abort] 0
+ }
+ puts "complete"
+
+ # In either case, child2 should now be able to get the
+ # lock, either because it is inherited by the parent
+ # (commit) or because it was released (abort).
+ set data [string $func $kvals($p11)]
+ set ret [$db put -txn $kid2 $p11 $data]
+ error_check_good kid2_put11 $ret 0
+
+ # Now let's do the right thing to kid2
+ puts -nonewline "\tKid2 $child2..."
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good kid2_commit [$kid2 commit] 0
+ } else {
+ error_check_good kid2_abort [$kid2 abort] 0
+ }
+ puts "complete"
+
+ # Now, let parent check that the right things happened.
+ # First get all four values
+ set p10_check [lindex [lindex [$db get -txn $parent $p10] 0] 0]
+ set p11_check [lindex [lindex [$db get -txn $parent $p11] 0] 0]
+ set p20_check [lindex [lindex [$db get -txn $parent $p20] 0] 0]
+ set p21_check [lindex [lindex [$db get -txn $parent $p21] 0] 0]
+
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good parent_kid1 $p10_check \
+ [string tolower [string $func $kvals($p10)]]
+ } else {
+ error_check_good \
+ parent_kid1 $p10_check [string tolower $kvals($p10)]
+ }
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good parent_kid2 $p11_check \
+ [string tolower [string $func $kvals($p11)]]
+ error_check_good parent_kid2 $p20_check \
+ [string tolower [string $func $kvals($p20)]]
+ } else {
+ error_check_good parent_kid2 $p11_check $kvals($p11)
+ error_check_good parent_kid2 $p20_check $kvals($p20)
+ }
+
+ # Now do a write on the parent for p21, whose lock the parent
+ # should either already hold or be able to acquire.
+ set ret [$db put -txn $parent $p21 [string $func $kvals($p21)]]
+ error_check_good parent_put21 $ret 0
+
+ return 0
+}
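A minimal sketch of the lock-inheritance pattern nesttest exercises, reduced to its core. It assumes the same berkdb Tcl API used throughout this test file; the environment home, database name, and key/data values below are placeholders, not anything taken from the test itself.

    set env [berkdb env -create -txn -home $testdir]
    set db [berkdb_open -create -btree -env $env test.db]
    set parent [$env txn]
    $db put -txn $parent key parentdata    ;# parent takes the write lock
    set kid1 [$env txn -parent $parent]
    $db put -txn $kid1 key kid1data        ;# child may use the parent's lock
    $kid1 commit                           ;# lock is handed back to the parent
    set kid2 [$env txn -parent $parent]
    $db put -txn $kid2 key kid2data        ;# second child can now get it too
    $kid2 commit
    $parent commit
    error_check_good db_close [$db close] 0
    error_check_good env_close [$env close] 0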
diff --git a/bdb/test/recd007.tcl b/bdb/test/recd007.tcl
new file mode 100644
index 00000000000..d077ae19f2c
--- /dev/null
+++ b/bdb/test/recd007.tcl
@@ -0,0 +1,723 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd007.tcl,v 11.38 2000/12/20 21:39:23 krinsky Exp $
+#
+# Recovery Test 7.
+# This is a recovery test for create/delete of databases. We have
+# hooks in the database so that we can abort the process at various
+# points and make sure that the transaction doesn't commit. We
+# then need to recover and make sure the file is correctly existing
+# or not, as the case may be.
+proc recd007 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd007: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd007.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd007.a: creating environment"
+ set env_cmd "berkdb env $flags"
+
+ set env [eval $env_cmd]
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 -env $env $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $env $testfile] 0
+ error_check_good envclose [$env close] 0
+
+ # Convert the args again because fixed_len is now real.
+ set opts [convert_args $method ""]
+
+ #
+ # List of recovery tests: {HOOKS MSG} pairs
+ # Where each HOOK is a list of {COPY ABORT}
+ #
+ set rlist {
+ { {"none" "preopen"} "Recd007.b0: none/preopen"}
+ { {"none" "postopen"} "Recd007.b1: none/postopen"}
+ { {"none" "postlogmeta"} "Recd007.b2: none/postlogmeta"}
+ { {"none" "postlog"} "Recd007.b3: none/postlog"}
+ { {"none" "postsync"} "Recd007.b4: none/postsync"}
+ { {"postopen" "none"} "Recd007.c0: postopen/none"}
+ { {"postlogmeta" "none"} "Recd007.c1: postlogmeta/none"}
+ { {"postlog" "none"} "Recd007.c2: postlog/none"}
+ { {"postsync" "none"} "Recd007.c3: postsync/none"}
+ { {"postopen" "postopen"} "Recd007.d: postopen/postopen"}
+ { {"postopen" "postlogmeta"} "Recd007.e: postopen/postlogmeta"}
+ { {"postopen" "postlog"} "Recd007.f: postopen/postlog"}
+ { {"postlog" "postlog"} "Recd007.g: postlog/postlog"}
+ { {"postlogmeta" "postlogmeta"} "Recd007.h: postlogmeta/postlogmeta"}
+ { {"postlogmeta" "postlog"} "Recd007.i: postlogmeta/postlog"}
+ { {"postlog" "postsync"} "Recd007.j: postlog/postsync"}
+ { {"postsync" "postsync"} "Recd007.k: postsync/postsync"}
+ }
+
+ # Read through the operation table and run the create recovery tests.
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg
+ }
+
+ set rlist {
+ { {"none" "prerename"} "Recd007.l0: none/prerename"}
+ { {"none" "postrename"} "Recd007.l1: none/postrename"}
+ { {"prerename" "none"} "Recd007.m0: prerename/none"}
+ { {"postrename" "none"} "Recd007.m1: postrename/none"}
+ { {"prerename" "prerename"} "Recd007.n: prerename/prerename"}
+ { {"prerename" "postrename"} "Recd007.o: prerename/postrename"}
+ { {"postrename" "postrename"} "Recd007.p: postrename/postrename"}
+ }
+ foreach op { dbremove dbrename } {
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg $op
+ }
+ }
+
+ if { $is_windows_test != 1 } {
+ do_file_recover_delmk $testdir $env_cmd $omethod $opts $testfile
+ }
+
+ puts "\tRecd007.r: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc file_recover_create { dir env_cmd method opts dbfile cmd msg } {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Creating just a database
+ # 2. Creating a database with a subdb
+ # 3. Creating a 2nd subdb in a database
+ puts "\t$msg create with a database"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg
+ if { [is_queue $method] == 1 } {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg create with a database and subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg
+ puts "\t$msg create with a database and 2nd subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg
+
+}
+
+proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_create_loc $copy] 1
+ error_check_good abort_location [is_valid_create_loc $abort] 1
+
+ if {([string first "logmeta" $copy] != -1 || \
+ [string first "logmeta" $abort] != -1) && \
+ [is_btree $method] == 0 } {
+ puts "\tSkipping for method $method"
+ $env test copy none
+ $env test abort none
+ error_check_good env_close [$env close] 0
+ return
+ }
+
+ #
+ # Basically non-existence is our initial state. When we
+ # abort, it is also our final state.
+ #
+ switch $sub {
+ 0 {
+ set oflags "-create $method -mode 0644 \
+ -env $env $opts $dbfile"
+ }
+ 1 {
+ set oflags "-create $method -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second
+ #
+ set oflags "-create $method -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ set oflags "-create $method -mode 0644 \
+ -env $env $opts $dbfile sub1"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ puts "\t\tExecuting command"
+ set ret [catch {eval {berkdb_open} $oflags} db]
+
+ # Sync the mpool so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ puts "\t\tSyncing"
+ $env mpool_sync "0 0"
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file created.
+ #
+ if {[string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_bad db_open $ret 0
+
+ #
+ # Check that the file does not exist. Final state.
+ #
+ if { $sub != 2 } {
+ error_check_good db_open:exists \
+ [file exists $dir/$dbfile] 0
+ } else {
+ error_check_good \
+ diff(init,postcreate):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ }
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ #
+ # Check that the file exists.
+ #
+ error_check_good db_open [file exists $dir/$dbfile] 1
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 } {
+ copy_extent_file $dir $dbfile init
+ }
+ }
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on sub)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover1 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 } {
+ copy_extent_file $dir $dbfile init
+ }
+ }
+
+ #
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ file copy -force $dir/$dbfile.afterop $dir/$dbfile
+
+ if { [is_queue $method] == 1 } {
+ move_file_extent $dir $dbfile afterop copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover2 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ }
+
+}
+
+proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op } {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Deleting/Renaming just a database
+ # 2. Deleting/Renaming a database with a subdb
+ # 3. Deleting/Renaming a 2nd subdb in a database
+ puts "\t$msg $op with a database"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg $op
+ if { [is_queue $method] == 1 } {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg $op with a database and subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg $op
+ puts "\t$msg $op with a database and 2nd subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg $op
+
+}
+
+proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_delete_loc $copy] 1
+ error_check_good abort_location [is_valid_delete_loc $abort] 1
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd007_key
+ }
+ set data1 recd007_data
+ set data2 NEWrecd007_data2
+
+ #
+ # Depending on what sort of subdb we want, if any, our
+ # args to the open call will be different (and if we
+ # want a 2nd subdb, we create the first one here).
+ #
+ switch $sub {
+ 0 {
+ set oflags "-create $method -mode 0644 \
+ -env $env $opts $dbfile"
+ }
+ 1 {
+ set oflags "-create $method -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second
+ #
+ set oflags "-create $method -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key $data2]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ set oflags "-create $method -mode 0644 \
+ -env $env $opts $dbfile sub1"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1} {
+ copy_extent_file $dir $dbfile init
+ }
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed.
+ #
+ if { [string compare $op dbremove] == 0 } {
+ set ret [catch { berkdb $op -env $env $dbfile } remret]
+ } else {
+ set ret [catch { berkdb $op -env $env $dbfile $dbfile.new } \
+ remret]
+ }
+ if {[string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_good $op $ret 1
+
+ #
+ # Check that the file exists. Final state.
+ # Compare against initial file.
+ #
+ error_check_good post$op.1 [file exists $dir/$dbfile] 1
+ error_check_good \
+ diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good $op $ret 0
+ #
+ # Check that the file does not exist, or that the
+ # correctly renamed file exists.
+ #
+ error_check_good $op [file exists $dir/$dbfile] 0
+ if { [string compare $op dbrename] == 0 } {
+ error_check_good $op [file exists $dir/$dbfile.new] 1
+ }
+ }
+ error_check_good env_close [$env close] 0
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1} {
+ copy_extent_file $dir $dbfile init
+ }
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on abort)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { [string first "none" $abort] != -1} {
+ #
+ # Operation was committed, verify it still does
+ # not exist.
+ #
+ error_check_good after_recover1 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ }
+
+ #
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ set filecopy [glob $dir/*.afterop]
+ set afterop [lindex $filecopy 0]
+ file rename -force $afterop $dir/$dbfile
+ set afterop [string range $afterop \
+ [expr [string last "/" $afterop] + 1] \
+ [string last "." $afterop]]
+ move_file_extent $dir $dbfile afterop rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { [string first "none" $abort] != -1} {
+ #
+ # Operation was committed, verify it still does
+ # not exist.
+ #
+ error_check_good after_recover2 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ }
+
+}
+
+#
+# This function tests a specific case of recovering after a db removal.
+# This is for SR #2538. Basically we want to test the following sequence:
+# - Make an env.
+# - Make/close a db.
+# - Remove the db.
+# - Create another db of same name.
+# - Sync db but leave open.
+# - Run recovery.
+# - Verify no recovery errors and that new db is there.
+proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ puts "\tRecd007.q: Delete and recreate a database"
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd007_key
+ }
+ set data1 recd007_data
+ set data2 NEWrecd007_data2
+
+ set oflags "-create $method -mode 0644 -env $env $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ set ret [catch { berkdb dbremove -env $env $dbfile } remret]
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good dbremove $ret 0
+ error_check_good dbremove.1 [file exists $dir/$dbfile] 0
+
+ #
+ # Now create a new db with the same name.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ error_check_good db_recover $stat 0
+ error_check_good db_recover.1 [file exists $dir/$dbfile] 1
+ #
+ # Since we ran recovery on the open db/env, we need to
+ # catch these calls. Basically they are there to clean
+ # up the Tcl widgets.
+ #
+ set stat [catch {$db close} ret]
+ set stat [catch {$env close} ret]
+
+}
+proc is_valid_create_loc { loc } {
+ switch $loc {
+ none -
+ preopen -
+ postopen -
+ postlogmeta -
+ postlog -
+ postsync
+ { return 1 }
+ default
+ { return 0 }
+ }
+}
+
+proc is_valid_delete_loc { loc } {
+ switch $loc {
+ none -
+ prerename -
+ postrename -
+ postremcall
+ { return 1 }
+ default
+ { return 0 }
+ }
+}
+
+# Do a logical diff on the db dump files. We expect that either
+# the files are identical, or if they differ, that it is exactly
+# just a free/invalid page.
+# Return 1 if they are different, 0 if logically the same (or identical).
+#
+proc dbdump_diff { initfile dbfile } {
+ source ./include.tcl
+
+ set initdump $initfile.dump
+ set dbdump $dbfile.dump
+
+ set stat [catch {exec $util_path/db_dump -dar -f $initdump \
+ $initfile} ret]
+ error_check_good dbdump.init $stat 0
+
+ # Do a dump without the freelist which should eliminate any
+ # recovery differences.
+ set stat [catch {exec $util_path/db_dump -dar -f $dbdump $dbfile} \
+ ret]
+ error_check_good dbdump.db $stat 0
+
+ set stat [filecmp $dbdump $initdump]
+
+ if {$stat == 0} {
+ return 0
+ }
+ puts "diff: $dbdump $initdump gives:\n$ret"
+ return 1
+}
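The create/delete hooks recd007 drives through `$env test copy` and `$env test abort` are the heart of this file. A minimal sketch of the pattern, assuming the berkdb Tcl test extensions used above and the $env_cmd/$testdir setup from recd007 (the database name below is a placeholder):

    set env [eval $env_cmd]
    error_check_good test_copy [$env test copy postlog] 0
    error_check_good test_abort [$env test abort postlog] 0
    # The hooked create is expected to fail once the abort point fires.
    set ret [catch {berkdb_open -create -btree -env $env recd007.db} db]
    error_check_bad hooked_open $ret 0
    # Always reset the hooks before closing the environment.
    $env test copy none
    $env test abort none
    error_check_good env_close [$env close] 0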
diff --git a/bdb/test/recd008.tcl b/bdb/test/recd008.tcl
new file mode 100644
index 00000000000..b75605b0475
--- /dev/null
+++ b/bdb/test/recd008.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd008.tcl,v 1.22 2000/12/07 19:13:46 sue Exp $
+#
+# Recovery Test 8.
+# Test deeply nested transactions and many-child transactions.
+proc recd008 { method {breadth 4} {depth 4} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Recd008 skipping for method $method"
+ return
+ }
+ puts "Recd008: $method $breadth X $depth deeply nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd008.db
+
+ puts "\tRecd008.a: create database"
+ set db [eval {berkdb_open -create} $args $omethod $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $count == 500} {
+ set p1 $key
+ set kvals($p1) $str
+ }
+ set ret [$db put $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+
+ set txn_max [expr int([expr pow($breadth,$depth)])]
+ if { $txn_max < 20 } {
+ set txn_max 20
+ }
+ puts "\tRecd008.b: create environment for $txn_max transactions"
+
+ set eflags "-mode 0644 -create -txn_max $txn_max \
+ -txn -home $testdir"
+ set env_cmd "berkdb env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ reset_env $dbenv
+
+ set rlist {
+ { {recd008_parent abort ENV DB $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.c: child abort parent" }
+ { {recd008_parent commit ENV DB $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.d: child commit parent" }
+ }
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg
+ recd008_setkval $dbfile $p1
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg
+ recd008_setkval $dbfile $p1
+ }
+
+ puts "\tRecd008.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc recd008_setkval { dbfile p1 } {
+ global kvals
+ source ./include.tcl
+
+ set db [berkdb_open $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get $p1]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+}
+
+# This is a lot like the op_recover procedure. We cannot use that
+# because it was not meant to be called recursively. This proc
+# knows about depth/breadth and file naming so that recursive calls
+# don't overwrite various initial and afterop files, etc.
+#
+# The basic flow of this is:
+# (Initial file)
+# Parent begin transaction (in op_recover)
+# Parent starts children
+# Recursively call recd008_recover
+# (children modify p1)
+# Parent modifies p1
+# (Afterop file)
+# Parent commit/abort (in op_recover)
+# (Final file)
+# Recovery test (in op_recover)
+proc recd008_parent { op env db p1key parent b0 d0 breadth depth } {
+ global kvals
+ source ./include.tcl
+
+ #
+ # Save copy of original data
+ # Acquire lock on data
+ #
+ set olddata $kvals($p1key)
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Parent spawns off children
+ #
+ set ret [recd008_txn $op $env $db $p1key $parent \
+ $b0 $d0 $breadth $depth]
+
+ puts "Child runs complete. Parent modifies data."
+
+ #
+ # Parent modifies p1
+ #
+ set newdata $olddata.parent
+ set ret [$db put -txn $parent $p1key $newdata]
+ error_check_good db_put $ret 0
+
+ #
+ # Save value in kvals for later comparison
+ #
+ switch $op {
+ "commit" {
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ set kvals($p1key) $olddata
+ }
+ }
+ return 0
+}
+
+proc recd008_txn { op env db p1key parent b0 d0 breadth depth } {
+ global log_log_record_types
+ global kvals
+ source ./include.tcl
+
+ for {set d 1} {$d < $d0} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Recd008_txn: $op parent:$parent $breadth $depth ($b0 $d0)"
+
+ # Spawn a child transaction for each slot at this breadth level.
+ for {set b $b0} {$b <= $breadth} {incr b} {
+ #
+ # Begin child transaction
+ #
+ set t [$env txn -parent $parent]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ set startd [expr $d0 + 1]
+ set child $b:$startd:$t
+ set olddata $kvals($p1key)
+ set newdata $olddata.$child
+ set ret [$db get -rmw -txn $t $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Recursively call to set up nested transactions/children
+ #
+ for {set d $startd} {$d <= $depth} {incr d} {
+ set ret [recd008_txn commit $env $db $p1key $t \
+ $b $d $breadth $depth]
+ set ret [recd008_txn abort $env $db $p1key $t \
+ $b $d $breadth $depth]
+ }
+ #
+ # Modifies p1.
+ #
+ set ret [$db put -txn $t $p1key $newdata]
+ error_check_good db_put $ret 0
+
+ #
+ # Commit or abort
+ #
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Executing txn_$op:$t"
+ error_check_good txn_$op:$t [$t $op] 0
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ switch $op {
+ "commit" {
+ puts "Command executed and committed."
+ error_check_good get_parent_RMW $Dret $newdata
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ puts "Command executed and aborted."
+ error_check_good get_parent_RMW $Dret $olddata
+ set kvals($p1key) $olddata
+ }
+ }
+ }
+ return 0
+}
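Recd008 sizes the environment's -txn_max from its breadth and depth arguments. A quick worked check of the default 4 x 4 configuration, runnable in plain tclsh with no Berkeley DB needed:

    set breadth 4
    set depth 4
    # pow(4,4) = 256 nested transactions, with a floor of 20.
    set txn_max [expr int([expr pow($breadth,$depth)])]
    if { $txn_max < 20 } {
        set txn_max 20
    }
    puts "txn_max = $txn_max"    ;# prints: txn_max = 256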
diff --git a/bdb/test/recd009.tcl b/bdb/test/recd009.tcl
new file mode 100644
index 00000000000..2b49437346c
--- /dev/null
+++ b/bdb/test/recd009.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd009.tcl,v 1.13 2000/12/07 19:13:46 sue Exp $
+#
+# Recovery Test 9.
+# Test stability of record numbers across splits
+# and reverse splits and across recovery.
+proc recd009 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ if { [is_rbtree $method] != 1 && [is_rrecno $method] != 1} {
+ puts "Recd009 skipping for method $method."
+ return
+ }
+
+ set opts [convert_args $method $args]
+ set method [convert_method $method]
+
+ puts "\tRecd009: Test record numbers across splits and recovery"
+
+ set testfile recd009.db
+ env_cleanup $testdir
+ set mkeys 1000
+ set nkeys 5
+ set data "data"
+
+ puts "\tRecd009.a: Create $method environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd009.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { [is_recno $method] == 1 } {
+ set key $i
+ } else {
+ set key key000$i
+ }
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ set newnkeys [expr $nkeys + 1]
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {recd009_split DB TXNID 1 $method $newnkeys $mkeys}
+ "Recd009.c: split"}
+ { {recd009_split DB TXNID 0 $method $newnkeys $mkeys}
+ "Recd009.d: reverse split"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ if { $reverse == -1 } {
+ set abortkeys $nkeys
+ set commitkeys $mkeys
+ set abortpg 0
+ set commitpg 1
+ } else {
+ set abortkeys $mkeys
+ set commitkeys $nkeys
+ set abortpg 1
+ set commitpg 0
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ recd009_recnocheck $testdir $testfile $opts $abortkeys $abortpg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ recd009_recnocheck $testdir $testfile $opts \
+ $commitkeys $commitpg
+ }
+ puts "\tRecd009.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database has no more than numkeys
+# keys in record-number order, and checks the internal page count
+# against numpg.
+#
+proc recd009_recnocheck { tdir testfile opts numkeys numpg} {
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRecd009_recnocheck: Verify page count of $numpg on split."
+ set stat [$db stat]
+ error_check_bad stat:check-split [is_substr $stat \
+ "{{Internal pages} 0}"] $numpg
+
+ set type [$db get_type]
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ set i 1
+ puts "\tRecd009_recnocheck: Checking $numkeys record numbers."
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ if { [is_btree $type] } {
+ set thisi [$dbc get -get_recno]
+ } else {
+ set thisi [lindex [lindex $d 0] 0]
+ }
+ error_check_good recno_check $i $thisi
+ error_check_good record_count [expr $i <= $numkeys] 1
+ incr i
+ }
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+proc recd009_split { db txn split method nkeys mkeys } {
+ global errorCode
+ source ./include.tcl
+
+ set data "data"
+
+ set isrecno [is_recno $method]
+ # if mkeys is above 1000, need to adjust below for lexical order
+ if { $split == 1 } {
+ puts "\tRecd009_split: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i <= $mkeys } { incr i } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ set ret [$db put -txn $txn $key $data$i]
+ error_check_good dbput:more $ret 0
+ }
+ } else {
+ puts "\tRecd009_split: Delete added keys to force reverse split."
+ # Since rrecno renumbers, we delete downward.
+ for {set i $mkeys} { $i >= $nkeys } { set i [expr $i - 1] } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ error_check_good db_del:$i [$db del -txn $txn $key] 0
+ }
+ }
+ return 0
+}
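The zero-padding in recd009_split is what keeps the generated btree keys in lexical order for up to 1000 entries (the comment above notes it would need adjusting beyond that). A small check in plain tclsh; the helper name recd009_padkey is only for illustration:

    proc recd009_padkey { i } {
        if { $i >= 100 } {
            return key0$i
        } elseif { $i >= 10 } {
            return key00$i
        } else {
            return key000$i
        }
    }
    # All padded keys have the same width, so lexical order matches
    # numeric order: key0005 key0050 key0500
    puts [lsort [list [recd009_padkey 500] [recd009_padkey 5] [recd009_padkey 50]]]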
diff --git a/bdb/test/recd010.tcl b/bdb/test/recd010.tcl
new file mode 100644
index 00000000000..4fd1aefbb60
--- /dev/null
+++ b/bdb/test/recd010.tcl
@@ -0,0 +1,235 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd010.tcl,v 1.14 2000/12/11 17:24:55 sue Exp $
+#
+# Recovery Test 10.
+# Test stability of btree duplicates across btree off-page dup splits
+# and reverse splits and across recovery.
+proc recd010 { method {select 0} args} {
+ global fixed_len
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ if { [is_dbtree $method] != 1 && [is_ddbtree $method] != 1} {
+ puts "Recd010 skipping for method $method."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd010: skipping for specific pagesizes"
+ return
+ }
+
+ set opts [convert_args $method $args]
+ set method [convert_method $method]
+
+ puts "\tRecd010 ($opts): Test duplicates across splits and recovery"
+
+ set testfile recd010.db
+ env_cleanup $testdir
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set mkeys 1000
+ set firstkeys 5
+ set data "data"
+ set key "recd010_key"
+
+ puts "\tRecd010.a: Create $method environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd010.b: Fill page with $firstkeys small dups."
+ for { set i 1 } { $i <= $firstkeys } { incr i } {
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ set kvals 1
+ set kvals_dups $firstkeys
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # List of recovery tests: {CMD MSG} pairs.
+ if { $mkeys < 100 } {
+ puts "Recd010 mkeys of $mkeys too small"
+ return
+ }
+ set rlist {
+ { {recd010_split DB TXNID 1 $method 2 $mkeys}
+ "Recd010.c: btree split 2 large dups"}
+ { {recd010_split DB TXNID 0 $method 2 $mkeys}
+ "Recd010.d: btree reverse split 2 large dups"}
+ { {recd010_split DB TXNID 1 $method 10 $mkeys}
+ "Recd010.e: btree split 10 dups"}
+ { {recd010_split DB TXNID 0 $method 10 $mkeys}
+ "Recd010.f: btree reverse split 10 dups"}
+ { {recd010_split DB TXNID 1 $method 100 $mkeys}
+ "Recd010.g: btree split 100 dups"}
+ { {recd010_split DB TXNID 0 $method 100 $mkeys}
+ "Recd010.h: btree reverse split 100 dups"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ recd010_check $testdir $testfile $opts abort $reverse $firstkeys
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ recd010_check $testdir $testfile $opts commit $reverse $firstkeys
+ }
+ puts "\tRecd010.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database contains the expected
+# keys and duplicate counts after a split or reverse split.
+#
+proc recd010_check { tdir testfile opts op reverse origdups } {
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set data "data"
+
+ if { $reverse == -1 } {
+ puts "\tRecd010_check: Verify split after $op"
+ } else {
+ puts "\tRecd010_check: Verify reverse split after $op"
+ }
+
+ set stat [$db stat]
+ if { [expr ([string compare $op "abort"] == 0 && $reverse == -1) || \
+ ([string compare $op "commit"] == 0 && $reverse != -1)]} {
+ set numkeys 0
+ set allkeys [expr $numkeys + 1]
+ set numdups $origdups
+ #
+ # If we abort the adding of dups, or commit
+ # the removal of dups, either way check that
+ # we are back at the beginning. Check that:
+ # - We have 0 internal pages.
+ # - We have only 1 key (the original we primed the db
+ # with at the beginning of the test).
+ # - We have only the original number of dups we primed
+ # the db with at the beginning of the test.
+ #
+ error_check_good stat:orig0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:orig1 [is_substr $stat \
+ "{{Number of keys} 1}"] 1
+ error_check_good stat:orig2 [is_substr $stat \
+ "{{Number of records} $origdups}"] 1
+ } else {
+ set numkeys $kvals
+ set allkeys [expr $numkeys + 1]
+ set numdups $kvals_dups
+ #
+ # If we abort the removal of dups, or commit the
+ # addition of dups, check that:
+ # - We have > 0 internal pages.
+ # - We have the number of keys.
+ #
+ error_check_bad stat:new0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:new1 [is_substr $stat \
+ "{{Number of keys} $allkeys}"] 1
+ }
+
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ puts "\tRecd010_check: Checking key and duplicate values"
+ set key "recd010_key"
+ #
+ # Check dups are there as they should be.
+ #
+ for {set ki 0} {$ki < $numkeys} {incr ki} {
+ set datacnt 0
+ for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
+ set d [$dbc get -nextdup]} {
+ set thisdata [lindex [lindex $d 0] 1]
+ error_check_good dup_check $thisdata $data$datacnt
+ incr datacnt
+ }
+ error_check_good dup_count $datacnt $numdups
+ }
+ #
+ # Check that the number of expected keys (allkeys) are
+ # all of the ones that exist in the database.
+ #
+ set dupkeys 0
+ set lastkey ""
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ set thiskey [lindex [lindex $d 0] 0]
+ if { [string compare $lastkey $thiskey] != 0 } {
+ incr dupkeys
+ }
+ set lastkey $thiskey
+ }
+ error_check_good key_check $allkeys $dupkeys
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+proc recd010_split { db txn split method nkeys mkeys } {
+ global errorCode
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set data "data"
+ set key "recd010_key"
+
+ set numdups [expr $mkeys / $nkeys]
+
+ set kvals $nkeys
+ set kvals_dups $numdups
+ if { $split == 1 } {
+ puts \
+"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ for {set i 0} { $i < $numdups } { incr i } {
+ set ret [$db put -txn $txn $key$k $data$i]
+ error_check_good dbput:more $ret 0
+ }
+ }
+ } else {
+ puts \
+"\tRecd010_split: Delete $nkeys keys to force reverse split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ error_check_good db_del:$k [$db del -txn $txn $key$k] 0
+ }
+ }
+ return 0
+}
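For reference, the way recd010_split divides its workload: with the test's fixed mkeys of 1000, the three split cases in rlist put 2, 10, and 100 keys with 500, 100, and 10 duplicates each. A plain-tclsh check of that arithmetic:

    set mkeys 1000
    foreach nkeys {2 10 100} {
        set numdups [expr $mkeys / $nkeys]
        puts "$nkeys keys -> $numdups duplicates each"
    }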
diff --git a/bdb/test/recd011.tcl b/bdb/test/recd011.tcl
new file mode 100644
index 00000000000..a6fc269741b
--- /dev/null
+++ b/bdb/test/recd011.tcl
@@ -0,0 +1,115 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd011.tcl,v 11.13 2000/12/06 17:09:54 sue Exp $
+#
+# Recovery Test 11.
+# Test recovery to a specific timestamp.
+proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 11
+
+ puts "Recd0$tnum ($args): Test recovery to a specific timestamp."
+
+ set testfile recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key KEY
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Main loop: every second or so, increment the db in a txn.
+ puts "\t\tInitial Checkpoint"
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd0$tnum.b ($niter iterations):\
+ Transaction-protected increment loop."
+ for { set i 0 } { $i <= $niter } { incr i } {
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ set timeof($i) [timestamp -r]
+
+ # If an appropriate period has elapsed, checkpoint.
+ if { $i % $ckpt_freq == $ckpt_freq - 1 } {
+ puts "\t\tIteration $i: Checkpointing."
+ error_check_good ckpt($i) [$dbenv txn_checkpoint] 0
+ }
+
+ # sleep for N seconds.
+ tclsleep $sleep_time
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now, loop through and recover to each timestamp, verifying the
+ # expected increment.
+ puts "\tRecd0$tnum.c: Recover to each timestamp and check."
+ for { set i 0 } { $i <= $niter } { incr i } {
+
+ # Run db_recover.
+ berkdb debug_check
+ set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover($i,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $testdir/$testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum [lindex [lindex $dbt 0] 1]
+ error_check_good timestamp_recover $datum [pad_data $method $i]
+
+ error_check_good db_close [$db close] 0
+ }
+
+ # Finally, recover to a time well before the first timestamp
+ # and well after the last timestamp. The latter should
+ # be just like the last timestamp; the former should fail.
+ puts "\tRecd0$tnum.d: Recover to before the first timestamp."
+ set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_bad db_recover(before,$t) $ret 0
+
+ puts "\tRecd0$tnum.e: Recover to after the last timestamp."
+ set t [clock format \
+ [expr $timeof($niter) + 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover(after,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $testdir/$testfile]
+ error_check_good db_open(after) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ error_check_good timestamp_recover $datum [pad_data $method $niter]
+ error_check_good db_close [$db close] 0
+}
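The timestamp recovery step in recd011 boils down to formatting a saved Unix time for db_recover -t. A minimal sketch of that single call, assuming $util_path and $testdir from include.tcl and an environment with logs already in place; $when stands in for a value recd011 saves with [timestamp -r]:

    set when [clock seconds]
    set t [clock format $when -format "%y%m%d%H%M.%S"]
    set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
    error_check_good db_recover($t) $ret 0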
diff --git a/bdb/test/recd012.tcl b/bdb/test/recd012.tcl
new file mode 100644
index 00000000000..19dd7b011d1
--- /dev/null
+++ b/bdb/test/recd012.tcl
@@ -0,0 +1,423 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd012.tcl,v 11.14 2000/12/11 17:24:55 sue Exp $
+#
+# Recovery Test 12.
+# Test recovery handling of file opens and closes.
+proc recd012 { method {start 0} \
+ {niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
+ source ./include.tcl
+
+ set tnum 12
+ set pagesize 512
+
+ if { $is_qnx_test } {
+ set niter 40
+ }
+
+ puts "Recd0$tnum $method ($args): Test recovery file management."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd012: skipping for specific pagesizes"
+ return
+ }
+
+ for { set i $start } { $i <= $niter } { incr i } {
+
+ env_cleanup $testdir
+
+ # For repeatability, we pass in the iteration number
+ # as a parameter and use that in recd012_body to seed
+ # the random number generator to randomize our operations.
+ # This lets us re-run a potentially failing iteration
+ # without having to start from the beginning and work
+ # our way to it.
+ #
+ # The number of databases would range from 4 to 8 as a function
+ # of the iteration number, but the line below is currently disabled.
+# set ndbs [expr ($i % 5) + 4]
+
+ recd012_body \
+ $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
+ }
+}
+
+proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} } {
+ global alphabet rand_init fixed_len recd012_ofkey recd012_ofckptkey
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ puts "\tRecd0$tnum $method ($largs): Iteration $iter"
+ puts "\t\tRecd0$tnum.a: Create environment and $ndbs databases."
+
+ set flags "-create -txn -home $testdir"
+ set env_cmd "berkdb env $flags"
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Initialize random number generator based on $iter.
+ berkdb srand [expr $iter + $rand_init]
+
+ # Initialize database that keeps track of number of open files (so
+ # we don't run out of descriptors).
+ set ofname of.db
+ set ofdb [berkdb_open -env $dbenv\
+ -create -dup -mode 0644 -btree -pagesize 512 $ofname]
+ error_check_good of_open [is_valid_db $ofdb] TRUE
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
+ error_check_good of_put2 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_put3 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_txn_commit [$oftxn commit] 0
+ error_check_good of_close [$ofdb close] 0
+
+ # Create ndbs databases to work in, and a file listing db names to
+ # pick from.
+ set f [open TESTDIR/dblist w]
+ set oflags \
+ "-env $dbenv -create -mode 0644 -pagesize $psz $largs $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ # 50-50 chance of being a subdb, unless we're a queue.
+ if { [berkdb random_int 0 1] || [is_queue $method] } {
+ # not a subdb
+ set dbname recd0$tnum-$i.db
+ } else {
+ # subdb
+ set dbname "recd0$tnum-subdb.db s$i"
+ }
+ puts $f $dbname
+ set db [eval berkdb_open $oflags $dbname]
+ error_check_good db($i) [is_valid_db $db] TRUE
+ error_check_good db($i)_close [$db close] 0
+ }
+ close $f
+
+ error_check_good env_close [$dbenv close] 0
+
+ # Now we get to the meat of things. Our goal is to do some number
+ # of opens, closes, updates, and shutdowns (simulated here by a
+ # close of all open handles and a close/reopen of the environment,
+ # with or without an envremove), matching the regular expression
+ #
+ # ((O[OUC]+S)+R+V)
+ #
+ # We'll repeat the inner + a random number up to $niniter times,
+ # and the outer + a random number up to $noutiter times.
+ #
+ # In order to simulate shutdowns, we'll perform the opens, closes,
+ # and updates in a separate process, which we'll exit without closing
+ # all handles properly. The environment will be left lying around
+ # before we run recovery 50% of the time.
+ set out [berkdb random_int 1 $noutiter]
+ puts "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter\
+ ops."
+ for { set i 0 } { $i < $out } { incr i } {
+ set child [open "|$tclsh_path" w]
+
+ # For performance, don't source everything,
+ # just what we'll need.
+ puts $child "load $tcllib"
+ puts $child "set fixed_len $fixed_len"
+ puts $child "source ../test/testutils.tcl"
+ puts $child "source ../test/recd0$tnum.tcl"
+
+ set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
+
+ # Go.
+ # puts "recd012_dochild {$env_cmd} $rnd $i $niniter\
+ # $ndbs $tnum $method $ofname $largs"
+ puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
+ $ndbs $tnum $method $ofname $largs"
+ close $child
+
+ # Run recovery 0-3 times.
+ set nrecs [berkdb random_int 0 3]
+ for { set j 0 } { $j < $nrecs } { incr j } {
+ set ret [catch {exec $util_path/db_recover \
+ -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ file mkdir /tmp/12out
+ set fd [open /tmp/12out/[pid] w]
+ puts $fd $res
+ close $fd
+ }
+ error_check_good recover($j) $ret 0
+ }
+
+ }
+
+ # Run recovery one final time; it doesn't make sense to
+ # check integrity if we do not.
+ set ret [catch {exec $util_path/db_recover -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ puts $res
+ }
+
+ # Make sure each datum is the correct filename.
+ puts "\t\tRecd0$tnum.c: Checking data integrity."
+ set dbenv [berkdb env -create -private -home $testdir]
+ error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
+ set f [open TESTDIR/dblist r]
+ set i 0
+ while { [gets $f dbinfo] > 0 } {
+ set db [eval berkdb_open -env $dbenv $dbinfo]
+ error_check_good dbopen($dbinfo) [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good integrity [lindex [lindex $dbt 0] 1] \
+ [pad_data $method $dbinfo]
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close_integrity [$dbenv close] 0
+
+
+ # Verify
+ error_check_good verify [verify_dir $testdir "\t\tRecd0$tnum.d: "] 0
+}
+
+
+proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
+ ofname args } {
+ global recd012_ofkey
+ if { [is_record_based $method] } {
+ set keybase ""
+ } else {
+ set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
+ }
+
+ # Initialize our random number generator, repeatably based on an arg.
+ berkdb srand $rnd
+
+ # Open our env.
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Find out how many databases appear to be open in the log--we
+ # don't want recovery to run out of filehandles.
+ set ofdb [berkdb_open -env $dbenv $ofname]
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
+ error_check_good of_get [lindex [lindex $dbt 0] 0] $recd012_ofkey
+ set nopenfiles [lindex [lindex $dbt 0] 1]
+
+ error_check_good of_commit [$oftxn commit] 0
+
+ # Read our dbnames
+ set f [open TESTDIR/dblist r]
+ set i 0
+ while { [gets $f dbname($i)] > 0 } {
+ incr i
+ }
+ close $f
+
+ # We now have $ndbs extant databases; keep track of the
+ # ones we currently have open.
+ set opendbs {}
+ set oflags "-env $dbenv $args"
+
+ # Start a transaction, just to get us started.
+ set curtxn [$dbenv txn]
+ error_check_good txn [is_valid_txn $curtxn $dbenv] TRUE
+
+ # Inner loop. Do $in iterations of a random open, close, or
+ # update, where $in is between 1 and $niniter.
+ set in [berkdb random_int 1 $niniter]
+ for { set j 0 } { $j < $in } { incr j } {
+ set op [berkdb random_int 0 2]
+ switch $op {
+ 0 {
+ # Open.
+ recd012_open
+ }
+ 1 {
+ # Update. Put random-number$keybase as key,
+ # filename as data, into random database.
+ set num_open [llength $opendbs]
+ if { $num_open == 0 } {
+ # If none are open, do an open first, then refresh
+ # the count so the random pick below stays in range.
+ recd012_open
+ set num_open [llength $opendbs]
+ }
+ set n [berkdb random_int 0 [expr $num_open - 1]]
+ set pair [lindex $opendbs $n]
+ set udb [lindex $pair 0]
+ set uname [lindex $pair 1]
+
+ set key [berkdb random_int 1000 1999]$keybase
+ set data [chop_data $method $uname]
+ error_check_good put($uname,$udb,$key,$data) \
+ [$udb put -txn $curtxn $key $data] 0
+
+ # One time in four, commit the transaction.
+ if { [berkdb random_int 0 3] == 0 && 0 } {
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+ 2 {
+ # Close.
+
+ if { [llength $opendbs] == 0 } {
+ # If none are open, open instead of closing.
+ recd012_open
+ continue
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 \
+ [expr [llength $opendbs] - 1]]
+
+ set db [lindex [lindex $opendbs $which] 0]
+ error_check_good db_choice [is_valid_db $db] TRUE
+ global errorCode errorInfo
+
+ error_check_good db_close \
+ [[lindex [lindex $opendbs $which] 0] close] 0
+ set opendbs [lreplace $opendbs $which $which]
+ incr nopenfiles -1
+
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+
+ }
+ }
+
+ # One time in two hundred, checkpoint.
+ if { [berkdb random_int 0 199] == 0 } {
+ puts "\t\t\tRecd0$tnum:\
+ Random checkpoint after operation $outiter.$j."
+ error_check_good txn_ckpt \
+ [$dbenv txn_checkpoint] 0
+ set nopenfiles \
+ [recd012_nopenfiles_ckpt $dbenv $ofdb $nopenfiles]
+ }
+ }
+
+ # We have to commit curtxn. It'd be kind of nice not to, but
+ # if we start in again without running recovery, we may block
+ # ourselves.
+ error_check_good curtxn_commit [$curtxn commit] 0
+
+ # Put back the new number of open files.
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_del [$ofdb del -txn $oftxn $recd012_ofkey] 0
+ error_check_good of_put \
+ [$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
+ error_check_good of_commit [$oftxn commit] 0
+ error_check_good ofdb_close [$ofdb close] 0
+}
+
+proc recd012_open { } {
+ # This is basically an inline and has to modify curtxn,
+ # so use upvars.
+ upvar curtxn curtxn
+ upvar ndbs ndbs
+ upvar dbname dbname
+ upvar dbenv dbenv
+ upvar oflags oflags
+ upvar opendbs opendbs
+ upvar nopenfiles nopenfiles
+
+ # Return without an open if we've already opened too many files--
+ # we don't want to make recovery run out of filehandles.
+ if { $nopenfiles > 30 } {
+ #puts "skipping--too many open files"
+ return -code break
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 [expr $ndbs - 1]]
+ set db [eval berkdb_open \
+ $oflags $dbname($which)]
+ lappend opendbs [list $db $dbname($which)]
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+
+ incr nopenfiles
+}
+
+# Update the database containing the number of files that db_recover has
+# to contend with--we want to avoid letting it run out of file descriptors.
+# We do this by keeping track of the number of unclosed opens since the
+# checkpoint before last.
+# $recd012_ofkey stores this current value; the two dups stored at
+# $recd012_ofckptkey record how many opens happened in each of the two
+# most recent inter-checkpoint intervals.
+# Thus, if the current value is 17 when we do a checkpoint, and the
+# stored values are 3 and 8, the new current value (which we return)
+# is 14, and the new stored values are 8 and 6.
+proc recd012_nopenfiles_ckpt { env db nopenfiles } {
+ global recd012_ofckptkey
+ set txn [$env txn]
+ error_check_good nopenfiles_ckpt_txn [is_valid_txn $txn $env] TRUE
+
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Get the first ckpt value and delete it.
+ set dbt [$dbc get -set $recd012_ofckptkey]
+ error_check_good set [llength $dbt] 1
+
+ set discard [lindex [lindex $dbt 0] 1]
+ error_check_good del [$dbc del] 0
+
+ set nopenfiles [expr $nopenfiles - $discard]
+
+ # Get the next ckpt value
+ set dbt [$dbc get -nextdup]
+ error_check_good set2 [llength $dbt] 1
+
+ # Calculate how many opens we've had since this checkpoint before last.
+ set onlast [lindex [lindex $dbt 0] 1]
+ set sincelast [expr $nopenfiles - $onlast]
+
+ # Put this new number at the end of the dup set.
+ error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
+
+ # We should never deadlock since we're the only one in this db.
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ return $nopenfiles
+}
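+
+# Illustrative sketch (never called by the tests): the checkpoint
+# bookkeeping above is just list arithmetic.  With a current count of
+# 17 and stored intervals {3 8}, this helper returns {14 {8 6}},
+# matching the worked example in the comment above.
+proc recd012_nopenfiles_ckpt_example { } {
+	set nopenfiles 17
+	set intervals {3 8}
+	# Drop the oldest interval; it no longer counts against us.
+	set nopenfiles [expr {$nopenfiles - [lindex $intervals 0]}]
+	# Opens since the last checkpoint become the newest interval.
+	set sincelast [expr {$nopenfiles - [lindex $intervals 1]}]
+	set intervals [list [lindex $intervals 1] $sincelast]
+	return [list $nopenfiles $intervals]
+}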
+
+# globals -- it's not worth passing these around, as they're constants
+set recd012_ofkey OPENFILES
+set recd012_ofckptkey CKPTS
diff --git a/bdb/test/recd013.tcl b/bdb/test/recd013.tcl
new file mode 100644
index 00000000000..d134d487f1e
--- /dev/null
+++ b/bdb/test/recd013.tcl
@@ -0,0 +1,244 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd013.tcl,v 11.10 2000/12/11 17:24:55 sue Exp $
+#
+# Recovery Test 13.
+# Smoke test of aborted cursor adjustments.
+#
+# XXX
+# Other tests that cover more specific variants of the same issue
+# are in the access method tests for now. This is probably wrong; we
+# put this one here because they're closely based on and intertwined
+# with other, non-transactional cursor stability tests that are among
+# the access method tests, and because we need at least one test to
+# fit under recd and keep logtrack from complaining. We'll sort out the mess
+# later; the important thing, for now, is that everything that needs
+# testing gets tested. (This really shouldn't be under recd at all,
+# since it doesn't run recovery!)
+proc recd013 { method { nitems 100 } args } {
+ source ./include.tcl
+ global alphabet log_log_record_types
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 13
+ set pgsz 512
+
+ puts "Recd0$tnum $method ($args): Test of aborted cursor adjustments."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd013: skipping for specific pagesizes"
+ return
+ }
+
+ set testfile recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set keybase ""
+ } else {
+ set keybase "key"
+ }
+
+ puts "\tRecd0$tnum.a:\
+ Create environment, database, and parent transaction."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb env $flags"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set oflags "-env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Create a database containing $nitems items, numbered with odds.
+ # We'll then put the even numbers during the body of the test.
+ set txn [$env txn]
+ error_check_good init_txn [is_valid_txn $txn $env] TRUE
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good init_put($i) [$db put -txn $txn $key $data] 0
+ }
+ error_check_good init_txn_commit [$txn commit] 0
+
+ # Create an initial txn; set a cursor of that txn to each item.
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\tRecd0$tnum.b: Put test."
+ puts "\t\tRecd0$tnum.b.1: Put items."
+ set ctxn [$env txn -parent $txn]
+ error_check_good txn [is_valid_txn $ctxn $env] TRUE
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $ctxn $key $data] 0
+
+ # If we're a renumbering recno, this is uninteresting.
+ # Stir things up by putting a few additional records at
+ # the beginning.
+ if { [is_rrecno $method] == 1 } {
+ set curs [$db cursor -txn $ctxn]
+ error_check_bad llength_get_first \
+ [llength [$curs get -first]] 0
+ error_check_good cursor [is_valid_cursor $curs $db] TRUE
+ # expect a recno!
+ error_check_good rrecno_put($i) \
+ [$curs put -before ADDITIONAL.$i] 1
+ error_check_good curs_close [$curs close] 0
+ }
+ }
+
+ puts "\t\tRecd0$tnum.b.2: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.b.3: "] 0
+
+ # Now put back all the even records, this time in the parent.
+ # Commit and re-begin the transaction so we can abort and
+ # get back to a nice full database.
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $txn $key $data] 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ # Delete test. Set a cursor to each record. Delete the even ones
+ # in the parent and check cursor stability. Then open a child
+ # transaction, and delete the odd ones. Verify that the database
+	# is empty.
+ puts "\tRecd0$tnum.c: Delete test."
+ unset dbc
+
+ # Create cursors pointing at each item.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc($i)_create [is_valid_cursor $dbc($i) $db] \
+ TRUE
+ error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.1: Delete even items in parent txn."
+ if { [is_rrecno $method] != 1 } {
+ set init 2
+ set bound [expr 2 * $nitems]
+ set step 2
+ } else {
+ # In rrecno, deletes will renumber the items, so we have
+ # to take that into account when we delete by recno.
+ set init 2
+ set bound [expr $nitems + 1]
+ set step 1
+ }
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $txn $keybase$i] 0
+ }
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list "" ""]]
+ }
+
+ puts "\t\tRecd0$tnum.c.2: Delete odd items in child txn."
+
+ set ctxn [$env txn -parent $txn]
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ # If this is an rrecno, just delete the first
+ # item repeatedly--the renumbering will make
+ # that delete everything.
+ set j 1
+ }
+ error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
+ }
+
+ # Verify that everyone's deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good get_deleted($i) \
+ [llength [$db get -txn $ctxn $keybase$i]] 0
+ }
+
+ puts "\t\tRecd0$tnum.c.3: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list "" ""]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.c.4: "] 0
+
+ puts "\tRecd0$tnum.d: Clean up."
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "\t\tRecd0$tnum.d.1: "] 0
+
+ if { $log_log_record_types == 1 } {
+ logtrack_read $testdir
+ }
+}
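+
+# Illustrative sketch (never called by the tests): the core pattern
+# recd013 exercises, reduced to a single key.  A cursor opened in a
+# parent transaction should still sit on its record after a child
+# transaction's changes are aborted.  Assumes an already-open
+# transactional env and a btree db handle.
+proc recd013_example { env db } {
+	set txn [$env txn]
+	$db put -txn $txn key1 data1
+	set dbc [$db cursor -txn $txn]
+	$dbc get -set key1
+
+	# A child txn adds another record, then aborts.
+	set ctxn [$env txn -parent $txn]
+	$db put -txn $ctxn key2 data2
+	$ctxn abort
+
+	# The parent's cursor should be undisturbed.
+	set ret [$dbc get -current]
+	$dbc close
+	$txn commit
+	return $ret	;# expect {{key1 data1}}
+}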
diff --git a/bdb/test/recd014.tcl b/bdb/test/recd014.tcl
new file mode 100644
index 00000000000..83b3920de9b
--- /dev/null
+++ b/bdb/test/recd014.tcl
@@ -0,0 +1,467 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd014.tcl,v 1.9 2001/01/11 17:16:04 sue Exp $
+#
+# Recovery Test 14.
+# This is a recovery test for create/delete of queue extents. We have
+# hooks in the database so that we can abort the process at various
+# points and make sure that the extent file does or does not exist. We
+# then run recovery and make sure the extent file correctly exists
+# or does not, as the case may be.
+proc recd014 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+	if { [is_queueext $method] != 1 } {
+ puts "Recd014: Skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd014: skipping for specific pagesizes"
+ return
+ }
+
+ set orig_fixed_len $fixed_len
+ #
+ # We will use 512-byte pages, to be able to control
+ # when extents get created/removed.
+ #
+ set fixed_len 300
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ #
+ # We want to set -extent 1 instead of what
+ # convert_args gave us.
+ #
+ set exti [lsearch -exact $opts "-extent"]
+ incr exti
+ set opts [lreplace $opts $exti $exti 1]
+
+ puts "Recd014: $method extent creation/deletion tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd014.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd014.a: creating environment"
+ set env_cmd "berkdb env $flags"
+
+ puts "\tRecd014.b: Create test commit"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.b: Create test abort"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ puts "\tRecd014.c: Consume test commit"
+ ext_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile consume commit
+ puts "\tRecd014.c: Consume test abort"
+ ext_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile consume abort
+
+ puts "\tRecd014.d: Delete test commit"
+ ext_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile delete commit
+ puts "\tRecd014.d: Delete test abort"
+ ext_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile delete abort
+
+ set fixed_len $orig_fixed_len
+ puts "\tRecd014.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
+ global log_log_record_types
+ global fixed_len
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set init_file $dir/$dbfile.init
+ set noenvflags "-create $method -mode 0644 -pagesize 512 $opts $dbfile"
+ set oflags "-env $env $noenvflags"
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set ret [catch {eval {berkdb_open} $oflags} db]
+
+ #
+ # The command to execute to create an extent is a put.
+ # We are just creating the first one, so our extnum is 0.
+ #
+ set extnum 0
+ set data [chop_data $method [replicate $alphabet 512]]
+ puts "\t\tExecuting command"
+ set putrecno [$db put -txn $t -append $data]
+ error_check_good db_put $putrecno 1
+
+ # Sync the db so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ puts "\t\tSyncing"
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ #
+	# If we committed, our new record should be in the extent.
+	# If we aborted, the extent file may still exist until recovery
+	# is run, but our record should not be in it.
+ #
+ set dbq [make_ext_filename $dir $dbfile $extnum]
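+	# (make_ext_filename is a test-suite helper; it builds the name of
+	# the queue extent file, presumably of the form __dbq.<dbfile>.<extnum>.)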
+ error_check_good extput:exists1 [file exists $dbq] 1
+ set ret [$db get $putrecno]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted. Verify our entry is not there.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_good db_get [llength $ret] 0
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_get [llength $ret] 1
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+	# the file still does (or does not) exist when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ #
+ # Verify it did not change.
+ #
+ error_check_good extput:exists2 [file exists $dbq] 1
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # Undo.
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ file copy -force $dir/$dbfile.afterop $dir/$dbfile
+ move_file_extent $dir $dbfile afterop copy
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # To redo, remove the dbfiles. Run recovery again.
+ #
+ catch { file rename -force $dir/$dbfile $dir/$dbfile.renamed } res
+ copy_extent_file $dir $dbfile renamed rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ #
+ # !!!
+ # Even though db_recover exits with status 0, it should print out
+ # a warning because the file didn't exist. Db_recover writes this
+ # to stderr. Tcl assumes that ANYTHING written to stderr is an
+ # error, so even though we exit with 0 status, we still get an
+ # error back from 'catch'. Look for the warning.
+ #
+ if { $stat == 1 && [is_substr $result "warning"] == 0 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ #
+ # Verify it was redone. However, since we removed the files
+ # to begin with, recovery with abort will not recreate the
+ # extent. Recovery with commit will.
+ #
+ if {$txncmd == "abort"} {
+ error_check_good extput:exists3 [file exists $dbq] 0
+ } else {
+ error_check_good extput:exists3 [file exists $dbq] 1
+ }
+}
+
+proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
+ if { $txncmd == "commit" } {
+ #
+ # Operation was committed. Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ } else {
+ #
+ # Operation aborted. The file is there, but make
+ # sure the item is not.
+ #
+ set xdb [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $xdb] TRUE
+ set ret [$xdb get $putrecno]
+ error_check_good db_get [llength $ret] 0
+ error_check_good db_close [$xdb close] 0
+ }
+}
+
+
+proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
+ global log_log_record_types
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set oflags "-create $method -mode 0644 -pagesize 512 \
+ -env $env $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set extnum 0
+ set data [chop_data $method [replicate $alphabet 512]]
+
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set putrecno [$db put -append $data]
+ error_check_good db_put $putrecno 1
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\t\tExecuting command"
+
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed until recovery is run.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ if { [string compare $op "delete"] == 0 } {
+ set dbcmd "$db del -txn $t $putrecno"
+ } else {
+ set dbcmd "$db get -txn $t -consume"
+ }
+ set ret [eval $dbcmd]
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted, verify ext did not change.
+ #
+ puts "\t\tCommand executed and aborted."
+
+ #
+ # Check that the file exists. Final state.
+ # Since we aborted the txn, we should be able
+ # to get to our original entry.
+ #
+ error_check_good post$op.1 [file exists $dbq] 1
+
+ set xdb [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $xdb] TRUE
+ set kd [$xdb get $putrecno]
+ set key [lindex [lindex $kd 0] 0]
+ error_check_good dbget_key $key $putrecno
+ set retdata [lindex [lindex $kd 0] 1]
+ error_check_good dbget_data $data $retdata
+ error_check_good db_close [$xdb close] 0
+
+ error_check_good \
+ diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ #
+ # Check file existence. Consume operations remove
+ # the extent when we move off, which we should have
+ # done. Delete operations won't remove the extent
+ # until we run recovery.
+ #
+ if { [string compare $op "delete"] == 0 } {
+ error_check_good ${op}_exists [file exists $dbq] 1
+ } else {
+ error_check_good ${op}_exists [file exists $dbq] 0
+ }
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here on what we ended up with. Should be a no-op.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover1 [file exists $dbq] 0
+ }
+
+ #
+ # Run recovery here. Re-do the operation.
+ # Verify that the file doesn't exist
+ # (if we committed) or change (if we aborted)
+ # when we are done.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover1 [file exists $dbq] 0
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ set filecopy [glob $dir/*.afterop]
+ set afterop [lindex $filecopy 0]
+ file rename -force $afterop $dir/$dbfile
+ set afterop [string range $afterop \
+ [expr [string last "/" $afterop] + 1] \
+ [string last "." $afterop]]
+ move_file_extent $dir $dbfile afterop rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $init_file $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it still does
+ # not exist.
+ #
+ error_check_good after_recover2 [file exists $dbq] 0
+ }
+}
diff --git a/bdb/test/rpc001.tcl b/bdb/test/rpc001.tcl
new file mode 100644
index 00000000000..331a18cfbf1
--- /dev/null
+++ b/bdb/test/rpc001.tcl
@@ -0,0 +1,444 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc001.tcl,v 11.23 2001/01/02 20:04:56 sue Exp $
+#
+# Test RPC specifics, primarily that unsupported functions return
+# errors and such.
+#
+proc rpc001 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ source ./include.tcl
+
+ #
+ # First test timeouts on server.
+ #
+ set ttime 5
+ set itime 10
+ puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/berkeley_db_svc \
+ -h $rpc_testdir -t $ttime -I $itime &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+		    -h $rpc_testdir -t $ttime -I $itime &]
+ }
+ puts "\tRpc001.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc001.b: Creating environment"
+
+ set testfile "rpc001.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc001.c: Opening a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set curs_list {}
+ set txn_list {}
+ puts "\tRpc001.d: Basic timeout test"
+ puts "\tRpc001.d1: Starting a transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ lappend txn_list $txn
+
+ puts "\tRpc001.d2: Open a cursor in that transaction"
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d3: Duplicate that cursor"
+ set dbc [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d4: Starting a nested transaction"
+ set txn [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+
+ puts "\tRpc001.d5: Create a cursor, no transaction"
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d6: Timeout cursor and transactions"
+ set sleeptime [expr $ttime + 2]
+ tclsleep $sleeptime
+
+ #
+	# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$db stat} ret]
+ error_check_good dbstat $stat 0
+
+ #
+ # Check that every handle we opened above is timed out
+ #
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ set txn_list {}
+ set ntxns 8
+ puts "\tRpc001.e: Nested ($ntxns x $ntxns) transaction activity test"
+ puts "\tRpc001.e1: Starting parent transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ set parent_txn $txn
+
+ #
+	# First set up a breadth of 'ntxns' child transactions.
+ # We need 2 from this set for testing later on. Just set them
+ # up separately first.
+ #
+ puts "\tRpc001.e2: Creating $ntxns child transactions"
+ set child0 [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $child0 $env] TRUE
+ set child1 [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $child1 $env] TRUE
+
+ for {set i 2} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ }
+
+ #
+ # Now make one 'ntxns' deeply nested.
+ # Add one more for testing later on separately.
+ #
+ puts "\tRpc001.e3: Creating $ntxns nested child transactions"
+ for {set i 0} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $last_txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ }
+ set last_parent $last_txn
+ set last_txn [$env txn -parent $last_parent]
+ error_check_good txn_begin [is_valid_txn $last_txn $env] TRUE
+
+ puts "\tRpc001.e4: Open a cursor in deepest transaction"
+ set dbc [$db cursor -txn $last_txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tRpc001.e5: Duplicate that cursor"
+ set dbcdup [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbcdup $db] TRUE
+ lappend curs_list $dbcdup
+
+ puts "\tRpc001.f: Timeout then activate duplicate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbcdup close} ret]
+ error_check_good dup_close:$dbcdup $stat 0
+ error_check_good dup_close:$dbcdup $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.g: Timeout, then activate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbc close} ret]
+ error_check_good dbc_close:$dbc $stat 0
+ error_check_good dbc_close:$dbc $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.h: Timeout, then activate child txn"
+ tclsleep $sleeptime
+ set stat [catch {$child0 commit} ret]
+ error_check_good child_commit $stat 0
+ error_check_good child_commit:$child0 $ret 0
+
+	#
+	# Make sure that our nested txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $last_parent} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.i: Timeout, then activate nested txn"
+ tclsleep $sleeptime
+ set stat [catch {$last_txn commit} ret]
+ error_check_good lasttxn_commit $stat 0
+	error_check_good lasttxn_commit:$last_txn $ret 0
+
+ #
+ # Make sure that our child txn is not timed out. We should
+ # be able to commit it.
+ #
+ set stat [catch {$child1 commit} ret]
+ error_check_good child_commit:$child1 $stat 0
+ error_check_good child_commit:$child1 $ret 0
+
+ #
+ # Clean up. They were inserted in LIFO order, so we should
+ # just be able to commit them all.
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 0
+
+ rpc_timeoutjoin $env "Rpc001.j" $sleeptime 0
+ rpc_timeoutjoin $env "Rpc001.k" $sleeptime 1
+
+ #
+ # We need a 2nd env just to do an op to timeout the env.
+ #
+ set env1 [eval {berkdb env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good lock_env:open [is_valid_env $env1] TRUE
+
+ puts "\tRpc001.l: Timeout idle env handle"
+ set sleeptime [expr $itime + 2]
+ tclsleep $sleeptime
+
+ set stat [catch {$env1 close} ret]
+ error_check_good env1_close $stat 0
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_timeout \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ exec $KILL $dpid
+}
+
+proc rpc_timeoutjoin {env msg sleeptime use_txn} {
+ #
+ # Check join cursors now.
+ #
+ puts -nonewline "\t$msg: Test join cursors and timeouts"
+ if { $use_txn } {
+ puts " (using txns)"
+ } else {
+ puts " (without txns)"
+ }
+ #
+ # Set up a simple set of join databases
+ #
+ puts "\t${msg}0: Set up join databases"
+ set fruit {
+ {blue blueberry}
+ {red apple} {red cherry} {red raspberry}
+ {yellow lemon} {yellow pear}
+ }
+ set price {
+ {expen blueberry} {expen cherry} {expen raspberry}
+ {inexp apple} {inexp lemon} {inexp pear}
+ }
+ set dessert {
+ {blueberry cobbler} {cherry cobbler} {pear cobbler}
+ {apple pie} {raspberry pie} {lemon pie}
+ }
+ set fdb [eval {berkdb_open -create -btree -mode 0644} \
+ -env $env -dup fruit.db]
+ error_check_good dbopen [is_valid_db $fdb] TRUE
+ set pdb [eval {berkdb_open -create -btree -mode 0644} \
+ -env $env -dup price.db]
+ error_check_good dbopen [is_valid_db $pdb] TRUE
+ set ddb [eval {berkdb_open -create -btree -mode 0644} \
+ -env $env -dup dessert.db]
+ error_check_good dbopen [is_valid_db $ddb] TRUE
+ foreach kd $fruit {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [$fdb put $k $d]
+ error_check_good fruit_put $ret 0
+ }
+ error_check_good sync [$fdb sync] 0
+ foreach kd $price {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [$pdb put $k $d]
+ error_check_good price_put $ret 0
+ }
+ error_check_good sync [$pdb sync] 0
+ foreach kd $dessert {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [$ddb put $k $d]
+ error_check_good dessert_put $ret 0
+ }
+ error_check_good sync [$ddb sync] 0
+
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 0
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 1
+
+ error_check_good ddb:close [$ddb close] 0
+ error_check_good pdb:close [$pdb close] 0
+ error_check_good fdb:close [$fdb close] 0
+}
+
+proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
+ global errorInfo
+
+ #
+ # Start a parent and child transaction. We'll do our join in
+ # the child transaction just to make sure everything gets timed
+ # out correctly.
+ #
+ set curs_list {}
+ set txn_list {}
+ set msgnum [expr $op * 2 + 1]
+ if { $use_txn } {
+ puts "\t$msg$msgnum: Set up txns and join cursor"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set child0 [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $child0 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child0]
+ set child1 [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $child1 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child1]
+ set txncmd "-txn $child0"
+ } else {
+ puts "\t$msg$msgnum: Set up join cursor"
+ set txncmd ""
+ }
+
+ #
+	# Start cursors (using txn child0 in the fruit and price dbs, if
+	# needed). Just pick something simple to join on.
+ # Then call join on the dessert db.
+ #
+ set fkey yellow
+ set pkey inexp
+ set fdbc [eval $fdb cursor $txncmd]
+ error_check_good fdb_cursor [is_valid_cursor $fdbc $fdb] TRUE
+ set ret [$fdbc get -set $fkey]
+ error_check_bad fget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good fget:set:key $k $fkey
+ set curs_list [linsert $curs_list 0 $fdbc]
+
+ set pdbc [eval $pdb cursor $txncmd]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set ret [$pdbc get -set $pkey]
+ error_check_bad pget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good pget:set:key $k $pkey
+ set curs_list [linsert $curs_list 0 $pdbc]
+
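+	# With the data set up in rpc_timeoutjoin, the yellow and inexp
+	# dup sets intersect in {lemon pear}, so the join against the
+	# dessert db should return the {lemon pie} and {pear cobbler}
+	# records.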
+ set jdbc [$ddb join $fdbc $pdbc]
+ error_check_good join_cursor [is_valid_cursor $jdbc $ddb] TRUE
+ set ret [$jdbc get]
+ error_check_bad jget [llength $ret] 0
+
+ set msgnum [expr $op * 2 + 2]
+ if { $op == 1 } {
+ puts -nonewline "\t$msg$msgnum: Timeout all cursors"
+ if { $use_txn } {
+ puts " and txns"
+ } else {
+ puts ""
+ }
+ } else {
+ puts "\t$msg$msgnum: Timeout, then activate join cursor"
+ }
+
+ tclsleep $sleep
+
+ if { $op == 1 } {
+ #
+		# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ #
+ # Check that join cursor is timed out.
+ #
+ set stat [catch {$jdbc close} ret]
+ error_check_good dbc_close:$jdbc $stat 1
+ error_check_good dbc_timeout:$jdbc \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ #
+ # Now the server may or may not timeout constituent
+ # cursors when it times out the join cursor. So, just
+ # sleep again and then they should timeout.
+ #
+ tclsleep $sleep
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ } else {
+ set stat [catch {$jdbc get} ret]
+ error_check_good jget.stat $stat 0
+ error_check_bad jget [llength $ret] 0
+ set curs_list [linsert $curs_list 0 $jdbc]
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 0
+ error_check_good dbc_close:$c $ret 0
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+ }
+}
diff --git a/bdb/test/rpc002.tcl b/bdb/test/rpc002.tcl
new file mode 100644
index 00000000000..6b11914c2eb
--- /dev/null
+++ b/bdb/test/rpc002.tcl
@@ -0,0 +1,144 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc002.tcl,v 1.7 2000/10/27 13:23:56 sue Exp $
+#
+# RPC Test 2
+# Test invalid RPC functions and make sure we return errors for them correctly
+proc rpc002 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ source ./include.tcl
+
+ set testfile "rpc002.db"
+ set home [file tail $rpc_testdir]
+ #
+ # First start the server.
+ #
+ puts "Rpc002: Unsupported interface test"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/berkeley_db_svc -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc002.a: Started server, pid $dpid"
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ puts "\tRpc002.b: Unsupported env options"
+ #
+ # Test each "pre-open" option for env's. These need to be
+ # tested on the 'berkdb env' line.
+ #
+ set rlist {
+ { "-data_dir $rpc_testdir" "Rpc002.b0"}
+ { "-log_buffer 512" "Rpc002.b1"}
+ { "-log_dir $rpc_testdir" "Rpc002.b2"}
+ { "-log_max 100" "Rpc002.b3"}
+ { "-lock_conflict {3 {0 0 0 0 0 1 0 1 1}}" "Rpc002.b4"}
+ { "-lock_detect default" "Rpc002.b5"}
+ { "-lock_max 100" "Rpc002.b6"}
+ { "-mmapsize 100" "Rpc002.b7"}
+ { "-shm_key 100" "Rpc002.b9"}
+ { "-tmp_dir $rpc_testdir" "Rpc002.b10"}
+ { "-txn_max 100" "Rpc002.b11"}
+ { "-txn_timestamp 100" "Rpc002.b12"}
+ { "-verbose {recovery on}" "Rpc002.b13"}
+ }
+
+ set e "berkdb env -create -mode 0644 -home $home -server $rpc_server \
+ -client_timeout 10000 -txn"
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err \
+ [is_substr $errorInfo "meaningless in RPC env"] 1
+ }
+
+ #
+ # Open an env with all the subsystems (-txn implies all
+ # the rest)
+ #
+ puts "\tRpc002.c: Unsupported env related interfaces"
+ set env [eval {berkdb env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
+ $testfile"
+ set db [eval $dbcmd]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ #
+ # Test each "post-open" option relating to envs, txns, locks,
+ # logs and mpools.
+ #
+ set rlist {
+ { " lock_detect default" "Rpc002.c0"}
+ { " lock_get read 1 $env" "Rpc002.c1"}
+ { " lock_id" "Rpc002.c2"}
+ { " lock_stat" "Rpc002.c3"}
+ { " lock_vec 1 {get $env read}" "Rpc002.c4"}
+ { " log_archive" "Rpc002.c5"}
+ { " log_file {0 0}" "Rpc002.c6"}
+ { " log_flush" "Rpc002.c7"}
+ { " log_get -current" "Rpc002.c8"}
+ { " log_register $db $testfile" "Rpc002.c9"}
+ { " log_stat" "Rpc002.c10"}
+ { " log_unregister $db" "Rpc002.c11"}
+ { " mpool -create -pagesize 512" "Rpc002.c12"}
+ { " mpool_stat" "Rpc002.c13"}
+ { " mpool_sync {0 0}" "Rpc002.c14"}
+ { " mpool_trickle 50" "Rpc002.c15"}
+ { " txn_checkpoint -min 1" "Rpc002.c16"}
+ { " txn_stat" "Rpc002.c17"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $env $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err \
+ [is_substr $errorInfo "meaningless in RPC env"] 1
+ }
+ error_check_good dbclose [$db close] 0
+
+ #
+	# The database operations that aren't supported are few,
+	# mostly because they are ones the Tcl interface doesn't support
+	# either, so we have no way to get at them. Test what we can.
+ #
+ puts "\tRpc002.d: Unsupported database related interfaces"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ #
+ puts "\tRpc002.d0: -cachesize"
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
+ -cachesize {0 65536 0} $testfile"
+ set stat [catch {eval $dbcmd} ret]
+ error_check_good dbopen_cache $stat 1
+ error_check_good dbopen_cache_err \
+ [is_substr $errorInfo "meaningless in RPC env"] 1
+
+ puts "\tRpc002.d1: Try to upgrade a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
+ error_check_good dbupgrade $stat 1
+ error_check_good dbupgrade_err \
+ [is_substr $errorInfo "meaningless in RPC env"] 1
+
+ error_check_good envclose [$env close] 0
+
+ exec $KILL $dpid
+}
diff --git a/bdb/test/rsrc001.tcl b/bdb/test/rsrc001.tcl
new file mode 100644
index 00000000000..6d76044f454
--- /dev/null
+++ b/bdb/test/rsrc001.tcl
@@ -0,0 +1,223 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc001.tcl,v 11.18 2001/01/18 06:41:03 krinsky Exp $
+#
+# Recno backing file test.
+# Try different patterns of adding records and make sure that the
+# corresponding backing file matches.
+proc rsrc001 { } {
+ source ./include.tcl
+
+ puts "Rsrc001: Basic recno backing file writeback tests"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ set rec1 "This is record 1"
+ set rec2 "This is record 2 This is record 2"
+ set rec3 "This is record 3 This is record 3 This is record 3"
+ set rec4 [replicate "This is record 4 " 512]
+
+ foreach testfile { "$testdir/rsrc001.db" "" } {
+
+ cleanup $testdir NULL
+
+ if { $testfile == "" } {
+ puts "Rsrc001: Testing with in-memory database."
+ } else {
+ puts "Rsrc001: Testing with disk-backed database."
+ }
+
+ # Create backing file for the empty-file test.
+ set oid1 [open $testdir/rsrc.txt w]
+ close $oid1
+
+ puts "\tRsrc001.a: Put to empty file."
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set txn ""
+
+ set ret [eval {$db put} $txn {1 $rec1}]
+ error_check_good put_to_empty $ret 0
+ error_check_good db_close [$db close] 0
+
+ # Now fill out the backing file and create the check file.
+ set oid1 [open $testdir/rsrc.txt a]
+ set oid2 [open $testdir/check.txt w]
+
+ # This one was already put into rsrc.txt.
+ puts $oid2 $rec1
+
+ # These weren't.
+ puts $oid1 $rec2
+ puts $oid2 $rec2
+ puts $oid1 $rec3
+ puts $oid2 $rec3
+ puts $oid1 $rec4
+ puts $oid2 $rec4
+ close $oid1
+ close $oid2
+
+ puts -nonewline "\tRsrc001.b: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (but we won't change it).
+ # Then close the file and diff the two files.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ set laststr $str
+ }
+ close $oid
+ set data [sanitize_record $data]
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ Rsrc001:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc001.c: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.d: Append by record number"
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record (set 2) $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.e: Put beyond end of file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ puts $oid ""
+ incr key
+ }
+ set rec "Last Record"
+ puts $oid $rec
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+
+ puts "\tRsrc001.f: Put beyond end of file, after reopen."
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Last record with reopen"
+ puts $oid $rec
+
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ puts "\tRsrc001.g:\
+ Put several beyond end of file, after reopen."
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Really really last record with reopen"
+ puts $oid ""
+ puts $oid ""
+ puts $oid ""
+ puts $oid $rec
+
+ incr key
+ incr key
+ incr key
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.h: Verify proper syncing of changes on close."
+ error_check_good Rsrc001:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ set ret [eval {$db put} $txn {-append $rec}]
+ # Don't bother checking return; we don't know what
+ # the key number is, and we'll pick up a failure
+ # when we compare.
+ }
+ error_check_good Rsrc001:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good Rsrc001:diff($testdir/{rsrc,check}.txt) $ret 0
+ }
+}
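+
+# Illustrative sketch (never called by the tests): the backing-file
+# round trip rsrc001 relies on, in its smallest form.  Assumes the
+# Berkeley DB Tcl package is loaded, as it is for these tests.
+proc rsrc001_example { dir } {
+	# Start with an empty backing file.
+	set src $dir/rsrc_example.txt
+	close [open $src w]
+
+	# Open a recno db backed by the text file and append one record.
+	set db [berkdb_open -create -mode 0644 -recno -source $src \
+	    $dir/rsrc_example.db]
+	$db put -append "hello backing file"
+
+	# Closing the db writes the record back out to the text file.
+	$db close
+	set fd [open $src]
+	set line [gets $fd]
+	close $fd
+	return $line	;# "hello backing file"
+}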
+
+# Strip CRs from a record.
+# Needed on Windows when a file is created as text (with CR/LF)
+# but read as binary (where CR is read as a separate character).
+proc sanitize_record { rec } {
+ source ./include.tcl
+
+ if { $is_windows_test != 1 } {
+ return $rec
+ }
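+	# \15 is the octal escape for a carriage return.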
+ regsub -all \15 $rec "" data
+ return $data
+}
diff --git a/bdb/test/rsrc002.tcl b/bdb/test/rsrc002.tcl
new file mode 100644
index 00000000000..d3b45c9a7f3
--- /dev/null
+++ b/bdb/test/rsrc002.tcl
@@ -0,0 +1,65 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc002.tcl,v 11.11 2000/11/29 15:01:06 sue Exp $
+#
+# Recno backing file test #2: test of set_re_delim.
+# Specify a backing file with colon-delimited records,
+# and make sure they are correctly interpreted.
+proc rsrc002 { } {
+ source ./include.tcl
+
+ puts "Rsrc002: Alternate variable-length record delimiters."
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ foreach testfile { "$testdir/rsrc002.db" "" } {
+
+ cleanup $testdir NULL
+
+ # Create the starting files
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ puts -nonewline $oid1 "ostrich:emu:kiwi:moa:cassowary:rhea:"
+ puts -nonewline $oid2 "ostrich:emu:kiwi:penguin:cassowary:rhea:"
+ close $oid1
+ close $oid2
+
+ if { $testfile == "" } {
+ puts "Rsrc002: Testing with in-memory database."
+ } else {
+ puts "Rsrc002: Testing with disk-backed database."
+ }
+
+ puts "\tRsrc002.a: Read file, verify correctness."
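+		# -delim 58 sets the record delimiter to ASCII 58, a colon.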
+ set db [eval {berkdb_open -create -mode 0644 -delim 58 \
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+		# Read the first records and verify them.  Then overwrite
+		# one record, close the file, and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -first]
+ error_check_good get_first $rec [list [list 1 "ostrich"]]
+ set rec [$dbc get -next]
+ error_check_good get_next $rec [list [list 2 "emu"]]
+
+ puts "\tRsrc002.b: Write record, verify correctness."
+
+ eval {$dbc get -set 4}
+ set ret [$dbc put -current "penguin"]
+ error_check_good dbc_put $ret 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ error_check_good \
+ Rsrc002:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+ }
+}
diff --git a/bdb/test/rsrc003.tcl b/bdb/test/rsrc003.tcl
new file mode 100644
index 00000000000..c93b3bbde12
--- /dev/null
+++ b/bdb/test/rsrc003.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc003.tcl,v 11.1 2000/11/29 18:28:49 sue Exp $
+#
+# Recno backing file test.
+# Try different patterns of adding records and make sure that the
+# corresponding backing file matches.
+proc rsrc003 { } {
+ source ./include.tcl
+ global fixed_len
+
+ puts "Rsrc003: Basic recno backing file writeback tests fixed length"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ #
+ # Then run with big fixed-length records
+ set rec1 "This is record 1"
+ set rec2 "This is record 2"
+ set rec3 "This is record 3"
+ set bigrec1 [replicate "This is record 1 " 512]
+ set bigrec2 [replicate "This is record 2 " 512]
+ set bigrec3 [replicate "This is record 3 " 512]
+
+ set orig_fixed_len $fixed_len
+ set rlist {
+ {{$rec1 $rec2 $rec3} "small records" }
+ {{$bigrec1 $bigrec2 $bigrec3} "large records" }}
+
+ foreach testfile { "$testdir/rsrc003.db" "" } {
+
+ foreach rec $rlist {
+ cleanup $testdir NULL
+
+ set recs [lindex $rec 0]
+ set msg [lindex $rec 1]
+ # Create the starting files
+ # Note that for the rest of the test, we are going
+ # to append a LF when we 'put' via DB to maintain
+ # file structure and allow us to use 'gets'.
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ foreach record $recs {
+ set r [subst $record]
+ set fixed_len [string length $r]
+ puts $oid1 $r
+ puts $oid2 $r
+ }
+ close $oid1
+ close $oid2
+
+ set reclen [expr $fixed_len + 1]
+ if { $reclen > [string length $rec1] } {
+ set repl 512
+ } else {
+ set repl 2
+ }
+ if { $testfile == "" } {
+ puts \
+"Rsrc003: Testing with in-memory database with $msg."
+ } else {
+ puts \
+"Rsrc003: Testing with disk-backed database with $msg."
+ }
+
+ puts -nonewline \
+ "\tRsrc003.a: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (don't change it).
+ # Then close the file and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ append str \12
+ set laststr $str
+ }
+ close $oid
+ set data [sanitize_record $data]
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ diff1($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc003.b: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "This is New Record $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff2($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc003.c: Append by record number"
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 2) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff3($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts \
+"\tRsrc003.d: Verify proper syncing of changes on close."
+ error_check_good Rsrc003:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 3) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ set ret [eval {$db put} $txn {-append $rec}]
+ # Don't bother checking return;
+ # we don't know what
+ # the key number is, and we'll pick up a failure
+ # when we compare.
+ }
+ error_check_good Rsrc003:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff5($testdir/{rsrc,check}.txt) $ret 0
+ }
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
+
diff --git a/bdb/test/sdb001.tcl b/bdb/test/sdb001.tcl
new file mode 100644
index 00000000000..938b6c10c6d
--- /dev/null
+++ b/bdb/test/sdb001.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb001.tcl,v 11.12 2000/08/25 14:21:52 sue Exp $
+#
+# Sub DB Test 1 {access method}
+# Test non-subdb and subdb operations
+# Test naming (filenames begin with -)
+# Test existence (cannot create subdb of same name with -excl)
+proc subdb001 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Subdb001: $method ($args) subdb and non-subdb tests"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdb001.db
+ set subdb subdb0
+ cleanup $testdir NULL
+ puts "\tSubdb001.a: Non-subdb database and subdb operations"
+ #
+ # Create a db with no subdbs. Add some data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ puts "\tSubdb001.a.0: Create db, add data, close, try subdb"
+ set db [eval {berkdb_open -create -truncate -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+ while { [gets $did str] != -1 && $count < 5 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+ #
+ # Create a db with no subdbs. Add no data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ set testfile $testdir/subdb001a.db
+ puts "\tSubdb001.a.1: Create db, close, try subdb"
+ set db [eval {berkdb_open -create -truncate -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping remainder of test for method $method"
+ return
+ }
+
+ #
+ # Test naming, db and subdb names beginning with -.
+ #
+ puts "\tSubdb001.b: Naming"
+ set cwd [pwd]
+ cd $testdir
+ set testfile1 -subdb001.db
+ set subdb -subdb
+ puts "\tSubdb001.b.0: Create db and subdb with -name, no --"
+ set ret [catch {eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile1 $subdb}} db]
+ error_check_bad dbopen $ret 0
+ puts "\tSubdb001.b.1: Create db and subdb with -name, with --"
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod -- $testfile1 $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ cd $cwd
+
+ #
+ # Create 1 db with 1 subdb. Try to create another subdb of
+ # the same name. Should fail.
+ #
+ puts "\tSubdb001.c: Existence check"
+ set testfile $testdir/subdb001c.db
+ set subdb subdb
+ set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [catch {eval {berkdb_open_noerr -create -excl -mode 0644} \
+ $args {$omethod $testfile $subdb}} db1]
+ error_check_bad dbopen $ret 0
+ error_check_good db_close [$db close] 0
+
+ return
+}
diff --git a/bdb/test/sdb002.tcl b/bdb/test/sdb002.tcl
new file mode 100644
index 00000000000..11547195c02
--- /dev/null
+++ b/bdb/test/sdb002.tcl
@@ -0,0 +1,167 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb002.tcl,v 11.20 2000/09/20 13:22:04 sue Exp $
+#
+# Sub DB Test 2 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Close file, reopen, do retrieve and re-verify.
+# Then repeat using an environment.
+proc subdb002 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+
+ puts "Subdb002: $method ($largs) basic subdb tests"
+ set testfile $testdir/subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile NULL
+
+ cleanup $testdir NULL
+ set env [berkdb env -create -mode 0644 -txn -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ puts "Subdb002: $method ($largs) basic subdb tests in an environment"
+
+ # We're in an env--use default path to database rather than specifying
+ # it explicitly.
+ set testfile subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile $env
+ error_check_good env_close [$env close] 0
+}
+
+proc subdb002_body { method omethod nentries largs testfile env } {
+ source ./include.tcl
+
+ # Create the database and open the dictionary
+ set subdb subdb0
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_open berkdb_open_noerr
+ } else {
+ set sdb002_open berkdb_open
+ }
+
+ if { $env == "NULL" } {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {$omethod $testfile $subdb}} db]
+ } else {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {-env $env $omethod $testfile $subdb}} db]
+ }
+
+ #
+ # If -queue method, we need to make sure that trying to
+ # create a subdb fails.
+ if { [is_queue $method] == 1 } {
+ error_check_bad dbopen $ret 0
+ puts "Subdb002: skipping remainder of test for method $method"
+ return
+ }
+
+ error_check_good dbopen $ret 0
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb002_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb002.check
+ }
+ puts "\tSubdb002.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tSubdb002.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdb002.d: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for Subdb002; keys and data are identical
+proc subdb002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb002_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/sdb003.tcl b/bdb/test/sdb003.tcl
new file mode 100644
index 00000000000..32bb93d5236
--- /dev/null
+++ b/bdb/test/sdb003.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb003.tcl,v 11.17 2000/08/25 14:21:52 sue Exp $
+#
+# Sub DB Test 3 {access method}
+# Use the first nentries entries from the dictionary as subdb names.
+# Insert each entry as the name of a subdatabase, with a short run of
+# dictionary words as the key/data pairs inside it.
+# After all are entered, retrieve all; compare output to original.
+# Close file, reopen, do retrieve and re-verify.
+proc subdb003 { method {nentries 1000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb003: skipping for method $method"
+ return
+ }
+
+ puts "Subdb003: $method ($args) many subdb tests"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdb003.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set fcount 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb003_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set ndataent 10
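+	# Each subdb holds only the first $ndataent dictionary words as its
+	# key/data pairs; the subdb names themselves come from the outer
+	# loop over the dictionary below.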
+ set fdid [open $dict]
+ while { [gets $fdid str] != -1 && $fcount < $nentries } {
+ set subdb $str
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $ndataent } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ incr fcount
+
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $ndataent} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $ndataent $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ if { [expr $fcount % 100] == 0 } {
+ puts -nonewline "$fcount "
+ flush stdout
+ }
+ }
+ puts ""
+}
+
+# Check function for Subdb003; keys and data are identical
+proc subdb003.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb003_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/sdb004.tcl b/bdb/test/sdb004.tcl
new file mode 100644
index 00000000000..fb63f9d6d1d
--- /dev/null
+++ b/bdb/test/sdb004.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb004.tcl,v 11.14 2000/08/25 14:21:53 sue Exp $
+#
+# SubDB Test 4 {access method}
+# Create 1 db with many large subdbs, using each file's contents as its subdb name.
+# Take the source files and dbtest executable and enter their names as the
+# key with their contents as data. After all are entered, retrieve all;
+# compare output to original. Close file, reopen, do retrieve and re-verify.
+proc subdb004 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb004: skipping for method $method"
+ return
+ }
+
+ puts "Subdb004: $method ($args) \
+ filecontents=subdbname filename=key filecontents=data pairs"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdb004.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir NULL
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb004_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc subdb004.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [glob ../*/*.c ./libdb.so.3.0 ./libtool ./libtool.exe]
+ set fcount [llength $file_list]
+
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $fcount} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ }
+ puts "\tSubdb004.a: Set/Check each subdb"
+ foreach f $file_list {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ set subdb $data
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+
+ error_check_good Subdb004:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ # puts "\tSubdb004.b: dump file"
+ dump_bin_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ }
+
+ #
+ # Now for each file, check that the subdb name is the same
+ # as the data in that subdb and that the filename is the key.
+ #
+ puts "\tSubdb004.b: Compare subdb names with key/data"
+ set db [berkdb_open -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get -first] } { [llength $d] != 0 } \
+ {set d [$c get -next] } {
+ set subdbname [lindex [lindex $d 0] 0]
+ set subdb [berkdb_open $testfile $subdbname]
+		error_check_good dbopen [is_valid_db $subdb] TRUE
+
+ # Output the subdb name
+ set ofid [open $t3 w]
+ fconfigure $ofid -translation binary
+ set subdbname [string trimright $subdbname \0]
+ puts -nonewline $ofid $subdbname
+ close $ofid
+
+ # Output the data
+ set subc [eval {$subdb cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $subc $subdb] TRUE
+ set d [$subc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set key [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+
+ set ofid [open $t1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $key $t1
+ $checkfunc $key $t3
+
+ error_check_good Subdb004:diff($t3,$t1) \
+ [filecmp $t3 $t1] 0
+ error_check_good curs_close [$subc close] 0
+ error_check_good db_close [$subdb close] 0
+ }
+ error_check_good curs_close [$c close] 0
+ error_check_good db_close [$db close] 0
+
+ if { [is_record_based $method] != 1 } {
+ fileremove $t2.tmp
+ }
+}
+
+# Check function for subdb004; key should be file name; data should be contents
+proc subdb004.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Subdb004:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc subdb004_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+ set fname $names($binfile)
+ error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+ error_check_good Subdb004:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/bdb/test/sdb005.tcl b/bdb/test/sdb005.tcl
new file mode 100644
index 00000000000..22e4083c46c
--- /dev/null
+++ b/bdb/test/sdb005.tcl
@@ -0,0 +1,109 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb005.tcl,v 11.12 2000/08/25 14:21:53 sue Exp $
+#
+# Test cursor operations between subdbs.
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc subdb005 {method {nentries 100} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb005: skipping for method $method"
+ return
+ }
+
+ puts "Subdb005: $method ( $args ) subdb cursor operations test"
+ set txn ""
+ cleanup $testdir NULL
+ set psize 8192
+ set testfile $testdir/subdb005.db
+ set duplist {-1 -1 -1 -1 -1}
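+	# (A duplicate count of -1 asks subdb_build, in sdbutils.tcl, to
+	# load unique, non-duplicated keys into each subdb.)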
+ build_all_subdb \
+ $testfile [list $method] [list $psize] $duplist $nentries $args
+ set numdb [llength $duplist]
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb005.a: Cursor ops - first/prev and last/next"
+ for {set i 0} {$i < $numdb} {incr i} {
+ set db [berkdb_open -unknown $testfile sub$i.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set db_handle($i) $db
+ # Used in 005.c test
+ lappend subdbnames sub$i.db
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set d [$dbc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+
+ # Used in 005.b test
+ set db_key($i) [lindex [lindex $d 0] 0]
+
+ set d [$dbc get -prev]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ set d [$dbc get -last]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set d [$dbc get -next]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ }
+ #
+ # Get a key from each subdb and try to get this key in a
+ # different subdb. Make sure it fails
+ #
+ puts "\tSubdb005.b: Get keys in different subdb's"
+ for {set i 0} {$i < $numdb} {incr i} {
+ set n [expr $i + 1]
+ if {$n == $numdb} {
+ set n 0
+ }
+ set db $db_handle($i)
+ if { [is_record_based $method] == 1 } {
+ set d [$db get -recno $db_key($n)]
+ error_check_good \
+ db_get [expr [llength $d] == 0] 1
+ } else {
+ set d [$db get $db_key($n)]
+ error_check_good db_get [expr [llength $d] == 0] 1
+ }
+ }
+
+ #
+ # Clean up
+ #
+ for {set i 0} {$i < $numdb} {incr i} {
+ error_check_good db_close [$db_handle($i) close] 0
+ }
+
+ #
+ # Check contents of DB for subdb names only. Makes sure that
+ # every subdbname is there and that nothing else is there.
+ #
+ puts "\tSubdb005.c: Check DB is read-only"
+ error_check_bad dbopen [catch \
+ {berkdb_open_noerr -unknown $testfile} ret] 0
+
+ puts "\tSubdb005.d: Check contents of DB for subdb names only"
+ set db [berkdb_open -unknown -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set subdblist [$db get -glob *]
+ foreach kd $subdblist {
+ # subname also used in subdb005.e,f below
+ set subname [lindex $kd 0]
+ set i [lsearch $subdbnames $subname]
+ error_check_good subdb_search [expr $i != -1] 1
+ set subdbnames [lreplace $subdbnames $i $i]
+ }
+ error_check_good subdb_done [llength $subdbnames] 0
+
+ error_check_good db_close [$db close] 0
+ return
+}
diff --git a/bdb/test/sdb006.tcl b/bdb/test/sdb006.tcl
new file mode 100644
index 00000000000..70dee5c7343
--- /dev/null
+++ b/bdb/test/sdb006.tcl
@@ -0,0 +1,130 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb006.tcl,v 11.12 2000/09/20 13:22:03 sue Exp $
+#
+# We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# everything else does as well. We'll create test databases called
+# sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+# describes the duplication -- duplicates are of the form 0, N, 2N, 3N, ...
+# where N is the number of the database. Primary.db is the primary database,
+# and sub0.db is the database that has no matching duplicates. All of
+# these are within a single database.
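+# (So, for instance, sub3.db's duplicate data values are 0000, 0003,
+# 0006, and so on.)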
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc subdb006 {method {nentries 100} args } {
+ source ./include.tcl
+ global rand_init
+
+ # NB: these flags are internal only, ok
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "\tSubdb006 skipping for method $method."
+ return
+ }
+
+ berkdb srand $rand_init
+
+ foreach opt {" -dup" " -dupsort"} {
+ append args $opt
+
+ puts "Subdb006: $method ( $args ) Intra-subdb join"
+ set txn ""
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb006.a: Intra-subdb join"
+
+ cleanup $testdir NULL
+ set testfile $testdir/subdb006.db
+
+ set psize [list 8192]
+ set duplist {0 50 25 16 12}
+ set numdb [llength $duplist]
+ build_all_subdb $testfile [list $method] $psize \
+ $duplist $nentries $args
+
+ # Build the primary
+ puts "Subdb006: Building the primary database $method"
+ set oflags "-create -mode 0644 [conv $omethod \
+ [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags $testfile primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ set key [format "%04d" $i]
+ set ret [$db put $key stub]
+ error_check_good "primary put" $ret 0
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join_subdb $testfile primary.db "1 0" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "2 0" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "3 0" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4 0" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "2" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "3" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3 4" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "2 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "1 3" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "3 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "1 4" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "2 4" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3 4" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "0 2 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 0" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 0 1" $str
+
+ close $did
+ }
+}
diff --git a/bdb/test/sdb007.tcl b/bdb/test/sdb007.tcl
new file mode 100644
index 00000000000..6b56fd411dd
--- /dev/null
+++ b/bdb/test/sdb007.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb007.tcl,v 11.13 2000/12/11 17:24:55 sue Exp $
+#
+# Sub DB Test 7 {access method}
+# Use the first 10,000 entries from the dictionary, spread across the subdbs.
+# Use a different page size for every subdb.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Close file, reopen, do retrieve and re-verify.
+proc subdb007 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb007: skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Subdb007: skipping for specific pagesizes"
+ return
+ }
+
+ puts "Subdb007: $method ($args) subdb tests with different pagesizes"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdb007.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ cleanup $testdir NULL
+
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb007_recno.check
+ } else {
+ set checkfunc subdb007.check
+ }
+ puts "\tSubdb007.a: create subdbs of different page sizes"
+ set psize {8192 4096 2048 1024 512}
+ set nsubdbs [llength $psize]
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set newent [expr $nentries / $nsubdbs]
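+	# (With the default 10,000 entries and 5 page sizes, each subdb
+	# gets 2,000 entries.)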
+ build_all_subdb $testfile [list $method] $psize $duplist $newent $args
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ puts "\tSubdb007.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ # (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set beg [expr $subdb * $newent]
+ incr beg
+ set end [expr $beg + $newent - 1]
+ filehead $end $dict $t3 $beg
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb007:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb007.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb007:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdb007.d: sub$subdb.db:\
+ close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb007:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+}
+
+# Check function for Subdb007; keys and data are identical
+proc subdb007.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb007_recno.check { key data } {
+	global dict
+	global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/sdb008.tcl b/bdb/test/sdb008.tcl
new file mode 100644
index 00000000000..b005f00931a
--- /dev/null
+++ b/bdb/test/sdb008.tcl
@@ -0,0 +1,151 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb008.tcl,v 11.14 2000/08/25 14:21:53 sue Exp $
+#
+# Sub DB Test 8 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Use a different or random lorder for each subdb.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Close file, reopen, do retrieve and re-verify.
+proc subdb008 { method {nentries 10000} args } {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb008: skipping for method $method"
+ return
+ }
+
+ berkdb srand $rand_init
+
+ puts "Subdb008: $method ($args) subdb lorder tests"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdb008.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ cleanup $testdir NULL
+
+ set txn ""
+ set pflags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb008_recno.check
+ } else {
+ set checkfunc subdb008.check
+ }
+ set nsubdbs 4
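+	# A lorder of 1234 selects little-endian byte order; 4321 selects
+	# big-endian.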
+ set lo [list 4321 1234]
+ puts "\tSubdb008.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ set subdb sub$i.db
+ if { $i >= [llength $lo]} {
+ set r [berkdb random_int 0 1]
+ set order [lindex $lo $r]
+ } else {
+ set order [lindex $lo $i]
+ }
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $order $omethod $testfile $subdb}]
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set gflags "-recno"
+ set key [expr $i * $nentries]
+ set key [expr $key + $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ puts "\tSubdb008.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ # (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid [expr $subdb * $nentries + $i]
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb008:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb008.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb008:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdb008.d: sub$subdb.db:\
+ close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb008:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+}
+
+# Check function for Subdb008; keys and data are identical
+proc subdb008.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb008_recno.check { key data } {
+	global dict
+	global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/sdb009.tcl b/bdb/test/sdb009.tcl
new file mode 100644
index 00000000000..060bea643bb
--- /dev/null
+++ b/bdb/test/sdb009.tcl
@@ -0,0 +1,77 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb009.tcl,v 11.4 2000/08/25 14:21:53 sue Exp $
+#
+# Subdatabase Test 9 (replacement)
+# Test the DB->rename method.
+proc subdb009 { method args } {
+ global errorCode
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Subdb009: $method ($args): Test of DB->rename()"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb009: Skipping for method $method."
+ return
+ }
+
+ set file $testdir/subdb009.db
+ set oldsdb OLDDB
+ set newsdb NEWDB
+
+ # Make sure we're starting from a clean slate.
+ cleanup $testdir NULL
+ error_check_bad "$file exists" [file exists $file] 1
+
+ puts "\tSubdb009.a: Create/rename file"
+ puts "\t\tSubdb009.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $file $oldsdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# The nature of the key and data is unimportant; use a numeric key
+	# so record-based methods don't need special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tSubdb009.a.2: rename"
+ error_check_good rename_file [eval {berkdb dbrename} $file \
+ $oldsdb $newsdb] 0
+
+ puts "\t\tSubdb009.a.3: check"
+ # Open again with create to make sure we've really completely
+ # disassociated the subdb from the old name.
+ set odb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $file $oldsdb]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ set odbt [$odb get $key]
+ error_check_good odb_close [$odb close] 0
+
+ set ndb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $file $newsdb]
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+ set ndbt [$ndb get $key]
+ error_check_good ndb_close [$ndb close] 0
+
+	# The DBT from the "old" database should be empty; the one from
+	# the "new" database should not be.
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
+
+ # Now there's both an old and a new. Rename the "new" to the "old"
+ # and make sure that fails.
+ puts "\tSubdb009.b: Make sure rename fails instead of overwriting"
+ set ret [catch {eval {berkdb dbrename} $file $oldsdb $newsdb} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
+
+ puts "\tSubdb009 succeeded."
+}
diff --git a/bdb/test/sdb010.tcl b/bdb/test/sdb010.tcl
new file mode 100644
index 00000000000..6bec78d372b
--- /dev/null
+++ b/bdb/test/sdb010.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb010.tcl,v 11.4 2000/08/25 14:21:53 sue Exp $
+#
+# Subdatabase Test 10 {access method}
+# Test of dbremove
+proc subdb010 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Subdb010: Test of DB->remove()"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb010: Skipping for method $method."
+ return
+ }
+
+ cleanup $testdir NULL
+
+ set testfile $testdir/subdb010.db
+ set testdb DATABASE
+
+ set db [eval {berkdb_open -create -truncate -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good file_exists_before [file exists $testfile] 1
+ error_check_good db_remove [berkdb dbremove $testfile $testdb] 0
+
+ # File should still exist.
+ error_check_good file_exists_after [file exists $testfile] 1
+
+ # But database should not.
+ set ret [catch {eval berkdb_open $omethod $args $testfile $testdb} res]
+	error_check_bad open_failed $ret 0
+ error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1
+
+ puts "\tSubdb010 succeeded."
+}
diff --git a/bdb/test/sdbscript.tcl b/bdb/test/sdbscript.tcl
new file mode 100644
index 00000000000..1b099520e88
--- /dev/null
+++ b/bdb/test/sdbscript.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbscript.tcl,v 11.7 2000/04/21 18:36:23 krinsky Exp $
+#
+# Usage: subdbscript testfile subdbnumber factor
+# testfile: name of DB itself
+# subdbnumber: n, subdb indicator, of form sub$n.db
+# factor: delete every (factor * k + n)'th entry from my subdb.
+#
+# I.e., if factor is 10 and n is 0, remove entries 0, 10, 20, ...;
+# if factor is 10 and n is 1, remove entries 1, 11, 21, ...
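+#
+# For example, sdbtest002 runs one copy of this script per subdb, along
+# the lines of:
+#	$tclsh_path sdbscript.tcl subdbtest002.db 2 6
+# which walks sub2.db with a cursor and deletes entries 2, 8, 14, ...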
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "subdbscript testfile subdbnumber factor"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set testfile [lindex $argv 0]
+set n [ lindex $argv 1 ]
+set factor [ lindex $argv 2 ]
+
+set db [berkdb_open -unknown $testfile sub$n.db]
+error_check_good db_open [is_valid_db $db] TRUE
+
+set dbc [$db cursor]
+error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+set i 1
+for {set d [$dbc get -first]} {[llength $d] != 0} {set d [$dbc get -next]} {
+ set x [expr $i - $n]
+ if { $x >= 0 && [expr $x % $factor] == 0 } {
+ puts "Deleting $d"
+ error_check_good dbc_del [$dbc del] 0
+ }
+ incr i
+}
+error_check_good db_close [$db close] 0
+
+exit
diff --git a/bdb/test/sdbtest001.tcl b/bdb/test/sdbtest001.tcl
new file mode 100644
index 00000000000..e3ff2b032d3
--- /dev/null
+++ b/bdb/test/sdbtest001.tcl
@@ -0,0 +1,133 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbtest001.tcl,v 11.13 2000/08/25 14:21:53 sue Exp $
+#
+# Sub DB All-Method Test 1
+# Make several subdb's of different access methods all in one DB.
+# Rotate methods and repeat [#762].
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Close file, reopen, do retrieve and re-verify.
+proc subdbtest001 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest001: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest001.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ lappend method_list [list "-rrecno" "-rbtree" "-hash" "-recno" "-btree"]
+ lappend method_list [list "-recno" "-hash" "-btree" "-rbtree" "-rrecno"]
+ lappend method_list [list "-btree" "-recno" "-rbtree" "-rrecno" "-hash"]
+ lappend method_list [list "-hash" "-recno" "-rbtree" "-rrecno" "-btree"]
+ lappend method_list [list "-rbtree" "-hash" "-btree" "-rrecno" "-recno"]
+ lappend method_list [list "-rrecno" "-recno"]
+ lappend method_list [list "-recno" "-rrecno"]
+ lappend method_list [list "-hash" "-dhash"]
+ lappend method_list [list "-dhash" "-hash"]
+ lappend method_list [list "-rbtree" "-btree" "-dbtree" "-ddbtree"]
+ lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"]
+ lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"]
+ lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"]
+ foreach methods $method_list {
+ cleanup $testdir NULL
+ puts "\tSubdbtest001.a: create subdbs of different access methods:"
+ puts "\tSubdbtest001.a: $methods"
+ set psize {8192 4096}
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set newent [expr $nentries / $nsubdbs]
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest001_recno.check
+ } else {
+ set checkfunc subdbtest001.check
+ }
+
+ puts "\tSubdbtest001.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the
+ # dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ # filehead uses 1-based line numbers
+ set beg [expr $subdb * $newent]
+ incr beg
+ set end [expr $beg + $newent - 1]
+ filehead $end $dict $t3 $beg
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ }
+}
+
+# Check function for Subdbtest001; keys and data are identical
+proc subdbtest001.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest001_recno.check { key data } {
+	global dict
+	global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/sdbtest002.tcl b/bdb/test/sdbtest002.tcl
new file mode 100644
index 00000000000..b8bad4e70e1
--- /dev/null
+++ b/bdb/test/sdbtest002.tcl
@@ -0,0 +1,163 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbtest002.tcl,v 11.19 2000/08/25 14:21:53 sue Exp $
+#
+# Sub DB All-Method Test 2
+# Make several subdb's of different access methods all in one DB.
+# Fork off several child procs, each of which manipulates one subdb; when
+# they are finished, verify the contents of the databases.
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Close file, reopen, do retrieve and re-verify.
+proc subdbtest002 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest002: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest002.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ set methods \
+ [list "-rbtree" "-recno" "-btree" "-btree" "-recno" "-rbtree"]
+ cleanup $testdir NULL
+ puts "\tSubdbtest002.a: create subdbs of different access methods:"
+ puts "\t\t$methods"
+ set psize {8192 4096}
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set newent [expr $nentries / $nsubdbs]
+
+ #
+ # XXX We need dict sorted to figure out what was deleted
+ # since things are stored sorted in the btree.
+ #
+ filesort $dict $t4
+ set dictorig $dict
+ set dict $t4
+
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ set pidlist ""
+ puts "\tSubdbtest002.b: create $nsubdbs procs to delete some keys"
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ puts "$tclsh_path\
+ $test_path/sdbscript.tcl $testfile \
+ $subdb $nsubdbs >& $testdir/subdb002.log.$subdb"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ sdbscript.tcl \
+ $testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &]
+ lappend pidlist $p
+ }
+ watch_procs 5
+
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest002_recno.check
+ } else {
+ set checkfunc subdbtest002.check
+ }
+
+ puts "\tSubdbtest002.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ error_check_good db_open [is_valid_db $db] TRUE
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+ #
+ # This is just so that t2 is there and empty
+ # since we are only appending below.
+ #
+ exec > $t2
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t4 r]
+ for {set i 1} {[gets $oid line] >= 0} {incr i} {
+ set farr($i) $line
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ # Sed uses 1-based line numbers
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ set beg [expr $subdb * $newent]
+ set beg [expr $beg + $i]
+ puts $oid $farr($beg)
+ }
+ }
+ close $oid
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ set dict $dictorig
+ return
+}
+
+# Check function for Subdbtest002; keys and data are identical
+proc subdbtest002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest002_recno.check { key data } {
+	global dict
+	global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/sdbutils.tcl b/bdb/test/sdbutils.tcl
new file mode 100644
index 00000000000..0cb33b28649
--- /dev/null
+++ b/bdb/test/sdbutils.tcl
@@ -0,0 +1,171 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbutils.tcl,v 11.9 2000/05/22 12:51:38 bostic Exp $
+#
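+# build_all_subdb creates one subdb (sub0.db, sub1.db, ...) per entry in
+# the dups list, cycling through the methods and psize lists, and loads
+# each one with subdb_build below.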
+proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} {
+ set nsubdbs [llength $dups]
+ set plen [llength $psize]
+ set mlen [llength $methods]
+ set savearg $dbargs
+ for {set i 0} {$i < $nsubdbs} { incr i } {
+ set m [lindex $methods [expr $i % $mlen]]
+ set dbargs $savearg
+ set p [lindex $psize [expr $i % $plen]]
+ subdb_build $dbname $nentries [lindex $dups $i] \
+ $i $m $p sub$i.db $dbargs
+ }
+}
+
+proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
+ source ./include.tcl
+
+ set dbargs [convert_args $method $dbargs]
+ set omethod [convert_method $method]
+
+ puts "Method: $method"
+
+ # Create the database and open the dictionary
+ set oflags "-create -mode 0644 $omethod \
+ -pagesize $psize $dbargs $name $subdb"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ if { $ndups >= 0 } {
+ puts "\tBuilding $method $name $subdb. \
+ $nkeys keys with $ndups duplicates at interval of $dup_interval"
+ }
+ if { $ndups < 0 } {
+ puts "\tBuilding $method $name $subdb. \
+ $nkeys unique keys of pagesize $psize"
+ #
+ # If ndups is < 0, we want unique keys in each subdb,
+ # so skip ahead in the dict by nkeys * iteration
+ #
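+		# (For example, with nkeys 100 and dup_interval 2, we skip
+		# the first 200 dictionary words.)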
+ for { set count 0 } \
+ { $count < [expr $nkeys * $dup_interval] } {
+ incr count} {
+ set ret [gets $did str]
+ if { $ret == -1 } {
+ break
+ }
+ }
+ }
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ set ret [$db put $str [chop_data $method $data]]
+ error_check_good put $ret 0
+ }
+
+ if { $ndups == 0 } {
+ set ret [$db put $str [chop_data $method NODUP]]
+ error_check_good put $ret 0
+ } elseif { $ndups < 0 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set num [expr $nkeys * $dup_interval]
+ set num [expr $num + $count + 1]
+ set ret [$db put $num [chop_data $method $str]]
+ set kvals($num) [pad_data $method $str]
+ error_check_good put $ret 0
+ } else {
+ set ret [$db put $str [chop_data $method $str]]
+ error_check_good put $ret 0
+ }
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
+
+proc do_join_subdb { db primary subdbs key } {
+ source ./include.tcl
+
+ puts "\tJoining: $subdbs on $key"
+
+ # Open all the databases
+ set p [berkdb_open -unknown $db $primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ foreach i $subdbs {
+ set jdb [berkdb_open -unknown $db sub$i.db]
+ error_check_good "sub$i.db open" [is_valid_db $jdb] TRUE
+
+ lappend jlist [list $jdb $key]
+ lappend dblist $jdb
+
+ }
+
+ set join_res [eval {$p get_join} $jlist]
+ set ndups [llength $join_res]
+
+ # Calculate how many dups we expect.
+ # We go through the list of indices. If we find a 0, then we
+	# expect 0 dups. Otherwise we combine the indices pairwise: if one
+	# divides the other we keep the larger, and if not we keep their
+	# least common multiple; the expected count is 50 divided by that
+	# running value.
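+	#
+	# For example, joining sub2.db and sub3.db: 2 and 3 are relatively
+	# prime, so the running value becomes 6 and we expect 50 / 6 = 8
+	# matching duplicates.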
+ set expected 50
+ set last 1
+ foreach n $subdbs {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
+
+ error_check_good number_of_dups:$subdbs $ndups $expected
+
+ #
+ # If we get here, we have the number expected, now loop
+ # through each and see if it is what we expected.
+ #
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set pair [lindex $join_res $i]
+ set k [lindex $pair 0]
+ foreach j $subdbs {
+ error_check_bad valid_dup:$j:$subdbs $j 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good \
+ valid_dup:$j:$subdbs [expr $kval % $j] 0
+ }
+ }
+
+ error_check_good close_primary [$p close] 0
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_subname { n } {
+ if { $n == 0 } {
+ return null.db;
+ } else {
+ return sub$n.db;
+ }
+}
diff --git a/bdb/test/sysscript.tcl b/bdb/test/sysscript.tcl
new file mode 100644
index 00000000000..1b7545e4c6b
--- /dev/null
+++ b/bdb/test/sysscript.tcl
@@ -0,0 +1,283 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sysscript.tcl,v 11.12 2000/05/22 12:51:38 bostic Exp $
+#
+# System integration test script.
+# This script runs a single process that tests the full functionality of
+# the system. The database under test contains nfiles files. Each process
+# randomly generates a key and some data. Both keys and data are bimodally
+# distributed between small keys (1-10 characters) and large keys (whose
+# average length is given by a command-line parameter).
+# The process then decides on a replication factor between 1 and nfiles.
+# It writes the key and data to that many files, tacking the ids of those
+# files onto the front of the data string. For example, let's say that
+# I randomly generate the key dog and data cat. Then I pick a replication
+# factor of 3. I pick 3 files from the set of n (say 1, 3, and 5). I then
+# rewrite the data as 1:3:5:cat. I begin a transaction, add the key/data
+# pair to each file and then commit. Notice that I may generate replication
+# of the form 1:3:3:cat in which case I simply add a duplicate to file 3.
+#
+# Usage: sysscript dir nfiles key_avg data_avg method
+#
+# dir: DB_HOME directory
+# nfiles: number of files in the set
+# key_avg: average big key size
+# data_avg: average big data size
+# method: access method
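+#
+# A run kicked off by the driving test looks something like (the file
+# count and average sizes here are illustrative):
+#	$tclsh_path sysscript.tcl $testdir 5 10 20 -btree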
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set alphabet "abcdefghijklmnopqrstuvwxyz"
+set mypid [pid]
+
+set usage "sysscript dir nfiles key_avg data_avg method"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+puts [concat "Argc: " $argc " Argv: " $argv]
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set nfiles [ lindex $argv 1 ]
+set key_avg [ lindex $argv 2 ]
+set data_avg [ lindex $argv 3 ]
+set method [ lindex $argv 4 ]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts "Beginning execution for $mypid"
+puts "$dir DB_HOME"
+puts "$nfiles files"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+
+flush stdout
+
+# Create local environment
+set dbenv [berkdb env -txn -home $dir]
+set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret]
+if {$err != 0} {
+ puts $ret
+ return
+}
+
+# Now open the files
+for { set i 0 } { $i < $nfiles } { incr i } {
+ set file test044.$i.db
+ set db($i) [berkdb open -env $dbenv $method $file]
+ set err [catch {error_check_bad $mypid:dbopen $db($i) NULL} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set err [catch {error_check_bad $mypid:dbopen [is_substr $db($i) \
+ error] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+}
+
+set record_based [is_record_based $method]
+while { 1 } {
+ # Decide if we're going to create a big key or a small key
+ # We give small keys a 70% chance.
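+	# (berkdb random_int 1 10 is below 8 for 7 of its 10 possible
+	# values.)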
+ if { [berkdb random_int 1 10] < 8 } {
+ set k [random_data 5 0 0 $record_based]
+ } else {
+ set k [random_data $key_avg 0 0 $record_based]
+ }
+ set data [chop_data $method [random_data $data_avg 0 0]]
+
+ set txn [$dbenv txn]
+ set err [catch {error_check_good $mypid:txn_begin [is_substr $txn \
+ $dbenv.txn] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+
+ # Open cursors
+ for { set f 0 } {$f < $nfiles} {incr f} {
+ set cursors($f) [$db($f) cursor -txn $txn]
+ set err [catch {error_check_good $mypid:cursor_open \
+ [is_substr $cursors($f) $db($f)] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set aborted 0
+
+ # Check to see if key is already in database
+ set found 0
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set r [$db($i) get -txn $txn $k]
+ set r [$db($i) get -txn $txn $k]
+ if { $r == "-1" } {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good $mypid:txn_abort \
+ [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ set found 2
+ break
+ } elseif { $r != "Key $k not found." } {
+ set found 1
+ break
+ }
+ }
+ switch $found {
+ 2 {
+ # Transaction aborted, no need to do anything.
+ }
+ 0 {
+ # Key was not found, decide how much to replicate
+ # and then create a list of that many file IDs.
+ set repl [berkdb random_int 1 $nfiles]
+ set fset ""
+ for { set i 0 } { $i < $repl } {incr i} {
+ set f [berkdb random_int 0 [expr $nfiles - 1]]
+ lappend fset $f
+ set data [chop_data $method $f:$data]
+ }
+
+ foreach i $fset {
+ set r [$db($i) put -txn $txn $k $data]
+ if {$r == "-1"} {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good \
+ $mypid:txn_abort [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ break
+ }
+ }
+ }
+ 1 {
+ # Key was found. Make sure that all the data values
+ # look good.
+ set f [zero_list $nfiles]
+ set data $r
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $f $fnum] == 0 } {
+ #set flag -set
+ set full [record $cursors($fnum) get -set $k]
+ } else {
+ #set flag -next
+ set full [record $cursors($fnum) get -next]
+ }
+ if {[llength $full] == 0} {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good \
+ $mypid:txn_abort [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ break
+ }
+ set err [catch {error_check_bad \
+ $mypid:curs_get($k,$data,$fnum,$flag) \
+ [string length $full] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [pad_data $method [lindex [lindex $full 0] 1]]
+ set err [catch {error_check_good \
+ $mypid:dbget_$fnum:key $key $k} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set err [catch {error_check_good \
+ $mypid:dbget_$fnum:data($k) $rec $data} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set f [lreplace $f $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+ if { $aborted == 0 } {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good $mypid:commit [$txn commit] \
+ 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+}
+
+# Close files
+for { set i 0 } { $i < $nfiles} { incr i } {
+ set r [$db($i) close]
+ set err [catch {error_check_good $mypid:db_close:$i $r 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+}
+
+# Close the environment
+$dbenv close
+
+puts "[timestamp] [pid] Complete"
+flush stdout
+
+filecheck $file 0
diff --git a/bdb/test/test.tcl b/bdb/test/test.tcl
new file mode 100644
index 00000000000..7678f2fcbfb
--- /dev/null
+++ b/bdb/test/test.tcl
@@ -0,0 +1,1297 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test.tcl,v 11.114 2001/01/09 21:28:52 sue Exp $
+
+source ./include.tcl
+
+# Load DB's TCL API.
+load $tcllib
+
+if { [file exists $testdir] != 1 } {
+ file mkdir $testdir
+}
+
+global __debug_print
+global __debug_on
+global util_path
+
+#
+# Run one of the utilities to figure out the correct util_path. Most
+# systems use ., but QNX has a problem with execvp of shell scripts,
+# which causes it to break.
+#
+set stat [catch {exec ./db_printlog -?} ret]
+if { [string first "exec format error" $ret] != -1 } {
+ set util_path ./.libs
+} else {
+ set util_path .
+}
+set __debug_print 0
+set __debug_on 0
+
+# This is where the test numbering and parameters now live.
+source $test_path/testparams.tcl
+
+for { set i 1 } { $i <= $deadtests } {incr i} {
+ set name [format "dead%03d.tcl" $i]
+ source $test_path/$name
+}
+for { set i 1 } { $i <= $envtests } {incr i} {
+ set name [format "env%03d.tcl" $i]
+ source $test_path/$name
+}
+for { set i 1 } { $i <= $recdtests } {incr i} {
+ set name [format "recd%03d.tcl" $i]
+ source $test_path/$name
+}
+for { set i 1 } { $i <= $rpctests } {incr i} {
+ set name [format "rpc%03d.tcl" $i]
+ source $test_path/$name
+}
+for { set i 1 } { $i <= $rsrctests } {incr i} {
+ set name [format "rsrc%03d.tcl" $i]
+ source $test_path/$name
+}
+for { set i 1 } { $i <= $runtests } {incr i} {
+ set name [format "test%03d.tcl" $i]
+ # Test numbering may be sparse.
+ if { [file exists $test_path/$name] == 1 } {
+ source $test_path/$name
+ }
+}
+for { set i 1 } { $i <= $subdbtests } {incr i} {
+ set name [format "sdb%03d.tcl" $i]
+ source $test_path/$name
+}
+
+source $test_path/archive.tcl
+source $test_path/byteorder.tcl
+source $test_path/dbm.tcl
+source $test_path/hsearch.tcl
+source $test_path/join.tcl
+source $test_path/lock001.tcl
+source $test_path/lock002.tcl
+source $test_path/lock003.tcl
+source $test_path/log.tcl
+source $test_path/logtrack.tcl
+source $test_path/mpool.tcl
+source $test_path/mutex.tcl
+source $test_path/ndbm.tcl
+source $test_path/sdbtest001.tcl
+source $test_path/sdbtest002.tcl
+source $test_path/sdbutils.tcl
+source $test_path/testutils.tcl
+source $test_path/txn.tcl
+source $test_path/upgrade.tcl
+
+set dict $test_path/wordlist
+set alphabet "abcdefghijklmnopqrstuvwxyz"
+
+# Random number seed.
+global rand_init
+set rand_init 1013
+
+# Default record length and padding character for
+# fixed record length access method(s)
+set fixed_len 20
+set fixed_pad 0
+
+set recd_debug 0
+set log_log_record_types 0
+set ohandles {}
+
+# Set up any OS-specific values
+global tcl_platform
+set is_windows_test [is_substr $tcl_platform(os) "Win"]
+set is_hp_test [is_substr $tcl_platform(os) "HP-UX"]
+set is_qnx_test [is_substr $tcl_platform(os) "QNX"]
+
+# From here on out, test.tcl contains the procs that are used to
+# run all or part of the test suite.
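+#
+# A minimal interactive sketch (these procs are defined below; the
+# path and arguments shown are only illustrative):
+#	% source ../test/test.tcl
+#	% run_method -btree 1 3		;# tests 001-003 with btree
+#	% r recd			;# all recovery tests
+#	% run_std			;# standard regression run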
+
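+# run_am --
+#	Run only the access method (and subdb) tests, one tclsh process
+#	per test so memory use stays bounded.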
+proc run_am { } {
+ global runtests
+ source ./include.tcl
+
+ fileremove -f ALL.OUT
+
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't require
+ # so much memory.
+ foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests"
+ for { set j 1 } { $j <= $runtests } {incr j} {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_method -$i $j $j" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: [format "test%03d" $j] $i"
+ close $o
+ }
+ }
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ subdb -$i 0 1" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: subdb -$i test"
+ close $o
+ }
+ }
+}
+
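+# run_std --
+#	Run the standard regression suite: the infrastructure tests in
+#	test_list, the recovery and join tests, and the access method
+#	(and subdb) tests.  Flags: -m runs the access method tests only,
+#	-n displays the commands without running them, and -A is passed
+#	by run_all so that run_std skips its own start/end summary.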
+proc run_std { args } {
+ global runtests
+ global subdbtests
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set am_only 0
+ set std_only 1
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ A {
+ set std_only 0
+ }
+ m {
+ set am_only 1
+ puts "run_std: access method tests only."
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ if { $std_only == 1 } {
+ fileremove -f ALL.OUT
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ }
+
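+	# Each test_list entry pairs a display name with the argument
+	# that is handed to proc r below.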
+ set test_list {
+ {"environment" "env"}
+ {"archive" "archive"}
+ {"locking" "lock"}
+ {"logging" "log"}
+ {"memory pool" "mpool"}
+ {"mutex" "mutex"}
+ {"transaction" "txn"}
+ {"deadlock detection" "dead"}
+ {"subdatabase" "subdb_gen"}
+ {"byte-order" "byte"}
+ {"recno backing file" "rsrc"}
+ {"DBM interface" "dbm"}
+ {"NDBM interface" "ndbm"}
+ {"Hsearch interface" "hsearch"}
+ }
+
+ if { $am_only == 0 } {
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags $cmd" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test"
+ close $o
+ }
+ }
+
+ # Run recovery tests.
+ #
+ # XXX These too are broken into separate tclsh instantiations
+ # so we don't require so much memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ puts "Running recovery tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ r $rflags recd" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: recd test"
+ close $o
+ }
+
+ # Run join test
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ puts "Running join test"
+ foreach i "join1 join2 join3 join4 join5 join6" {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags $i" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $i test"
+ close $o
+ }
+ }
+ }
+
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't require
+ # so much memory.
+ foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests"
+ for { set j 1 } { $j <= $runtests } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_method -$i $j $j $display $run $o
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_method -$i $j $j $display $run" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: [format "test%03d" $j] $i"
+ close $o
+ }
+ }
+ }
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ subdb -$i $display $run" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: subdb -$i test"
+ close $o
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ # If running in the context of the larger 'run_all' we don't
+ # check for failure here either.
+ if { $run == 0 || $std_only == 0 } {
+ return
+ }
+
+ set failed 0
+ set o [open ALL.OUT r]
+ while { [gets $o line] >= 0 } {
+ if { [regexp {^FAIL} $line] != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
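+# r --
+#	Dispatch a single test group by name (archive, byte, dbm, dead,
+#	env, hsearch, join*, lock, log, mpool*, mutex, ndbm, recd, rpc,
+#	rsrc, subdb, txn or an access method name); the -n flag displays
+#	the commands without running them.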
+proc r { args } {
+ global envtests
+ global recdtests
+ global subdbtests
+ global deadtests
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set saveflags "--"
+ foreach f $flags {
+ switch $f {
+ n {
+ set display 1
+ set run 0
+ set saveflags "-n $saveflags"
+ }
+ }
+ }
+
+ if {[catch {
+ set l [ lindex $args 0 ]
+ switch $l {
+ archive {
+ if { $display } {
+ puts "eval archive [lrange $args 1 end]"
+ }
+ if { $run } {
+ check_handles
+ eval archive [lrange $args 1 end]
+ }
+ }
+ byte {
+ foreach method \
+ "-hash -btree -recno -queue -queueext -frecno" {
+ if { $display } {
+ puts "byteorder $method"
+ }
+ if { $run } {
+ check_handles
+ byteorder $method
+ }
+ }
+ }
+ dbm {
+ if { $display } {
+ puts "dbm"
+ }
+ if { $run } {
+ check_handles
+ dbm
+ }
+ }
+ dead {
+ for { set i 1 } { $i <= $deadtests } \
+ { incr i } {
+ if { $display } {
+ puts "eval dead00$i\
+ [lrange $args 1 end]"
+ }
+ if { $run } {
+ check_handles
+ eval dead00$i\
+ [lrange $args 1 end]
+ }
+ }
+ }
+ env {
+ for { set i 1 } { $i <= $envtests } {incr i} {
+ if { $display } {
+ puts "eval env00$i"
+ }
+ if { $run } {
+ check_handles
+ eval env00$i
+ }
+ }
+ }
+ hsearch {
+ if { $display } { puts "hsearch" }
+ if { $run } {
+ check_handles
+ hsearch
+ }
+ }
+ join {
+ eval r $saveflags join1
+ eval r $saveflags join2
+ eval r $saveflags join3
+ eval r $saveflags join4
+ eval r $saveflags join5
+ eval r $saveflags join6
+ }
+ join1 {
+ if { $display } { puts jointest }
+ if { $run } {
+ check_handles
+ jointest
+ }
+ }
+ joinbench {
+ puts "[timestamp]"
+ eval r $saveflags join1
+ eval r $saveflags join2
+ puts "[timestamp]"
+ }
+ join2 {
+ if { $display } { puts "jointest 512" }
+ if { $run } {
+ check_handles
+ jointest 512
+ }
+ }
+ join3 {
+ if { $display } {
+ puts "jointest 8192 0 -join_item"
+ }
+ if { $run } {
+ check_handles
+ jointest 8192 0 -join_item
+ }
+ }
+ join4 {
+ if { $display } { puts "jointest 8192 2" }
+ if { $run } {
+ check_handles
+ jointest 8192 2
+ }
+ }
+ join5 {
+ if { $display } { puts "jointest 8192 3" }
+ if { $run } {
+ check_handles
+ jointest 8192 3
+ }
+ }
+ join6 {
+ if { $display } { puts "jointest 512 3" }
+ if { $run } {
+ check_handles
+ jointest 512 3
+ }
+ }
+ lock {
+ if { $display } {
+ puts \
+ "eval locktest [lrange $args 1 end]"
+ }
+ if { $run } {
+ check_handles
+ eval locktest [lrange $args 1 end]
+ }
+ }
+ log {
+ if { $display } {
+ puts "eval logtest [lrange $args 1 end]"
+ }
+ if { $run } {
+ check_handles
+ eval logtest [lrange $args 1 end]
+ }
+ }
+ mpool {
+ eval r $saveflags mpool1
+ eval r $saveflags mpool2
+ eval r $saveflags mpool3
+ }
+ mpool1 {
+ if { $display } {
+ puts "eval mpool [lrange $args 1 end]"
+ }
+ if { $run } {
+ check_handles
+ eval mpool [lrange $args 1 end]
+ }
+ }
+ mpool2 {
+ if { $display } {
+ puts "eval mpool\
+ -mem system [lrange $args 1 end]"
+ }
+ if { $run } {
+ check_handles
+ eval mpool\
+ -mem system [lrange $args 1 end]
+ }
+ }
+ mpool3 {
+ if { $display } {
+ puts "eval mpool\
+ -mem private [lrange $args 1 end]"
+ }
+ if { $run } {
+ eval mpool\
+ -mem private [lrange $args 1 end]
+ }
+ }
+ mutex {
+ if { $display } {
+ puts "eval mutex [lrange $args 1 end]"
+ }
+ if { $run } {
+ check_handles
+ eval mutex [lrange $args 1 end]
+ }
+ }
+ ndbm {
+ if { $display } { puts ndbm }
+ if { $run } {
+ check_handles
+ ndbm
+ }
+ }
+ recd {
+ if { $display } { puts run_recds }
+ if { $run } {
+ check_handles
+ run_recds
+ }
+ }
+ rpc {
+ # RPC must be run as one unit due to server,
+ # so just print "r rpc" in the display case.
+ if { $display } { puts "r rpc" }
+ if { $run } {
+ check_handles
+ eval rpc001
+ check_handles
+ eval rpc002
+ if { [catch {run_rpcmethod -txn} ret]\
+ != 0 } {
+ puts $ret
+ }
+ foreach method \
+ "hash queue queueext recno frecno rrecno rbtree btree" {
+ if { [catch {run_rpcmethod \
+ -$method} ret] != 0 } {
+ puts $ret
+ }
+ }
+ }
+ }
+ rsrc {
+ if { $display } { puts "rsrc001\nrsrc002" }
+ if { $run } {
+ check_handles
+ rsrc001
+ check_handles
+ rsrc002
+ }
+ }
+ subdb {
+ eval r $saveflags subdb_gen
+
+ foreach method \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ check_handles
+ eval subdb -$method $display $run
+ }
+ }
+ subdb_gen {
+ if { $display } {
+ puts "subdbtest001 ; verify_dir"
+ puts "subdbtest002 ; verify_dir"
+ }
+ if { $run } {
+ check_handles
+ eval subdbtest001
+ verify_dir
+ check_handles
+ eval subdbtest002
+ verify_dir
+ }
+ }
+ txn {
+ if { $display } {
+ puts "txntest [lrange $args 1 end]"
+ }
+ if { $run } {
+ check_handles
+ eval txntest [lrange $args 1 end]
+ }
+ }
+
+ btree -
+ rbtree -
+ hash -
+ queue -
+ queueext -
+ recno -
+ frecno -
+ rrecno {
+ eval run_method [lindex $args 0] \
+ 1 0 $display $run [lrange $args 1 end]
+ }
+
+ default {
+ error \
+ "FAIL:[timestamp] r: $args: unknown command"
+ }
+ }
+ flush stdout
+ flush stderr
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp] r: $args: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
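+# run_method --
+#	Run tests test$start through test$stop for a single access
+#	method, using the per-test parameters from testparams.tcl, and
+#	verify any databases each test leaves behind.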
+proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global parms
+ global runtests
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $runtests
+ }
+ if { $run == 1 } {
+ puts $outfile "run_method: $method $start $stop $args"
+ }
+
+ if {[catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args"
+ puts $outfile " ; verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $args
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ # verify all databases the test leaves behind
+ verify_dir $testdir "" 1
+ if { $__debug_on != 0 } {
+ debug
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_method: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
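+# run_rpcmethod --
+#	Start berkeley_db_svc (locally, or via rsh on $rpc_server) and
+#	run the requested tests as RPC clients of that server; the
+#	server process is killed when the run completes or fails.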
+proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global parms
+ global runtests
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $runtests
+ }
+ puts "run_rpcmethod: $type $start $stop $largs"
+
+ set save_largs $largs
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/berkeley_db_svc -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRun_rpcmethod.a: starting server, pid $dpid"
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ set home [file tail $rpc_testdir]
+
+ set txn ""
+ set use_txn 0
+ if { [string first "txn" $type] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ if { $start == 1 } {
+ set ntxns 32
+ } else {
+ set ntxns $start
+ }
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ } else {
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1} }]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $name $type $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ exec $KILL $dpid
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_rpcmethod: $type $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ exec $KILL $dpid
+
+}
+
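+# run_rpcnoserver --
+#	Like run_rpcmethod, but does not start or kill berkeley_db_svc;
+#	it assumes a server is already available at $rpc_server.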
+proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global parms
+ global runtests
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $runtests
+ }
+ puts "run_rpcnoserver: $type $start $stop $largs"
+
+ set save_largs $largs
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set home [file tail $rpc_testdir]
+
+ set txn ""
+ set use_txn 0
+ if { [string first "txn" $type] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ if { $start == 1 } {
+ set ntxns 32
+ } else {
+ set ntxns $start
+ }
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ } else {
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1} }]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $name $type $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_rpcnoserver: $type $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+
+}
+
+#
+# Run each method test in its own, new environment. (As opposed to
+# run_envmethod1, which runs all of the tests in one environment.)
+#
+proc run_envmethod { type {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global parms
+ global runtests
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $runtests
+ }
+ puts "run_envmethod: $type $start $stop $largs"
+
+ set save_largs $largs
+ env_cleanup $testdir
+ set txn ""
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb env -create -mode 0644 \
+ -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $type $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $type $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+
+}
+
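+# subdb --
+#	Run the subdatabase tests (subdb001 through subdb$subdbtests)
+#	for a single access method.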
+proc subdb { method display run {outfile stdout} args} {
+ global subdbtests testdir
+ global parms
+
+ for { set i 1 } {$i <= $subdbtests} {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args;"
+ puts $outfile "verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ eval $name $method $parms($name) $args
+ verify_dir $testdir "" 1
+ }
+ flush stdout
+ flush stderr
+ }
+}
+
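+# run_recd --
+#	Run recovery tests recd$start through recd$stop for a single
+#	access method, each in its own tclsh process.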
+proc run_recd { method {start 1} {stop 0} args } {
+ global __debug_on
+ global __debug_print
+ global parms
+ global recdtests
+ global log_log_record_types
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $recdtests
+ }
+ puts "run_recd: $method $start $stop $args"
+
+ if {[catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ puts "[timestamp]"
+ set name [format "recd%03d" $i]
+ # By redirecting stdout to stdout, we make exec
+ # print output rather than simply returning it.
+ exec $tclsh_path << "source $test_path/test.tcl; \
+ set log_log_record_types $log_log_record_types; \
+ eval $name $method" >@ stdout
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ }
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_recd: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
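+# run_recds --
+#	Run the recovery tests for every access method, tracking which
+#	log record types were exercised (logtrack_init/logtrack_summary).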
+proc run_recds { } {
+ global log_log_record_types
+
+ set log_log_record_types 1
+ logtrack_init
+ foreach method \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ check_handles
+ if { [catch \
+ {run_recd -$method} ret ] != 0 } {
+ puts $ret
+ }
+ }
+ logtrack_summary
+ set log_log_record_types 0
+}
+
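+# run_all --
+#	Superset of run_std: the standard suite, plus the recovery,
+#	access method and subdb tests repeated at page sizes 512, 8192
+#	and 65536, plus the access method tests run inside a single
+#	environment (run_envmethod1).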
+proc run_all { args } {
+ global runtests
+ global subdbtests
+ source ./include.tcl
+
+ fileremove -f ALL.OUT
+
+ set exflgs [eval extractflags $args]
+ set flags [lindex $exflgs 1]
+ set display 1
+ set run 1
+ set am_only 0
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ m {
+ set am_only 1
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ #
+ # First run standard tests. Send in a -A to let run_std know
+ # that it is part of the "run_all" run, so that it doesn't
+ # print out start/end times.
+ #
+ lappend args -A
+ eval {run_std} $args
+
+ set test_pagesizes { 512 8192 65536 }
+ set args [lindex $exflgs 0]
+ set save_args $args
+
+ foreach pgsz $test_pagesizes {
+ set args $save_args
+ append args " -pagesize $pgsz"
+ if { $am_only == 0 } {
+ # Run recovery tests.
+ #
+ # XXX These too are broken into separate tclsh
+ # instantiations so we don't require so much
+ # memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ puts "Running recovery tests with pagesize $pgsz"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ r $rflags recd $args" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: recd test"
+ close $o
+ }
+ }
+
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so
+ # we don't require so much memory.
+ foreach i \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests with pagesize $pgsz"
+ for { set j 1 } { $j <= $runtests } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_method -$i $j $j $display \
+ $run $o $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_method -$i $j $j $display \
+ $run stdout $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: [format \
+ "test%03d" $j] $i"
+ close $o
+ }
+ }
+ }
+
+ #
+ # Run subdb tests with varying pagesizes too.
+ #
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ subdb -$i $display $run $o $args
+ close $o
+ }
+ if { $run == 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ subdb -$i $display $run stdout $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: subdb -$i test"
+ close $o
+ }
+ }
+ }
+ }
+ set args $save_args
+ #
+ # Run access method tests at default page size in one env.
+ #
+ foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests in an env"
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_envmethod1 -$i 1 $runtests $display \
+ $run $o $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_envmethod1 -$i 1 $runtests $display \
+ $run stdout $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: run_envmethod1 $i"
+ close $o
+ }
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ if { $run == 0 } {
+ return
+ }
+
+ set failed 0
+ set o [open ALL.OUT r]
+ while { [gets $o line] >= 0 } {
+ if { [regexp {^FAIL} $line] != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+#
+# Run method tests in one environment. (As opposed to run_envmethod
+# which runs each test in its own, new environment.)
+#
+proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global parms
+ global runtests
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $runtests
+ }
+ if { $run == 1 } {
+ puts "run_envmethod1: $method $start $stop $args"
+ }
+
+ set txn ""
+ if { $run == 1 } {
+ check_handles
+ env_cleanup $testdir
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ set env [eval {berkdb env -create -mode 0644 -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+ }
+
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args"
+ puts $outfile " ; verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $run == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod1: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+
+}
diff --git a/bdb/test/test001.tcl b/bdb/test/test001.tcl
new file mode 100644
index 00000000000..fa8e112d100
--- /dev/null
+++ b/bdb/test/test001.tcl
@@ -0,0 +1,157 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test001.tcl,v 11.17 2000/12/06 16:08:05 bostic Exp $
+#
+# DB Test 1 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Close file, reopen, do retrieve and re-verify.
+proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: $method ($args) $nentries equal key/data pairs"
+ if { $start != 0 } {
+ puts "\tStarting at $start"
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ set nentries [expr $nentries + $start]
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test001.check
+ }
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count $start
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for success
+ set ret [$db get -get_both $key [pad_data $method $str]]
+ error_check_good \
+ getboth $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for failure
+ set ret [$db get -get_both $key [pad_data $method BAD$str]]
+ error_check_good getbothBAD [llength $ret] 0
+
+ incr count
+ if { [expr $count + 1] == 0 } {
+ incr count
+ }
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i [expr $start + 1]} {$i <= $nentries} {set i [incr i]} {
+ if { $i == 0 } {
+ incr i
+ }
+ puts $oid $i
+ }
+ close $oid
+ } else {
+ set q q
+ filehead $nentries $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test001; the data should be the reverse of the key
+proc test001.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc test001_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/test002.tcl b/bdb/test/test002.tcl
new file mode 100644
index 00000000000..882240b77bb
--- /dev/null
+++ b/bdb/test/test002.tcl
@@ -0,0 +1,128 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test002.tcl,v 11.13 2000/08/25 14:21:53 sue Exp $
+#
+# DB Test 2 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and a fixed, medium length data string;
+# retrieve each. After all are entered, retrieve all; compare output
+# to original. Close file, reopen, do retrieve and re-verify.
+
+set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+
+proc test002 { method {nentries 10000} args } {
+ global datastr
+ global pad_datastr
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test002: $method ($args) $nentries key <fixed data> pairs"
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test002.db
+ set env NULL
+ } else {
+ set testfile test002.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ set pad_datastr [pad_data $method $datastr]
+ puts "\tTest002.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+
+ error_check_good get $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest002.b: dump file"
+ dump_file $db $txn $t1 test002.check
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest002.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $txn $t1 test002.check \
+ dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest002.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 test002.check \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test002; every data value should be the fixed string
+proc test002.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
diff --git a/bdb/test/test003.tcl b/bdb/test/test003.tcl
new file mode 100644
index 00000000000..013af2d419c
--- /dev/null
+++ b/bdb/test/test003.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test003.tcl,v 11.18 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 3 {access method}
+# Take the source files and dbtest executable and enter their names as the
+# key with their contents as data. After all are entered, retrieve all;
+# compare output to original. Close file, reopen, do retrieve and re-verify.
+proc test003 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if {[is_fixed_length $method] == 1} {
+ puts "Test003 skipping for method $method"
+ return
+ }
+ puts "Test003: $method ($args) filename=key filecontents=data pairs"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test003.db
+ set env NULL
+ } else {
+ set testfile test003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test003_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc test003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [ glob \
+ { $test_path/../*/*.[ch] } $test_path/*.tcl *.{a,o,lo,exe} \
+ $test_path/file.1 ]
+
+ puts "\tTest003.a: put/get loop"
+ set count 0
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid [pad_data $method $data]
+ }
+ close $fid
+
+ error_check_good \
+ Test003:diff($f,$t4) [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest003.b: dump file"
+ dump_bin_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the entries in the
+ # current directory
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest003.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_bin_file_direction "-first" "-next"
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest003.d: close, open, and dump file in reverse direction"
+
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_bin_file_direction "-last" "-prev"
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test003; key should be file name; data should be contents
+proc test003.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test003:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc test003_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+ set fname $names($binfile)
+ error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+ error_check_good Test003:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/bdb/test/test004.tcl b/bdb/test/test004.tcl
new file mode 100644
index 00000000000..0b076d6cfb7
--- /dev/null
+++ b/bdb/test/test004.tcl
@@ -0,0 +1,134 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test004.tcl,v 11.15 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 4 {access method}
+# Check that cursor operations work. Create a database.
+# Read through the database sequentially using cursors and
+# delete each element.
+proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set tnum test00$reopen
+
+ puts -nonewline "$tnum:\
+ $method ($args) $nentries delete small key; medium data pairs"
+ if {$reopen == 5} {
+		puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test004.db
+ set env NULL
+ } else {
+ set testfile test004.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set kvals ""
+ puts "\tTest00$reopen.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ lappend kvals $str
+ } else {
+ set key $str
+ }
+
+ set datastr [ make_data_str $str ]
+
+ set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tnum:put" $ret \
+ [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+ if { $build_only == 1 } {
+ return $db
+ }
+ if { $reopen == 5 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+ puts "\tTest00$reopen.b: get/delete loop"
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set outf [open $t1 w]
+ set c [eval {$db cursor} $txn]
+
+ set count 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ if { [is_record_based $method] == 1 } {
+ set datastr \
+ [make_data_str [lindex $kvals [expr $k - 1]]]
+ } else {
+ set datastr [make_data_str $k]
+ }
+ error_check_good $tnum:$k $d2 [pad_data $method $datastr]
+ puts $outf $k
+ $c del
+ if { [is_record_based $method] == 1 && \
+ $do_renumber == 1 } {
+ set kvals [lreplace $kvals 0 0]
+ }
+ incr count
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ error_check_good test00$reopen:keys_deleted $count $nentries
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/bdb/test/test005.tcl b/bdb/test/test005.tcl
new file mode 100644
index 00000000000..4cb5d88dfe2
--- /dev/null
+++ b/bdb/test/test005.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test005.tcl,v 11.4 2000/05/22 12:51:38 bostic Exp $
+#
+# DB Test 5 {access method}
+# Check that cursor operations work. Create a database; close database and
+# reopen it. Then read through the database sequentially using cursors and
+# delete each element.
+proc test005 { method {nentries 10000} args } {
+ eval {test004 $method $nentries 5 0} $args
+}
diff --git a/bdb/test/test006.tcl b/bdb/test/test006.tcl
new file mode 100644
index 00000000000..9364d2a4f60
--- /dev/null
+++ b/bdb/test/test006.tcl
@@ -0,0 +1,118 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test006.tcl,v 11.13 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 6 {access method}
+# Keyed delete test.
+# Create database.
+# Go through database, deleting all entries by key.
+proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { $tnum < 10 } {
+ set tname Test00$tnum
+ set dbname test00$tnum
+ } else {
+ set tname Test0$tnum
+ set dbname test0$tnum
+ }
+ puts -nonewline "$tname: $method ($args) "
+ puts -nonewline "$nentries equal small key; medium data pairs"
+ if {$reopen == 1} {
+ puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$dbname.db
+ set env NULL
+ } else {
+ set testfile $dbname.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1 ]
+ } else {
+ set key $str
+ }
+
+ set datastr [make_data_str $str]
+
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tname: put $datastr got $ret" \
+ $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ if { $reopen == 1 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set count 0
+ set did [open $dict]
+ set key 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { $do_renumber == 1 } {
+ set key 1
+ } elseif { [is_record_based $method] == 1 } {
+ incr key
+ } else {
+ set key $str
+ }
+
+ set datastr [make_data_str $str]
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tname: get $datastr got $ret" \
+ $ret [list [list $key [pad_data $method $datastr]]]
+
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del:$key $ret 0
+ incr count
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/bdb/test/test007.tcl b/bdb/test/test007.tcl
new file mode 100644
index 00000000000..305740f0369
--- /dev/null
+++ b/bdb/test/test007.tcl
@@ -0,0 +1,13 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test007.tcl,v 11.5 2000/05/22 12:51:38 bostic Exp $
+#
+# DB Test 7 {access method}
+# Check that delete operations work. Create a database; close database and
+# reopen it. Then issue a delete by key for each entry.
+proc test007 { method {nentries 10000} {tnum 7} args} {
+ eval {test006 $method $nentries 1 $tnum} $args
+}
diff --git a/bdb/test/test008.tcl b/bdb/test/test008.tcl
new file mode 100644
index 00000000000..34144391ccc
--- /dev/null
+++ b/bdb/test/test008.tcl
@@ -0,0 +1,138 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test008.tcl,v 11.17 2000/10/19 17:35:39 sue Exp $
+#
+# DB Test 8 {access method}
+# Take the source files and dbtest executable and enter their names as the
+# key with their contents as data. After all are entered, begin looping
+# through the entries, deleting some pairs and then re-adding them.
+proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
+ source ./include.tcl
+
+ set tnum test00$reopen
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test00$reopen skipping for method $method"
+ return
+ }
+
+ puts -nonewline "$tnum: $method filename=key filecontents=data pairs"
+ if {$reopen == 9} {
+		puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -truncate -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [glob ../*/*.c ./*.o ./*.lo ./*.exe]
+
+ set count 0
+ puts "\tTest00$reopen.a: Initial put/get loop"
+ foreach f $file_list {
+ set names($count) $f
+ set key $f
+
+ put_file $db $txn $pflags $f
+
+ get_file $db $txn $gflags $f $t4
+
+ error_check_good Test00$reopen:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ if {$reopen == 9} {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+	# Now we will step through the keys again (by increments) and
+ # delete all the entries, then re-insert them.
+
+ puts "\tTest00$reopen.b: Delete re-add loop"
+ foreach i "1 2 4 8 16" {
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ set r [eval {$db del} $txn {$names($ndx)}]
+ error_check_good db_del:$names($ndx) $r 0
+ }
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ put_file $db $txn $pflags $names($ndx)
+ }
+ }
+
+ if {$reopen == 9} {
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now, reopen the file and make sure the key/data pairs look right.
+ puts "\tTest00$reopen.c: Dump contents forward"
+ dump_bin_file $db $txn $t1 test008.check
+
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest00$reopen.d: Dump contents backward"
+ dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
+
+ filesort $t1 $t3
+
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ error_check_good close:$db [$db close] 0
+}
+
+proc test008.check { binfile tmpfile } {
+ global tnum
+ source ./include.tcl
+
+ error_check_good diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/bdb/test/test009.tcl b/bdb/test/test009.tcl
new file mode 100644
index 00000000000..e9c01875f77
--- /dev/null
+++ b/bdb/test/test009.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test009.tcl,v 11.4 2000/05/22 12:51:38 bostic Exp $
+#
+# DB Test 9 {access method}
+# Check that we reuse overflow pages. Create database with lots of
+# big key/data pairs. Go through and delete and add keys back
+# randomly. Then close the DB and make sure that we have everything
+# we think we should.
+proc test009 { method {nentries 10000} args} {
+ eval {test008 $method $nentries 9 0} $args
+}
diff --git a/bdb/test/test010.tcl b/bdb/test/test010.tcl
new file mode 100644
index 00000000000..b3aedb2bee9
--- /dev/null
+++ b/bdb/test/test010.tcl
@@ -0,0 +1,126 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test010.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 10 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and data; add duplicate
+# records for each.
+# After all are entered, retrieve all; verify output.
+# Close file, reopen, do retrieve and re-verify.
+# This does not work for recno
+proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
+ source ./include.tcl
+
+ set omethod $method
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ puts "Test0$tnum: $method ($args) $nentries small dup key/data pairs"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ set dbc [eval {$db cursor} $txn]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good "Test0$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test0$tnum:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.a: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ dup_check $db $txn $t1 $dlist
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.b: Checking file for correct duplicates after close"
+ dup_check $db $txn $t1 $dlist
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+}
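+
+# Illustrative sketch only, not called by the test suite: how this test
+# walks the duplicates for one key -- position with -set, then step with
+# -next until the key changes.  The proc name test010_dups_sketch is
+# hypothetical; it returns the data items stored under $key in order.
+proc test010_dups_sketch { db key } {
+	set dbc [$db cursor]
+	set dups {}
+	for { set ret [$dbc get -set $key] } \
+	    { [llength $ret] != 0 } \
+	    { set ret [$dbc get -next] } {
+		set k [lindex [lindex $ret 0] 0]
+		if { [string compare $k $key] != 0 } {
+			# Stepped past the last duplicate for $key.
+			break
+		}
+		lappend dups [lindex [lindex $ret 0] 1]
+	}
+	error_check_good sketch_curs_close [$dbc close] 0
+	return $dups
+}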
diff --git a/bdb/test/test011.tcl b/bdb/test/test011.tcl
new file mode 100644
index 00000000000..444f6240e92
--- /dev/null
+++ b/bdb/test/test011.tcl
@@ -0,0 +1,349 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test011.tcl,v 11.20 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 11 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and data; add duplicate
+# records for each.
+# Then do some key_first/key_last, add_before, and add_after operations.
+# This does not work for recno
+# To test if dups work when they fall off the main page, run this with
+# a very tiny page size.
+proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
+ global dlist
+ global rand_init
+ source ./include.tcl
+
+ set dlist ""
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ test011_recno $method $nentries $tnum $args
+ return
+ } else {
+ puts -nonewline "Test0$tnum: $method $nentries small dup "
+ puts "key/data pairs, cursor ops"
+ }
+ if {$ndups < 5} {
+ set ndups 5
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -truncate \
+ -mode 0644} [concat $args "-dup"] {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ # We will add dups with values 1, 3, ... $ndups. Then we'll add
+ # 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using
+ # add before and add after.
+ puts "\tTest0$tnum.a: put and get duplicate keys."
+ set dbc [eval {$db cursor} $txn]
+ set i ""
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ lappend dlist $i
+ }
+ set maxodd $i
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ set datastr $i:$str
+ set ret [eval {$db put} $txn $pflags {$str $datastr}]
+ error_check_good put $ret 0
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ for {set ret [$dbc get "-set" $str ]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+
+ error_check_good Test0$tnum:put $d $str
+ set id [ id_of $datastr ]
+ error_check_good Test0$tnum:dup# $id $x
+ incr x 2
+ }
+ error_check_good Test0$tnum:numdups $x $maxodd
+ incr count
+ }
+ error_check_good curs_close [$dbc close] 0
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: \
+ traverse entire file checking duplicates before close."
+ dup_check $db $txn $t1 $dlist
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.c: \
+ traverse entire file checking duplicates after close."
+ dup_check $db $txn $t1 $dlist
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.d: Testing key_first functionality"
+ add_dup $db $txn $nentries "-keyfirst" 0 0
+ set dlist [linsert $dlist 0 0]
+ dup_check $db $txn $t1 $dlist
+
+ puts "\tTest0$tnum.e: Testing key_last functionality"
+ add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0
+ lappend dlist [expr $maxodd - 1]
+ dup_check $db $txn $t1 $dlist
+
+ puts "\tTest0$tnum.f: Testing add_before functionality"
+ add_dup $db $txn $nentries "-before" 2 3
+ set dlist [linsert $dlist 2 2]
+ dup_check $db $txn $t1 $dlist
+
+ puts "\tTest0$tnum.g: Testing add_after functionality"
+ add_dup $db $txn $nentries "-after" 4 4
+ set dlist [linsert $dlist 4 4]
+ dup_check $db $txn $t1 $dlist
+
+ error_check_good db_close [$db close] 0
+}
+
+proc add_dup {db txn nentries flag dataval iter} {
+ source ./include.tcl
+
+ set dbc [eval {$db cursor} $txn]
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set datastr $dataval:$str
+ set ret [$dbc get "-set" $str]
+ error_check_bad "cget(SET)" [is_substr $ret Error] 1
+ for { set i 1 } { $i < $iter } { incr i } {
+ set ret [$dbc get "-next"]
+ error_check_bad "cget(NEXT)" [is_substr $ret Error] 1
+ }
+
+ if { [string compare $flag "-before"] == 0 ||
+ [string compare $flag "-after"] == 0 } {
+ set ret [$dbc put $flag $datastr]
+ } else {
+ set ret [$dbc put $flag $str $datastr]
+ }
+ error_check_good "$dbc put $flag" $ret 0
+ incr count
+ }
+ close $did
+ $dbc close
+}
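+
+# Illustrative sketch only, not called by the test suite: the four cursor
+# put flags that add_dup exercises.  In the main test with ndups 5 the
+# dup list starts as {1 3 5}; step .d prepends 0 with -keyfirst, .e
+# appends 6 with -keylast, .f slots 2 in with -before and .g slots 4 in
+# with -after, so dup_check ends up verifying {0 1 2 3 4 5 6}.  The proc
+# name add_dup_sketch is hypothetical and only demonstrates the syntax.
+proc add_dup_sketch { db key } {
+	set dbc [$db cursor]
+	# -keyfirst and -keylast take a key and a datum and go to the
+	# two ends of that key's duplicate set.
+	error_check_good sk_keyfirst [$dbc put -keyfirst $key first:$key] 0
+	error_check_good sk_keylast [$dbc put -keylast $key last:$key] 0
+	# -before and -after take only a datum and insert relative to
+	# wherever the cursor is currently positioned.
+	set ret [$dbc get -set $key]
+	error_check_bad sk_set [llength $ret] 0
+	error_check_good sk_before [$dbc put -before beforedup:$key] 0
+	error_check_good sk_after [$dbc put -after afterdup:$key] 0
+	error_check_good sk_close [$dbc close] 0
+}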
+
+proc test011_recno { method {nentries 10000} {tnum 11} largs } {
+ global dlist
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+ set renum [is_rrecno $method]
+
+ puts "Test0$tnum: \
+ $method ($largs) $nentries test cursor insert functionality"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $largs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $largs $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ if {$renum == 1} {
+ append largs " -renumber"
+ }
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $largs {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # The basic structure of the test is that we pick a random key
+	# in the database and then add items before, after, and in place of it. The
+ # trickiness is that with RECNO, these are not duplicates, they
+ # are creating new keys. Therefore, every time we do this, the
+ # keys assigned to other values change. For this reason, we'll
+ # keep the database in tcl as a list and insert properly into
+ # it to verify that the right thing is happening. If we do not
+ # have renumber set, then the BEFORE and AFTER calls should fail.
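+	#
+	# For example (hedged illustration only, not executed here): if the
+	# database holds {1 => a, 2 => b, 3 => c} and the cursor sits on
+	# record 2, then with -renumber a "put -before x" makes x the new
+	# record 2 and shifts b and c to records 3 and 4, and a "put -after
+	# y" at that point slots y in as record 3.  Without -renumber the
+	# BEFORE/AFTER puts are expected to fail, which is why they are
+	# only attempted below when $renum is set.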
+
+ # Seed the database with an initial record
+ gets $did str
+ set ret [eval {$db put} $txn {1 [chop_data $method $str]}]
+ error_check_good put $ret 0
+ set count 1
+
+ set dlist "NULL $str"
+
+ # Open a cursor
+ set dbc [eval {$db cursor} $txn]
+ puts "\tTest0$tnum.a: put and get entries"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Pick a random key
+ set key [berkdb random_int 1 $count]
+ set ret [$dbc get -set $key]
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:SET:key $k $key
+ error_check_good \
+ cget:SET $d [pad_data $method [lindex $dlist $key]]
+
+ # Current
+ set ret [$dbc put -current [chop_data $method $str]]
+ error_check_good cput:$key $ret 0
+ set dlist [lreplace $dlist $key $key [pad_data $method $str]]
+
+ # Before
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+
+ if { $renum == 1 } {
+ set ret [$dbc put \
+ -before [chop_data $method $str]]
+ error_check_good cput:$key:BEFORE $ret $key
+ set dlist [linsert $dlist $key $str]
+ incr count
+
+ # After
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+ set ret [$dbc put \
+ -after [chop_data $method $str]]
+ error_check_good cput:$key:AFTER $ret [expr $key + 1]
+ set dlist [linsert $dlist [expr $key + 1] $str]
+ incr count
+ }
+
+ # Now verify that the keys are in the right place
+ set i 0
+ for {set ret [$dbc get "-set" $key]} \
+ {[string length $ret] != 0 && $i < 3} \
+ {set ret [$dbc get "-next"] } {
+ set check_key [expr $key + $i]
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good cget:$key:loop $k $check_key
+
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:data $d \
+ [pad_data $method [lindex $dlist $check_key]]
+ incr i
+ }
+ }
+ close $did
+ error_check_good cclose [$dbc close] 0
+
+ # Create check key file.
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+
+ puts "\tTest0$tnum.b: dump file"
+ dump_file $db $txn $t1 test011_check
+ error_check_good Test0$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $txn $t1 test011_check \
+ dump_file_direction "-first" "-next"
+ error_check_good Test0$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 test011_check \
+ dump_file_direction "-last" "-prev"
+
+ filesort $t1 $t3 -n
+ error_check_good Test0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+}
+
+proc test011_check { key data } {
+ global dlist
+
+ error_check_good "get key $key" $data [lindex $dlist $key]
+}
diff --git a/bdb/test/test012.tcl b/bdb/test/test012.tcl
new file mode 100644
index 00000000000..87127901e19
--- /dev/null
+++ b/bdb/test/test012.tcl
@@ -0,0 +1,113 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test012.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 12 {access method}
+# Take the source files and dbtest executable and enter their contents as
+# the key with their names as data. After all are entered, retrieve all;
+# compare output to original. Close file, reopen, do retrieve and re-verify.
+proc test012 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test012 skipping for method $method"
+ return
+ }
+
+ puts "Test012: $method ($args) filename=data filecontents=key pairs"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test012.db
+ set env NULL
+ } else {
+ set testfile test012.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [glob $test_path/../\[a-z\]*/*.c \
+ $test_path/./*.lo ./*.exe]
+
+ puts "\tTest012.a: put/get loop"
+ set count 0
+ foreach f $file_list {
+ put_file_as_key $db $txn $pflags $f
+
+ set kd [get_file_as_key $db $txn $gflags $f]
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest012.b: dump file"
+ dump_binkey_file $db $txn $t1 test012.check
+ error_check_good db_close [$db close] 0
+
+ # Now compare the data to see if they match the .o and dbtest files
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest012.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $txn $t1 test012.check \
+ dump_binkey_file_direction "-first" "-next"
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest012.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 test012.check\
+ dump_binkey_file_direction "-last" "-prev"
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test012; key should be file name; data should be contents
+proc test012.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test012:diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
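+
+# Illustrative sketch only, not called by the test suite: a rough, assumed
+# equivalent of the put_file_as_key step above (the real helper lives in
+# the shared test utilities and may differ in detail).  The proc name
+# test012_putfile_sketch is hypothetical.
+proc test012_putfile_sketch { db fname } {
+	set fid [open $fname r]
+	fconfigure $fid -translation binary
+	set contents [read $fid]
+	close $fid
+	# The file's bytes become the key and its name becomes the data.
+	error_check_good sketch_put:$fname [$db put $contents $fname] 0
+}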
diff --git a/bdb/test/test013.tcl b/bdb/test/test013.tcl
new file mode 100644
index 00000000000..5812cf8f64d
--- /dev/null
+++ b/bdb/test/test013.tcl
@@ -0,0 +1,193 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test013.tcl,v 11.18 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 13 {access method}
+#
+# 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+# 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+# 3. Actually overwrite each one with its datum capitalized.
+#
+# No partial testing here.
+proc test013 { method {nentries 10000} args } {
+ global errorCode
+ global errorInfo
+ global fixed_pad
+ global fixed_len
+
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test013.db
+ set env NULL
+ } else {
+ set testfile test013.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test013_recno.check
+ append gflags " -recno"
+ global kvals
+ } else {
+ set checkfunc test013.check
+ }
+ puts "\tTest013.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+
+ # Now we will try to overwrite each datum, but set the
+ # NOOVERWRITE flag.
+ puts "\tTest013.b: overwrite values with NOOVERWRITE flag."
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $key [chop_data $method $str]}]
+ error_check_good put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Value should be unchanged.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+
+ # Now we will replace each item with its datum capitalized.
+ puts "\tTest013.c: overwrite values with capitalized datum"
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set rstr [string toupper $str]
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $rstr]}]
+ error_check_good put $r 0
+
+ # Value should be changed.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $rstr]]]
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest013.d: check entire file contents"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest013.e: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest013.f: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test013; keys and data are identical
+proc test013.check { key data } {
+ error_check_good \
+ "key/data mismatch for $key" $data [string toupper $key]
+}
+
+proc test013_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good \
+ "data mismatch for $key" $data [string toupper $kvals($key)]
+}
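+
+# Illustrative sketch only, not called by the test suite: the -nooverwrite
+# behavior step .b relies on.  A second put with -nooverwrite must leave
+# the stored datum alone and report DB_KEYEXIST.  The proc name
+# test013_noover_sketch is hypothetical; it assumes a non-record-based
+# method so the key can be a plain string.
+proc test013_noover_sketch { db key } {
+	error_check_good sk_put [$db put $key original] 0
+	# The overwrite attempt is rejected ...
+	set ret [$db put -nooverwrite $key changed]
+	error_check_good sk_keyexist [is_substr $ret "DB_KEYEXIST"] 1
+	# ... and the original datum is still in place.
+	error_check_good sk_get [$db get $key] [list [list $key original]]
+}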
diff --git a/bdb/test/test014.tcl b/bdb/test/test014.tcl
new file mode 100644
index 00000000000..3ad5335dd0a
--- /dev/null
+++ b/bdb/test/test014.tcl
@@ -0,0 +1,204 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test014.tcl,v 11.19 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 14 {access method}
+#
+# Partial put test, small data, replacing with same size. The data set
+# consists of the first nentries of the dictionary. We will insert them
+# (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+# we'll try to perform partial puts of some characters at the beginning,
+# some at the end, and some in the middle.
+proc test014 { method {nentries 10000} args } {
+ set fixed 0
+ set args [convert_args $method $args]
+
+ if { [is_fixed_length $method] == 1 } {
+ set fixed 1
+ }
+
+ puts "Test014: $method ($args) $nentries equal key/data pairs, put test"
+
+ # flagp indicates whether this is a postpend or a
+ # normal partial put
+ set flagp 0
+
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 2 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ eval {test014_body $method $flagp 2 16 $nentries} $args
+ if { $fixed == 0 } {
+ eval {test014_body $method $flagp 0 1 $nentries} $args
+ eval {test014_body $method $flagp 0 4 $nentries} $args
+ eval {test014_body $method $flagp 0 128 $nentries} $args
+
+		# POST-PENDS:
+		# partial puts of data past the end of the existing record.
+		# chars: number of gap bytes that will be filled with nulls
+		# increase: length of the string to be appended (after the pad)
+		#
+ set flagp 1
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 4 1 $nentries} $args
+ eval {test014_body $method $flagp 128 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ }
+ puts "Test014 complete."
+}
+
+proc test014_body { method flagp chars increase {nentries 10000} args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 && $chars != $increase } {
+ puts "Test014: $method: skipping replace\
+ $chars chars with string $increase times larger."
+ return
+ }
+
+ if { $flagp == 1} {
+ puts "Test014: Postpending string of len $increase with \
+ gap $chars."
+ } else {
+ puts "Test014: Replace $chars chars with string \
+ $increase times larger"
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test014.db
+ set env NULL
+ } else {
+ set testfile test014.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set gflags ""
+ set pflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest014.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $flagp == 1 } {
+ # this is for postpend only
+ global dvals
+
+ # initial put
+ set ret [$db put $key $str]
+ error_check_good dbput $ret 0
+
+ set offset [string length $str]
+
+ # increase is the actual number of new bytes
+ # to be postpended (besides the null padding)
+ set data [repeat "P" $increase]
+
+ # chars is the amount of padding in between
+ # the old data and the new
+ set len [expr $offset + $chars + $increase]
+ set dvals($key) [binary format \
+ a[set offset]x[set chars]a[set increase] \
+ $str $data]
+ set offset [expr $offset + $chars]
+ set ret [$db put -partial [list $offset 0] $key $data]
+ error_check_good dbput:post $ret 0
+ } else {
+ partial_put $method $db $txn \
+ $gflags $key $str $chars $increase
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest014.b: check entire file contents"
+ dump_file $db $txn $t1 test014.check
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest014.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $txn \
+ $t1 test014.check dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest014.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 \
+ test014.check dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test014; keys and data are identical
+proc test014.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
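+
+# Illustrative sketch only, not called by the test suite: the post-pend
+# put built in the loop above, assuming a variable-length method such as
+# btree.  For an existing datum "abc" with chars 2 and increase 3 the
+# record should come back as "abc\0\0PPP": the new bytes are written with
+# -partial at offset [string length "abc"] + chars and the gap is filled
+# with nulls.  The proc name test014_postpend_sketch is hypothetical.
+proc test014_postpend_sketch { db key str chars increase } {
+	error_check_good sk_put [$db put $key $str] 0
+	set data [repeat "P" $increase]
+	set offset [expr [string length $str] + $chars]
+	# A zero replacement length means nothing is overwritten; the new
+	# bytes land past the current end of the record.
+	error_check_good sk_postpend \
+	    [$db put -partial [list $offset 0] $key $data] 0
+	# Expected image of the record after the put.
+	set want [binary format \
+	    a[string length $str]x[set chars]a[set increase] $str $data]
+	error_check_good sk_get [$db get $key] [list [list $key $want]]
+}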
diff --git a/bdb/test/test015.tcl b/bdb/test/test015.tcl
new file mode 100644
index 00000000000..61abddd3799
--- /dev/null
+++ b/bdb/test/test015.tcl
@@ -0,0 +1,235 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test015.tcl,v 11.20 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 15 {access method}
+# Partial put test when item does not exist.
+proc test015 { method {nentries 7500} { start 0 } args } {
+ global fixed_len
+
+ set low_range 50
+ set mid_range 100
+ set high_range 1000
+
+ if { [is_fixed_length $method] } {
+ set low_range [expr $fixed_len/2 - 2]
+ set mid_range [expr $fixed_len/2]
+ set high_range $fixed_len
+ }
+
+ set t_table {
+ { 1 { 1 1 1 } }
+ { 2 { 1 1 5 } }
+ { 3 { 1 1 $low_range } }
+ { 4 { 1 $mid_range 1 } }
+ { 5 { $mid_range $high_range 5 } }
+ { 6 { 1 $mid_range $low_range } }
+ }
+
+ puts "Test015: \
+ $method ($args) $nentries equal key/data pairs, partial put test"
+ test015_init
+ if { $start == 0 } {
+ set start { 1 2 3 4 5 6 }
+ }
+ foreach entry $t_table {
+ set this [lindex $entry 0]
+ if { [lsearch $start $this] == -1 } {
+ continue
+ }
+ puts -nonewline "$this: "
+ eval [concat test015_body $method [lindex $entry 1] \
+ $nentries $args]
+ }
+}
+
+proc test015_init { } {
+ global rand_init
+
+ berkdb srand $rand_init
+}
+
+proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
+ global dvals
+ global fixed_len
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set checkfunc test015.check
+
+ if { [is_fixed_length $method] && \
+ [string compare $omethod "-recno"] == 0} {
+ # is fixed recno method
+ set checkfunc test015.check
+ }
+
+	puts "Put $rcount strings at random offsets between $off_low and $off_hi"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test015.db
+ set env NULL
+ } else {
+ set testfile test015.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ puts "\tTest015.a: put/get loop"
+
+ # Here is the loop where we put and get each key/data pair
+ # Each put is a partial put of a record that does not exist.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ if { [string length $str] > $fixed_len } {
+ continue
+ }
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { 0 } {
+ set data [replicate $str $rcount]
+ set off [ berkdb random_int $off_low $off_hi ]
+ set offn [expr $off + 1]
+ if { [is_fixed_length $method] && \
+ [expr [string length $data] + $off] >= $fixed_len} {
+ set data [string range $data 0 [expr $fixed_len-$offn]]
+ }
+ set dvals($key) [partial_shift $data $off right]
+ } else {
+ set data [chop_data $method [replicate $str $rcount]]
+
+ # This is a hack. In DB we will store the records with
+ # some padding, but these will get lost if we just return
+ # them in TCL. As a result, we're going to have to hack
+ # get to check for 0 padding and return a list consisting
+ # of the number of 0's and the actual data.
+ set off [ berkdb random_int $off_low $off_hi ]
+
+ # There is no string concatenation function in Tcl
+ # (although there is one in TclX), so we have to resort
+ # to this hack. Ugh.
+ set slen [string length $data]
+ if {[is_fixed_length $method] && \
+ $slen > $fixed_len - $off} {
+				set slen [expr $fixed_len - $off]
+ }
+ set a "a"
+ set dvals($key) [pad_data \
+ $method [eval "binary format x$off$a$slen" {$data}]]
+ }
+ if {[is_fixed_length $method] && \
+ [string length $data] > ($fixed_len - $off)} {
+ set slen [expr $fixed_len - $off]
+ set data [eval "binary format a$slen" {$data}]
+ }
+ set ret [eval {$db put} \
+ {-partial [list $off [string length $data]] $key $data}]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest015.b: check entire file contents"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest015.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $txn $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest015.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 \
+ $checkfunc dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ unset dvals
+}
+
+# Check function for test015; keys and data are identical
+proc test015.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ binary scan $data "c[string length $data]" a
+ binary scan $dvals($key) "c[string length $dvals($key)]" b
+ error_check_good "mismatch on padding for key $key" $a $b
+}
+
+proc test015.fixed.check { key data } {
+ global dvals
+ global fixed_len
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ if { [string length $data] > $fixed_len } {
+ error_check_bad \
+ "data length:[string length $data] \
+ for fixed:$fixed_len" 1 1
+ }
+ puts "$data : $dvals($key)"
+	error_check_good compare_data($data,$dvals($key)) \
+ $dvals($key) $data
+}
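+
+# Illustrative sketch only, not called by the test suite: a partial put
+# to a key that does not exist yet, as exercised above, assuming a
+# variable-length method.  Writing $data at offset $off creates the
+# record with $off leading null bytes, which is exactly what the dvals
+# entries built with "binary format x$off a$slen" encode.  The proc name
+# test015_newrec_sketch is hypothetical.
+proc test015_newrec_sketch { db key data off } {
+	error_check_good sk_partial_put [$db put \
+	    -partial [list $off [string length $data]] $key $data] 0
+	# Expected image: $off null bytes followed by $data.
+	set want [binary format x[set off]a[string length $data] $data]
+	error_check_good sk_get [$db get $key] [list [list $key $want]]
+}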
diff --git a/bdb/test/test016.tcl b/bdb/test/test016.tcl
new file mode 100644
index 00000000000..def3c114693
--- /dev/null
+++ b/bdb/test/test016.tcl
@@ -0,0 +1,170 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test016.tcl,v 11.17 2000/08/25 14:21:54 sue Exp $
+#
+# DB Test 16 {access method}
+# Partial put test where partial puts make the record smaller.
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and a fixed, medium length data string;
+# retrieve each. After all are entered, go back and do partial puts,
+# replacing a random-length string with the key value.
+# Then verify.
+
+set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+
+proc test016 { method {nentries 10000} args } {
+ global datastr
+ global dvals
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Test016: skipping for method $method"
+ return
+ }
+
+ puts "Test016: $method ($args) $nentries partial put shorten"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test016.db
+ set env NULL
+ } else {
+ set testfile test016.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+
+ puts "\tTest016.a: put/get loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ # Next we will do a partial put replacement, making the data
+ # shorter
+ puts "\tTest016.b: partial put loop"
+ set did [open $dict]
+ set count 0
+ set len [string length $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set repl_len [berkdb random_int [string length $key] $len]
+ set repl_off [berkdb random_int 0 [expr $len - $repl_len] ]
+ set s1 [string range $datastr 0 [ expr $repl_off - 1] ]
+ set s2 [string toupper $key]
+ set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
+ set dvals($key) [pad_data $method $s1$s2$s3]
+ set ret [eval {$db put} $txn {-partial \
+ [list $repl_off $repl_len] $key [chop_data $method $s2]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ put $ret [list [list $key [pad_data $method $s1$s2$s3]]]
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest016.c: dump file"
+ dump_file $db $txn $t1 test016.check
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest016.d: close, open, and dump file"
+ open_and_dump_file $testfile $env $txn $t1 test016.check \
+ dump_file_direction "-first" "-next"
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest016.e: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 test016.check \
+ dump_file_direction "-last" "-prev"
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test016; data should be whatever is set in dvals
+proc test016.check { key data } {
+ global datastr
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
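+
+# Illustrative sketch only, not called by the test suite: the shrinking
+# partial put from step .b with concrete values, assuming a
+# variable-length method.  Replacing 3 bytes at offset 2 of "abcdef"
+# with the 2-byte string "XY" leaves "abXYf", one byte shorter.  The
+# proc name test016_shrink_sketch is hypothetical.
+proc test016_shrink_sketch { db key } {
+	error_check_good sk_put [$db put $key abcdef] 0
+	# repl_off 2, repl_len 3, replacement string "XY".
+	error_check_good sk_shrink \
+	    [$db put -partial [list 2 3] $key XY] 0
+	error_check_good sk_get [$db get $key] [list [list $key abXYf]]
+}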
diff --git a/bdb/test/test017.tcl b/bdb/test/test017.tcl
new file mode 100644
index 00000000000..95fe82e081c
--- /dev/null
+++ b/bdb/test/test017.tcl
@@ -0,0 +1,237 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test017.tcl,v 11.13 2000/12/11 17:42:18 sue Exp $
+#
+# DB Test 17 {access method}
+# Run duplicates with small page size so that we test off page duplicates.
+# Then after we have an off-page database, test with overflow pages too.
+#
+proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test0$tnum: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ puts "Test0$tnum: $method ($args) Off page duplicate tests with $ndups duplicates"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ set ovfl ""
+ # Here is the loop where we put and get each key/data pair
+ set dbc [eval {$db cursor} $txn]
+ puts -nonewline \
+ "\tTest0$tnum.a: Creating duplicates with "
+ if { $contents != 0 } {
+ puts "file contents as key/data"
+ } else {
+ puts "file name as key/data"
+ }
+ set file_list [glob ../*/*.c ./*.lo]
+ foreach f $file_list {
+ if { $contents != 0 } {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ #
+ # Prepend file name to guarantee uniqueness
+ set filecont [read $fid]
+ set str $f:$filecont
+ close $fid
+ } else {
+ set str $f
+ }
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ #
+		# Save 10% of the files for the overflow test
+ #
+ if { $contents == 0 && [expr $count % 10] == 0 } {
+ lappend ovfl $f
+ }
+ # Now retrieve all the keys matching this key
+ set ret [$db get $str]
+ error_check_bad $f:dbget_dups [llength $ret] 0
+ error_check_good $f:dbget_dups1 [llength $ret] $ndups
+ set x 1
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ if {[string length $d] == 0} {
+ break
+ }
+ error_check_good "Test0$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test0$tnum:$f:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ set oid [open $t2.tmp w]
+ set o1id [open $t4.tmp w]
+ foreach f $file_list {
+ for {set i 1} {$i <= $ndups} {incr i} {
+ puts $o1id $f
+ }
+ puts $oid $f
+ }
+ close $oid
+ close $o1id
+ filesort $t2.tmp $t2
+ filesort $t4.tmp $t4
+ fileremove $t2.tmp
+ fileremove $t4.tmp
+
+ dup_check $db $txn $t1 $dlist
+ if {$contents == 0} {
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now compare the keys to see if they match the file names
+ dump_file $db $txn $t1 test017.check
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t4) \
+ [filecmp $t3 $t4] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.c: Checking file for correct duplicates after close"
+ dup_check $db $txn $t1 $dlist
+
+ if {$contents == 0} {
+ # Now compare the keys to see if they match the filenames
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.d: Verify off page duplicates and overflow status"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+ if { [is_btree $method] } {
+ error_check_bad stat:offpage \
+ [is_substr $stat "{{Internal pages} 0}"] 1
+ }
+ if {$contents == 0} {
+ # This check doesn't work in hash, since overflow
+ # pages count extra pages in buckets as well as true
+ # P_OVERFLOW pages.
+ if { [is_hash $method] == 0 } {
+ error_check_good overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+
+ #
+ # If doing overflow test, do that now. Else we are done.
+ # Add overflow pages by adding a large entry to a duplicate.
+ #
+ if { [llength $ovfl] == 0} {
+ error_check_good db_close [$db close] 0
+ return
+ }
+ puts "\tTest0$tnum.e: Add overflow duplicate entries"
+ set ovfldup [expr $ndups + 1]
+ foreach f $ovfl {
+ #
+ # This is just like put_file, but prepends the dup number
+ #
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set fdata [read $fid]
+ close $fid
+ set data $ovfldup:$fdata
+
+ set ret [eval {$db put} $txn $pflags {$f $data}]
+ error_check_good ovfl_put $ret 0
+ }
+ puts "\tTest0$tnum.f: Verify overflow duplicate entries"
+ dup_check $db $txn $t1 $dlist $ovfldup
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ set stat [$db stat]
+ error_check_bad overflow1 \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ error_check_good db_close [$db close] 0
+}
+
+# Check function; verify data contains key
+proc test017.check { key data } {
+ error_check_good "data mismatch for key $key" $key [data_of $data]
+}
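+
+# Illustrative sketch only, not called by the test suite: the stat-based
+# check from step .d, factored into a helper that reports whether
+# [$db stat] shows any overflow pages.  The proc name
+# test017_has_overflow_sketch is hypothetical.
+proc test017_has_overflow_sketch { db } {
+	set stat [$db stat]
+	# "{{Overflow pages} 0}" appears in the stat output only when the
+	# overflow page count is exactly zero.
+	if { [is_substr $stat "{{Overflow pages} 0}"] == 1 } {
+		return 0
+	}
+	return 1
+}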
diff --git a/bdb/test/test018.tcl b/bdb/test/test018.tcl
new file mode 100644
index 00000000000..95493da2d03
--- /dev/null
+++ b/bdb/test/test018.tcl
@@ -0,0 +1,13 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test018.tcl,v 11.3 2000/02/14 03:00:18 bostic Exp $
+#
+# DB Test 18 {access method}
+# Run duplicates with small page size so that we test off page duplicates.
+proc test018 { method {nentries 10000} args} {
+ puts "Test018: Off page duplicate tests"
+ eval {test011 $method $nentries 19 18 -pagesize 512} $args
+}
diff --git a/bdb/test/test019.tcl b/bdb/test/test019.tcl
new file mode 100644
index 00000000000..4031ae2dc16
--- /dev/null
+++ b/bdb/test/test019.tcl
@@ -0,0 +1,107 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test019.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+#
+# Test019 { access_method nentries }
+# Test the partial get functionality.
+proc test019 { method {nentries 10000} args } {
+ global fixed_len
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test019: $method ($args) $nentries partial get test"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test019.db
+ set env NULL
+ } else {
+ set testfile test019.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ berkdb srand $rand_init
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest019.a: put/get loop"
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set repl [berkdb random_int $fixed_len 100]
+ set data [chop_data $method [replicate $str $repl]]
+ set ret [eval {$db put} $txn {-nooverwrite $key $data}]
+ error_check_good dbput:$key $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ dbget:$key $ret [list [list $key [pad_data $method $data]]]
+ set kvals($key) $repl
+ }
+ close $did
+
+ puts "\tTest019.b: partial get loop"
+ set did [open $dict]
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set data [replicate $str $kvals($key)]
+
+ if { [is_fixed_length $method] == 1 } {
+ set maxndx $fixed_len
+ } else {
+ set maxndx [expr [string length $data] - 1]
+ }
+ set beg [berkdb random_int 0 [expr $maxndx - 1]]
+ set len [berkdb random_int 1 [expr $maxndx - $beg]]
+
+ set ret [eval {$db get} \
+ $txn {-partial [list $beg $len]} $gflags {$key}]
+
+ # In order for tcl to handle this, we have to overwrite the
+ # last character with a NULL. That makes the length one less
+ # than we expect.
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good dbget_key $k $key
+ # If $d contains some of the padding, we want to get rid of it.
+ set firstnull [string first "\0" $d]
+ if { $firstnull == -1 } { set firstnull [string length $d] }
+ error_check_good dbget_data \
+ [string range $d 0 [expr $firstnull - 1]] \
+ [string range $data $beg [expr $beg + $len - 1]]
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
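+
+# Illustrative sketch only, not called by the test suite: the partial get
+# used above.  Asking for $len bytes starting at $beg returns just that
+# slice of the stored datum, so the loop compares it against the matching
+# [string range ...] of the full value.  The proc name test019_pget_sketch
+# is hypothetical and assumes a non-fixed-length method (no pad bytes to
+# strip from the result).
+proc test019_pget_sketch { db key data beg len } {
+	error_check_good sk_put [$db put $key $data] 0
+	set ret [$db get -partial [list $beg $len] $key]
+	set d [lindex [lindex $ret 0] 1]
+	error_check_good sk_partial_get $d \
+	    [string range $data $beg [expr $beg + $len - 1]]
+}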
diff --git a/bdb/test/test020.tcl b/bdb/test/test020.tcl
new file mode 100644
index 00000000000..1961d0e02dd
--- /dev/null
+++ b/bdb/test/test020.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test020.tcl,v 11.12 2000/10/19 23:15:22 ubell Exp $
+#
+# DB Test 20 {access method}
+# Test in-memory databases.
+proc test020 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test020 skipping for method $method"
+ return
+ }
+ puts "Test020: $method ($args) $nentries equal key/data pairs"
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # Check if we are using an env.
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test020_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test020.check
+ }
+ puts "\tTest020.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest020.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test020:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test020; keys and data are identical
+proc test020.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test020_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
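+
+# Illustrative sketch only, not called by the test suite: what makes this
+# an in-memory test.  The berkdb_open call above is given no file name,
+# so the database has no named file on disk and goes away when the handle
+# is closed.  The proc name test020_inmem_sketch is hypothetical; it uses
+# btree keys, whereas record-based methods would use integer keys.
+proc test020_inmem_sketch { } {
+	# No file argument after the method: an anonymous, in-memory db.
+	set db [berkdb_open -create -mode 0644 -btree]
+	error_check_good sk_open [is_valid_db $db] TRUE
+	error_check_good sk_put [$db put somekey somedata] 0
+	error_check_good sk_get [$db get somekey] \
+	    [list [list somekey somedata]]
+	error_check_good sk_close [$db close] 0
+}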
diff --git a/bdb/test/test021.tcl b/bdb/test/test021.tcl
new file mode 100644
index 00000000000..f9a1fe32f7e
--- /dev/null
+++ b/bdb/test/test021.tcl
@@ -0,0 +1,130 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test021.tcl,v 11.10 2000/08/25 14:21:55 sue Exp $
+#
+# DB Test 21 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self, reversed as key and self as data.
+# After all are entered, retrieve each using a cursor SET_RANGE, and getting
+# about 20 keys sequentially after it (in some cases we'll run out towards
+# the end of the file).
+proc test021 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test021: $method ($args) $nentries equal key/data pairs"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test021.db
+ set env NULL
+ } else {
+ set testfile test021.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test021_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test021.check
+ }
+ puts "\tTest021.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key [reverse $str]
+ }
+
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and retrieve about 20
+ # records after it.
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest021.b: test ranges"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set did [open $dict]
+ set i 0
+ while { [gets $did str] != -1 && $i < $count } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key [reverse $str]
+ }
+
+ set r [$dbc get -set_range $key]
+ error_check_bad dbc_get:$key [string length $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+
+ for { set nrecs 0 } { $nrecs < 20 } { incr nrecs } {
+ set r [$dbc get "-next"]
+ # no error checking because we may run off the end
+ # of the database
+ if { [llength $r] == 0 } {
+ continue;
+ }
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+ }
+ incr i
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for test021; keys and data are reversed
+proc test021.check { key data } {
+ error_check_good "key/data mismatch for $key" $data [reverse $key]
+}
+
+proc test021_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
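+
+# Illustrative sketch only, not called by the test suite: the -set_range
+# positioning relied on in step .b.  Unlike -set, -set_range does not need
+# an exact match; for a btree it settles on the smallest key greater than
+# or equal to the one given, so a reversed word that falls between stored
+# keys still lands somewhere useful.  The proc name test021_range_sketch
+# is hypothetical; it returns the key/data pair the cursor lands on.
+proc test021_range_sketch { db key } {
+	set dbc [$db cursor]
+	set r [$dbc get -set_range $key]
+	error_check_bad sk_set_range [string length $r] 0
+	set k [lindex [lindex $r 0] 0]
+	set d [lindex [lindex $r 0] 1]
+	error_check_good sk_curs_close [$dbc close] 0
+	return [list $k $d]
+}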
diff --git a/bdb/test/test022.tcl b/bdb/test/test022.tcl
new file mode 100644
index 00000000000..f9a4c96637e
--- /dev/null
+++ b/bdb/test/test022.tcl
@@ -0,0 +1,55 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test022.tcl,v 11.10 2000/08/25 14:21:55 sue Exp $
+#
+# Test022: Test of DB->get_byteswapped
+proc test022 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test022 ($args) $omethod: DB->getbyteswapped()"
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile1 "$testdir/test022a.db"
+ set testfile2 "$testdir/test022b.db"
+ set env NULL
+ } else {
+ set testfile1 "test022a.db"
+ set testfile2 "test022b.db"
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ # Create two databases, one in each byte order.
+ set db1 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 1234} $testfile1]
+ error_check_good db1_open [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 4321} $testfile2]
+ error_check_good db2_open [is_valid_db $db2] TRUE
+
+ # Call DB->get_byteswapped on both of them.
+ set db1_order [$db1 is_byteswapped]
+ set db2_order [$db2 is_byteswapped]
+
+ # Make sure that both answers are either 1 or 0,
+ # and that exactly one of them is 1.
+ error_check_good is_byteswapped_sensible_1 \
+ [expr ($db1_order == 1 && $db2_order == 0) || \
+ ($db1_order == 0 && $db2_order == 1)] 1
+
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ puts "\tTest022 complete."
+}
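
For reference, a minimal sketch of the byte-order check above, assuming the
Berkeley DB Tcl API loaded by these tests; the file names le.db and be.db are
illustrative only:

	# Create one little-endian (-lorder 1234) and one big-endian (-lorder 4321) btree.
	set db_le [berkdb_open -create -mode 0644 -btree -lorder 1234 le.db]
	set db_be [berkdb_open -create -mode 0644 -btree -lorder 4321 be.db]
	# On any given host, exactly one of the two should report being byte-swapped.
	puts "little-endian file swapped: [$db_le is_byteswapped]"
	puts "big-endian file swapped:    [$db_be is_byteswapped]"
	$db_le close
	$db_be close
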
diff --git a/bdb/test/test023.tcl b/bdb/test/test023.tcl
new file mode 100644
index 00000000000..c222bdd83c5
--- /dev/null
+++ b/bdb/test/test023.tcl
@@ -0,0 +1,204 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test023.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+#
+# Duplicate delete test.
+# Add a key with duplicates (first pass on-page, second pass off-page),
+# number the dups, delete several of them, and make sure that the
+# CURRENT/NEXT/PREV cursor operations behave correctly afterward.
+proc test023 { method args } {
+ global alphabet
+ global dupnum
+ global dupstr
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test023: $method delete duplicates/check cursor operations"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test023: skipping for method $omethod"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test023.db
+ set env NULL
+ } else {
+ set testfile test023.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ foreach i { onpage offpage } {
+ if { $i == "onpage" } {
+ set dupstr DUP
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ puts "\tTest023.a: Insert key w/$i dups"
+ set key "duplicate_val_test"
+ for { set count 0 } { $count < 20 } { incr count } {
+ set ret \
+ [eval {$db put} $txn $pflags {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ # Now let's get all the items and make sure they look OK.
+ puts "\tTest023.b: Check initial duplicates"
+ set dupnum 0
+ dump_file $db $txn $t1 test023.check
+
+		# Delete a few items (FIRST, LAST, and one in the middle).
+		# Make sure that CURRENT returns an error and that NEXT and
+		# PREV do the right things.
+
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ puts "\tTest023.c: Delete first and try gets"
+ # This should be the first duplicate
+ error_check_good \
+ dbc_get:SET $ret [list [list duplicate_val_test 0$dupstr]]
+
+ # Now delete it.
+ set ret [$dbc del]
+ error_check_good dbc_del:FIRST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good dbc_get:CURRENT $ret [list [list [] []]]
+
+ # Now Prev should fail
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:prev0 [llength $ret] 0
+
+ # Now 10 nexts should work to get us in the middle
+ for { set j 1 } { $j <= 10 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.d: Delete middle and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:10 $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good \
+ dbc_get:deleted $ret [list [list [] []]]
+
+ # Prev and Next should work
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 11$dupstr
+
+		set ret [$dbc get -prev]
+		error_check_good dbc_get:prev [llength [lindex $ret 0]] 2
+		error_check_good \
+		    dbc_get:prev [lindex [lindex $ret 0] 1] 9$dupstr
+
+ # Now go to the last one
+ for { set j 11 } { $j <= 19 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.e: Delete last and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:LAST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good \
+ dbc_get:deleted $ret [list [list [] []]]
+
+ # Next should fail
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next19 [llength $ret] 0
+
+ # Prev should work
+ set ret [$dbc get -prev]
+		error_check_good dbc_get:prev [llength [lindex $ret 0]] 2
+		error_check_good \
+		    dbc_get:prev [lindex [lindex $ret 0] 1] 18$dupstr
+
+ # Now overwrite the current one, then count the number
+ # of data items to make sure that we have the right number.
+
+ puts "\tTest023.f: Count keys, overwrite current, count again"
+		# At this point we should have 17 items (the initial 20 minus
+		# the 3 deletes).
+ set dbc2 [$db cursor]
+ error_check_good db_cursor:2 [is_substr $dbc2 $db] 1
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+
+ set ret [$dbc put -current OVERWRITE]
+ error_check_good dbc_put:current $ret 0
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+
+ # Done, delete all the keys for next iteration
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_delete $ret 0
+
+ # database should be empty
+
+ set ret [$dbc get -first]
+ error_check_good first_after_empty [llength $ret] 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test023; the data is the dup number followed by dupstr
+proc test023.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "bad key" $key duplicate_val_test
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
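
A minimal sketch of the cursor behaviour test023 depends on: after a cursor
delete, a -current get returns an empty key/data pair while -next still
advances. It assumes the same berkdb Tcl API; dups.db and the sample values
are illustrative:

	set db [berkdb_open -create -mode 0644 -btree -dup dups.db]
	foreach d {0 1 2} { $db put mykey $d }
	set dbc [$db cursor]
	set ret [$dbc get -set mykey]	;# position on the first duplicate
	$dbc del			;# delete the item under the cursor
	puts [$dbc get -current]	;# deleted item: expect an empty key/data pair
	puts [$dbc get -next]		;# the next duplicate is still reachable
	$dbc close
	$db close
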
diff --git a/bdb/test/test024.tcl b/bdb/test/test024.tcl
new file mode 100644
index 00000000000..f0b6762cd2f
--- /dev/null
+++ b/bdb/test/test024.tcl
@@ -0,0 +1,206 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test024.tcl,v 11.14 2000/08/25 14:21:55 sue Exp $
+#
+# DB Test 24 {method nentries}
+# Test the Btree and Record number get-by-number functionality.
+proc test024 { method {nentries 10000} args} {
+ source ./include.tcl
+ global rand_init
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test024: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test024 skipping for method HASH"
+ return
+ }
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test024.db
+ set env NULL
+ } else {
+ set testfile test024.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest024.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create -truncate \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create -truncate \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ puts "\tTest024.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $k]]]
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest024.c: dump file"
+
+ # Put sorted keys in file
+ set oid [open $t1 w]
+ foreach k $sorted_keys {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ # Instead of using dump_file; get all the keys by keynum
+ set oid [open $t2 w]
+ if { [string compare $omethod "-btree"] == 0 } {
+ set do_renumber 1
+ }
+
+ set gflags " -recno"
+
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+
+ error_check_good Test024.c:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest024.d: close, open, and dump file"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set oid [open $t2 w]
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.d:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest024.e: close, open, and dump file in reverse direction"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Put sorted keys in file
+ set rsorted ""
+ foreach k $sorted_keys {
+ set rsorted [linsert $rsorted 0 $k]
+ }
+ set oid [open $t1 w]
+ foreach k $rsorted {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ for { set k $count } { $k > 0 } { incr k -1 } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.e:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now try deleting elements and making sure they work
+ puts "\tTest024.f: delete test"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ while { $count > 0 } {
+ set kndx [berkdb random_int 1 $count]
+ set kval [lindex $keys [expr $kndx - 1]]
+ set recno [expr [lsearch $sorted_keys $kval] + 1]
+
+ if { [is_record_based $method] == 1 } {
+ set ret [eval {$db del} $txn {$recno}]
+ } else {
+ set ret [eval {$db del} $txn {$kval}]
+ }
+ error_check_good delete $ret 0
+
+ # Remove the key from the key list
+ set ndx [expr $kndx - 1]
+ set keys [lreplace $keys $ndx $ndx]
+
+ if { $do_renumber == 1 } {
+ set r [expr $recno - 1]
+ set sorted_keys [lreplace $sorted_keys $r $r]
+ }
+
+ # Check that the keys after it have been renumbered
+ if { $do_renumber == 1 && $recno != $count } {
+ set r [expr $recno - 1]
+ set ret [eval {$db get} $txn $gflags {$recno}]
+ error_check_good get_after_del \
+ [lindex [lindex $ret 0] 1] [lindex $sorted_keys $r]
+ }
+
+ # Decrement count
+ incr count -1
+ }
+ error_check_good db_close [$db close] 0
+}
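
The get-by-record-number access that test024 exercises, reduced to a sketch;
it assumes the berkdb Tcl API, and bynum.db plus the keys are illustrative:

	# A btree opened with -recnum can also be read by logical record number:
	# record N is the Nth key in sorted order.
	set db [berkdb_open -create -mode 0644 -recnum -btree bynum.db]
	foreach k {cherry apple banana} { $db put $k $k }
	# Sorted order is apple, banana, cherry, so record 2 should be banana.
	puts [$db get -recno 2]
	$db close
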
diff --git a/bdb/test/test025.tcl b/bdb/test/test025.tcl
new file mode 100644
index 00000000000..9f8deecb488
--- /dev/null
+++ b/bdb/test/test025.tcl
@@ -0,0 +1,105 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test025.tcl,v 11.11 2000/11/16 23:56:18 ubell Exp $
+#
+# DB Test 25 {method nentries}
+# Test the DB_APPEND flag.
+proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test0$tnum: $method ($args)"
+
+ if { [string compare $omethod "-btree"] == 0 } {
+ puts "Test0$tnum skipping for method BTREE"
+ return
+ }
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test0$tnum skipping for method HASH"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ puts "\tTest0$tnum.a: put/get loop"
+ set gflags " -recno"
+ set pflags " -append"
+ set txn ""
+ set checkfunc test025_check
+
+ # Here is the loop where we put and get each key/data pair
+ set count $start
+ set nentries [expr $start + $nentries]
+ if { $count != 0 } {
+ gets $did str
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ set ret [eval {$db put} $txn $k {[chop_data $method $str]}]
+ error_check_good db_put $ret 0
+ incr count
+ }
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}]
+ error_check_good db_put $ret $k
+
+ set ret [eval {$db get} $txn $gflags {$k}]
+ error_check_good \
+ get $ret [list [list $k [pad_data $method $str]]]
+ incr count
+ if { [expr $count + 1] == 0 } {
+ incr count
+ }
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction -first -next
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction -last -prev
+}
+
+proc test025_check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good " key/data mismatch for |$key|" $data $kvals($key)
+}
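
A small sketch of the -append behaviour tested above: the put returns the
newly allocated record number, which a -recno get can then fetch (assumes the
berkdb Tcl API; append.db is an illustrative file name):

	set db [berkdb_open -create -mode 0644 -recno append.db]
	set recno [$db put -append "first record"]	;# returns the new record number
	puts "record $recno holds: [$db get -recno $recno]"
	$db close
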
diff --git a/bdb/test/test026.tcl b/bdb/test/test026.tcl
new file mode 100644
index 00000000000..6c19c60a2e5
--- /dev/null
+++ b/bdb/test/test026.tcl
@@ -0,0 +1,112 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test026.tcl,v 11.13 2000/11/17 19:07:51 sue Exp $
+#
+# DB Test 26 {access method}
+# Keyed delete test through cursor.
+# If ndups is small, this will test on-page dups; if it's large, it
+# will test off-page dups.
+proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ puts "Test0$tnum: $method ($args) $nentries keys\
+ with $ndups dups; cursor delete test"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ puts "\tTest0$tnum.a: Put loop"
+ set db [eval {berkdb_open -create -truncate \
+ -mode 0644} $args {$omethod -dup $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } {
+ set datastr [ make_data_str $str ]
+ for { set j 1 } { $j <= $ndups} {incr j} {
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $j$datastr]}]
+ error_check_good db_put $ret 0
+ incr count
+ }
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Now we will sequentially traverse the database getting each
+ # item and deleting it.
+ set count 0
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest0$tnum.b: Get/delete loop"
+ set i 1
+ for { set ret [$dbc get -first] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ if { $i == 1 } {
+ set curkey $key
+ }
+ error_check_good seq_get:key $key $curkey
+ error_check_good \
+ seq_get:data $data [pad_data $method $i[make_data_str $key]]
+
+ if { $i == $ndups } {
+ set i 1
+ } else {
+ incr i
+ }
+
+ # Now delete the key
+ set ret [$dbc del]
+ error_check_good db_del:$key $ret 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: Verify empty file"
+ # Double check that file is now empty
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set ret [$dbc get -first]
+ error_check_good get_on_empty [string length $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/bdb/test/test027.tcl b/bdb/test/test027.tcl
new file mode 100644
index 00000000000..ae4bf64fb3e
--- /dev/null
+++ b/bdb/test/test027.tcl
@@ -0,0 +1,13 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test027.tcl,v 11.4 2000/05/22 12:51:39 bostic Exp $
+#
+# DB Test 27 {access method}
+# Check that delete operations work. Create a database, then close and
+# reopen it. Then issue a delete by key for each entry.
+proc test027 { method {nentries 100} args} {
+ eval {test026 $method $nentries 100 27} $args
+}
diff --git a/bdb/test/test028.tcl b/bdb/test/test028.tcl
new file mode 100644
index 00000000000..b460dd53a98
--- /dev/null
+++ b/bdb/test/test028.tcl
@@ -0,0 +1,208 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test028.tcl,v 11.12 2000/08/25 14:21:55 sue Exp $
+#
+# Put after cursor delete test.
+proc test028 { method args } {
+ global dupnum
+ global dupstr
+ global alphabet
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test028: $method put after cursor delete test"
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test028 skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ set key 10
+ } else {
+ append args " -dup"
+ set key "put_after_cursor_del"
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test028.db
+ set env NULL
+ } else {
+ set testfile test028.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set ndups 20
+ set txn ""
+ set pflags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ foreach i { offpage onpage } {
+ foreach b { bigitem smallitem } {
+ if { $i == "onpage" } {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr DUP
+ }
+ } else {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ }
+
+ if { $b == "bigitem" } {
+ set dupstr [repeat $dupstr 10]
+ }
+ puts "\tTest028: $i/$b"
+
+ puts "\tTest028.a: Insert key with single data item"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+			# Now let's get the item and make sure it's OK.
+ puts "\tTest028.b: Check initial entry"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get \
+ $ret [list [list $key [pad_data $method $dupstr]]]
+
+ # Now try a put with NOOVERWRITE SET (should be error)
+ puts "\tTest028.c: No_overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete the item with a cursor
+ puts "\tTest028.d: Delete test"
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ puts "\tTest028.e: Reput the item"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+ puts "\tTest028.f: Retrieve the item"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $ret \
+ [list [list $key [pad_data $method $dupstr]]]
+
+ # Delete the key to set up for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+
+ # Now repeat the above set of tests with
+ # duplicates (if not RECNO).
+ if { [is_record_based $method] == 1 } {
+ continue;
+ }
+
+ puts "\tTest028.g: Insert key with duplicates"
+ for { set count 0 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} \
+ $txn {$key [chop_data $method $count$dupstr]}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.h: Check dups"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Try no_overwrite
+ puts "\tTest028.i: No_overwrite test"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key $dupstr}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete all the elements with a cursor
+ puts "\tTest028.j: Cursor Deletes"
+ set count 0
+ for { set ret [$dbc get -set $key] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good db_seq(key) $k $key
+ error_check_good db_seq(data) $d $count$dupstr
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+ incr count
+ if { $count == [expr $ndups - 1] } {
+ puts "\tTest028.k:\
+ Duplicate No_Overwrite test"
+					set errorInfo ""
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key $dupstr}]
+ error_check_good db_put [is_substr \
+ $ret "DB_KEYEXIST"] 1
+ }
+ }
+
+ # Make sure all the items are gone
+ puts "\tTest028.l: Get after delete"
+ set ret [$dbc get -set $key]
+ error_check_good get_after_del [string length $ret] 0
+
+ puts "\tTest028.m: Reput the item"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key 0$dupstr}]
+ error_check_good db_put $ret 0
+ for { set count 1 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.n: Retrieve the item"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Clean out in prep for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ }
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test028; the data is the dup number followed by dupstr
+proc test028.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "Bad key" $key put_after_cursor_del
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
diff --git a/bdb/test/test029.tcl b/bdb/test/test029.tcl
new file mode 100644
index 00000000000..c10815b0bf3
--- /dev/null
+++ b/bdb/test/test029.tcl
@@ -0,0 +1,192 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test029.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+#
+# DB Test 29 {method nentries}
+# Test the Btree and Record number renumbering.
+proc test029 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test029: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test029 skipping for method HASH"
+ return
+ }
+ if { [is_record_based $method] == 1 && $do_renumber != 1 } {
+ puts "Test029 skipping for method RECNO (w/out renumbering)"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test029.db
+ set env NULL
+ } else {
+ set testfile test029.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest029.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+
+ # Save the first and last keys
+ set last_key [lindex $sorted_keys end]
+ set last_keynum [llength $sorted_keys]
+
+ set first_key [lindex $sorted_keys 0]
+ set first_keynum 1
+
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create -truncate \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create -truncate \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest029.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good dbput $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ if { [string compare [lindex [lindex $ret 0] 1] $k] != 0 } {
+ puts "Test029: put key-data $key $k got $ret"
+ return
+ }
+ }
+
+ # Now delete the first key in the database
+ puts "\tTest029.c: delete and verify renumber"
+
+ # Delete the first key in the file
+ if { [is_record_based $method] == 1 } {
+ set key $first_keynum
+ } else {
+ set key $first_key
+ }
+
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+
+ # Now we are ready to retrieve records based on
+ # record number
+ if { [string compare $omethod "-btree"] == 0 } {
+ append gflags " -recno"
+ }
+
+ # First try to get the old last key (shouldn't exist)
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_after_del $ret [list]
+
+ # Now try to get what we think should be the last key
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_last_after_del [lindex [lindex $ret 0] 1] $last_key
+
+ # Create a cursor; we need it for the next test and we
+ # need it for recno here.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # OK, now re-put the first key and make sure that we
+ # renumber the last key appropriately.
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$db put} $txn {$key [chop_data $method $first_key]}]
+ error_check_good db_put $ret 0
+ } else {
+ # Recno
+ set ret [eval {$dbc get} $txn {-first}]
+ set ret [eval {$dbc put} $txn $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ getn_last_after_put [lindex [lindex $ret 0] 1] $last_key
+
+ # Now delete the first key in the database using a cursor
+ puts "\tTest029.d: delete with cursor and verify renumber"
+
+ set ret [eval {$dbc get} $txn {-first}]
+ error_check_good dbc_first $ret [list [list $key $first_key]]
+
+ # Now delete at the cursor
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ # Now check the record numbers of the last keys again.
+ # First try to get the old last key (shouldn't exist)
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_last_after_cursor_del:$ret $ret [list]
+
+ # Now try to get what we think should be the last key
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_after_cursor_del [lindex [lindex $ret 0] 1] $last_key
+
+ # Re-put the first key and make sure that we renumber the last
+ # key appropriately.
+ puts "\tTest029.e: put with cursor and verify renumber"
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$dbc put} \
+ $txn $pflags {-current $first_key}]
+ error_check_good dbc_put:DB_CURRENT $ret 0
+ } else {
+ set ret [eval {$dbc put} $txn $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
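
The renumbering that test029 verifies, in miniature: with a -recnum btree,
deleting the first key shifts the logical record number of everything after
it (assumes the berkdb Tcl API; renum.db and the keys are illustrative):

	set db [berkdb_open -create -mode 0644 -recnum -btree renum.db]
	foreach k {a b c} { $db put $k $k }
	puts [$db get -recno 2]	;# record 2 is "b" while all three keys exist
	$db del a		;# deleting the first key renumbers the rest
	puts [$db get -recno 2]	;# record 2 is now expected to be "c"
	$db close
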
diff --git a/bdb/test/test030.tcl b/bdb/test/test030.tcl
new file mode 100644
index 00000000000..7395adf82bd
--- /dev/null
+++ b/bdb/test/test030.tcl
@@ -0,0 +1,191 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test030.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+#
+# DB Test 30: Test DB_NEXT_DUP Functionality.
+proc test030 { method {nentries 10000} args } {
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 ||
+ [is_rbtree $method] == 1 } {
+ puts "Test030 skipping for method $method"
+ return
+ }
+
+ puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test030.db
+ set cntfile $testdir/cntfile.db
+ set env NULL
+ } else {
+ set testfile test030.db
+ set cntfile cntfile.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -truncate \
+ -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Use a second DB to keep track of how many duplicates
+ # we enter per key
+
+ set cntdb [eval {berkdb_open -create -truncate \
+ -mode 0644} $args {-btree $cntfile}]
+	error_check_good dbopen:cntfile [is_valid_db $cntdb] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+	# We will add between 1 and 10 dups per key, with values 1 ... ndup,
+	# and we'll verify each addition.
+
+ set did [open $dict]
+ puts "\tTest030.a: put and get duplicate keys."
+ set dbc [eval {$db cursor} $txn]
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ndup [berkdb random_int 1 10]
+
+ for { set i 1 } { $i <= $ndup } { incr i 1 } {
+ set ret [eval {$cntdb put} \
+ $txn $pflags {$str [chop_data $method $ndup]}]
+ error_check_good put_cnt $ret 0
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr x
+
+ if { [llength $ret] == 0 } {
+ break
+ }
+
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good Test030:put $d $str
+
+ set id [ id_of $datastr ]
+ error_check_good Test030:dup# $id $x
+ }
+ error_check_good Test030:numdups $x $ndup
+ incr count
+ }
+ close $did
+
+ # Verify on sequential pass of entire file
+ puts "\tTest030.b: sequential check"
+
+ # We can't just set lastkey to a null string, since that might
+ # be a key now!
+ set lastkey "THIS STRING WILL NEVER BE A KEY"
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+
+ # Outer loop should always get a new key
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_bad outer_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good outer_get_loop:data $d $k
+ error_check_good outer_get_loop:id $id 1
+
+ set lastkey $k
+		# Figure out how many dups we should have
+ set ret [eval {$cntdb get} $txn $pflags {$k}]
+ set ndup [lindex [lindex $ret 0] 1]
+
+ set howmany 1
+ for { set ret [$dbc get -nextdup] } \
+ { [llength $ret] != 0 } \
+ { set ret [$dbc get -nextdup] } {
+ incr howmany
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good inner_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $howmany
+
+ }
+ error_check_good ndups_found $howmany $ndup
+ }
+
+ # Verify on key lookup
+ puts "\tTest030.c: keyed check"
+ set cnt_dbc [$cntdb cursor]
+ for {set ret [$cnt_dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$cnt_dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+
+ set howmany [lindex [lindex $ret 0] 1]
+ error_check_bad cnt_seq:data [string length $howmany] 0
+
+ set i 0
+ for {set ret [$dbc get -set $k]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr i
+
+ set k [lindex [lindex $ret 0] 0]
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $i
+ }
+ error_check_good keyed_count $i $howmany
+
+ }
+ error_check_good cnt_curs_close [$cnt_dbc close] 0
+ error_check_good db_curs_close [$dbc close] 0
+ error_check_good cnt_file_close [$cntdb close] 0
+ error_check_good db_file_close [$db close] 0
+}
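
The DB_NEXT_DUP idiom at the heart of test030, reduced to a sketch: walk
every duplicate of one key without spilling over into the next key (assumes
the berkdb Tcl API; nextdup.db and the sample values are illustrative):

	set db [berkdb_open -create -mode 0644 -btree -dup nextdup.db]
	foreach d {1:apple 2:apple 3:apple} { $db put fruit $d }
	$db put grain oats	;# a different key that -nextdup must never reach
	set dbc [$db cursor]
	for {set ret [$dbc get -set fruit]} \
	    {[llength $ret] != 0} \
	    {set ret [$dbc get -nextdup]} {
		puts "dup: [lindex [lindex $ret 0] 1]"
	}
	$dbc close
	$db close
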
diff --git a/bdb/test/test031.tcl b/bdb/test/test031.tcl
new file mode 100644
index 00000000000..35041541fa7
--- /dev/null
+++ b/bdb/test/test031.tcl
@@ -0,0 +1,196 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test031.tcl,v 11.17 2000/11/06 19:31:55 sue Exp $
+#
+# DB Test 31 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and "ndups" duplicates
+# For the data field, prepend random five-char strings (see test032)
+# so that we force the duplicate sorting code to do something.
+# Along the way, test that we cannot insert duplicate duplicates
+# using DB_NODUPDATA.
+# By setting ndups large, we can make this an off-page test
+# After all are entered, retrieve all; verify output.
+# Close file, reopen, do retrieve and re-verify.
+# This does not work for recno
+proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: \
+ $method ($args) $nentries small sorted dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create -truncate \
+ -mode 0644} $args {$omethod -dup -dupsort $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop, check nodupdata"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ if { $i == 2 } {
+ set nodupstr $datastr
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Test DB_NODUPDATA using the DB handle
+ set ret [eval {$db put -nodupdata} \
+ $txn $pflags {$str [chop_data $method $nodupstr]}]
+ error_check_good db_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ # Test DB_NODUPDATA using cursor handle
+ set ret [$dbc get -set $str]
+ error_check_bad dbc_get [llength $ret] 0
+ set datastr [lindex [lindex $ret 0] 1]
+ error_check_bad dbc_data [string length $datastr] 0
+ set ret [eval {$dbc put -nodupdata} \
+ {$str [chop_data $method $datastr]}]
+ error_check_good dbc_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare \
+ $lastdup [pad_data $method $datastr]] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open(2) [is_substr $dbc $db] 1
+
+ set lastkey "THIS WILL NEVER BE A KEY VALUE"
+ # no need to delete $lastkey
+ set firsttimethru 1
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ if { [string compare $k $lastkey] != 0 } {
+ # Remove last key from the checkdb
+ if { $firsttimethru != 1 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+ set firsttimethru 0
+ set lastdup ""
+ set lastkey $k
+ set dups [lindex [lindex [eval {$check_db get} \
+ $txn {$k}] 0] 1]
+ error_check_good check_db:get:$k \
+ [string length $dups] [expr $ndups * 4]
+ }
+
+ if { [string compare $lastdup $d] > 0 } {
+ error_check_good dup_check:$k:$d 0 1
+ }
+ set lastdup $d
+
+ set pref [string range $d 0 3]
+ set ndx [string first $pref $dups]
+ error_check_good valid_duplicate [expr $ndx >= 0] 1
+ set a [string range $dups 0 [expr $ndx - 1]]
+ set b [string range $dups [expr $ndx + 4] end]
+ set dups $a$b
+ }
+ # Remove last key from the checkdb
+ if { [string length $lastkey] != 0 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+
+ # Make sure there is nothing left in check_db
+
+ set check_c [eval {$check_db cursor} $txn]
+ set ret [$check_c get -first]
+ error_check_good check_c:get:$ret [llength $ret] 0
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good check_db:close [$check_db close] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
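
The DB_NODUPDATA check above, in isolation: with -dup -dupsort, re-inserting
an identical key/data pair via -nodupdata should come back reporting
DB_KEYEXIST rather than storing a second copy (assumes the berkdb Tcl API;
nodup.db and the values are illustrative):

	set db [berkdb_open -create -mode 0644 -btree -dup -dupsort nodup.db]
	$db put colour blue
	$db put colour red
	set ret [$db put -nodupdata colour red]	;# a duplicate duplicate
	puts "nodupdata on an existing pair returned: $ret"
	$db close
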
diff --git a/bdb/test/test032.tcl b/bdb/test/test032.tcl
new file mode 100644
index 00000000000..1504ec5cc2d
--- /dev/null
+++ b/bdb/test/test032.tcl
@@ -0,0 +1,195 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test032.tcl,v 11.15 2000/08/25 14:21:55 sue Exp $
+#
+# DB Test 32 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and "ndups" duplicates
+# For the data field, prepend the letters of the alphabet
+# in a random order so that we force the duplicate sorting
+# code to do something.
+# By setting ndups large, we can make this an off-page test
+# After all are entered, test the DB_GET_BOTH functionality
+# first by retrieving each dup in the file explicitly. Then
+# test the failure case.
+proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
+ global alphabet rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum:\
+ $method ($args) $nentries small sorted dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create -truncate -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good sorted_dups($lastdup,$datastr)\
+ 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates (no cursor)"
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good check_c_open(2) \
+ [is_substr $check_c $check_db] 1
+
+ for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set pref [string range $d $ndx [expr $ndx + 3]]
+ set data $pref:$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good \
+ get_both_data:$k $ret [list [list $k $data]]
+ }
+ }
+
+ $db sync
+ # Now repeat the above test using cursor ops
+ puts "\tTest0$tnum.c: Checking file for correct duplicates (cursor)"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+
+ for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set pref [string range $d $ndx [expr $ndx + 3]]
+ set data $pref:$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good \
+ get_both_key:$k $ret [list [list $k $data]]
+ }
+ }
+
+ # Now check the error case
+ puts "\tTest0$tnum.d: Check error case (no cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ # Now check the error case
+ puts "\tTest0$tnum.e: Check error case (cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good check_db:close [$check_db close] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
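
The DB_GET_BOTH lookup that test032 and test033 verify, as a standalone
sketch: ask for one specific key/data pair out of a duplicate set, and expect
an empty result for a pair that was never stored (assumes the berkdb Tcl API;
both.db and the values are illustrative):

	set db [berkdb_open -create -mode 0644 -btree -dup -dupsort both.db]
	foreach d {ant bee cat} { $db put pets $d }
	puts [$db get -get_both pets bee]		;# expect the pair {pets bee}
	puts [llength [$db get -get_both pets dog]]	;# expect 0: pair not present
	$db close
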
diff --git a/bdb/test/test033.tcl b/bdb/test/test033.tcl
new file mode 100644
index 00000000000..ed46e6bda04
--- /dev/null
+++ b/bdb/test/test033.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test033.tcl,v 11.11 2000/10/25 15:45:20 sue Exp $
+#
+# DB Test 33 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and data; add duplicate
+# records for each.
+# After all are entered, retrieve all; verify output by doing
+# DB_GET_BOTH on existing and non-existing keys.
+# This does not work for recno
+proc test033 { method {nentries 10000} {ndups 5} {tnum 33} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: $method ($args) $nentries small dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -truncate -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ puts "\tTest0$tnum.a: Put/get loop."
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good db_put $ret 0
+ }
+
+ # Now retrieve all the keys matching this key and dup
+ for {set i 1} {$i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good "Test0$tnum:dup#" [lindex \
+ [lindex $ret 0] 1] [pad_data $method $datastr]
+ }
+
+ # Now retrieve non-existent dup (i is ndups + 1)
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good Test0$tnum:dupfailure [llength $ret] 0
+ incr count
+ }
+ close $did
+
+ set did [open $dict]
+ set count 0
+ puts "\tTest0$tnum.b: Verifying DB_GET_BOTH after creation."
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Now retrieve all the keys matching this key and dup
+ for {set i 1} {$i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good "Test0$tnum:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+ }
+
+ # Now retrieve non-existent dup (i is ndups + 1)
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good Test0$tnum:dupfailure [llength $ret] 0
+ incr count
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/bdb/test/test034.tcl b/bdb/test/test034.tcl
new file mode 100644
index 00000000000..b82f369f791
--- /dev/null
+++ b/bdb/test/test034.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test034.tcl,v 11.4 2000/02/14 03:00:19 bostic Exp $
+#
+# DB Test 34 {access method}
+# DB_GET_BOTH functionality with off-page duplicates.
+proc test034 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test032 $method $nentries 20 34 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test032 $method [expr $nentries / 10] 100 34 -pagesize 512} $args
+}
diff --git a/bdb/test/test035.tcl b/bdb/test/test035.tcl
new file mode 100644
index 00000000000..e2afef4afb3
--- /dev/null
+++ b/bdb/test/test035.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test035.tcl,v 11.3 2000/02/14 03:00:19 bostic Exp $
+#
+# DB Test 35 {access method}
+# DB_GET_BOTH functionality with off-page duplicates.
+proc test035 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test033 $method $nentries 20 35 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test033 $method [expr $nentries / 10] 100 35 -pagesize 512} $args
+}
diff --git a/bdb/test/test036.tcl b/bdb/test/test036.tcl
new file mode 100644
index 00000000000..4d859c0652a
--- /dev/null
+++ b/bdb/test/test036.tcl
@@ -0,0 +1,135 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test036.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+#
+# DB Test 36 {access method}
+# Put nentries key/data pairs (from the dictionary) using a cursor
+# and KEYFIRST and KEYLAST (this tests the case where we use cursor
+# put for non-existent keys).
+proc test036 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test036: $method ($args) $nentries equal key/data pairs"
+ if { [is_record_based $method] == 1 } {
+ puts "Test036 skipping for method recno"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test036.db
+ set env NULL
+ } else {
+ set testfile test036.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test036_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test036.check
+ }
+ puts "\tTest036.a: put/get loop KEYFIRST"
+ # Here is the loop where we put and get each key/data pair
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_substr $dbc $db] 1
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $txn $pflags {-keyfirst $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+
+	puts "\tTest036.b: put/get loop KEYLAST"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_substr $dbc $db] 1
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $txn $pflags {-keylast $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest036.c: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+}
+
+# Check function for test036; keys and data are identical
+proc test036.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test036_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/test037.tcl b/bdb/test/test037.tcl
new file mode 100644
index 00000000000..31528c6ee54
--- /dev/null
+++ b/bdb/test/test037.tcl
@@ -0,0 +1,191 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test037.tcl,v 11.11 2000/08/25 14:21:55 sue Exp $
+#
+# Test037: RMW functionality.
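+#
+# A hypothetical sketch (not called by the test) of the read-modify-write
+# get that Test037.e times: with -rmw the get takes a write lock, so a
+# concurrent reader blocks until this transaction commits.  Assumes $env
+# and $db are open transactional handles.
+proc test037_example_rmw_get { env db key } {
+ set t [$env txn]
+ # -rmw acquires a write lock on the item instead of a read lock.
+ set rec [$db get -txn $t -rmw $key]
+ # ... modify the data and put it back here ...
+ error_check_good example_commit [$t commit] 0
+ return $rec
+}
+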
+proc test037 { method {nentries 100} args } {
+ source ./include.tcl
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test037 skipping for env $env"
+ return
+ }
+
+ puts "Test037: RMW $method"
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database
+ env_cleanup $testdir
+ set testfile test037.db
+
+ set local_env \
+ [berkdb env -create -mode 0644 -txn -home $testdir]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ set db [eval {berkdb_open \
+ -env $local_env -create -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest037.a: Creating database"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ incr count
+ }
+ close $did
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$local_env close] 0
+
+ puts "\tTest037.b: Setting up environments"
+
+ # Open local environment
+ set env_cmd [concat berkdb env -create -txn -home $testdir]
+ set local_env [eval $env_cmd]
+ error_check_good dbenv [is_valid_widget $local_env env] TRUE
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote environment
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good \
+ remote:txn_open [is_valid_txn $remote_txn $remote_env] TRUE
+
+ # Now run the test without RMW. Gets on one site should not
+ # lock out gets on another.
+
+ # Open databases and dictionary
+ puts "\tTest037.c: Opening databases"
+ set did [open $dict]
+ set rkey 0
+
+ set db [berkdb_open -env $local_env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set rdb [send_cmd $f1 \
+ "berkdb_open -env $remote_env -mode 0644 $testfile"]
+ error_check_good remote:dbopen [is_valid_widget $rdb db] TRUE
+
+ puts "\tTest037.d: Testing without RMW"
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn} $gflags {$key}]
+ error_check_good local_get [lindex [lindex $rec 0] 1] \
+ [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good no_rmw_get:remote_time [expr $remote_time <= 1] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ puts "\tTest037.e: Testing with RMW"
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good \
+ txn_open [is_valid_widget $local_txn $local_env.txn] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good remote:txn_open \
+ [is_valid_widget $remote_txn $remote_env.txn] TRUE
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn -rmw} $gflags {$key}]
+ error_check_good \
+ local_get [lindex [lindex $rec 0] 1] [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good rmw_get:remote_time [expr $remote_time > 4] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ # Close everything up: remote first
+ set r [send_cmd $f1 "$rdb close"]
+ error_check_good remote_db_close $r 0
+
+ set r [send_cmd $f1 "$remote_env close"]
+
+ # Close locally
+ error_check_good db_close [$db close] 0
+ $local_env close
+ close $did
+ close $f1
+}
diff --git a/bdb/test/test038.tcl b/bdb/test/test038.tcl
new file mode 100644
index 00000000000..2a726f1bcd9
--- /dev/null
+++ b/bdb/test/test038.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test038.tcl,v 11.12 2000/08/25 14:21:56 sue Exp $
+#
+# DB Test 38 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and "ndups" duplicates
+# For the data field, prepend the letters of the alphabet
+# in a random order so that we force the duplicate sorting
+# code to do something.
+# By setting ndups large, we can make this an off-page test.
+# After all are entered, test the DB_GET_BOTH functionality,
+# first by retrieving each dup in the file explicitly. Then
+# remove each duplicate and try DB_GET_BOTH again.
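+#
+# A hypothetical helper (not used by the test itself) showing the
+# DB_GET_BOTH lookup the test relies on; $db is assumed to be open
+# with -dup.
+proc test038_example_get_both { db key data } {
+ # -get_both positions the cursor only on an exact key/data match,
+ # so an empty result means that particular duplicate is absent.
+ set dbc [$db cursor]
+ set ret [$dbc get -get_both $key $data]
+ error_check_good example_dbc_close [$dbc close] 0
+ return [expr [llength $ret] != 0]
+}
+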
+proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: \
+ $method ($args) $nentries small sorted dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ set db [eval {berkdb_open -create -truncate -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [berkdb_open \
+ -create -truncate -mode 0644 -hash $checkdb]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good sorted_dups($lastdup,$datastr)\
+ 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ close $did
+
+ # Now check the duplicates, then delete them and recheck
+ puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_substr $check_c $check_db] 1
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [eval {$dbc get} $txn {-get_both $k $data}]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+ set ret [$dbc del]
+ error_check_good del $ret 0
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret \
+ [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good check_db:close [$check_db close] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/bdb/test/test039.tcl b/bdb/test/test039.tcl
new file mode 100644
index 00000000000..957468ce542
--- /dev/null
+++ b/bdb/test/test039.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test039.tcl,v 11.11 2000/08/25 14:21:56 sue Exp $
+#
+# DB Test 39 {access method}
+# Use the first 10,000 entries from the dictionary.
+# Insert each with self as key and "ndups" duplicates
+# For the data field, prepend the letters of the alphabet
+# in a random order so that we force the duplicate handling
+# code to do something.
+# By setting ndups large, we can make this an off-page test.
+# After all are entered, test the DB_GET_BOTH functionality,
+# first by retrieving each dup in the file explicitly. Then
+# remove each duplicate and try DB_GET_BOTH again.
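+#
+# A hypothetical helper (not invoked by the test) showing how the test
+# walks the duplicates of a single key; $db is assumed to be open with
+# -dup (unsorted), so the dups come back in insertion order.
+proc test039_example_dups { db key } {
+ set dbc [$db cursor]
+ set dups {}
+ # -set positions on the first dup of $key; -nextdup steps through
+ # the remaining dups of that same key only.
+ for {set ret [$dbc get -set $key]} \
+     {[llength $ret] != 0} \
+     {set ret [$dbc get -nextdup]} {
+ lappend dups [lindex [lindex $ret 0] 1]
+ }
+ error_check_good example_dbc_close [$dbc close] 0
+ return $dups
+}
+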
+proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method $nentries small unsorted dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ set db [eval {berkdb_open -create -truncate -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db \
+ [berkdb_open -create -truncate -mode 0644 -hash $checkdb]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ set xx [expr $x * 3]
+ set check_data \
+ [string range $dups $xx [expr $xx + 1]]:$k
+ error_check_good retrieve $datastr $check_data
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ close $did
+
+ # Now check the duplicates, then delete them and recheck
+ puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_substr $check_c $check_db] 1
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret \
+ [eval {$dbc get} $txn $gflags {-get_both $k $data}]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret \
+ [eval {$dbc get} $txn $gflags {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret [eval {$dbc get} \
+ $txn $gflags {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good check_db:close [$check_db close] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/bdb/test/test040.tcl b/bdb/test/test040.tcl
new file mode 100644
index 00000000000..912e1735d8e
--- /dev/null
+++ b/bdb/test/test040.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test040.tcl,v 11.3 2000/02/14 03:00:20 bostic Exp $
+#
+# DB Test 40 {access method}
+# DB_GET_BOTH functionality with off-page duplicates.
+proc test040 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test038 $method $nentries 20 40 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test038 $method [expr $nentries / 10] 100 40 -pagesize 512} $args
+}
diff --git a/bdb/test/test041.tcl b/bdb/test/test041.tcl
new file mode 100644
index 00000000000..bba89f49b5a
--- /dev/null
+++ b/bdb/test/test041.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test041.tcl,v 11.3 2000/02/14 03:00:20 bostic Exp $
+#
+# DB Test 41 {access method}
+# DB_GET_BOTH functionality with off-page duplicates.
+proc test041 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test039 $method $nentries 20 41 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test039 $method [expr $nentries / 10] 100 41 -pagesize 512} $args
+}
diff --git a/bdb/test/test042.tcl b/bdb/test/test042.tcl
new file mode 100644
index 00000000000..232cb3a6b0e
--- /dev/null
+++ b/bdb/test/test042.tcl
@@ -0,0 +1,149 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test042.tcl,v 11.24 2000/08/25 14:21:56 sue Exp $
+#
+# DB Test 42 {access method}
+#
+# Multiprocess DB test; verify that locking is working for the concurrent
+# access method product.
+#
+# Use the first "nentries" words from the dictionary. Insert each with self
+# as key and a fixed, medium length data string. Then fire off multiple
+# processes that bang on the database. Each one should try to read and write
+# random keys. When they rewrite, they'll append their pid to the data string
+# (sometimes doing a rewrite, sometimes doing a partial put). Some will use
+# cursors to traverse through a few keys before finding one to write.
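+#
+# A hypothetical, simplified sketch of the rewrite each child performs (the
+# real worker logic lives in mdbscript.tcl and also does partial puts and
+# cursor traversals); it is not called from this file.
+proc test042_example_rewrite { db key } {
+ # Read the current data for $key, append our pid as the children do,
+ # and write the modified record back.
+ set ret [$db get $key]
+ if { [llength $ret] == 0 } {
+ return $ret
+ }
+ set data [lindex [lindex $ret 0] 1]
+ append data [pid]
+ return [$db put $key $data]
+}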
+
+set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+
+proc test042 { method {nentries 1000} args } {
+ global datastr
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test042 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test042: CDB Test $method $nentries"
+
+ # Set initial parameters
+ set do_exit 0
+ set iter 10000
+ set procs 5
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -dir { incr i; set testdir [lindex $args $i] }
+ -iter { incr i; set iter [lindex $args $i] }
+ -procs { incr i; set procs [lindex $args $i] }
+ -exit { set do_exit 1 }
+ default { append oargs " " [lindex $args $i] }
+ }
+ }
+
+ # Create the database and open the dictionary
+ set testfile test042.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set env [berkdb env -create -cdb -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ set db [eval {berkdb_open -env $env -create -truncate \
+ -mode 0644 $omethod} $oargs {$testfile}]
+ error_check_good dbopen [is_valid_widget $db db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest042.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+
+ # Database is created, now set up environment
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [berkdb env -create -cdb -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Now spawn off processes
+ berkdb debug_check
+ puts "\tTest042.b: forking off $procs children"
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ puts "exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log \
+ $method $testdir $testfile $nentries $iter $i $procs &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log $method \
+ $testdir $testfile $nentries $iter $i $procs &]
+ lappend pidlist $p
+ }
+ puts "Test042: $procs independent processes now running"
+ watch_procs
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/test042.*.log]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Test is done, blow away lock and mpool region
+ reset_env $env
+}
+
+# If we are renumbering, then each time we delete an item, the number of
+# items in the file is temporarily decreased, so the highest record numbers
+# do not exist. To avoid referencing a record number that is temporarily
+# missing, we never generate the highest few record numbers as keys.
+#
+# For record-based methods, record numbers begin at 1, while for other keys,
+# we begin at 0 to index into an array.
+proc rand_key { method nkeys renum procs} {
+ if { $renum == 1 } {
+ return [berkdb random_int 1 [expr $nkeys - $procs]]
+ } elseif { [is_record_based $method] == 1 } {
+ return [berkdb random_int 1 $nkeys]
+ } else {
+ return [berkdb random_int 0 [expr $nkeys - 1]]
+ }
+}
diff --git a/bdb/test/test043.tcl b/bdb/test/test043.tcl
new file mode 100644
index 00000000000..274ec1b7184
--- /dev/null
+++ b/bdb/test/test043.tcl
@@ -0,0 +1,162 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test043.tcl,v 11.12 2000/08/25 14:21:56 sue Exp $
+#
+# DB Test 43 {method nentries}
+# Test the Record number implicit creation and renumbering options.
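+#
+# A hypothetical illustration (never called by the test) of the implicit
+# creation behavior checked below: assuming $recno is past the current
+# last record, the put creates the intervening records as empty, and a
+# get on one of those gaps returns an empty list (DB_KEYEMPTY).
+proc test043_example_implicit { db method recno } {
+ set ret [$db put $recno [chop_data $method $recno]]
+ error_check_good example_put $ret 0
+ # The record just below $recno exists only implicitly and is empty.
+ return [$db get -recno [expr $recno - 1]]
+}
+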
+proc test043 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test043: $method ($args)"
+
+ if { [is_record_based $method] != 1 } {
+ puts "Test043 skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test043.db
+ set env NULL
+ } else {
+ set testfile test043.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ # Create the database
+ set db [eval {berkdb_open -create -truncate -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags " -recno"
+ set txn ""
+
+ # First test implicit creation and retrieval
+ set count 1
+ set interval 5
+ if { $nentries < $interval } {
+ set nentries [expr $interval + 1]
+ }
+ puts "\tTest043.a: insert keys at $interval record intervals"
+ while { $count <= $nentries } {
+ set ret [eval {$db put} \
+ $txn $pflags {$count [chop_data $method $count]}]
+ error_check_good "$db put $count" $ret 0
+ set last $count
+ incr count $interval
+ }
+
+ puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good "$db cursor" [is_substr $dbc $db] 1
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [pad_data $method [lindex [lindex $rec 0] 1]]
+ error_check_good "$dbc get key==data" [pad_data $method $k] $d
+ error_check_good "$dbc get sequential" $k $check
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ incr check $interval
+ }
+
+ # Now make sure that we get DB_KEYEMPTY for non-existent keys
+ puts "\tTest043.c: Retrieve non-existent keys"
+ global errorInfo
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+
+ set ret [eval {$db get} $txn $gflags {[expr $k + 1]}]
+ error_check_good "$db \
+ get [expr $k + 1]" $ret [list]
+
+ incr check $interval
+ # Make sure we don't do a retrieve past the end of file
+ if { $check >= $last } {
+ break
+ }
+ }
+
+ # Now try deleting and make sure the right thing happens.
+ puts "\tTest043.d: Delete tests"
+ set rec [$dbc get -first]
+ error_check_bad "$dbc get -first" [llength $rec] 0
+ error_check_good "$dbc get -first key" [lindex [lindex $rec 0] 0] 1
+ error_check_good "$dbc get -first data" \
+ [lindex [lindex $rec 0] 1] [pad_data $method 1]
+
+ # Delete the first item
+ error_check_good "$dbc del" [$dbc del] 0
+
+ # Retrieving 1 should always fail
+ set ret [eval {$db get} $txn $gflags {1}]
+ error_check_good "$db get 1" $ret [list]
+
+ # Now, retrieving other keys should work; keys will vary depending
+ # upon renumbering.
+ if { $do_renumber == 1 } {
+ set count [expr 0 + $interval]
+ set max [expr $nentries - 1]
+ } else {
+ set count [expr 1 + $interval]
+ set max $nentries
+ }
+
+ while { $count <= $max } {
+ set rec [eval {$db get} $txn $gflags {$count}]
+ if { $do_renumber == 1 } {
+ set data [expr $count + 1]
+ } else {
+ set data $count
+ }
+ error_check_good "$db get $count" \
+ [pad_data $method $data] [lindex [lindex $rec 0] 1]
+ incr count $interval
+ }
+ set max [expr $count - $interval]
+
+ puts "\tTest043.e: Verify LAST/PREV functionality"
+ set count $max
+ for { set rec [$dbc get -last] } { [llength $rec] != 0 } {
+ set rec [$dbc get -prev] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [lindex [lindex $rec 0] 1]
+ if { $do_renumber == 1 } {
+ set data [expr $k + 1]
+ } else {
+ set data $k
+ }
+ error_check_good \
+ "$dbc get key==data" [pad_data $method $data] $d
+ error_check_good "$dbc get sequential" $k $count
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ set count [expr $count - $interval]
+ if { $count < 1 } {
+ break
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/bdb/test/test044.tcl b/bdb/test/test044.tcl
new file mode 100644
index 00000000000..0be7a704961
--- /dev/null
+++ b/bdb/test/test044.tcl
@@ -0,0 +1,243 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test044.tcl,v 11.26 2000/10/27 13:23:56 sue Exp $
+#
+# DB Test 44 {access method}
+# System integration DB test: verify that locking, recovery, checkpoint,
+# and all the other utilities basically work.
+#
+# The test consists of $nprocs processes operating on $nfiles files. A
+# transaction consists of adding the same key/data pair to some random
+# number of these files. We generate a bimodal distribution in key
+# size with 70% of the keys being small (1-10 characters) and the
+# remaining 30% of the keys being large (uniform distribution about
+# mean $key_avg). If we generate a key, we first check to make sure
+# that the key is not already in the dataset. If it is, we do a lookup.
+#
+# XXX This test uses grow-only files currently!
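+#
+# A hypothetical sketch of the bimodal key-size distribution described
+# above (the real generator is in sysscript.tcl); the exact range used
+# for the large keys here is an assumption, not taken from that script.
+proc test044_example_keylen { key_avg } {
+ # 70% of the keys are short (1-10 characters) ...
+ if { [berkdb random_int 1 10] <= 7 } {
+ return [berkdb random_int 1 10]
+ }
+ # ... and the rest are roughly uniform about mean $key_avg.
+ return [berkdb random_int [expr $key_avg / 2] [expr $key_avg + $key_avg / 2]]
+}
+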
+proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test044 skipping for env $env"
+ return
+ }
+
+ puts "Test044: system integration test db $method $nprocs processes \
+ on $nfiles files"
+
+ # Parse options
+ set otherargs ""
+ set key_avg 10
+ set data_avg 20
+ set do_exit 0
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -key_avg { incr i; set key_avg [lindex $args $i] }
+ -data_avg { incr i; set data_avg [lindex $args $i] }
+ -testdir { incr i; set testdir [lindex $args $i] }
+ -x.* { set do_exit 1 }
+ default {
+ lappend otherargs [lindex $args $i]
+ }
+ }
+ }
+
+ if { $cont == 0 } {
+ # Create the database and open the dictionary
+ env_cleanup $testdir
+
+ # Create an environment
+ puts "\tTest044.a: creating environment and $nfiles files"
+ set dbenv [berkdb env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Create a bunch of files
+ set m $method
+
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ if { $method == "all" } {
+ switch [berkdb random_int 1 2] {
+ 1 { set m -btree }
+ 2 { set m -hash }
+ }
+ } else {
+ set m $omethod
+ }
+
+ set db [eval {berkdb_open -env $dbenv -create \
+ -mode 0644 $m} $otherargs {test044.$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ }
+ }
+
+ # Close the environment
+ $dbenv close
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Database is created, now fork off the kids.
+ puts "\tTest044.b: forking off $nprocs processes and utilities"
+ set cycle 1
+ set ncycles 3
+ while { $cycle <= $ncycles } {
+ set dbenv [berkdb env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Fire off deadlock detector and checkpointer
+ puts "Beginning cycle $cycle"
+ set ddpid [exec $util_path/db_deadlock -h $testdir -t 5 &]
+ set cppid [exec $util_path/db_checkpoint -h $testdir -p 2 &]
+ puts "Deadlock detector: $ddpid Checkpoint daemon $cppid"
+
+ set pidlist {}
+ for { set i 0 } {$i < $nprocs} {incr i} {
+ set p [exec $tclsh_path \
+ $test_path/sysscript.tcl $testdir \
+ $nfiles $key_avg $data_avg $omethod \
+ >& $testdir/test044.$i.log &]
+ lappend pidlist $p
+ }
+ set sleep [berkdb random_int 300 600]
+ puts \
+"[timestamp] $nprocs processes running $pidlist for $sleep seconds"
+ tclsleep $sleep
+
+ # Now simulate a crash
+ puts "[timestamp] Crashing"
+
+ #
+ # The environment must remain open until this point to get
+ # proper sharing (using the paging file) on Win/9X. [#2342]
+ #
+ error_check_good env_close [$dbenv close] 0
+
+ exec $KILL -9 $ddpid
+ exec $KILL -9 $cppid
+ #
+ # Use catch so that if any of the children died, we don't
+ # stop the script
+ #
+ foreach p $pidlist {
+ set e [catch {eval exec \
+ [concat $KILL -9 $p]} res]
+ }
+ # Check for test failure
+ set e [eval findfail [glob $testdir/test044.*.log]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Now run recovery
+ test044_verify $testdir $nfiles
+ incr cycle
+ }
+}
+
+proc test044_usage { } {
+ puts -nonewline "test044 method nentries [-d directory] [-i iterations]"
+ puts " [-p procs] -x"
+}
+
+proc test044_verify { dir nfiles } {
+ source ./include.tcl
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save1
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save1
+# }
+# }
+
+ # Run recovery and then read through all the database files to make
+ # sure that they all look good.
+
+ puts "\tTest044.verify: Running recovery and verifying file contents"
+ set stat [catch {exec $util_path/db_recover -h $dir} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save2
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save2
+# }
+# }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ set db($f) [berkdb_open $dir/test044.$f.db]
+ error_check_good $f:dbopen [is_valid_db $db($f)] TRUE
+
+ set cursors($f) [$db($f) cursor]
+ error_check_bad $f:cursor_open $cursors($f) NULL
+ error_check_good \
+ $f:cursor_open [is_substr $cursors($f) $db($f)] 1
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ for {set d [$cursors($f) get -first] } \
+ { [string length $d] != 0 } \
+ { set d [$cursors($f) get -next] } {
+
+ set k [lindex [lindex $d 0] 0]
+ set d [lindex [lindex $d 0] 1]
+
+ set flist [zero_list $nfiles]
+ set r $d
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $flist $fnum] == 0 } {
+ set fl "-set"
+ } else {
+ set fl "-next"
+ }
+
+ if { $fl != "-set" || $fnum != $f } {
+ if { [string compare $fl "-set"] == 0} {
+ set full [$cursors($fnum) \
+ get -set $k]
+ } else {
+ set full [$cursors($fnum) \
+ get -next]
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [lindex [lindex $full 0] 1]
+ error_check_good \
+ $f:dbget_$fnum:key $key $k
+ error_check_good \
+ $f:dbget_$fnum:data $rec $d
+ }
+
+ set flist [lreplace $flist $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ error_check_good $cursors($f) [$cursors($f) close] 0
+ error_check_good db_close:$f [$db($f) close] 0
+ }
+}
diff --git a/bdb/test/test045.tcl b/bdb/test/test045.tcl
new file mode 100644
index 00000000000..65f031d0290
--- /dev/null
+++ b/bdb/test/test045.tcl
@@ -0,0 +1,117 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test045.tcl,v 11.17 2000/10/19 23:15:22 ubell Exp $
+#
+# DB Test 45 Run the random db tester on the specified access method.
+# Options are:
+# -adds <maximum number of keys before you disable adds>
+# -cursors <number of cursors>
+# -dataavg <average data size>
+# -delete <minimum number of keys before you disable deletes>
+# -dups <allow duplicates in file>
+# -errpct <Induce errors errpct of the time>
+# -init <initial number of entries in database>
+# -keyavg <average key size>
+proc test045 { method {nops 10000} args } {
+ source ./include.tcl
+
+ if { [is_frecno $method] == 1 } {
+ puts "\tSkipping Test045 for method $method."
+ return
+ }
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test045 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test045: Random tester on $method for $nops operations"
+
+ # Set initial parameters
+ set adds [expr $nops * 10]
+ set cursors 5
+ set dataavg 40
+ set delete $nops
+ set dups 0
+ set errpct 0
+ set init 0
+ if { [is_record_based $method] == 1 } {
+ set keyavg 10
+ } else {
+ set keyavg 25
+ }
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -adds { incr i; set adds [lindex $args $i] }
+ -cursors { incr i; set cursors [lindex $args $i] }
+ -dataavg { incr i; set dataavg [lindex $args $i] }
+ -delete { incr i; set delete [lindex $args $i] }
+ -dups { incr i; set dups [lindex $args $i] }
+ -errpct { incr i; set errpct [lindex $args $i] }
+ -init { incr i; set init [lindex $args $i] }
+ -keyavg { incr i; set keyavg [lindex $args $i] }
+ -extent { incr i;
+ lappend oargs "-extent" "100" }
+ default { lappend oargs [lindex $args $i] }
+ }
+ }
+
+ # Create the database and initialize it.
+ set root $testdir/test045
+ set f $root.db
+ env_cleanup $testdir
+
+ # Run the script with 3 times the number of initial elements to
+ # set it up.
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644 $omethod} $oargs {$f}]
+ error_check_good dbopen:$f [is_valid_db $db] TRUE
+
+ set r [$db close]
+ error_check_good dbclose:$f $r 0
+
+ # We redirect standard out, but leave standard error here so we
+ # can see errors.
+
+ puts "\tTest045.a: Initializing database"
+ if { $init != 0 } {
+ set n [expr 3 * $init]
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $f $n \
+ 1 $init $n $keyavg $dataavg $dups 0 -1 \
+ > $testdir/test045.init
+ }
+ # Check for test failure
+ set e [findfail $testdir/test045.init]
+ error_check_good "FAIL: error message(s) in init file" $e 0
+
+ puts "\tTest045.b: Now firing off berkdb rand dbscript, running: "
+ # Now the database is initialized, run a test
+ puts "$tclsh_path\
+ $test_path/dbscript.tcl $f $nops $cursors $delete $adds \
+ $keyavg $dataavg $dups $errpct > $testdir/test045.log"
+
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $f \
+ $nops $cursors $delete $adds $keyavg \
+ $dataavg $dups $errpct \
+ > $testdir/test045.log
+
+ # Check for test failure
+ set e [findfail $testdir/test045.log]
+ error_check_good "FAIL: error message(s) in log file" $e 0
+
+}
diff --git a/bdb/test/test046.tcl b/bdb/test/test046.tcl
new file mode 100644
index 00000000000..3bfed3ef5d8
--- /dev/null
+++ b/bdb/test/test046.tcl
@@ -0,0 +1,717 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test046.tcl,v 11.26 2000/08/25 14:21:56 sue Exp $
+#
+# DB Test 46: Overwrite test of small/big key/data with cursor checks.
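+#
+# A hypothetical helper (not used by the test) showing the cursor
+# overwrite at the heart of Test046: -current rewrites the data of the
+# pair the cursor is on without touching the key.  On a -dupsort
+# database this put is refused with EINVAL, which Test046.h.1 verifies.
+proc test046_example_overwrite { dbc newdata } {
+ set ret [$dbc put -current $newdata]
+ error_check_good example_overwrite $ret 0
+ return [$dbc get -current]
+}
+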
+proc test046 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest046: Overwrite test with cursor and small/big key/data."
+ puts "\tTest046:\t$method $args"
+
+ if { [is_rrecno $method] == 1} {
+ puts "\tTest046: skipping for method $method."
+ return
+ }
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ if { [is_record_based $method] == 1} {
+ set key ""
+ }
+
+ puts "\tTest046: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test046.db
+ set env NULL
+ } else {
+ set testfile test046.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile.a]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # open curs to db
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { [is_record_based $method] == 1} {
+ set ret [$db put $i $data$i]
+ } elseif { $i < 10 } {
+ set ret [$db put [set key]00$i [set data]00$i]
+ } elseif { $i < 100 } {
+ set ret [$db put [set key]0$i [set data]0$i]
+ } else {
+ set ret [$db put $key$i $data$i]
+ }
+ error_check_good dbput $ret 0
+ }
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest046.a: Deletes by key."
+ puts "\t\tTest046.a.1: Get data with SET, then delete before cursor."
+ # get key in middle of page, call this the nth set curr to it
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr $ret
+
+ # delete before cursor(n-1), make sure it is gone
+ set i [expr $i - 1]
+ error_check_good db_del [$db del $key_set($i)] 0
+
+ # use set_range to get first key starting at n-1, should
+ # give us nth--but only works for btree
+ if { [is_btree $method] == 1 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([incr i])]
+ incr i -1
+ }
+ error_check_bad dbc_get:set(R)(post-delete) [llength $ret] 0
+ error_check_good dbc_get(match):set $ret $curr
+
+ puts "\t\tTest046.a.2: Delete cursor item by key."
+ # nth key, which cursor should be on now
+ set i [incr i]
+ set ret [$db del $key_set($i)]
+ error_check_good db_del $ret 0
+
+ # this should return n+1 key/data, curr has nth key/data
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i+1])]
+ }
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\t\tTest046.a.3: Delete item after cursor."
+ # we'll delete n+2, since we have deleted n-1 and n
+ # i still equal to nth, cursor on n+1
+ set i [incr i]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr [$dbc get -next]
+ error_check_bad dbc_get:next [llength $curr] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ # delete *after* cursor pos.
+ error_check_good db:del [$db del $key_set([incr i])] 0
+
+ # make sure item is gone, try to get it
+ if { [string compare $omethod "-btree"] == 0} {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i +1])]
+ }
+ error_check_bad dbc_get:set(_range) [llength $ret] 0
+ error_check_bad dbc_get:set(_range) $ret $curr
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 0] \
+ $key_set([expr $i+1])
+
+ puts "\tTest046.b: Deletes by cursor."
+ puts "\t\tTest046.b.1: Delete, do DB_NEXT."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [expr $i+2]
+ # i = n+4
+ error_check_good dbc_get:next(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.2: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [expr $i-3]
+ # i = n+1 (deleted all in between)
+ error_check_good dbc_get:prev(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.3: Delete, do DB_CURRENT."
+ error_check_good dbc:del [$dbc del] 0
+ # we just deleted, so current item should be KEYEMPTY, throws err
+ set ret [$dbc get -current]
+ error_check_good dbc_get:curr:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:get:current [catch {$dbc get -current} ret] 1
+ #error_check_good dbc_get:curr:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+
+ puts "\tTest046.c: Inserts (before/after), by key then cursor."
+ puts "\t\tTest046.c.1: Insert by key before the cursor."
+ # i is at curs pos, i=n+1, we want to go BEFORE
+ set i [incr i -1]
+ set ret [$db put $key_set($i) $data_set($i)]
+ error_check_good db_put:before $ret 0
+
+ puts "\t\tTest046.c.2: Insert by key after the cursor."
+ set i [incr i +2]
+ set ret [$db put $key_set($i) $data_set($i)]
+ error_check_good db_put:after $ret 0
+
+ puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)."
+ # cursor is on n+1, we'll change i to match
+ set i [incr i -1]
+
+ error_check_good dbc:close [$dbc close] 0
+ error_check_good db:close [$db close] 0
+ if { [is_record_based $method] == 1} {
+ puts "\t\tSkipping the rest of test for method $method."
+ puts "\tTest046 ($method) complete."
+ return
+ } else {
+ # Reopen without printing __db_errs.
+ set db [eval {berkdb_open_noerr} $oflags $testfile.a]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ # should fail with EINVAL (deleted cursor)
+ set errorCode NONE
+ error_check_good catch:put:before 1 \
+ [catch {$dbc put -before $data_set($i)} ret]
+ error_check_good dbc_put:deleted:before \
+ [is_substr $errorCode "EINVAL"] 1
+
+ # should fail with EINVAL
+ set errorCode NONE
+ error_check_good catch:put:after 1 \
+ [catch {$dbc put -after $data_set($i)} ret]
+ error_check_good dbc_put:deleted:after \
+ [is_substr $errorCode "EINVAL"] 1
+
+ puts "\t\tTest046.c.4:\
+ Insert by cursor before/after existent cursor."
+ # can't use before after w/o dup except renumber in recno
+ # first, restore an item so they don't fail
+ #set ret [$db put $key_set($i) $data_set($i)]
+ #error_check_good db_put $ret 0
+
+ #set ret [$dbc get -set $key_set($i)]
+ #error_check_bad dbc_get:set [llength $ret] 0
+ #set i [incr i -2]
+ # i = n - 1
+ #set ret [$dbc get -prev]
+ #set ret [$dbc put -before $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:before $ret 0
+ # cursor pos is adjusted to match prev, recently inserted
+ #incr i
+ # i = n
+ #set ret [$dbc put -after $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:after $ret 0
+ }
+
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ puts "\tTest046.d.0: Cleanup, close db, open new db with no dups."
+ error_check_good dbc:close [$dbc close] 0
+ error_check_good db:close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile.d]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ set ret [$db put $key$i $data$i]
+ error_check_good dbput $ret 0
+ }
+
+ # Prepare cursor on item
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of key/data
+ foreach ptype {init over} {
+ foreach size {big small} {
+ if { [string compare $size big] == 0 } {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 250]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 250]
+ } else {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 10]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 10]
+ }
+ }
+ }
+
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type {key_over curs_over} {
+ # Overwrite (i=initial) four different kinds of pairs
+ incr i
+ puts "\tTest046.d: Overwrites $type."
+ foreach i_pair {\
+ {small small} {big small} {small big} {big big} } {
+ # Overwrite (w=write) with four different kinds of data
+ foreach w_pair {\
+ {small small} {big small} {small big} {big big} } {
+
+ # we can only overwrite if key size matches
+ if { [string compare [lindex \
+ $i_pair 0] [lindex $w_pair 0]] != 0} {
+ continue
+ }
+
+ # first write the initial key/data
+ set ret [$dbc put -keyfirst \
+ key_init[lindex $i_pair 0] \
+ data_init[lindex $i_pair 1]]
+ error_check_good \
+ dbc_put:curr:init:$i_pair $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] \
+ data_init[lindex $i_pair 1]
+
+ # Now, try to overwrite: dups not supported in
+ # this db
+ if { [string compare $type key_over] == 0 } {
+ puts "\t\tTest046.d.$i: Key\
+ Overwrite:($i_pair) by ($w_pair)."
+ set ret [$db put \
+ $"key_init[lindex $i_pair 0]" \
+ $"data_over[lindex $w_pair 1]"]
+ error_check_good \
+ dbput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [$db \
+ get $"key_init[lindex $i_pair 0]"]
+ error_check_bad \
+ db:get:check [llength $ret] 0
+ error_check_good db:get:compare_data \
+ [lindex [lindex $ret 0] 1] \
+ $"data_over[lindex $w_pair 1]"
+ } else {
+ # This is a cursor overwrite
+ puts \
+ "\t\tTest046.d.$i:Curs Overwrite:($i_pair) by ($w_pair)."
+ set ret [$dbc put -current \
+ $"data_over[lindex $w_pair 1]"]
+ error_check_good \
+ dbcput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [$dbc get -current]
+ error_check_bad \
+ dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] \
+ $"data_over[lindex $w_pair 1]"
+ }
+ } ;# foreach write pair
+ } ;# foreach initial pair
+ } ;# foreach type big/small
+
+ puts "\tTest046.d.3: Cleanup for next part of test."
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\tSkipping the rest of Test046 for method $method."
+ puts "\tTest046 complete."
+ return
+ }
+
+ puts "\tTest046.e.1: Open db with sorted dups."
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # open curs to db
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # keep nkeys even
+ set nkeys 20
+ set ndups 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.e.2:\
+ Put $nkeys small key/data pairs and $ndups sorted dups."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $i < 10 } {
+ set ret [$db put [set key]0$i [set data]0$i]
+ } else {
+ set ret [$db put $key$i $data$i]
+ }
+ error_check_good dbput $ret 0
+ }
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put 20 sorted duplicates on key in middle of page
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+
+ set keym $key_set($i)
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $i < 10 } {
+ set ret [$db put $keym DUPLICATE_0$i]
+ } else {
+ set ret [$db put $keym DUPLICATE_$i]
+ }
+ error_check_good db_put:DUP($i) $ret 0
+ }
+
+ puts "\tTest046.e.3: Check duplicate duplicates"
+ set ret [$db put $keym DUPLICATE_00]
+ error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1
+
+ # get dup ordering
+ for {set i 0; set ret [$dbc get -set $keym]} { [llength $ret] != 0} {\
+ set ret [$dbc get -nextdup] } {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put cursor on item in middle of dups
+ set i [expr $ndups/2]
+ set ret [$dbc get -get_both $keym $dup_set($i)]
+ error_check_bad dbc_get:get_both [llength $ret] 0
+
+ puts "\tTest046.f: Deletes by cursor."
+ puts "\t\tTest046.f.1: Delete by cursor, do a DB_NEXT, check cursor."
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:nextdup [lindex [lindex $ret 0] 1] $dup_set([incr i])
+
+ puts "\t\tTest046.f.2: Delete by cursor, do DB_PREV, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [incr i -2]
+ error_check_good dbc_get:prev [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.f.3: Delete by cursor, do DB_CURRENT, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:dbc_get:curr [catch {$dbc get -current} ret] 1
+ #error_check_good \
+ # dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+ error_check_good dbc_close [$dbc close] 0
+
+ # restore deleted keys
+ error_check_good db_put:1 [$db put $keym $dup_set($i)] 0
+ error_check_good db_put:2 [$db put $keym $dup_set([incr i])] 0
+ error_check_good db_put:3 [$db put $keym $dup_set([incr i])] 0
+
+ # tested above
+
+ # Reopen database without __db_err, reset cursor
+ error_check_good dbclose [$db close] 0
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_cursor [is_substr [set dbc [$db cursor]] $db] 1
+
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret2 [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret2] 0
+ # match
+ error_check_good dbc_get:current/set(match) $ret $ret2
+ # right one?
+ error_check_good \
+ dbc_get:curr/set(matchdup) [lindex [lindex $ret 0] 1] $dup_set(0)
+
+ # cursor is on first dup
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # now on second dup
+ error_check_good dbc_get:next [lindex [lindex $ret 0] 1] $dup_set(1)
+ # check cursor
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbcget:curr(compare) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\tTest046.g: Inserts."
+ puts "\t\tTest046.g.1: Insert by key before cursor."
+ set i 0
+
+ # use "spam" to prevent a duplicate duplicate.
+ set ret [$db put $keym $dup_set($i)spam]
+ error_check_good db_put:before $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:current(post-put) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.2: Insert by key after cursor."
+ set i [expr $i + 2]
+ # use "eggs" to prevent a duplicate duplicate
+ set ret [$db put $keym $dup_set($i)eggs]
+ error_check_good db_put:after $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr(post-put,after) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.3: Insert by curs before/after curs (should fail)."
+ # should return EINVAL (dupsort specified)
+ error_check_good dbc_put:before:catch \
+ [catch {$dbc put -before $dup_set([expr $i -1])} ret] 1
+ error_check_good \
+ dbc_put:before:deleted [is_substr $errorCode "EINVAL"] 1
+ error_check_good dbc_put:after:catch \
+ [catch {$dbc put -after $dup_set([expr $i +2])} ret] 1
+ error_check_good \
+ dbc_put:after:deleted [is_substr $errorCode "EINVAL"] 1
+
+ puts "\tTest046.h: Cursor overwrites."
+ puts "\t\tTest046.h.1: Test that dupsort disallows current overwrite."
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ catch:dbc_put:curr [catch {$dbc put -current DATA_OVERWRITE} ret] 1
+ error_check_good dbc_put:curr:dupsort [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest046.h.2: New db (no dupsort)."
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ set db [berkdb_open \
+ -create -dup $omethod -mode 0644 -truncate $testfile.h]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ for {set i 0} {$i < $nkeys} {incr i} {
+ if { $i < 10 } {
+ error_check_good db_put [$db put key0$i datum0$i] 0
+ } else {
+ error_check_good db_put [$db put key$i datum$i] 0
+ }
+ if { $i == 0 } {
+ for {set j 0} {$j < $ndups} {incr j} {
+ if { $i < 10 } {
+ set keyput key0$i
+ } else {
+ set keyput key$i
+ }
+ if { $j < 10 } {
+ set ret [$db put $keyput DUP_datum0$j]
+ } else {
+ set ret [$db put $keyput DUP_datum$j]
+ }
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ }
+
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ for {set i 0; set ret [$dbc get -set key00]} {\
+ [llength $ret] != 0} {set ret [$dbc get -nextdup]} {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+ set i 0
+ set keym key0$i
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ dbc_get:set(match) [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ set ret [$dbc get -nextdup]
+ error_check_bad dbc_get:nextdup [llength $ret] 0
+ error_check_good dbc_get:nextdup(match) \
+ [lindex [lindex $ret 0] 1] $dup_set([expr $i + 1])
+
+ puts "\t\tTest046.h.3: Insert by cursor before cursor (DB_BEFORE)."
+ set ret [$dbc put -before BEFOREPUT]
+ error_check_good dbc_put:before $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr:match [lindex [lindex $ret 0] 1] BEFOREPUT
+ # make sure that this is actually a dup w/ dup before
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good dbc_get:prev:match \
+ [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -prev]
+ # should not be a dup
+ error_check_bad dbc_get:prev(no_dup) \
+ [lindex [lindex $ret 0] 0] $keym
+
+ puts "\t\tTest046.h.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -set $keym]
+
+ # delete next 3 when fix
+ #puts "[$dbc get -current]\
+ # [$dbc get -next] [$dbc get -next] [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set $keym]
+
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret [$dbc put -after AFTERPUT]
+ error_check_good dbc_put:after $ret 0
+ #puts [$dbc get -current]
+
+ # delete next 3 when fix
+ #set ret [$dbc get -set $keym]
+ #puts "[$dbc get -current] next: [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set AFTERPUT]
+ #set ret [$dbc get -set $keym]
+ #set ret [$dbc get -next]
+ #puts $ret
+
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:match [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -prev]
+ # now should be on first item (non-dup) of keym
+ error_check_bad dbc_get:prev1 [llength $ret] 0
+ error_check_good \
+ dbc_get:match [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:match2 [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # this is the dup we added previously
+ error_check_good \
+ dbc_get:match3 [lindex [lindex $ret 0] 1] BEFOREPUT
+
+ # now get rid of the dups we added
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev2 [llength $ret] 0
+ error_check_good dbc_del2 [$dbc del] 0
+ # put cursor on first dup item for the rest of test
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good \
+ dbc_get:first:check [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.h.5: Overwrite small by small."
+ set ret [$dbc put -current DATA_OVERWRITE]
+ error_check_good dbc_put:current:overwrite $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/small) \
+ [lindex [lindex $ret 0] 1] DATA_OVERWRITE
+
+ puts "\t\tTest046.h.6: Overwrite small with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite:big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE] 1
+
+ puts "\t\tTest046.h.7: Overwrite big with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE2[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite(2):big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE2] 1
+
+ puts "\t\tTest046.h.8: Overwrite big with small."
+ set ret [$dbc put -current DATA_OVERWRITE2]
+ error_check_good dbc_put:current:overwrite:small $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/small) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_OVERWRITE2] 1
+
+ puts "\tTest046.i: Cleaning up from test."
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest046 complete."
+}
diff --git a/bdb/test/test047.tcl b/bdb/test/test047.tcl
new file mode 100644
index 00000000000..9d11cd3db83
--- /dev/null
+++ b/bdb/test/test047.tcl
@@ -0,0 +1,192 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test047.tcl,v 11.10 2000/08/25 14:21:56 sue Exp $
+#
+# DB Test 47: test of the SET_RANGE interface to DB->c_get.
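+# The SET_RANGE flag positions a cursor on the smallest key greater than or
+# equal to the requested key, so after an exact match is deleted a second
+# -set_range get lands on the following record rather than failing.  A
+# minimal sketch of that behavior (hypothetical helper, never invoked by
+# the suite; assumes the berkdb Tcl API is loaded):
+proc test047_setrange_sketch { dir } {
+	set db [berkdb_open -create -btree -mode 0644 $dir/sketch047.db]
+	$db put apple 1
+	$db put banana 2
+	set dbc [$db cursor]
+	# no exact "b" key exists, so the cursor lands on "banana"
+	set ret [$dbc get -set_range b]
+	puts "positioned on: [lindex [lindex $ret 0] 0]"
+	$dbc close
+	$db close
+}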
+proc test047 { method args } {
+ source ./include.tcl
+
+ set tstn 047
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method"
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of SET_RANGE interface to DB->c_get ($method)."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set testfile1 $testdir/test0$tstn.a.db
+ set testfile2 $testdir/test0$tstn.b.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ set testfile1 test0$tstn.a.db
+ set testfile2 test0$tstn.b.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -truncate -mode 0644 -dup $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # open curs to db
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set nkeys 20
+ # Fill page w/ small key/data pairs
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ set ret [$db put $key$i $data$i]
+ error_check_good dbput $ret 0
+ }
+
+ puts "\tTest$tstn.c: Get data with SET_RANGE, then delete by cursor."
+ set i 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ set curr $ret
+
+ # delete by cursor, make sure it is gone
+ error_check_good dbc_del [$dbc del] 0
+
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\tTest$tstn.d: \
+ Use another cursor to fix item on page, delete by db."
+ set dbcurs2 [$db cursor]
+ error_check_good db:cursor2 [is_substr $dbcurs2 $db] 1
+
+ set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]]
+ error_check_bad dbc_get(2):set [llength $ret] 0
+ set curr $ret
+ error_check_good db:del [$db del [lindex [lindex $ret 0] 0]] 0
+
+ # make sure item is gone
+ set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]]
+ error_check_bad dbc2_get:set_range [llength $ret] 0
+ error_check_bad dbc2_get:set_range $ret $curr
+
+ puts "\tTest$tstn.e: Close for second part of test, close db/cursors."
+ error_check_good dbc:close [$dbc close] 0
+ error_check_good dbc2:close [$dbcurs2 close] 0
+ error_check_good dbclose [$db close] 0
+
+ # open db
+ set db [eval {berkdb_open} $oflags $testfile1]
+ error_check_good dbopen2 [is_valid_db $db] TRUE
+
+ set nkeys 10
+ puts "\tTest$tstn.f: Fill page with $nkeys pairs, one set of dups."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ # a pair
+ set ret [$db put $key$i $data$i]
+ error_check_good dbput($i) $ret 0
+ }
+
+ set j 0
+ for {set i 0} { $i < $nkeys } {incr i} {
+		# add a duplicate datum for each key
+ set ret [$db put $key$i DUP_$data$i]
+ error_check_good dbput($i):dup $ret 0
+ }
+
+ puts "\tTest$tstn.g: \
+	    Get dup key w/ SET_RANGE, pin it on-page with another cursor."
+ set i 0
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+
+ set dbc2 [$db cursor]
+ error_check_good db_cursor2 [is_substr $dbc2 $db] 1
+ set ret2 [$dbc2 get -set_range $key$i]
+ error_check_bad dbc2_get:set_range [llength $ret] 0
+
+ error_check_good dbc_compare $ret $ret2
+ puts "\tTest$tstn.h: \
+ Delete duplicates' key, use SET_RANGE to get next dup."
+ set ret [$dbc2 del]
+ error_check_good dbc2_del $ret 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ error_check_bad dbc_get:set_range $ret $ret2
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile2]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set dbc2 [$db cursor]
+ error_check_good db_cursor2 [is_substr $dbc2 $db] 1
+
+ set nkeys 10
+ set ndups 1000
+
+ puts "\tTest$tstn.i: Fill page with $nkeys pairs and $ndups dups."
+ for {set i 0} { $i < $nkeys } { incr i} {
+ # a pair
+ set ret [$db put $key$i $data$i]
+ error_check_good dbput $ret 0
+
+ # dups for single pair
+ if { $i == 0} {
+ for {set j 0} { $j < $ndups } { incr j } {
+ set ret [$db put $key$i DUP_$data$i:$j]
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ }
+ set i 0
+ puts "\tTest$tstn.j: \
+ Get key of first dup with SET_RANGE, fix with 2 curs."
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+
+ set ret2 [$dbc2 get -set_range $key$i]
+ error_check_bad dbc2_get:set_range [llength $ret] 0
+ set curr $ret2
+
+ error_check_good dbc_compare $ret $ret2
+
+ puts "\tTest$tstn.k: Delete item by cursor, use SET_RANGE to verify."
+ set ret [$dbc2 del]
+ error_check_good dbc2_del $ret 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ error_check_bad dbc_get:set_range $ret $curr
+
+ puts "\tTest$tstn.l: Cleanup."
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/bdb/test/test048.tcl b/bdb/test/test048.tcl
new file mode 100644
index 00000000000..84c7c47b721
--- /dev/null
+++ b/bdb/test/test048.tcl
@@ -0,0 +1,139 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test048.tcl,v 11.11 2000/12/11 17:42:18 sue Exp $
+#
+# Test048: Cursor stability across btree splits.
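+# A btree page split moves items between pages; the property verified below
+# is that a cursor already fixed on a key keeps returning the same pair
+# afterward.  A minimal sketch (hypothetical helper, never invoked by the
+# suite; the 512-byte pagesize is only chosen to make a split cheap to force):
+proc test048_split_sketch { dir } {
+	set db [berkdb_open -create -btree -pagesize 512 -mode 0644 \
+	    $dir/sketch048.db]
+	$db put key000 data000
+	set dbc [$db cursor]
+	set before [$dbc get -set key000]
+	# enough additional pairs to split the original leaf page
+	for {set i 1} {$i < 200} {incr i} {
+		$db put key[format %03d $i] data$i
+	}
+	set after [$dbc get -current]
+	puts "cursor stable: [expr {[string compare $before $after] == 0}]"
+	$dbc close
+	$db close
+}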
+proc test048 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set tstn 048
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test048: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -truncate -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ set ret [$db put key000$i $data$i]
+ error_check_good dbput $ret 0
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ for {set i 0; set ret [$db get key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [$db cursor]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
+ puts "\tTest$tstn.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put key00$i $data$i]
+ } else {
+ set ret [$db put key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Make sure split happened."
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+
+ puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.g: Delete added keys to force reverse split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i [$db del key0$i] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i [$db del key00$i] 0
+ } else {
+ error_check_good db_del:$i [$db del key000$i] 0
+ }
+ }
+
+ puts "\tTest$tstn.h: Verify reverse split."
+ error_check_good stat:check-reverse_split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+
+ puts "\tTest$tstn.i: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.j: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/bdb/test/test049.tcl b/bdb/test/test049.tcl
new file mode 100644
index 00000000000..aaea3b200bf
--- /dev/null
+++ b/bdb/test/test049.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test049.tcl,v 11.15 2000/08/25 14:21:56 sue Exp $
+#
+# Test 049: Test of each cursor routine with uninitialized cursors
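+# An uninitialized cursor has no current item, so positional operations such
+# as get -current, put -current, or del are expected to fail with EINVAL.
+# A minimal sketch (hypothetical helper, never invoked by the suite):
+proc test049_uninit_sketch { dir } {
+	global errorCode
+	set db [berkdb_open_noerr -create -btree -mode 0644 $dir/sketch049.db]
+	set dbc [$db cursor]
+	# the cursor has never been positioned; this get should fail
+	set failed [catch {$dbc get -current} ret]
+	puts "get -current failed: $failed, errorCode: $errorCode"
+	$dbc close
+	$db close
+}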
+proc test049 { method args } {
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set tstn 049
+ set renum [is_rrecno $method]
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+	puts "\tTest$tstn: Test of cursor routines with uninitialized cursors."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+ set rflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ }
+
+ puts "\tTest$tstn.a: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -truncate -mode 0644 $rflags $omethod $args"
+ if { [is_record_based $method] == 0 && [is_rbtree $method] != 1 } {
+ append oflags " -dup"
+ }
+ set db [eval {berkdb_open_noerr} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set dbc_u [$db cursor]
+ error_check_good db:cursor [is_substr $dbc_u $db] 1
+
+ set nkeys 10
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ set ret [$db put $key$i $data$i]
+ error_check_good dbput:$i $ret 0
+ if { $i == 1 } {
+ for {set j 0} { $j < [expr $nkeys / 2]} {incr j} {
+ set ret [$db put $key$i DUPLICATE$j]
+ error_check_good dbput:dup:$j $ret 0
+ }
+ }
+ }
+
+ # DBC GET
+ puts "\tTest$tstn.c: Test dbc->get interfaces..."
+ set i 0
+ foreach flag { current first last next prev nextdup} {
+ puts "\t\t...dbc->get($flag)"
+ catch {$dbc_u get -$flag} ret
+ error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+ }
+
+ foreach flag { set set_range get_both} {
+ puts "\t\t...dbc->get($flag)"
+ if { [string compare $flag get_both] == 0} {
+ catch {$dbc_u get -$flag $key$i data0} ret
+ } else {
+ catch {$dbc_u get -$flag $key$i} ret
+ }
+ error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+ }
+
+ puts "\t\t...dbc->get(current, partial)"
+ catch {$dbc_u get -current -partial {0 0}} ret
+ error_check_good dbc:get:partial [is_substr $errorCode EINVAL] 1
+
+ puts "\t\t...dbc->get(current, rmw)"
+ catch {$dbc_u get -rmw -current } ret
+ error_check_good dbc_get:rmw [is_substr $errorCode EINVAL] 1
+
+ puts "\tTest$tstn.d: Test dbc->put interface..."
+ # partial...depends on another
+ foreach flag { after before current keyfirst keylast } {
+ puts "\t\t...dbc->put($flag)"
+ if { [string match key* $flag] == 1 } {
+ if { [is_record_based $method] == 1 } {
+ # keyfirst/keylast not allowed in recno
+ puts "\t\t...Skipping dbc->put($flag) for $method."
+ continue
+ } else {
+ # keyfirst/last should succeed
+ puts "\t\t...dbc->put($flag)...should succeed for $method"
+ error_check_good dbcput:$flag \
+ [$dbc_u put -$flag $key$i data0] 0
+
+ # now uninitialize cursor
+ error_check_good dbc_close [$dbc_u close] 0
+ set dbc_u [$db cursor]
+ error_check_good \
+ db_cursor [is_substr $dbc_u $db] 1
+ }
+ } elseif { [string compare $flag before ] == 0 ||
+ [string compare $flag after ] == 0 } {
+ if { [is_record_based $method] == 0 &&
+ [is_rbtree $method] == 0} {
+ set ret [$dbc_u put -$flag data0]
+ error_check_good "$dbc_u:put:-$flag" $ret 0
+ } elseif { $renum == 1 } {
+ # Renumbering recno will return a record number
+ set currecno \
+ [lindex [lindex [$dbc_u get -current] 0] 0]
+ set ret [$dbc_u put -$flag data0]
+ if { [string compare $flag after] == 0 } {
+ error_check_good "$dbc_u put $flag" \
+ $ret [expr $currecno + 1]
+ } else {
+ error_check_good "$dbc_u put $flag" \
+ $ret $currecno
+ }
+ } else {
+ puts "\t\tSkipping $flag for $method"
+ }
+ } else {
+ set ret [$dbc_u put -$flag data0]
+ error_check_good "$dbc_u:put:-$flag" $ret 0
+ }
+ }
+ # and partial
+ puts "\t\t...dbc->put(partial)"
+ catch {$dbc_u put -partial {0 0} $key$i $data$i} ret
+ error_check_good dbc_put:partial [is_substr $errorCode EINVAL] 1
+
+ # XXX dbc->dup, db->join (dbc->get join_item)
+ # dbc del
+ puts "\tTest$tstn.e: Test dbc->del interface."
+ catch {$dbc_u del} ret
+ error_check_good dbc_del [is_substr $errorCode EINVAL] 1
+
+ error_check_good dbc_close [$dbc_u close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/bdb/test/test050.tcl b/bdb/test/test050.tcl
new file mode 100644
index 00000000000..4a2d8c8fdc0
--- /dev/null
+++ b/bdb/test/test050.tcl
@@ -0,0 +1,191 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test050.tcl,v 11.15 2000/08/25 14:21:57 sue Exp $
+#
+# Test050: Overwrite test of small/big key/data with cursor checks for RECNO
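+# The overwrites below all go through DB_CURRENT: put -current replaces only
+# the data under the cursor and leaves the record number alone, whether the
+# new datum is smaller or larger than the old one.  A minimal sketch
+# (hypothetical helper, never invoked by the suite):
+proc test050_overwrite_sketch { dir } {
+	set db [berkdb_open -create -recno -renumber -mode 0644 \
+	    $dir/sketch050.db]
+	$db put 1 small_datum
+	set dbc [$db cursor]
+	set ret [$dbc get -first]
+	# overwrite in place with a much larger datum
+	$dbc put -current [repeat BIGDATA 100]
+	set newlen [string length [lindex [lindex [$db get 1] 0] 1]]
+	puts "record 1 is now $newlen bytes"
+	$dbc close
+	$db close
+}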
+proc test050 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set tstn 050
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_rrecno $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+
+ puts "\tTest$tstn:\
+ Overwrite test with cursor and small/big key/data ($method)."
+
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -truncate -mode 0644 $args $omethod"
+ set db [eval {berkdb_open_noerr} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # open curs to db
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ #
+ puts "\tTest$tstn: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ set ret [$db put $i [chop_data $method $data$i]]
+ error_check_good dbput $ret 0
+ }
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # verify ordering: should be unnecessary, but hey, why take chances?
+ # key_set is zero indexed but keys start at 1
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good \
+ verify_order:$i $key_set($i) [pad_data $method [expr $i+1]]
+ }
+
+ puts "\tTest$tstn.a: Inserts before/after by cursor."
+ puts "\t\tTest$tstn.a.1:\
+ Insert with uninitialized cursor (should fail)."
+ error_check_good dbc_close [$dbc close] 0
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ catch {$dbc put -before DATA1} ret
+ error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1
+
+ catch {$dbc put -after DATA2} ret
+ error_check_good dbc_put:after:uninit [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest$tstn.a.2: Insert with deleted cursor (should succeed)."
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc put -current DATAOVER1]
+ error_check_good dbc_put:current:deleted $ret 0
+
+ puts "\t\tTest$tstn.a.3: Insert by cursor before cursor (DB_BEFORE)."
+ set currecno [lindex [lindex [$dbc get -current] 0] 0]
+ set ret [$dbc put -before DATAPUTBEFORE]
+ error_check_good dbc_put:before $ret $currecno
+ set old1 [$dbc get -next]
+ error_check_bad dbc_get:next [llength $old1] 0
+ error_check_good \
+ dbc_get:next(compare) [lindex [lindex $old1 0] 1] DATAOVER1
+
+ puts "\t\tTest$tstn.a.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_get:first [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+ set currecno [lindex [lindex [$dbc get -current] 0] 0]
+ set ret [$dbc put -after DATAPUTAFTER]
+ error_check_good dbc_put:after $ret [expr $currecno + 1]
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good \
+ dbc_get:prev [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+
+ puts "\t\tTest$tstn.a.5: Verify that all keys have been renumbered."
+ # should be $nkeys + 2 keys, starting at 1
+ for {set i 1; set ret [$dbc get -first]} { \
+ $i <= $nkeys && [llength $ret] != 0 } {\
+ incr i; set ret [$dbc get -next]} {
+ error_check_good check_renumber $i [lindex [lindex $ret 0] 0]
+ }
+
+ # tested above
+
+ puts "\tTest$tstn.b: Overwrite tests (cursor and key)."
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ #
+ # we should have ($nkeys + 2) keys, ordered:
+ # DATAPUTBEFORE, DATAPUTAFTER, DATAOVER1, data1, ..., data$nkeys
+ #
+ # Prepare cursor on item
+ #
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of data
+	set databig DATA_BIG_[repeat $alphabet 250]
+ set datasmall DATA_SMALL
+
+ # Now, we want to overwrite data:
+ # by key and by cursor
+ # 1. small by small
+ # 2. small by big
+ # 3. big by small
+ # 4. big by big
+ #
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type { by_key by_cursor } {
+ incr i
+ puts "\tTest$tstn.b.$i: Overwrites $type."
+ foreach pair { {small small} \
+ {small big} {big small} {big big} } {
+ # put in initial type
+ set data $data[lindex $pair 0]
+ set ret [$dbc put -current $data]
+ error_check_good dbc_put:curr:init:($pair) $ret 0
+
+ # Now, try to overwrite: dups not supported in this db
+ if { [string compare $type by_key] == 0 } {
+ puts "\t\tTest$tstn.b.$i:\
+ Overwrite:($pair):$type"
+ set ret [$db put \
+ 1 OVER$pair$data[lindex $pair 1]]
+ error_check_good dbput:over:($pair) $ret 0
+ } else {
+ # This is a cursor overwrite
+ puts "\t\tTest$tstn.b.$i:\
+ Overwrite:($pair) by cursor."
+ set ret [$dbc put \
+ -current OVER$pair$data[lindex $pair 1]]
+ error_check_good dbcput:over:($pair) $ret 0
+ }
+ } ;# foreach pair
+ } ;# foreach type key/cursor
+
+ puts "\tTest$tstn.c: Cleanup and close cursor."
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/bdb/test/test051.tcl b/bdb/test/test051.tcl
new file mode 100644
index 00000000000..6994526e214
--- /dev/null
+++ b/bdb/test/test051.tcl
@@ -0,0 +1,191 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test051.tcl,v 11.14 2000/08/25 14:21:57 sue Exp $
+#
+# Test51:
+# Test of the fixed recno method.
+# 0. Test various flags (legal and illegal) to open
+# 1. Test partial puts where dlen != size (should fail)
+# 2. Partial puts for existent record -- replaces at beg, mid, and
+# end of record, as well as full replace
+#
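+# For fixed-length records a partial put is only legal when the supplied data
+# is exactly dlen bytes; anything else is rejected.  A minimal sketch
+# (hypothetical helper, never invoked by the suite; assumes berkdb_open's
+# -recno method flag and -len fixed-record-length flag):
+proc test051_partial_sketch { dir fixed_len } {
+	global errorCode
+	set db [berkdb_open_noerr -create -recno -len $fixed_len \
+	    -mode 0644 $dir/sketch051.db]
+	$db put 1 [repeat a $fixed_len]
+	# dlen is 4 but the replacement data is 5 bytes long: should fail
+	set failed [catch {$db put -partial {0 4} 1 bbbbb} ret]
+	puts "mismatched partial put failed: $failed ($errorCode)"
+	$db close
+}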
+proc test051 { method { args "" } } {
+ global fixed_len
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+	puts "Test051: Test of fixed-length records."
+ if { [is_fixed_length $method] != 1 } {
+ puts "Test051: skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test051.db
+ set testfile1 $testdir/test051a.db
+ set env NULL
+ } else {
+ set testfile test051.db
+ set testfile1 test051a.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+ set oflags "-create -truncate -mode 0644 $args"
+
+ # Test various flags (legal and illegal) to open
+ puts "\tTest051.a: Test correct flag behavior on open."
+ set errorCode NONE
+ foreach f { "-dup" "-dup -dupsort" "-recnum" } {
+ puts "\t\tTest051.a: Test flag $f"
+ error_check_good dbopen:flagtest:catch \
+ [catch {set db \
+ [eval {berkdb_open_noerr} $oflags $f $omethod \
+ $testfile]} ret] 1
+ error_check_good \
+ dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+ set errorCode NONE
+ }
+ set f "-renumber"
+ puts "\t\tTest051.a: Test $f"
+ if { [is_frecno $method] == 1 } {
+ set db [eval {berkdb_open} $oflags $f $omethod $testfile]
+ error_check_good dbopen:flagtest:$f [is_valid_db $db] TRUE
+ $db close
+ } else {
+ error_check_good \
+ dbopen:flagtest:catch [catch {set db [eval \
+ {berkdb_open_noerr} $oflags $f \
+ $omethod $testfile]} ret] 1
+ error_check_good \
+ dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+ }
+
+
+ # Test partial puts where dlen != size (should fail)
+ # it is an error to specify a partial put w/ different
+ # dlen and size in fixed length recno/queue
+ set key 1
+ set data ""
+ set test_char "a"
+
+ set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest051.b: Partial puts with dlen != size."
+ foreach dlen { 1 16 20 32 } {
+ foreach doff { 0 10 20 32 } {
+ # dlen < size
+			puts "\t\tTest051.b: dlen: $dlen, doff: $doff, \
+ size: [expr $dlen+1]"
+ set data [repeat $test_char [expr $dlen + 1]]
+ error_check_good catch:put 1 [catch {$db \
+ put -partial [list $doff $dlen] $key $data} ret]
+ #
+ # We don't get back the server error string just
+ # the result.
+ #
+ if { $eindex == -1 } {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr $errorInfo "Length improper"] 1
+ } else {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr $errorCode "EINVAL"] 1
+ }
+
+ # dlen > size
+			puts "\t\tTest051.b: dlen: $dlen, doff: $doff, \
+ size: [expr $dlen-1]"
+ set data [repeat $test_char [expr $dlen - 1]]
+ error_check_good catch:put 1 [catch {$db \
+ put -partial [list $doff $dlen] $key $data} ret]
+ if { $eindex == -1 } {
+ error_check_good "dbput:partial: dlen > size" \
+ [is_substr $errorInfo "Length improper"] 1
+ } else {
+				error_check_good "dbput:partial: dlen > size" \
+ [is_substr $errorCode "EINVAL"] 1
+ }
+ }
+ }
+
+ $db close
+
+ # Partial puts for existent record -- replaces at beg, mid, and
+ # end of record, as well as full replace
+ puts "\tTest051.f: Partial puts within existent record."
+ set db [eval {berkdb_open} $oflags $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\t\tTest051.f: First try a put and then a full replace."
+ set data [repeat "a" $fixed_len]
+
+ set ret [$db put 1 $data]
+ error_check_good dbput $ret 0
+ error_check_good dbget $data [lindex [lindex [$db get -recno 1] 0] 1]
+
+ set data [repeat "b" $fixed_len]
+ set ret [$db put -partial [list 0 $fixed_len] 1 $data]
+ error_check_good dbput $ret 0
+ error_check_good dbget $data [lindex [lindex [$db get -recno 1] 0] 1]
+
+ set data "InitialData"
+ set pdata "PUT"
+ set dlen [string length $pdata]
+ set ilen [string length $data]
+ set mid [expr $ilen/2]
+
+ # put initial data
+ set key 0
+
+ set offlist [list 0 $mid [expr $ilen -1] [expr $fixed_len - $dlen]]
+ puts "\t\tTest051.g: Now replace at different offsets ($offlist)."
+ foreach doff $offlist {
+ incr key
+ set ret [$db put $key $data]
+ error_check_good dbput:init $ret 0
+
+ puts "\t\t Test051.g: Replace at offset $doff."
+ set ret [$db put -partial [list $doff $dlen] $key $pdata]
+ error_check_good dbput:partial $ret 0
+
+ if { $doff == 0} {
+ set beg ""
+ set end [string range $data $dlen $ilen]
+ } else {
+ set beg [string range $data 0 [expr $doff - 1]]
+ set end [string range $data [expr $doff + $dlen] $ilen]
+ }
+ if { $doff > $ilen } {
+ # have to put padding between record and inserted
+ # string
+ set newdata [format %s%s $beg $end]
+ set diff [expr $doff - $ilen]
+ set nlen [string length $newdata]
+ set newdata [binary \
+ format a[set nlen]x[set diff]a$dlen $newdata $pdata]
+ } else {
+ set newdata [make_fixed_length \
+ frecno [format %s%s%s $beg $pdata $end]]
+ }
+ set ret [$db get -recno $key]
+ error_check_good compare($newdata,$ret) \
+ [binary_compare [lindex [lindex $ret 0] 1] $newdata] 0
+ }
+
+ $db close
+
+ puts "\tTest051 complete."
+}
diff --git a/bdb/test/test052.tcl b/bdb/test/test052.tcl
new file mode 100644
index 00000000000..820c99a2bd5
--- /dev/null
+++ b/bdb/test/test052.tcl
@@ -0,0 +1,254 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test052.tcl,v 11.10 2000/10/06 19:29:52 krinsky Exp $
+#
+# Test52
+# Renumbering recno test.
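+# In a renumbering recno database the record numbers are logical: deleting
+# record k shifts every later record down by one.  A minimal sketch
+# (hypothetical helper, never invoked by the suite):
+proc test052_renumber_sketch { dir } {
+	set db [berkdb_open -create -recno -renumber -mode 0644 \
+	    $dir/sketch052.db]
+	$db put 1 first
+	$db put 2 second
+	$db put 3 third
+	$db del 1
+	# "second" is now record 1, "third" is record 2
+	puts "record 1: [lindex [lindex [$db get 1] 0] 1]"
+	$db close
+}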
+proc test052 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test052: Test of renumbering recno."
+ if { [is_rrecno $method] != 1} {
+ puts "Test052: skipping for method $method."
+ return
+ }
+
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest052: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test052.db
+ set env NULL
+ } else {
+ set testfile test052.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -truncate -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # open curs to db
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest052: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ set ret [$db put $i $data$i]
+ error_check_good dbput $ret 0
+ }
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set keys($i) [lindex [lindex $ret 0] 0]
+ set darray($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest052: Deletes by key."
+ puts "\t Test052.a: Get data with SET, then delete before cursor."
+ # get key in middle of page, call this the nth set curr to it
+ set i [expr $nkeys/2]
+ set k $keys($i)
+ set ret [$dbc get -set $k]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 1] $darray($i)
+
+ # delete by key before current
+ set i [incr i -1]
+ error_check_good db_del:before [$db del $keys($i)] 0
+	# with renumber, current's data should be constant, but its key
+	# shifts down by one
+ set i [incr i +1]
+ error_check_good dbc:data \
+ [lindex [lindex [$dbc get -current] 0] 1] $darray($i)
+ error_check_good dbc:keys \
+ [lindex [lindex [$dbc get -current] 0] 0] $keys([expr $nkeys/2 - 1])
+
+ puts "\t Test052.b: Delete cursor item by key."
+ set i [expr $nkeys/2 ]
+
+ set ret [$dbc get -set $keys($i)]
+ error_check_bad dbc:get [llength $ret] 0
+ error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \
+ $darray([expr $i + 1])
+ error_check_good db_del:curr [$db del $keys($i)] 0
+ set ret [$dbc get -current]
+
+ # After a delete, cursor should return DB_NOTFOUND.
+ error_check_good dbc:get:key [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:get:data [llength [lindex [lindex $ret 0] 1]] 0
+
+ # And the item after the cursor should now be
+ # key: $nkeys/2, data: $nkeys/2 + 2
+ set ret [$dbc get -next]
+ error_check_bad dbc:getnext [llength $ret] 0
+ error_check_good dbc:getnext:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+ error_check_good dbc:getnext:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ puts "\t Test052.c: Delete item after cursor."
+ # should be { keys($nkeys/2), darray($nkeys/2 + 2) }
+ set i [expr $nkeys/2]
+ # deleting data for key after current (key $nkeys/2 + 1)
+ error_check_good db_del [$db del $keys([expr $i + 1])] 0
+
+ # current should be constant
+ set ret [$dbc get -current]
+ error_check_bad dbc:get:current [llength $ret] 0
+ error_check_good dbc:get:keys [lindex [lindex $ret 0] 0] \
+ $keys($i)
+ error_check_good dbc:get:data [lindex [lindex $ret 0] 1] \
+ $darray([expr $i + 2])
+
+ puts "\tTest052: Deletes by cursor."
+ puts "\t Test052.d: Delete, do DB_NEXT."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_get:first [lindex [lindex $ret 0] 1] $darray($i)
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc:getcurrent:key \
+ [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:getcurrent:data \
+ [llength [lindex [lindex $ret 0] 1]] 0
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good dbc:get:curs \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+ error_check_good dbc:get:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ # Move one more forward, so we're not on the first item.
+ error_check_bad dbc:getnext [llength [$dbc get -next]] 0
+
+ puts "\t Test052.e: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_bad dbc:get:curr [llength $ret] 0
+ error_check_good dbc:getcurrent:key \
+ [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:getcurrent:data \
+ [llength [lindex [lindex $ret 0] 1]] 0
+
+ # next should now reference the record that was previously after
+ # old current
+ set ret [$dbc get -next]
+ error_check_bad get:next [llength $ret] 0
+ error_check_good dbc:get:next:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+ error_check_good dbc:get:next:keys \
+ [lindex [lindex $ret 0] 0] $keys([expr $i + 1])
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc:get:curr [llength $ret] 0
+ error_check_good dbc:get:curr:compare \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+ error_check_good dbc:get:curr:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ # The rest of the test was written with the old rrecno semantics,
+ # which required a separate c_del(CURRENT) test; to leave
+ # the database in the expected state, we now delete the first item.
+ set ret [$dbc get -first]
+ error_check_bad getfirst [llength $ret] 0
+ error_check_good delfirst [$dbc del] 0
+
+ puts "\tTest052: Inserts."
+ puts "\t Test052.g: Insert before (DB_BEFORE)."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc:get:first [llength $ret] 0
+ error_check_good dbc_get:first \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_get:first:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+
+ set ret [$dbc put -before $darray($i)]
+ # should return new key, which should be $keys($i)
+ error_check_good dbc_put:before $ret $keys($i)
+ # cursor should adjust to point to new item
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_put:before:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_put:before:data \
+ [lindex [lindex $ret 0] 1] $darray($i)
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good dbc_get:next:compare \
+ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 3])]]
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+
+ puts "\t Test052.h: Insert by cursor after (DB_AFTER)."
+ set i [incr i]
+ set ret [$dbc put -after $darray($i)]
+ # should return new key, which should be $keys($i)
+ error_check_good dbcput:after $ret $keys($i)
+ # cursor should reference new item
+ set ret [$dbc get -current]
+ error_check_good dbc:get:current:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc:get:current:data \
+ [lindex [lindex $ret 0] 1] $darray($i)
+
+ # items after curs should be adjusted
+ set ret [$dbc get -next]
+ error_check_bad dbc:get:next [llength $ret] 0
+ error_check_good dbc:get:next:compare \
+ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 2])]]
+
+ puts "\t Test052.i: Insert (overwrite) current item (DB_CURRENT)."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ # choose a datum that is not currently in db
+ set ret [$dbc put -current $darray([expr $i + 2])]
+ error_check_good dbc_put:curr $ret 0
+ # curs should be on new item
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc_get:curr:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [incr i]
+ error_check_good dbc_get:next \
+ $ret [list [list $keys($i) $darray($i)]]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest052 complete."
+}
diff --git a/bdb/test/test053.tcl b/bdb/test/test053.tcl
new file mode 100644
index 00000000000..e3a908c90d8
--- /dev/null
+++ b/bdb/test/test053.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test053.tcl,v 11.12 2000/12/11 17:24:55 sue Exp $
+#
+# Test53: test of the DB_REVSPLITOFF flag in the btree and
+# Btree-w-recnum methods
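+# DB_REVSPLITOFF (-revsplitoff on berkdb_open) keeps mostly-empty leaf pages
+# in the tree instead of coalescing them with reverse splits, which is what
+# the page-count checks below depend on.  A minimal sketch (hypothetical
+# helper, never invoked by the suite):
+proc test053_revsplitoff_sketch { dir } {
+	set db [berkdb_open -create -btree -revsplitoff -pagesize 1024 \
+	    -mode 0644 $dir/sketch053.db]
+	# spread pairs across several leaf pages
+	for {set i 0} {$i < 500} {incr i} {
+		$db put key[format %03d $i] [repeat DATA 22]
+	}
+	set full [$db stat]
+	# delete all but one key; the "Leaf pages" count should not shrink
+	for {set i 1} {$i < 500} {incr i} {
+		$db del key[format %03d $i]
+	}
+	puts "before: $full"
+	puts "after:  [$db stat]"
+	$db close
+}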
+proc test053 { method args } {
+ global alphabet
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+	puts "\tTest053: Test of the DB_REVSPLITOFF flag ($omethod)."
+ if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } {
+ puts "Test053: skipping for method $method."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test053: skipping for specific pagesizes"
+ return
+ }
+
+ set txn ""
+ set flags ""
+
+ puts "\tTest053.a: Create $omethod $args database."
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test053.db
+ set env NULL
+ } else {
+ set testfile test053.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags \
+ "-create -truncate -revsplitoff -pagesize 1024 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 8
+ set npages 15
+
+ # We want to create a db with npages leaf pages, and have each page
+ # be near full with keys that we can predict. We set pagesize above
+	# to 1024 bytes; it should break down as follows (per page):
+ #
+ # ~20 bytes overhead
+ # key: ~4 bytes overhead, XXX0N where X is a letter, N is 0-9
+ # data: ~4 bytes overhead, + 100 bytes
+ #
+ # then, with 8 keys/page we should be just under 1024 bytes
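+	# Worked out with the values used below (the data item is
+	# [repeat DATA 22] = 88 bytes): 8 * ((4 + 5) + (4 + 88)) + 20
+	# = 8 * 101 + 20 = 828 bytes per leaf page, comfortably under 1024.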
+ puts "\tTest053.b: Create $npages pages with $nkeys pairs on each."
+ set keystring [string range $alphabet 0 [expr $npages -1]]
+ set data [repeat DATA 22]
+ for { set i 0 } { $i < $npages } {incr i } {
+ set key ""
+ set keyroot \
+ [repeat [string toupper [string range $keystring $i $i]] 3]
+ set key_set($i) $keyroot
+ for {set j 0} { $j < $nkeys} {incr j} {
+ if { $j < 10 } {
+ set key [set keyroot]0$j
+ } else {
+ set key $keyroot$j
+ }
+ set ret [$db put $key $data]
+ error_check_good dbput $ret 0
+ }
+ }
+
+ puts "\tTest053.c: Check page count."
+ error_check_good page_count:check \
+ [is_substr [$db stat] "{Leaf pages} $npages"] 1
+
+ puts "\tTest053.d: Delete all but one key per page."
+ for {set i 0} { $i < $npages } {incr i } {
+ for {set j 1} { $j < $nkeys } {incr j } {
+ set ret [$db del $key_set($i)0$j]
+ error_check_good dbdel $ret 0
+ }
+ }
+ puts "\tTest053.e: Check to make sure all pages are still there."
+ error_check_good page_count:check \
+ [is_substr [$db stat] "{Leaf pages} $npages"] 1
+
+ set dbc [$db cursor]
+ error_check_good db:cursor [is_substr $dbc $db] 1
+
+ # walk cursor through tree forward, backward.
+ # delete one key, repeat
+ for {set i 0} { $i < $npages} {incr i} {
+ puts -nonewline \
+ "\tTest053.f.$i: Walk curs through tree: forward..."
+ for { set j $i; set curr [$dbc get -first]} { $j < $npages} { \
+ incr j; set curr [$dbc get -next]} {
+ error_check_bad dbc:get:next [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts -nonewline "backward..."
+ for { set j [expr $npages - 1]; set curr [$dbc get -last]} { \
+ $j >= $i } { \
+ set j [incr j -1]; set curr [$dbc get -prev]} {
+ error_check_bad dbc:get:prev [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts "complete."
+
+ if { [is_rbtree $method] == 1} {
+ puts "\t\tTest053.f.$i:\
+ Walk through tree with record numbers."
+ for {set j 1} {$j <= [expr $npages - $i]} {incr j} {
+ set curr [$db get -recno $j]
+ error_check_bad \
+ db_get:recno:$j [llength $curr] 0
+ error_check_good db_get:recno:keys:$j \
+ [lindex [lindex $curr 0] 0] \
+ $key_set([expr $j + $i - 1])00
+ }
+ }
+ puts "\tTest053.g.$i:\
+ Delete single key ([expr $npages - $i] keys left)."
+ set ret [$db del $key_set($i)00]
+ error_check_good dbdel $ret 0
+ error_check_good del:check \
+ [llength [$db get $key_set($i)00]] 0
+ }
+
+ # end for loop, verify db_notfound
+ set ret [$dbc get -first]
+ error_check_good dbc:get:verify [llength $ret] 0
+
+ # loop: until single key restored on each page
+ for {set i 0} { $i < $npages} {incr i} {
+ puts "\tTest053.i.$i:\
+ Restore single key ([expr $i + 1] keys in tree)."
+ set ret [$db put $key_set($i)00 $data]
+ error_check_good dbput $ret 0
+
+ puts -nonewline \
+ "\tTest053.j: Walk cursor through tree: forward..."
+ for { set j 0; set curr [$dbc get -first]} { $j <= $i} {\
+ incr j; set curr [$dbc get -next]} {
+ error_check_bad dbc:get:next [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ error_check_good dbc:get:next [llength $curr] 0
+
+ puts -nonewline "backward..."
+ for { set j $i; set curr [$dbc get -last]} { \
+ $j >= 0 } { \
+ set j [incr j -1]; set curr [$dbc get -prev]} {
+ error_check_bad dbc:get:prev [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts "complete."
+ error_check_good dbc:get:prev [llength $curr] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\t\tTest053.k.$i:\
+ Walk through tree with record numbers."
+ for {set j 1} {$j <= [expr $i + 1]} {incr j} {
+ set curr [$db get -recno $j]
+ error_check_bad \
+ db_get:recno:$j [llength $curr] 0
+ error_check_good db_get:recno:keys:$j \
+ [lindex [lindex $curr 0] 0] \
+ $key_set([expr $j - 1])00
+ }
+ }
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "Test053 complete."
+}
diff --git a/bdb/test/test054.tcl b/bdb/test/test054.tcl
new file mode 100644
index 00000000000..7308f995645
--- /dev/null
+++ b/bdb/test/test054.tcl
@@ -0,0 +1,369 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test054.tcl,v 11.15 2000/08/25 14:21:57 sue Exp $
+#
+# Test054:
+#
+# This test checks for cursor maintenance in the presence of deletes.
+# There are 10 different scenarios to test:
+# 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+# 2. No duplicates. Cursor is positioned right before key K, Delete K,
+# do a next on the cursor.
+# 3. No duplicates. Cursor is positioned on key K, do a regular delete of K.
+# do a current get on K.
+# 4. Repeat 3 but do a next instead of current.
+#
+# 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+# does a delete. Then we do a non-cursor get.
+# 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# do a delete of the entire Key. Test cursor current.
+# 7. Continue last test and try cursor next.
+# 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# Cursor B is in the same duplicate set and deletes a different item.
+# Verify that the cursor is in the right place.
+# 9. Cursors A and B are in the same place in the same duplicate set. A deletes
+# its item. Do current on B.
+# 10. Continue 8 and do a next on B.
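+# A minimal sketch of scenario 1 (hypothetical helper, never invoked by the
+# suite): once a cursor has deleted the only item under a key, a plain
+# db get of that key comes back empty.
+proc test054_scenario1_sketch { dir } {
+	set db [berkdb_open -create -btree -mode 0644 $dir/sketch054.db]
+	$db put alpha datum_alpha
+	set curs [$db cursor]
+	set r [$curs get -set alpha]
+	# delete through the cursor, then read back through the db handle
+	$curs del
+	puts "items left for alpha: [llength [$db get alpha]]"
+	$curs close
+	$db close
+}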
+proc test054 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -truncate -mode 0644"
+ puts "Test054 ($method $args):\
+ interspersed cursor and normal operations"
+ if { [is_record_based $method] == 1 } {
+ puts "Test054 skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054.db
+ set env NULL
+ } else {
+ set testfile test054.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest054.a: No Duplicate Tests"
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_substr $curs $db] 1
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [$curs get -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # TEST CASE 1
+ puts "\tTest054.a1: Delete w/cursor, regular get"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [eval {$curs del} $txn]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [llength $r] 0
+
+ # Free up the cursor.
+ error_check_good cursor_close [eval {$curs close}] 0
+
+ # TEST CASE 2
+ puts "\tTest054.a2: Cursor before K, delete K, cursor next"
+
+ # Replace key 2
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+
+ # Open and position cursor on first item.
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_substr $curs $db] 1
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [eval {$curs get} -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -nextdup] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ set r [eval {$curs get} -set {$key_set(1)} ]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now delete (next item) $key_set(2)
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # TEST CASE 3
+ puts "\tTest054.a3: Cursor on K, delete K, cursor current"
+
+ # delete item 3
+ error_check_good \
+ db_del:$key_set(3) [eval {$db del} $txn {$key_set(3)}] 0
+ # NEEDS TO COME BACK IN, BUG CHECK
+ set ret [$curs get -current]
+ error_check_good current_after_del $ret [list [list [] []]]
+ error_check_good cursor_close [$curs close] 0
+
+ puts "\tTest054.a4: Cursor on K, delete K, cursor next"
+
+ # Restore keys 2 and 3
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}]
+ error_check_good put $r 0
+
+ # Create the new cursor and put it on 1
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_substr $curs $db] 1
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Delete 2
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ error_check_good db_close [$db close] 0
+
+ # Now get ready for duplicate tests
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test054: skipping remainder of test for method $method."
+ return
+ }
+
+ puts "\tTest054.b: Duplicate Tests"
+ append args " -dup"
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_substr $curs $db] 1
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [$curs get -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -nextdup] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # TEST CASE 5
+ puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key."
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [eval {$curs get} -set {$key_set(2)}]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1
+
+ # TEST CASE 6
+ puts "\tTest054.b2: Now get the next duplicate from the cursor."
+
+ # Now do next on cursor
+ set r [$curs get -nextdup]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ # TEST CASE 3
+ puts "\tTest054.b3: Two cursors in set; each delete different items"
+
+ # Open a new cursor.
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs_open [is_substr $curs2 $db] 1
+
+ # Set on last of duplicate set.
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_5
+
+ # Delete the item at cursor 1 (dup_1)
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify curs1 and curs2
+ # current should fail
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get_after_del $ret [list [list [] []]]
+
+ set r [$curs2 get -current]
+ error_check_bad curs2_get [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good curs_get:DB_CURRENT:data $d dup_5
+
+ # Now delete the item at cursor 2 (dup_5)
+ error_check_good curs2_del [$curs2 del] 0
+
+ # Verify curs1 and curs2
+ set ret [$curs get -current]
+ error_check_good curs1_get:del2 $ret [list [list [] []]]
+
+ set ret [$curs2 get -current]
+ error_check_good curs2_get:del2 $ret [list [list [] []]]
+
+ # Now verify that next and prev work.
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_4
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ puts "\tTest054.b4: Two cursors same item, one delete, one get"
+
+ # Move curs2 onto dup_2
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_3
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_2
+
+ # delete on curs 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get:deleted $ret [list [list [] []]]
+ set ret [$curs2 get -current]
+ error_check_good \
+ curs2_get:deleted $ret [list [list [] []]]
+
+ puts "\tTest054.b5: Now do a next on both cursors"
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ set r [$curs2 get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ error_check_good curs2_close [$curs2 close] 0
+ error_check_good db_close [$db close] 0
+}
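+
+# A minimal standalone sketch of the two-cursor duplicate behavior that
+# Test054.b exercises above: after a delete through one cursor, get -current
+# on either cursor reports a deleted item until the cursor is moved again.
+# It assumes the Berkeley DB Tcl API (berkdb_open etc.) is loaded as in these
+# tests; the proc name, the file name sketch054.db, and the btree method are
+# illustrative choices, and nothing in the suite invokes this proc.
+proc sketch054_dup_cursors { } {
+	set db [berkdb_open -create -mode 0644 -dup -btree sketch054.db]
+	$db put apple datum1
+	$db put apple datum2
+	$db put apple datum3
+
+	set c1 [$db cursor]
+	set c2 [$db cursor]
+	# Both cursors start on the first duplicate of "apple".
+	$c1 get -set apple
+	$c2 get -set apple
+
+	# Delete through cursor 1; cursor 2 is now on a deleted item, so
+	# get -current on either cursor returns the empty key/data pair.
+	$c1 del
+	puts "c1 -current: [$c1 get -current]"
+	puts "c2 -current: [$c2 get -current]"
+	# Moving either cursor lands on the next surviving duplicate.
+	puts "c2 -next:    [$c2 get -next]"
+	$c1 close
+	$c2 close
+	$db close
+}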
diff --git a/bdb/test/test055.tcl b/bdb/test/test055.tcl
new file mode 100644
index 00000000000..fc5ce4e98bd
--- /dev/null
+++ b/bdb/test/test055.tcl
@@ -0,0 +1,118 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test055.tcl,v 11.11 2000/08/25 14:21:57 sue Exp $
+#
+# Test055:
+# This test checks basic cursor operations.
+# There are N different scenarios to test:
+# 1. (no dups) Set cursor, retrieve current.
+# 2. (no dups) Set cursor, retrieve next.
+# 3. (no dups) Set cursor, retrieve prev.
+proc test055 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test055: $method interspersed cursor and normal operations"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test055.db
+ set env NULL
+ } else {
+ set testfile test055.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest055.a: No duplicates"
+ set db [eval {berkdb_open -create -truncate -mode 0644 $omethod } \
+ $args {$testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_substr $curs $db] 1
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [$curs get -first] } { [llength $d] != 0 } {\
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # TEST CASE 1
+ puts "\tTest055.a1: Set cursor, retrieve current"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve current
+ set r [$curs get -current]
+ error_check_bad cursor_get:DB_CURRENT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)]
+
+ # TEST CASE 2
+ puts "\tTest055.a2: Set cursor, retrieve previous"
+ set r [$curs get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(1)
+ error_check_good \
+ curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)]
+
+	# TEST CASE 3
+	puts "\tTest055.a3: Set cursor, retrieve next"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve next
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good \
+ curs_get:DB_NEXT:data $d [pad_data $method datum$key_set(3)]
+
+ # Close cursor and database.
+ error_check_good curs_close [$curs close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/bdb/test/test056.tcl b/bdb/test/test056.tcl
new file mode 100644
index 00000000000..ade3890c3f9
--- /dev/null
+++ b/bdb/test/test056.tcl
@@ -0,0 +1,145 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test056.tcl,v 11.13 2000/08/25 14:21:57 sue Exp $
+#
+# Test056
+# Check if deleting a key when a cursor is on a duplicate of that key works.
+proc test056 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -truncate -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "Test056: skipping for method $method"
+ return
+ }
+ puts "Test056: $method delete of key in presence of cursor"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test056.db
+ set env NULL
+ } else {
+ set testfile test056.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_substr $curs $db] 1
+
+ puts "\tTest056.a: Key delete with cursor on duplicate."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Now put the cursor on a duplicate of key 2
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do two nexts
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ # Now do the delete
+ set r [eval {$db del} $txn $flags {$key_set(2)}]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ set ret [$curs get -current]
+ error_check_good curs_after_del $ret [list [list [] []]]
+
+ # Now check that the rest of the database looks intact. There
+ # should be only two keys, 1 and 3.
+
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ set r [$curs get -next]
+ error_check_good cursor_get:DB_NEXT [llength $r] 0
+
+ puts "\tTest056.b:\
+ Cursor delete of first item, followed by cursor FIRST"
+ # Set to beginning
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ # Now do delete
+ error_check_good curs_del [$curs del] 0
+
+ # Now do DB_FIRST
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(3)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(3)
+
+ error_check_good curs_close [$curs close] 0
+ error_check_good db_close [$db close] 0
+}
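+
+# A minimal standalone sketch of the behavior Test056 checks above: deleting
+# a key through the database handle while a cursor sits on one of its
+# duplicates leaves that cursor on a deleted item. Assumes the Berkeley DB
+# Tcl API is loaded; the proc name and the file name sketch056.db are
+# illustrative, and nothing in the suite invokes this proc.
+proc sketch056_del_key_under_cursor { } {
+	set db [berkdb_open -create -mode 0644 -dup -btree sketch056.db]
+	$db put fruit datum
+	$db put fruit dup_1
+	$db put fruit dup_2
+
+	set curs [$db cursor]
+	# Park the cursor on a duplicate of "fruit".
+	$curs get -set fruit
+	$curs get -next
+
+	# Delete the whole key (and all its duplicates) via the db handle.
+	$db del fruit
+
+	# The cursor now reports a deleted item for -current.
+	puts "after del, -current: [$curs get -current]"
+	# Nothing is left in this one-key database.
+	puts "after del, -first:   [$curs get -first]"
+	$curs close
+	$db close
+}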
diff --git a/bdb/test/test057.tcl b/bdb/test/test057.tcl
new file mode 100644
index 00000000000..1dc350e32a5
--- /dev/null
+++ b/bdb/test/test057.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test057.tcl,v 11.17 2000/08/25 14:21:57 sue Exp $
+#
+# Test057:
+# Check if we handle the case where we delete a key with the cursor on it
+# and then add the same key. The cursor should not get the new item
+# returned, but the item shouldn't disappear.
+# Run two tests, one where the overwriting put is done with a put and
+# one where it's done with a cursor put.
+proc test057 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -truncate -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test057: skipping for method $method"
+ return
+ }
+ puts "Test057: $method delete and replace in presence of cursor."
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test057.db
+ set env NULL
+ } else {
+ set testfile test057.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good dbopen:dup [is_valid_db $db] TRUE
+
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_substr $curs $db] 1
+
+ puts "\tTest057.a: Set cursor, delete cursor, put with key."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [$curs get -first] } {[llength $d] != 0 } {\
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+	# Now set the cursor on key 1 (which has no duplicates).
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ error_check_good curs_get:del [$curs get -current] [list [list [] []]]
+
+ # Now do a put on the key
+ set r [eval {$db put} $txn $flags {$key_set(1) new_datum$key_set(1)}]
+ error_check_good put $r 0
+
+ # Do a get
+ set r [eval {$db get} $txn {$key_set(1)}]
+ error_check_good get [lindex [lindex $r 0] 1] new_datum$key_set(1)
+
+ # Recheck cursor
+ error_check_good curs_get:deleted [$curs get -current] [list [list [] []]]
+
+ # Move cursor and see if we get the key.
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d new_datum$key_set(1)
+
+ puts "\tTest057.b: Set two cursor on a key, delete one, overwrite other"
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs2_open [is_substr $curs2 $db] 1
+
+	# Set both cursors on the 3rd key
+ set r [$curs get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(3)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(3)
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs_get:deleted [$curs2 get -current] \
+ [list [list [] []]]
+
+ # Now do a replace through cursor 2
+ set pflags "-current"
+ if {[is_hash $method] == 1} {
+ error_check_good curs1_get_after_del [is_substr \
+ [$curs2 put $pflags new_datum$key_set(3)] "DB_NOTFOUND"] 1
+
+ # Gets fail
+ error_check_good curs1_get:deleted \
+ [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs2_get:deleted \
+ [$curs get -current] \
+ [list [list [] []]]
+ } else {
+		# btree only; recno is skipped for this test
+ set ret [$curs2 put $pflags new_datum$key_set(3)]
+ error_check_good curs_replace $ret 0
+ }
+
+ # Gets fail
+ #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1
+ #error_check_good curs1_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+ #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1
+ #error_check_good curs2_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+
+ puts "\tTest057.c:\
+ Set two cursors on a dup, delete one, overwrite other"
+
+ # Set both cursors on the 2nd duplicate of key 2
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs2 get -set $key_set(2)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(2)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs2 get -next]
+ error_check_bad cursor2_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs2_get:DB_NEXT:data $d dup_1
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs_get:deleted [$curs2 get -current] \
+ [list [list [] []]]
+
+ # Now do a replace through cursor 2 -- this will work on btree but
+ # not on hash
+ if {[is_hash $method] == 1} {
+ error_check_good hash_replace \
+ [is_substr [$curs2 put -current new_dup_1] "DB_NOTFOUND"] 1
+ } else {
+ error_check_good curs_replace [$curs2 put -current new_dup_1] 0
+ }
+
+ # Both gets should fail
+ #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1
+ #error_check_good curs1_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+ #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1
+ #error_check_good curs2_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+
+ error_check_good curs2_close [$curs2 close] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good db_close [$db close] 0
+}
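+
+# A minimal standalone sketch of the delete-then-reput behavior Test057
+# checks above: after a cursor delete, re-adding the same key with a plain
+# db put does not move the cursor onto the new item. Assumes the Berkeley DB
+# Tcl API is loaded; the proc name and the file name sketch057.db are
+# illustrative, and nothing in the suite invokes this proc.
+proc sketch057_del_then_reput { } {
+	set db [berkdb_open -create -mode 0644 -dup -btree sketch057.db]
+	$db put color datum
+
+	set curs [$db cursor]
+	$curs get -set color
+
+	# Delete through the cursor, then re-add the same key via db put.
+	$curs del
+	$db put color new_datum
+
+	# The cursor still reports the deleted slot until it is repositioned,
+	# but the new item is visible through the db handle and via -first.
+	puts "cursor -current: [$curs get -current]"
+	puts "db get:          [$db get color]"
+	puts "cursor -first:   [$curs get -first]"
+	$curs close
+	$db close
+}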
diff --git a/bdb/test/test058.tcl b/bdb/test/test058.tcl
new file mode 100644
index 00000000000..00870a6b5f8
--- /dev/null
+++ b/bdb/test/test058.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test058.tcl,v 11.14 2000/08/25 14:21:57 sue Exp $
+#
+proc test058 { method args } {
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test058 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test058: skipping for method $method"
+ return
+ }
+ puts "Test058: $method delete dups after inserting after duped key."
+
+ # environment
+ env_cleanup $testdir
+ set eflags "-create -txn -home $testdir"
+ set env [eval {berkdb env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+
+ # db open
+ set flags "-create -mode 0644 -dup -env $env $args"
+ set db [eval {berkdb_open} $flags $omethod "test058.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set tn ""
+ set tid ""
+ set tn [$env txn]
+ set tflags "-txn $tn"
+
+ puts "\tTest058.a: Adding 10 duplicates"
+ # Add a bunch of dups
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret \
+ [eval {$db put} $tflags {doghouse $i"DUPLICATE_DATA_VALUE"}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest058.b: Adding key after duplicates"
+ # Now add one more key/data AFTER the dup set.
+ set ret [eval {$db put} $tflags {zebrahouse NOT_A_DUP}]
+ error_check_good db_put $ret 0
+
+ error_check_good txn_commit [$tn commit] 0
+
+ set tn [$env txn]
+ error_check_good txnbegin [is_substr $tn $env] 1
+ set tflags "-txn $tn"
+
+ # Now delete everything
+ puts "\tTest058.c: Deleting duplicated key"
+ set ret [eval {$db del} $tflags {doghouse}]
+ error_check_good del $ret 0
+
+ # Now reput everything
+ set pad \
+ abcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuv
+
+ puts "\tTest058.d: Reputting duplicates with big data vals"
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret [eval {$db put} \
+ $tflags {doghouse $i"DUPLICATE_DATA_VALUE"$pad}]
+ error_check_good db_put $ret 0
+ }
+ error_check_good txn_commit [$tn commit] 0
+
+ # Check duplicates for order
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest058.e: Verifying that duplicates are in order."
+ set i 0
+ for { set ret [$dbc get -set doghouse] } \
+ {$i < 10 && [llength $ret] != 0} \
+ { set ret [$dbc get -nextdup] } {
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good \
+ duplicate_value $data $i"DUPLICATE_DATA_VALUE"$pad
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ reset_env $env
+}
diff --git a/bdb/test/test059.tcl b/bdb/test/test059.tcl
new file mode 100644
index 00000000000..f9988c4e20b
--- /dev/null
+++ b/bdb/test/test059.tcl
@@ -0,0 +1,128 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test059.tcl,v 11.12 2000/08/25 14:21:57 sue Exp $
+#
+# Test059:
+# Make sure that we handle retrieves of zero-length data items correctly.
+# The following ops should allow a 0-length partial data retrieval:
+# db_get
+# db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+#
+proc test059 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test059: $method 0-length partial data retrieval"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test059.db
+ set env NULL
+ } else {
+ set testfile test059.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest059.a: Populate a database"
+ set oflags "-create -truncate -mode 0644 $omethod $args $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_create [is_substr $db db] 1
+
+ # Put ten keys in the database
+ for { set key 1 } { $key <= 10 } {incr key} {
+ set r [eval {$db put} $txn $pflags {$key datum$key}]
+ error_check_good put $r 0
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ set curs [$db cursor]
+ error_check_good db_curs [is_substr $curs $db] 1
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ puts "\tTest059.a: db get with 0 partial length retrieve"
+
+	# Do a db get on the middle key with a 0-length partial retrieval.
+ set ret [eval {$db get -partial {0 0}} $gflags {$key_set(5)}]
+ error_check_bad db_get_0 [llength $ret] 0
+
+ puts "\tTest059.a: db cget FIRST with 0 partial length retrieve"
+ set ret [$curs get -first -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_first $key $key_set(1)
+ error_check_good db_cget_first [string length $data] 0
+
+ puts "\tTest059.b: db cget NEXT with 0 partial length retrieve"
+ set ret [$curs get -next -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_next $key $key_set(2)
+ error_check_good db_cget_next [string length $data] 0
+
+ puts "\tTest059.c: db cget LAST with 0 partial length retrieve"
+ set ret [$curs get -last -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_last $key $key_set(10)
+ error_check_good db_cget_last [string length $data] 0
+
+ puts "\tTest059.d: db cget PREV with 0 partial length retrieve"
+ set ret [$curs get -prev -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_prev $key $key_set(9)
+ error_check_good db_cget_prev [string length $data] 0
+
+ puts "\tTest059.e: db cget CURRENT with 0 partial length retrieve"
+ set ret [$curs get -current -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_current $key $key_set(9)
+ error_check_good db_cget_current [string length $data] 0
+
+ puts "\tTest059.f: db cget SET with 0 partial length retrieve"
+ set ret [$curs get -set -partial {0 0} $key_set(7)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(7)
+ error_check_good db_cget_set [string length $data] 0
+
+ if {[is_btree $method] == 1} {
+ puts "\tTest059.g:\
+ db cget SET_RANGE with 0 partial length retrieve"
+ set ret [$curs get -set_range -partial {0 0} $key_set(5)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(5)
+ error_check_good db_cget_set [string length $data] 0
+ }
+
+ error_check_good curs_close [$curs close] 0
+ error_check_good db_close [$db close] 0
+}
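+
+# A minimal standalone sketch of the 0-length partial retrieval that Test059
+# exercises above: -partial {0 0} asks for zero bytes at offset zero, so each
+# get returns the key with an empty data item. Assumes the Berkeley DB Tcl
+# API is loaded; the proc name and the file name sketch059.db are
+# illustrative, and nothing in the suite invokes this proc.
+proc sketch059_zero_len_get { } {
+	set db [berkdb_open -create -mode 0644 -btree sketch059.db]
+	for { set k 1 } { $k <= 3 } { incr k } {
+		$db put key$k datum$k
+	}
+
+	# Plain db get with a 0-length partial retrieval.
+	puts "db get:     [$db get -partial {0 0} key2]"
+
+	# Cursor gets with the same partial spec.
+	set curs [$db cursor]
+	puts "cget FIRST: [$curs get -first -partial {0 0}]"
+	puts "cget NEXT:  [$curs get -next -partial {0 0}]"
+	puts "cget SET:   [$curs get -set -partial {0 0} key3]"
+	$curs close
+	$db close
+}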
diff --git a/bdb/test/test060.tcl b/bdb/test/test060.tcl
new file mode 100644
index 00000000000..7f7cc71f00b
--- /dev/null
+++ b/bdb/test/test060.tcl
@@ -0,0 +1,53 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test060.tcl,v 11.6 2000/08/25 14:21:57 sue Exp $
+#
+# Test060: Test of the DB_EXCL flag to DB->open.
+# 1) Attempt to open and create a nonexistent database; verify success.
+# 2) Attempt to reopen it; verify failure.
+proc test060 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open"
+
+ # Set the database location and make sure the db doesn't exist yet
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test060.db
+ set env NULL
+ } else {
+ set testfile test060.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ # Create the database and check success
+ puts "\tTest060.a: open and close non-existent file with DB_EXCL"
+ set db [eval {berkdb_open \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen:excl [is_valid_db $db] TRUE
+
+ # Close it and check success
+ error_check_good db_close [$db close] 0
+
+ # Try to open it again, and make sure the open fails
+ puts "\tTest060.b: open it again with DB_EXCL and make sure it fails"
+ set errorCode NONE
+ error_check_good open:excl:catch [catch { \
+ set db [eval {berkdb_open_noerr \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ } ret ] 1
+
+ error_check_good dbopen:excl [is_substr $errorCode EEXIST] 1
+}
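+
+# A minimal standalone sketch of the DB_EXCL pattern Test060 checks above:
+# -create -excl succeeds only when the file does not yet exist. Assumes the
+# Berkeley DB Tcl API is loaded and that sketch060.db does not already exist;
+# the proc name and file name are illustrative, and nothing in the suite
+# invokes this proc.
+proc sketch060_excl_open { } {
+	# First exclusive create of a nonexistent file succeeds.
+	set db [berkdb_open -create -excl -mode 0644 -btree sketch060.db]
+	$db close
+
+	# A second -create -excl open of the same file must fail; catch the
+	# error the way Test060.b does.
+	set failed [catch {
+		berkdb_open_noerr -create -excl -mode 0644 -btree sketch060.db
+	} res]
+	puts "second DB_EXCL open failed: $failed ($res)"
+}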
diff --git a/bdb/test/test061.tcl b/bdb/test/test061.tcl
new file mode 100644
index 00000000000..c3187268e39
--- /dev/null
+++ b/bdb/test/test061.tcl
@@ -0,0 +1,215 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test061.tcl,v 11.12 2000/10/27 13:23:56 sue Exp $
+#
+# Test061: Test of transaction abort and commit for in-memory databases.
+# a) Put + abort: verify absence of data
+# b) Put + commit: verify presence of data
+# c) Overwrite + abort: verify that data is unchanged
+# d) Overwrite + commit: verify that data has changed
+# e) Delete + abort: verify that data is still present
+# f) Delete + commit: verify that data has been deleted
+proc test061 { method args } {
+ global alphabet
+ global errorCode
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test061 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1} {
+ puts "Test061 skipping for method $method"
+ return
+ }
+
+ puts "Test061: Transaction abort and commit test for in-memory data."
+ puts "Test061: $method $args"
+
+ set key "key"
+ set data "data"
+ set otherdata "otherdata"
+ set txn ""
+ set flags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1} {
+ set key 1
+ set gflags " -recno"
+ }
+
+ puts "\tTest061: Create environment and $method database."
+ env_cleanup $testdir
+
+ # create environment
+ set eflags "-create -txn -home $testdir"
+ set dbenv [eval {berkdb env} $eflags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # db open -- no file specified, in-memory database
+ set flags "-create $args $omethod"
+ set db [eval {berkdb_open -env} $dbenv $flags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here we go with the six test cases. Since we need to verify
+ # a different thing each time, and since we can't just reuse
+ # the same data if we're to test overwrite, we just
+ # plow through rather than writing some impenetrable loop code;
+ # each of the cases is only a few lines long, anyway.
+
+ puts "\tTest061.a: put/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for *non*-existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ puts "\tTest061.b: put/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check again for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.c: overwrite/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check that data is unchanged ($data not $otherdata)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.d: overwrite/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check that data has changed ($otherdata not $data)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.e: delete/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # delete
+ set ret [eval {$db del} -txn $txn {$key}]
+ error_check_good db_put $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.f: delete/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+	# delete the key
+ set ret [eval {$db del} -txn $txn {$key}]
+ error_check_good db_put $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check for continued nonexistence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ # We're done; clean up.
+ error_check_good db_close [eval {$db close}] 0
+ error_check_good env_close [eval {$dbenv close}] 0
+
+ # Now run db_recover and ensure that it runs cleanly.
+ puts "\tTest061.g: Running db_recover -h"
+ set ret [catch {exec $util_path/db_recover -h $testdir} res]
+ if { $ret != 0 } {
+		puts "FAIL: db_recover output $res"
+ }
+ error_check_good db_recover $ret 0
+
+ puts "\tTest061.h: Running db_recover -c -h"
+ set ret [catch {exec $util_path/db_recover -c -h $testdir} res]
+ error_check_good db_recover-c $ret 0
+}
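+
+# A minimal standalone sketch of the abort/commit behavior Test061 checks
+# above, using an in-memory database (no file name) in a transactional
+# environment. Assumes the Berkeley DB Tcl API is loaded and that $homedir
+# is an existing directory; the proc name is illustrative, and nothing in
+# the suite invokes this proc.
+proc sketch061_txn_abort_commit { homedir } {
+	set env [berkdb env -create -txn -home $homedir]
+	set db [berkdb_open -env $env -create -btree]
+
+	# Put inside a transaction, then abort: the data must not appear.
+	set txn [$env txn]
+	$db put -txn $txn fruit apple
+	$txn abort
+	puts "after abort:  [$db get fruit]"
+
+	# Put inside a transaction, then commit: the data must appear.
+	set txn [$env txn]
+	$db put -txn $txn fruit apple
+	$txn commit
+	puts "after commit: [$db get fruit]"
+
+	$db close
+	$env close
+}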
diff --git a/bdb/test/test062.tcl b/bdb/test/test062.tcl
new file mode 100644
index 00000000000..43a5e1d3939
--- /dev/null
+++ b/bdb/test/test062.tcl
@@ -0,0 +1,125 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test062.tcl,v 11.13 2000/12/20 19:02:36 sue Exp $
+#
+# DB Test 62: Test of partial puts onto duplicate pages.
+# Insert the first 200 words from the dictionary, 200 times each, with
+# self as key and <random letter>:self as data. Use partial puts to
+# append self again to data; verify correctness.
+proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum:\
+ $method ($args) Partial puts and duplicates."
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create -truncate -mode 0644 \
+ $omethod -dup} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest0$tnum.a: Put loop (initialize database)"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set keys($count) $str
+
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ close $did
+
+ puts "\tTest0$tnum.b: Partial puts."
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+
+ # Do a partial write to extend each datum in
+ # the regular db by the corresponding dictionary word.
+ # We have to go through each key's dup set using -set
+ # because cursors are not stable in the hash AM and we
+ # want to make sure we hit all the keys.
+ for { set i 0 } { $i < $count } { incr i } {
+ set key $keys($i)
+ for {set ret [$dbc get -set $key]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set orig_d [lindex [lindex $ret 0] 1]
+ set d [string range $orig_d 2 end]
+ set doff [expr [string length $d] + 2]
+ set dlen 0
+ error_check_good data_and_key_sanity $d $k
+
+ set ret [$dbc get -current]
+ error_check_good before_sanity \
+ [lindex [lindex $ret 0] 0] \
+ [string range [lindex [lindex $ret 0] 1] 2 end]
+
+ error_check_good partial_put [eval {$dbc put -current \
+ -partial [list $doff $dlen] $d}] 0
+
+ set ret [$dbc get -current]
+ error_check_good partial_put_correct \
+ [lindex [lindex $ret 0] 1] $orig_d$d
+ }
+ }
+
+ puts "\tTest0$tnum.c: Double-checking get loop."
+ # Double-check that each datum in the regular db has
+ # been appropriately modified.
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good modification_correct \
+ [string range $d 2 end] [repeat $k 2]
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
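+
+# A minimal standalone sketch of the append-by-partial-put trick Test062
+# uses above: a -partial put at offset [string length $data] with length 0
+# replaces zero existing bytes, i.e. it appends to the current data item.
+# Assumes the Berkeley DB Tcl API is loaded; the proc name and the file name
+# sketch062.db are illustrative, and nothing in the suite invokes this proc.
+proc sketch062_partial_append { } {
+	set db [berkdb_open -create -mode 0644 -btree sketch062.db]
+	$db put word x:word
+
+	set dbc [$db cursor]
+	set ret [$dbc get -set word]
+	set d [lindex [lindex $ret 0] 1]
+
+	# Append the key itself to the existing datum "x:word".
+	set doff [string length $d]
+	$dbc put -current -partial [list $doff 0] word
+	puts "after append: [$dbc get -current]"
+	$dbc close
+	$db close
+}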
diff --git a/bdb/test/test063.tcl b/bdb/test/test063.tcl
new file mode 100644
index 00000000000..2b9c4c4c763
--- /dev/null
+++ b/bdb/test/test063.tcl
@@ -0,0 +1,141 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test063.tcl,v 11.11 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 63: Test that the DB_RDONLY flag is respected.
+# Attempt to both DB->put and DBC->c_put into a database
+# that has been opened DB_RDONLY, and check for failure.
+proc test063 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 63
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set key "key"
+ set data "data"
+ set key2 "another_key"
+ set data2 "more_data"
+
+ set gflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set key "1"
+ set key2 "2"
+ append gflags " -recno"
+ }
+
+ puts "Test0$tnum: $method ($args) DB_RDONLY test."
+
+ # Create a test database.
+ puts "\tTest0$tnum.a: Creating test database."
+ set db [eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ # Put and get an item so it's nonempty.
+ set ret [eval {$db put} $key [chop_data $method $data]]
+ error_check_good initial_put $ret 0
+
+ set dbt [eval {$db get} $gflags $key]
+ error_check_good initial_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ error_check_good db_close [$db close] 0
+
+ if { $eindex == -1 } {
+ # Confirm that database is writable. If we are
+ # using an env (that may be remote on a server)
+ # we cannot do this check.
+ error_check_good writable [file writable $testfile] 1
+ }
+
+ puts "\tTest0$tnum.b: Re-opening DB_RDONLY and attempting to put."
+
+ # Now open it read-only and make sure we can get but not put.
+ set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set dbt [eval {$db get} $gflags $key]
+ error_check_good db_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ set ret [catch {eval {$db put} $key2 [chop_data $method $data]} res]
+ error_check_good put_failed $ret 1
+ error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set errorCode "NONE"
+
+ puts "\tTest0$tnum.c: Attempting cursor put."
+
+ set dbc [$db cursor]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good cursor_set [$dbc get -first] $dbt
+ set ret [catch {eval {$dbc put} -current $data} res]
+ error_check_good c_put_failed $ret 1
+ error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $gflags $key2]
+ error_check_good db_get_key2 $dbt ""
+
+ puts "\tTest0$tnum.d: Attempting ordinary delete."
+
+ set errorCode "NONE"
+ set ret [catch {eval {$db del} $key} 1]
+ error_check_good del_failed $ret 1
+ error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $gflags $key]
+ error_check_good db_get_key $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest0$tnum.e: Attempting cursor delete."
+ # Just set the cursor to the beginning; we don't care what's there...
+ # yet.
+ set dbt2 [$dbc get -first]
+ error_check_good db_get_first_key $dbt2 $dbt
+ set errorCode "NONE"
+ set ret [catch {$dbc del} res]
+ error_check_good c_del_failed $ret 1
+ error_check_good dbc_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt2 [$dbc get -current]
+ error_check_good db_get_key $dbt2 $dbt
+
+ puts "\tTest0$tnum.f: Close, reopen db; verify unchanged."
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $omethod $args $testfile]
+ error_check_good db_reopen [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good first_there [$dbc get -first] \
+ [list [list $key [pad_data $method $data]]]
+ error_check_good nomore_there [$dbc get -next] ""
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
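+
+# A minimal standalone sketch of the read-only behavior Test063 checks
+# above: after reopening with -rdonly, gets succeed while puts and deletes
+# raise errors (the test above verifies that errorCode is EACCES). Assumes
+# the Berkeley DB Tcl API is loaded; the proc name and the file name
+# sketch063.db are illustrative, and nothing in the suite invokes this proc.
+proc sketch063_rdonly { } {
+	set db [berkdb_open -create -mode 0644 -btree sketch063.db]
+	$db put stone datum
+	$db close
+
+	# Reopen the existing file read-only (no method, as in Test063.b).
+	set db [berkdb_open_noerr -rdonly sketch063.db]
+	puts "get:        [$db get stone]"
+	puts "put failed: [catch {$db put stone newdatum} res] ($res)"
+	puts "del failed: [catch {$db del stone} res] ($res)"
+	$db close
+}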
diff --git a/bdb/test/test064.tcl b/bdb/test/test064.tcl
new file mode 100644
index 00000000000..ad39f4b2256
--- /dev/null
+++ b/bdb/test/test064.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test064.tcl,v 11.8 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 64: Test of DB->get_type
+# Create a database of type specified by method.
+# Make sure DB->get_type returns the right thing with both a
+# normal and DB_UNKNOWN open.
+proc test064 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 64
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) DB->get_type test."
+
+ # Create a test database.
+ puts "\tTest0$tnum.a: Creating test database of type $method."
+ set db [eval {berkdb_open -create -truncate -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.b: get_type after method specifier."
+
+ set db [eval {berkdb_open} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: get_type after DB_UNKNOWN."
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+}
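+
+# A minimal standalone sketch of the DB->get_type behavior Test064 checks
+# above: a database reopened without a method (DB_UNKNOWN) still reports
+# the access method it was created with. Assumes the Berkeley DB Tcl API is
+# loaded; the proc name and the file name sketch064.db are illustrative, and
+# nothing in the suite invokes this proc.
+proc sketch064_get_type { } {
+	set db [berkdb_open -create -mode 0644 -btree sketch064.db]
+	$db close
+
+	# Reopen with no method specifier and ask for the type.
+	set db [berkdb_open sketch064.db]
+	puts "type: [$db get_type]"
+	$db close
+}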
diff --git a/bdb/test/test065.tcl b/bdb/test/test065.tcl
new file mode 100644
index 00000000000..5f236ebbd04
--- /dev/null
+++ b/bdb/test/test065.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test065.tcl,v 11.8 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 65: Test of DB->stat(DB_RECORDCOUNT)
+proc test065 { method args } {
+ source ./include.tcl
+ global errorCode
+ global alphabet
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 65
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) DB->stat(DB_RECORDCOUNT) test."
+
+ puts "\tTest0$tnum.a: Create database and check it while empty."
+
+ set db [eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set ret [catch {eval $db stat -recordcount} res]
+
+ error_check_good db_close [$db close] 0
+
+ if { ([is_record_based $method] && ![is_queue $method]) \
+ || [is_rbtree $method] } {
+ error_check_good recordcount_ok [lindex [lindex $res 0] 1] 0
+ } else {
+ error_check_good \
+ recordcount_notok [is_substr $errorCode "EINVAL"] 1
+ puts "\tTest0$tnum: Test complete for method $method."
+ return
+ }
+
+ # If we've got this far, we're on an access method for
+ # which DB_RECORDCOUNT makes sense. Thus, we no longer
+ # catch EINVALs, and no longer care about __db_errs.
+ set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile]
+
+ puts "\tTest0$tnum.b: put 10000 keys."
+
+ if { [is_record_based $method] } {
+ set gflags " -recno "
+ set keypfx ""
+ } else {
+ set gflags ""
+ set keypfx "key"
+ }
+
+ set data [pad_data $method $alphabet]
+
+ for { set ndx 1 } { $ndx <= 10000 } { incr ndx } {
+ set ret [eval {$db put} $keypfx$ndx $data]
+ error_check_good db_put $ret 0
+ }
+
+ set ret [$db stat -recordcount]
+ error_check_good \
+ recordcount_after_puts [lindex [lindex $ret 0] 1] 10000
+
+ puts "\tTest0$tnum.c: delete 9000 keys."
+ for { set ndx 1 } { $ndx <= 9000 } { incr ndx } {
+ if { [is_rrecno $method] == 1 } {
+ # if we're renumbering, when we hit key 5001 we'll
+ # have deleted 5000 and we'll croak! So delete key
+ # 1, repeatedly.
+ set ret [eval {$db del} [concat $keypfx 1]]
+ } else {
+ set ret [eval {$db del} $keypfx$ndx]
+ }
+ error_check_good db_del $ret 0
+ }
+
+ set ret [$db stat -recordcount]
+ if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } {
+ # We allow renumbering--thus the stat should return 1000
+ error_check_good \
+ recordcount_after_dels [lindex [lindex $ret 0] 1] 1000
+ } else {
+ # No renumbering--no change in RECORDCOUNT!
+ error_check_good \
+ recordcount_after_dels [lindex [lindex $ret 0] 1] 10000
+ }
+
+ puts "\tTest0$tnum.d: put 8000 new keys at the beginning."
+ for { set ndx 1 } { $ndx <= 8000 } {incr ndx } {
+ set ret [eval {$db put} $keypfx$ndx $data]
+ error_check_good db_put_beginning $ret 0
+ }
+
+ set ret [$db stat -recordcount]
+ if { [is_rrecno $method] == 1 } {
+ # With renumbering we're back up to 8000
+ error_check_good \
+ recordcount_after_dels [lindex [lindex $ret 0] 1] 8000
+ } elseif { [is_rbtree $method] == 1 } {
+ # Total records in a btree is now 9000
+ error_check_good \
+ recordcount_after_dels [lindex [lindex $ret 0] 1] 9000
+ } else {
+ # No renumbering--still no change in RECORDCOUNT.
+ error_check_good \
+ recordcount_after_dels [lindex [lindex $ret 0] 1] 10000
+ }
+
+ puts "\tTest0$tnum.e: put 8000 new keys off the end."
+ for { set ndx 9001 } { $ndx <= 17000 } {incr ndx } {
+ set ret [eval {$db put} $keypfx$ndx $data]
+ error_check_good db_put_end $ret 0
+ }
+
+ set ret [$db stat -recordcount]
+ if { [is_rbtree $method] != 1 } {
+ # If this is a recno database, the record count should
+ # be up to 17000, the largest number we've seen, with
+ # or without renumbering.
+ error_check_good \
+ recordcount_after_dels [lindex [lindex $ret 0] 1] 17000
+ } else {
+ # In an rbtree, 1000 of those keys were overwrites,
+ # so there are 7000 new keys + 9000 old keys == 16000
+ error_check_good \
+ recordcount_after_dels [lindex [lindex $ret 0] 1] 16000
+ }
+
+ error_check_good db_close [$db close] 0
+}
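+
+# A minimal standalone sketch of the record-count query Test065 exercises
+# above, on a recno database where -recordcount is meaningful. Assumes the
+# Berkeley DB Tcl API is loaded; the proc name, the file name sketch065.db,
+# and the record count of 100 are illustrative, and nothing in the suite
+# invokes this proc.
+proc sketch065_recordcount { } {
+	set db [berkdb_open -create -mode 0644 -recno sketch065.db]
+	for { set ndx 1 } { $ndx <= 100 } { incr ndx } {
+		$db put $ndx datum$ndx
+	}
+
+	# stat -recordcount returns a name/value pair; pull out the value
+	# the same way the test above does.
+	set ret [$db stat -recordcount]
+	puts "record count: [lindex [lindex $ret 0] 1]"
+	$db close
+}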
diff --git a/bdb/test/test066.tcl b/bdb/test/test066.tcl
new file mode 100644
index 00000000000..591c51a4c87
--- /dev/null
+++ b/bdb/test/test066.tcl
@@ -0,0 +1,73 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test066.tcl,v 11.7 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 66: Make sure a cursor put to DB_CURRENT acts as an overwrite in
+# a database with duplicates
+proc test066 { method args } {
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set tnum 66
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Test0$tnum: Skipping for method $method."
+ return
+ }
+
+ puts "Test0$tnum: Test of cursor put to DB_CURRENT with duplicates."
+
+ source ./include.tcl
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test066.db
+ set env NULL
+ } else {
+ set testfile test066.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set key "test"
+ set data "olddata"
+
+ set db [eval {berkdb_open -create -mode 0644 -dup} $omethod $args \
+ $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set ret [eval {$db put} $key [chop_data $method $data]]
+ error_check_good db_put $ret 0
+
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -first]
+ error_check_good db_get $ret [list [list $key [pad_data $method $data]]]
+
+ set newdata "newdata"
+ set ret [$dbc put -current [chop_data $method $newdata]]
+ error_check_good dbc_put $ret 0
+
+ # There should be only one (key,data) pair in the database, and this
+ # is it.
+ set ret [$dbc get -first]
+ error_check_good db_get_first $ret \
+ [list [list $key [pad_data $method $newdata]]]
+
+ # and this one should come up empty.
+ set ret [$dbc get -next]
+ error_check_good db_get_next $ret ""
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum: Test completed successfully."
+}
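+
+# A minimal standalone sketch of the overwrite behavior Test066 checks
+# above: in a database opened with -dup, a cursor put -current replaces the
+# item the cursor is on instead of adding a new duplicate. Assumes the
+# Berkeley DB Tcl API is loaded; the proc name and the file name
+# sketch066.db are illustrative, and nothing in the suite invokes this proc.
+proc sketch066_current_overwrite { } {
+	set db [berkdb_open -create -mode 0644 -dup -btree sketch066.db]
+	$db put animal olddata
+
+	set dbc [$db cursor]
+	$dbc get -first
+	$dbc put -current newdata
+
+	# Only one item should remain, and it should be the new data.
+	puts "first: [$dbc get -first]"
+	puts "next:  [$dbc get -next]"
+	$dbc close
+	$db close
+}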
diff --git a/bdb/test/test067.tcl b/bdb/test/test067.tcl
new file mode 100644
index 00000000000..c287d7b1ec5
--- /dev/null
+++ b/bdb/test/test067.tcl
@@ -0,0 +1,114 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test067.tcl,v 11.12 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 67: Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+# This test was written to address the following issue, #2 in the list of
+# issues relating to bug #0820:
+# 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+# In Btree, the DB_CURRENT overwrite of off-page duplicate records
+# first deletes the record and then puts the new one -- this could
+# be a problem if the removal of the record causes a reverse split.
+# Suggested solution is to acquire a cursor to lock down the current
+# record, put a new record after that record, and then delete using
+# the held cursor.
+# It also tests the following, #5 in the same list of issues:
+# 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL set,
+# duplicate comparison routine specified.
+# The partial change does not change how data items sort, but the
+# record to be put isn't built yet, and that record supplied is the
+#	record to be put isn't built yet, and the record supplied is the
+proc test067 { method {ndups 1000} {tnum 67} args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set eindex [lsearch -exact $args "-env"]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+
+ puts "Test0$tnum:\
+ $method ($args) Partial puts on near-empty duplicate pages."
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+
+ foreach dupopt { "-dup" "-dup -dupsort" } {
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -truncate -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.a ($dupopt): Put $ndups duplicates."
+
+ set key "key_test$tnum"
+
+ for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
+ set data $alphabet$ndx
+
+ # No need for pad_data since we're skipping recno.
+ set ret [eval {$db put} $key $data]
+ error_check_good put($key,$data) $ret 0
+ }
+
+ # Sync so we can inspect database if the next section bombs.
+ error_check_good db_sync [$db sync] 0
+ puts "\tTest0$tnum.b ($dupopt):\
+ Deleting dups (last first), overwriting each."
+
+ set dbc [$db cursor]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ while { $count < $ndups - 1 } {
+ # set cursor to last item in db
+ set ret [$dbc get -last]
+ error_check_good \
+ verify_key [lindex [lindex $ret 0] 0] $key
+
+ # for error reporting
+ set currdatum [lindex [lindex $ret 0] 1]
+
+ # partial-overwrite it
+ # (overwrite offsets 1-4 with "bcde"--which they
+ # already are)
+
+ # Even though we expect success, we catch this
+ # since it might return EINVAL, and we want that
+ # to FAIL.
+ set errorCode NONE
+ set ret [catch {eval $dbc put -current \
+ {-partial [list 1 4]} "bcde"} \
+ res]
+ error_check_good \
+ partial_put_valid($currdatum) $errorCode NONE
+ error_check_good partial_put($currdatum) $res 0
+
+ # delete it
+ error_check_good dbc_del [$dbc del] 0
+
+ #puts $currdatum
+
+ incr count
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/bdb/test/test068.tcl b/bdb/test/test068.tcl
new file mode 100644
index 00000000000..587cd207890
--- /dev/null
+++ b/bdb/test/test068.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test068.tcl,v 11.11 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 68: Test of DB_BEFORE and DB_AFTER and partial puts.
+# Make sure DB_BEFORE and DB_AFTER work properly with partial puts,
+# and check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+proc test068 { method args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set tnum 68
+ set nkeys 1000
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+
+ puts "Test0$tnum:\
+ $method ($args) Test of DB_BEFORE/DB_AFTER and partial puts."
+ if { [is_record_based $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+
+ # Create a list of $nkeys words to insert into db.
+ puts "\tTest0$tnum.a: Initialize word list."
+ set wordlist {}
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nkeys } {
+ lappend wordlist $str
+ incr count
+ }
+ close $did
+
+ # Sanity check: did we get $nkeys words?
+ error_check_good enough_keys [llength $wordlist] $nkeys
+
+ # rbtree can't handle dups, so just test the non-dup case
+ # if it's the current method.
+ if { [is_rbtree $method] == 1 } {
+ set dupoptlist { "" }
+ } else {
+ set dupoptlist { "" "-dup" "-dup -dupsort" }
+ }
+
+ foreach dupopt $dupoptlist {
+ cleanup $testdir $env
+ set db [eval {berkdb_open_noerr -create -truncate -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.b ($dupopt): DB initialization: put loop."
+ foreach word $wordlist {
+ error_check_good db_put [$db put $word $word] 0
+ }
+
+ puts "\tTest0$tnum.c ($dupopt): get loop."
+ foreach word $wordlist {
+ # Make sure that the Nth word has been correctly
+ # inserted, and also that the Nth word is the
+ # Nth one we pull out of the database using a cursor.
+
+ set dbt [$db get $word]
+ error_check_good get_key [list [list $word $word]] $dbt
+ }
+
+ set dbc [$db cursor]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest0$tnum.d ($dupopt): DBC->put w/ DB_AFTER."
+
+ # Set cursor to the first key; make sure it succeeds.
+ # With an unsorted wordlist, we can't be sure that the
+ # first item returned will equal the first item in the
+ # wordlist, so we just make sure it got something back.
+ set dbt [eval {$dbc get -first}]
+ error_check_good \
+ dbc_get_first [llength $dbt] 1
+
+ # If -dup is not set, or if -dupsort is set too, we
+ # need to verify that DB_BEFORE and DB_AFTER fail
+ # and then move on to the next $dupopt.
+ if { $dupopt != "-dup" } {
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -after \
+ {-partial [list 6 0]} "after"} res]
+ error_check_good dbc_put_after_fail $ret 1
+ error_check_good dbc_put_after_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest0$tnum ($dupopt): DB_AFTER returns EINVAL."
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -before \
+ {-partial [list 6 0]} "before"} res]
+ error_check_good dbc_put_before_fail $ret 1
+ error_check_good dbc_put_before_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest0$tnum ($dupopt): DB_BEFORE returns EINVAL."
+ puts "\tTest0$tnum ($dupopt): Correct error returns,\
+ skipping further test."
+ # continue with broad foreach
+ error_check_good db_close [$db close] 0
+ continue
+ }
+
+ puts "\tTest0$tnum.e ($dupopt): DBC->put(DB_AFTER) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put after it
+ set ret [$dbc put -after -partial {4 0} after]
+ error_check_good dbc_put_after $ret 0
+ }
+
+ puts "\tTest0$tnum.f ($dupopt): DBC->put(DB_BEFORE) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put before it
+ set ret [$dbc put -before -partial {6 0} before]
+ error_check_good dbc_put_before $ret 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+
+ eval $db sync
+ puts "\tTest0$tnum.g ($dupopt): Verify correctness."
+
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # loop through the whole db beginning to end,
+ # make sure we have, in order, {$word "\0\0\0\0\0\0before"},
+ # {$word $word}, {$word "\0\0\0\0after"} for each word.
+ set count 0
+ while { $count < $nkeys } {
+ # Get the first item of each set of three.
+ # We don't know what the word is, but set $word to
+ # the key and check that the data is
+ # "\0\0\0\0\0\0before".
+ set dbt [$dbc get -next]
+ set word [lindex [lindex $dbt 0] 0]
+
+ error_check_good dbc_get_one $dbt \
+ [list [list $word "\0\0\0\0\0\0before"]]
+
+ set dbt [$dbc get -next]
+ error_check_good \
+ dbc_get_two $dbt [list [list $word $word]]
+
+ set dbt [$dbc get -next]
+ error_check_good dbc_get_three $dbt \
+ [list [list $word "\0\0\0\0after"]]
+
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
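
As a worked note on the expected values in the verification loop above: when a partial put creates a brand-new record, as the DB_BEFORE/DB_AFTER puts here do, the stored datum is doff NUL bytes followed by the supplied data. A small hypothetical illustration (variable names are not from the test):

    # How the verify loop's expected data items arise.
    set before_expected "[string repeat \0 6]before"  ;# from -partial {6 0}
    set after_expected  "[string repeat \0 4]after"   ;# from -partial {4 0}
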
diff --git a/bdb/test/test069.tcl b/bdb/test/test069.tcl
new file mode 100644
index 00000000000..f3b839de7f9
--- /dev/null
+++ b/bdb/test/test069.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test069.tcl,v 11.4 2000/02/14 03:00:21 bostic Exp $
+#
+# DB Test 69: Run DB Test 67 with a small number of dups,
+# to ensure that partial puts to DB_CURRENT work correctly in
+# the absence of duplicate pages.
+
+proc test069 { method {ndups 50} {tnum 69} args } {
+ eval test067 $method $ndups $tnum $args
+}
diff --git a/bdb/test/test070.tcl b/bdb/test/test070.tcl
new file mode 100644
index 00000000000..befec9ce1e9
--- /dev/null
+++ b/bdb/test/test070.tcl
@@ -0,0 +1,120 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test070.tcl,v 11.18 2000/12/18 20:04:47 sue Exp $
+#
+# DB Test 70: Test of DB_CONSUME.
+# Fork off nconsumers + nproducers processes (by default four
+# consumers and two producers).  The producers split nitems records
+# between them and put them into a queue; the consumers split the
+# same records between them and get them back out.
+# Then, verify that no record was lost or retrieved twice.
+proc test070 { method {nconsumers 4} {nproducers 2} \
+ {nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum 70} args } {
+ source ./include.tcl
+ global alphabet
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test0$tnum skipping for env $env"
+ return
+ }
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: $method ($args) Test of DB_$mode flag to DB->get."
+ puts "\tUsing $txn environment."
+
+ error_check_good enough_consumers [expr $nconsumers > 0] 1
+ error_check_good enough_producers [expr $nproducers > 0] 1
+
+ if { [is_queue $method] != 1 } {
+ puts "\tSkipping Test0$tnum for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+ set testfile test0$tnum.db
+
+ # Create environment
+ set dbenv [eval {berkdb env -create $txn -home } $testdir]
+ error_check_good dbenv_create [is_valid_env $dbenv] TRUE
+
+ # Create database
+ set db [eval {berkdb_open -create -mode 0644 -queue}\
+ -env $dbenv $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $start != 0 } {
+ error_check_good set_seed [$db put $start "consumer data"] 0
+ puts "\tStarting at $start."
+ } else {
+ incr start
+ }
+
+ set pidlist {}
+
+ # Divvy up the total number of records amongst the consumers and
+ # producers.
+ error_check_good cons_div_evenly [expr $nitems % $nconsumers] 0
+ error_check_good prod_div_evenly [expr $nitems % $nproducers] 0
+ set nperconsumer [expr $nitems / $nconsumers]
+ set nperproducer [expr $nitems / $nproducers]
+
+ set consumerlog $testdir/CONSUMERLOG.
+
+ # Fork consumer processes (we want them to be hungry)
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set output $consumerlog$ndx
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.consumer$ndx \
+ $testdir $testfile $mode $nperconsumer $output $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+ for { set ndx 0 } { $ndx < $nproducers } { incr ndx } {
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.producer$ndx \
+ $testdir $testfile PRODUCE $nperproducer "" $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+
+ # Wait for all children.
+ watch_procs 10
+
+ # Verify: slurp all record numbers into list, sort, and make
+ # sure each appears exactly once.
+ puts "\tTest0$tnum: Verifying results."
+ set reclist {}
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set input $consumerlog$ndx
+ set iid [open $input r]
+ while { [gets $iid str] != -1 } {
+ lappend reclist $str
+ }
+ close $iid
+ }
+ set sortreclist [lsort -integer $reclist]
+
+ set nitems [expr $start + $nitems]
+ for { set ndx $start } { $ndx < $nitems } { incr ndx } {
+ # Skip 0 if we are wrapping around
+ if { $ndx == 0 } {
+ incr ndx
+ incr nitems
+ }
+ # Be sure to convert ndx to a number before comparing.
+ error_check_good pop_num [lindex $sortreclist 0] [expr $ndx + 0]
+ set sortreclist [lreplace $sortreclist 0 0]
+ }
+ error_check_good list_ends_empty $sortreclist {}
+ error_check_good dbenv_close [$dbenv close] 0
+
+ puts "\tTest0$tnum completed successfully."
+}
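
The consumer and producer children forked above run conscript.tcl, which is not shown in this hunk. As a rough sketch of what each consumer does, assuming the Tcl wrapper exposes DB_CONSUME as a -consume flag to the db get call (an assumption here, since that flag does not appear in this diff), with $dbenv, $testfile, $nper and $output standing in for the values the parent passes down:

    # Atomically dequeue $nper records from the shared queue database
    # and log each consumed record number.
    set db [berkdb_open -env $dbenv -queue $testfile]
    set log [open $output w]
    for { set i 0 } { $i < $nper } { incr i } {
        # DB_CONSUME returns and deletes the head of the queue.
        set dbt [$db get -consume]
        puts $log [lindex [lindex $dbt 0] 0]
    }
    close $log
    error_check_good db_close [$db close] 0
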
diff --git a/bdb/test/test071.tcl b/bdb/test/test071.tcl
new file mode 100644
index 00000000000..376c902ec4d
--- /dev/null
+++ b/bdb/test/test071.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test071.tcl,v 11.6 2000/12/01 04:28:36 ubell Exp $
+#
+# DB Test 71: Test of DB_CONSUME.
+# This is DB Test 70, with one consumer, one producer, and 10000 items.
+proc test071 { method {nconsumers 1} {nproducers 1}\
+ {nitems 10000} {mode CONSUME} {start 0 } {txn -txn} {tnum 71} args } {
+
+ eval test070 $method \
+ $nconsumers $nproducers $nitems $mode $start $txn $tnum $args
+}
diff --git a/bdb/test/test072.tcl b/bdb/test/test072.tcl
new file mode 100644
index 00000000000..3ca7415a2cb
--- /dev/null
+++ b/bdb/test/test072.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test072.tcl,v 11.13 2000/12/11 17:24:55 sue Exp $
+#
+# DB Test 72: Test of cursor stability when duplicates are moved off-page.
+proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "a key"
+ set key "the key"
+ set postkey "z key"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set postdatum "0987654321"
+
+ append args " -pagesize $pagesize "
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "\n Test of cursor stability when\
+ duplicates are moved off-page."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test0$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ foreach dupopt { "-dup" "-dup -dupsort" } {
+ set db [eval {berkdb_open -create -truncate -mode 0644} \
+ $omethod $args $dupopt $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ puts \
+"\tTest0$tnum.a: ($dupopt) Set up surrounding keys and cursors."
+ error_check_good pre_put [$db put $prekey $predatum] 0
+ error_check_good post_put [$db put $postkey $postdatum] 0
+ set precursor [$db cursor]
+ error_check_good precursor [is_valid_cursor $precursor \
+ $db] TRUE
+ set postcursor [$db cursor]
+ error_check_good postcursor [is_valid_cursor $postcursor \
+ $db] TRUE
+ error_check_good preset [$precursor get -set $prekey] \
+ [list [list $prekey $predatum]]
+ error_check_good postset [$postcursor get -set $postkey] \
+ [list [list $postkey $postdatum]]
+
+ puts "\tTest0$tnum.b: Put/create cursor/verify all cursor loop."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Sync and dump the database so intermediate states
+ # can be inspected if something goes wrong.
+ error_check_good db_sync($i) [$db sync] 0
+ error_check_good db_dump($i) \
+ [catch {exec $util_path/db_dump \
+ -da $testfile > TESTDIR/out.$i}] 0
+
+ error_check_good "db put ($i)" [$db put $key $datum] 0
+
+ set dbc($i) [$db cursor]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ puts "\tTest0$tnum.c: Reverse Put/create cursor/verify all cursor loop."
+ set end [expr $ndups * 2 - 1]
+ for { set i $end } { $i > $ndups } { set i [expr $i - 1] } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Sync and dump the database so intermediate states
+ # can be inspected if something goes wrong.
+ error_check_good db_sync($i) [$db sync] 0
+ error_check_good db_dump($i) \
+ [catch {exec $util_path/db_dump \
+ -da $testfile > TESTDIR/out.$i}] 0
+
+ error_check_good "db put ($i)" [$db put $key $datum] 0
+
+ set dbc($i) [$db cursor]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j $i } { $j < $end } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ # Close cursors.
+ puts "\tTest0$tnum.d: Closing cursors."
+ for { set i 0 } { $i < $ndups } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ error_check_good "db close" [$db close] 0
+ }
+}
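
Condensed, the invariant this test exercises is: once a cursor has been positioned on a particular duplicate, a later get -current must return exactly the same key/data pair even after enough additional duplicates have been inserted to push the set onto off-page duplicate pages. A minimal hypothetical sketch, assuming $db is already open with -dup and $key/$datum are already stored as in the loop above:

    # Pin a cursor on one duplicate, add filler duplicates, and
    # confirm the pinned cursor has not moved.
    set dbc [$db cursor]
    error_check_good pin [$dbc get -get_both $key $datum] \
        [list [list $key $datum]]
    for { set n 0 } { $n < 100 } { incr n } {
        error_check_good fill($n) [$db put $key "zzfiller$n"] 0
    }
    error_check_good still_there [$dbc get -current] \
        [list [list $key $datum]]
    error_check_good dbc_close [$dbc close] 0
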
diff --git a/bdb/test/test073.tcl b/bdb/test/test073.tcl
new file mode 100644
index 00000000000..12a48b0e412
--- /dev/null
+++ b/bdb/test/test073.tcl
@@ -0,0 +1,265 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test073.tcl,v 11.17 2000/12/11 17:24:55 sue Exp $
+#
+# DB Test 73: Test of cursor stability on duplicate pages.
+# Does the following:
+# a. Initialize things by DB->putting ndups dups and
+# setting a reference cursor to point to each.
+# b. c_put ndups dups (and correspondingly expanding
+# the set of reference cursors) after the last one, making sure
+# after each step that all the reference cursors still point to
+# the right item.
+# c. Ditto, but before the first one.
+# d. Ditto, but after each one in sequence first to last.
+# e. Ditto, but putting before each one in sequence from last to first.
+# f. Ditto for the two sequence tests, only doing a
+# DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# new one.
+proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set key "the key"
+
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "cursor stability on duplicate pages."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test073: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize -dup"
+
+ set db [eval {berkdb_open \
+ -create -truncate -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys 0
+
+ puts "\tTest0$tnum.a.1: Initializing put loop; $ndups dups, short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ error_check_good "db put ($i)" [$db put $key $datum] 0
+
+ set is_long($i) 0
+ incr keys
+ }
+
+ puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
+ for { set i 0 } { $i < $keys } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set dbc($i) [$db cursor]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+ }
+
+ puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [$db cursor]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ error_check_good "cursor close" [$curs close] 0
+
+ set is_long($i) 1
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest0$tnum.g: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ error_check_good "db close" [$db close] 0
+}
+
+# !!!: This procedure is also used by test087.
+proc makedatum_t73 { num is_long } {
+ global alphabet
+ if { $is_long == 1 } {
+ set a $alphabet$alphabet$alphabet
+ } else {
+ set a abcdefghijklm
+ }
+
+ # Zero-pad the number to four digits so the data items sort correctly.
+ if { $num / 1000 > 0 } {
+ set i $num
+ } elseif { $num / 100 > 0 } {
+ set i 0$num
+ } elseif { $num / 10 > 0 } {
+ set i 00$num
+ } else {
+ set i 000$num
+ }
+
+ return $i$a
+}
+
+# !!!: This procedure is also used by test087.
+proc verify_t73 { is_long_array curs_array numkeys key } {
+ upvar $is_long_array is_long
+ upvar $curs_array dbc
+ upvar db db
+
+ #useful for debugging, perhaps.
+ eval $db sync
+
+ for { set j 0 } { $j < $numkeys } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ error_check_good\
+ "cursor $j key correctness (with $numkeys total items)"\
+ $k $key
+ error_check_good\
+ "cursor $j data correctness (with $numkeys total items)"\
+ $d [makedatum_t73 $j $is_long($j)]
+ }
+}
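
The manual zero-padding in makedatum_t73 could also be written with a zero-fill format specifier; a hypothetical equivalent helper, kept separate here so the shared original above stays as-is for test087:

    # Drop-in equivalent of makedatum_t73 using %04d for the padding.
    proc makedatum_t73_alt { num is_long } {
        global alphabet
        if { $is_long == 1 } {
            set a $alphabet$alphabet$alphabet
        } else {
            set a abcdefghijklm
        }
        # format pads with leading zeros directly.
        return [format %04d $num]$a
    }
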
diff --git a/bdb/test/test074.tcl b/bdb/test/test074.tcl
new file mode 100644
index 00000000000..ddc5f16429d
--- /dev/null
+++ b/bdb/test/test074.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test074.tcl,v 11.10 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 74: Test of DB_NEXT_NODUP.
+proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} args } {
+ source ./include.tcl
+ global alphabet
+ global rand_init
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ berkdb srand $rand_init
+
+ # Data prefix--big enough that we get a mix of on-page, off-page,
+ # and multi-off-page dups with the default nitems
+ if { [is_fixed_length $method] == 1 } {
+ set globaldata "somedata"
+ } else {
+ set globaldata [repeat $alphabet 4]
+ }
+
+ puts "Test0$tnum $omethod ($args): Test of $dir"
+
+ # First, test non-dup (and not-very-interesting) case with
+ # all db types.
+
+ puts "\tTest0$tnum.a: No duplicates."
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-nodup.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-nodup.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -truncate -mode 0644} $omethod\
+ $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Insert nitems items.
+ puts "\t\tTest0$tnum.a.1: Put loop."
+ for {set i 1} {$i <= $nitems} {incr i} {
+ #
+ # If record based, set key to $i * 2 to leave
+ # holes/unused entries for further testing.
+ #
+ if {[is_record_based $method] == 1} {
+ set key [expr $i * 2]
+ } else {
+ set key "key$i"
+ }
+ set data "$globaldata$i"
+ error_check_good put($i) [$db put $key\
+ [chop_data $method $data]] 0
+ }
+
+ puts "\t\tTest0$tnum.a.2: Get($dir)"
+
+ # foundarray($i) is set when key number i is found in the database
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Initialize foundarray($i) to zero for all $i
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set foundarray($i) 0
+ }
+
+ # Walk database using $dir and record each key gotten.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ if {[is_record_based $method] == 1} {
+ set num [expr $key / 2]
+ set desired_key $key
+ error_check_good $method:num $key [expr $num * 2]
+ } else {
+ set num [string range $key 3 end]
+ set desired_key key$num
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ [pad_data $method $globaldata$num]]]
+
+ set foundarray($num) 1
+ }
+
+ puts "\t\tTest0$tnum.a.3: Final key."
+ error_check_good last_db_get [$dbc get $dir] [list]
+
+ puts "\t\tTest0$tnum.a.4: Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close(nodup) [$dbc close] 0
+
+ # If we are a method that doesn't allow dups, verify that
+ # we get an empty list if we try to use DB_NEXT_DUP
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\t\tTest0$tnum.a.5: Check DB_NEXT_DUP for $method."
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set dbt [$dbc get $dir]
+ error_check_good $method:nextdup [$dbc get -nextdup] [list]
+ error_check_good dbc_close(nextdup) [$dbc close] 0
+ }
+ error_check_good db_close(nodup) [$db close] 0
+
+ # Quit here if we're a method that won't allow dups.
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: Skipping remainder for method $method."
+ return
+ }
+
+ foreach opt { "-dup" "-dupsort" } {
+
+ #
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum$opt.db
+ } else {
+ set testfile test0$tnum$opt.db
+ }
+
+ if { [string compare $opt "-dupsort"] == 0 } {
+ set opt "-dup -dupsort"
+ }
+
+ puts "\tTest0$tnum.b: Duplicates ($opt)."
+
+ puts "\t\tTest0$tnum.b.1 ($opt): Put loop."
+ set db [eval {berkdb_open -create -truncate -mode 0644}\
+ $opt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Insert nitems different keys such that key i has i dups.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set key key$i
+
+ for {set j 1} {$j <= $i} {incr j} {
+ if { $j < 10 } {
+ set data "${globaldata}00$j"
+ } elseif { $j < 100 } {
+ set data "${globaldata}0$j"
+ } else {
+ set data "$globaldata$j"
+ }
+
+ error_check_good put($i,$j) \
+ [$db put $key $data] 0
+ }
+ }
+
+ # Initialize foundarray($i) to 0 for all i.
+ unset foundarray
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set foundarray($i) 0
+ }
+
+ # Get loop--after each get, move forward a random increment
+ # within the duplicate set.
+ puts "\t\tTest0$tnum.b.2 ($opt): Get loop."
+ set one "001"
+ set dbc [$db cursor]
+ error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ set num [string range $key 3 end]
+
+ set desired_key key$num
+ if { [string compare $dir "-prevnodup"] == 0 } {
+ if { $num < 10 } {
+ set one "00$num"
+ } elseif { $num < 100 } {
+ set one "0$num"
+ } else {
+ set one $num
+ }
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ "$globaldata$one"]]
+
+ set foundarray($num) 1
+
+ # Go forward by some number w/i dup set.
+ set inc [berkdb random_int 0 [expr $num - 1]]
+ for { set j 0 } { $j < $inc } { incr j } {
+ eval {$dbc get -nextdup}
+ }
+ }
+
+ puts "\t\tTest0$tnum.b.3 ($opt): Final key."
+ error_check_good last_db_get($opt) [$dbc get $dir] [list]
+
+ # Verify
+ puts "\t\tTest0$tnum.b.4 ($opt): Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
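
At its core, this test exercises the pattern "visit each distinct key once, skipping any duplicates" via DB_NEXT_NODUP (or DB_PREV_NODUP when invoked from test082). A minimal hypothetical sketch over an already-open handle $db:

    # Walk only the distinct keys of a (possibly duplicated) database.
    set dbc [$db cursor]
    for { set dbt [$dbc get -nextnodup] } { [llength $dbt] > 0 } \
        { set dbt [$dbc get -nextnodup] } {
        puts "distinct key: [lindex [lindex $dbt 0] 0]"
    }
    error_check_good dbc_close [$dbc close] 0
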
diff --git a/bdb/test/test075.tcl b/bdb/test/test075.tcl
new file mode 100644
index 00000000000..2aa0e1e2501
--- /dev/null
+++ b/bdb/test/test075.tcl
@@ -0,0 +1,195 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test075.tcl,v 11.9 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 75 (replacement)
+# Test the DB->rename method.
+proc test075 { method { tnum 75 } args } {
+ global errorCode
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: $method ($args): Test of DB->rename()"
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set oldfile $testdir/test0$tnum-old.db
+ set newfile $testdir/test0$tnum.db
+ set env NULL
+ set renargs ""
+ } else {
+ set oldfile test0$tnum-old.db
+ set newfile test0$tnum.db
+ # File existence checks won't work in an env, since $oldfile
+ # and $newfile won't be in the current working directory.
+ # We use this to skip them, and turn our secondary check
+ # (opening the dbs and seeing that all is well) into the main
+ # one.
+ incr eindex
+ set env [lindex $args $eindex]
+ set renargs " -env $env"
+ }
+
+ # Make sure we're starting from a clean slate.
+ cleanup $testdir $env
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad "$newfile exists" [file exists $newfile] 1
+ }
+
+ puts "\tTest0$tnum.a: Create/rename file"
+ puts "\t\tTest0$tnum.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644} $omethod $args $oldfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad "$newfile exists" [file exists $newfile] 1
+ }
+
+ # The nature of the key and data are unimportant; use numeric key
+ # so record-based methods don't need special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tTest0$tnum.a.2: rename"
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad "$newfile exists" [file exists $newfile] 1
+ }
+ error_check_good rename_file [eval {berkdb dbrename}\
+ $renargs $oldfile $newfile] 0
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad "$newfile exists" [file exists $newfile] 0
+ }
+
+ puts "\t\tTest0$tnum.a.3: check"
+ # Open again with create to make sure we're not caching or anything
+ # silly. In the normal case (no env), we already know the file doesn't
+ # exist.
+ set odb [eval {berkdb_open -create -mode 0644} $omethod $args $oldfile]
+ set ndb [eval {berkdb_open -create -mode 0644} $omethod $args $newfile]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+
+ set odbt [$odb get $key]
+ set ndbt [$ndb get $key]
+
+ # The DBT from the "old" database should be empty; the one from
+ # the "new" database should not be.
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+
+ error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
+
+ error_check_good odb_close [$odb close] 0
+ error_check_good ndb_close [$ndb close] 0
+
+ if { $env != "NULL" } {
+ puts "\tTest0$tnum: External environment present; \
+ skipping remainder"
+ return
+ }
+
+ # Now there's both an old and a new. Rename the "new" to the "old"
+ # and make sure that fails.
+ #
+ # XXX Ideally we'd do this test even when there's an external
+ # environment, but that env has errpfx/errfile set now. :-(
+ puts "\tTest0$tnum.b: Make sure rename fails instead of overwriting"
+ set ret [catch {eval {berkdb dbrename} $renargs $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
+
+ # Verify and then start over from a clean slate.
+ verify_dir $testdir "\tTest0$tnum.c: "
+ cleanup $testdir $env
+ error_check_bad "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad "$newfile exists" [file exists $newfile] 1
+
+ set oldfile test0$tnum-old.db
+ set newfile test0$tnum.db
+
+ puts "\tTest0$tnum.d: Create/rename file in environment"
+
+ set env [berkdb env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_bad "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad "$newfile exists" [file exists $newfile] 1
+
+ puts "\t\tTest0$tnum.d.1: create"
+ set db [eval {berkdb_open -create -mode 0644} -env $env\
+ $omethod $args $oldfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # We need to make sure that it didn't create/rename into the
+ # current directory.
+ error_check_bad "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad "$newfile exists" [file exists $newfile] 1
+ error_check_bad "$testdir/$oldfile exists"\
+ [file exists $testdir/$oldfile] 0
+ error_check_bad "$testdir/$newfile exists"\
+ [file exists $testdir/$newfile] 1
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tTest0$tnum.d.2: rename"
+
+ error_check_good rename_file [berkdb dbrename -env $env\
+ $oldfile $newfile] 0
+ error_check_bad "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad "$newfile exists" [file exists $newfile] 1
+ error_check_bad "$testdir/$oldfile exists"\
+ [file exists $testdir/$oldfile] 1
+ error_check_bad "$testdir/$newfile exists"\
+ [file exists $testdir/$newfile] 0
+
+ puts "\t\tTest0$tnum.d.3: check"
+ # Open again with create to make sure we're not caching or anything
+ # silly.
+ set odb [eval {berkdb_open -create -mode 0644} -env $env\
+ $omethod $args $oldfile]
+ set ndb [eval {berkdb_open -create -mode 0644} -env $env\
+ $omethod $args $newfile]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+
+ set odbt [$odb get $key]
+ set ndbt [$ndb get $key]
+
+ # The DBT from the "old" database should be empty; the one from
+ # the "new" database should not be.
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+
+ error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
+
+ error_check_good odb_close [$odb close] 0
+ error_check_good ndb_close [$ndb close] 0
+
+ # XXX
+ # We need to close and reopen the env since berkdb_open has
+ # set its errfile/errpfx, and we can't unset that.
+ error_check_good env_close [$env close] 0
+ set env [berkdb env -home $testdir]
+ error_check_good env_open2 [is_valid_env $env] TRUE
+
+ puts "\tTest0$tnum.e:\
+ Make sure rename fails instead of overwriting in env"
+ set ret [catch {eval {berkdb dbrename} -env $env $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
+
+ error_check_good env_close [$env close] 0
+
+ puts "\tTest0$tnum succeeded."
+}
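
In isolation, the operation under test is the dbrename command; a minimal hypothetical sketch (paths and names are illustrative):

    # Rename a database file outside any environment...
    set db [berkdb_open -create -btree /tmp/old.db]
    error_check_good seed [$db put 1 somedata] 0
    error_check_good close [$db close] 0
    error_check_good rename [berkdb dbrename /tmp/old.db /tmp/new.db] 0
    # ...or inside one, where names are relative to the env home:
    # berkdb dbrename -env $dbenv old.db new.db
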
diff --git a/bdb/test/test076.tcl b/bdb/test/test076.tcl
new file mode 100644
index 00000000000..13a919011e4
--- /dev/null
+++ b/bdb/test/test076.tcl
@@ -0,0 +1,59 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test076.tcl,v 1.7 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 76: Test creation of many small databases in an env
+proc test076 { method { ndbs 1000 } { tnum 76 } args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ } else {
+ set key "key"
+ }
+ set data "datamoredatamoredata"
+
+ puts -nonewline "Test0$tnum $method ($args): "
+ puts -nonewline "Create $ndbs"
+ puts " small databases in one env."
+
+ # Create an env if we weren't passed one.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set deleteenv 1
+ set env [eval {berkdb env -create -home} $testdir \
+ {-cachesize {0 102400 1}}]
+ error_check_good env [is_valid_env $env] TRUE
+ set args "$args -env $env"
+ } else {
+ set deleteenv 0
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ for { set i 1 } { $i <= $ndbs } { incr i } {
+ set testfile test0$tnum.$i.db
+
+ set db [eval {berkdb_open -create -truncate -mode 0644}\
+ $args $omethod $testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ error_check_good db_put($i) [$db put $key$i \
+ [chop_data $method $data$i]] 0
+ error_check_good db_close($i) [$db close] 0
+ }
+
+ if { $deleteenv == 1 } {
+ error_check_good env_close [$env close] 0
+ }
+
+ puts "\tTest0$tnum passed."
+}
diff --git a/bdb/test/test077.tcl b/bdb/test/test077.tcl
new file mode 100644
index 00000000000..47248a309b8
--- /dev/null
+++ b/bdb/test/test077.tcl
@@ -0,0 +1,68 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test077.tcl,v 1.4 2000/08/25 14:21:58 sue Exp $
+#
+# DB Test 77: Test of DB_GET_RECNO [#1206].
+proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: Test of DB_GET_RECNO."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest0$tnum: Skipping for method $method."
+ return
+ }
+
+ set data $alphabet
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -truncate -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.a: Populating database."
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ set key [format %5d $i]
+ error_check_good db_put($key) [$db put $key $data] 0
+ }
+
+ puts "\tTest0$tnum.b: Verifying record numbers."
+
+ set dbc [$db cursor]
+ error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE
+
+ set i 1
+ for { set dbt [$dbc get -first] } \
+ { [string length $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set recno [$dbc get -get_recno]
+ set keynum [expr [lindex [lindex $dbt 0] 0]]
+
+ # Verify that i, the number that is the key, and recno
+ # are all equal.
+ error_check_good key($i) $keynum $i
+ error_check_good recno($i) $recno $i
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
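
DB_GET_RECNO is only meaningful on a btree maintained with record numbers, which is what the is_rbtree check above guards. A minimal hypothetical sketch, assuming the Tcl wrapper exposes DB_RECNUM as a -recnum open flag (an assumption; that flag is not shown in this diff):

    # Record numbers on a record-numbered btree are 1-based and
    # follow key order.
    set db [berkdb_open -create -btree -recnum /tmp/recno.db]
    foreach k {alpha beta gamma} {
        error_check_good put($k) [$db put $k data] 0
    }
    set dbc [$db cursor]
    error_check_good set [$dbc get -set beta] [list [list beta data]]
    puts "record number of beta: [$dbc get -get_recno]"  ;# expected: 2
    error_check_good dbc_close [$dbc close] 0
    error_check_good db_close [$db close] 0
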
diff --git a/bdb/test/test078.tcl b/bdb/test/test078.tcl
new file mode 100644
index 00000000000..9642096faf9
--- /dev/null
+++ b/bdb/test/test078.tcl
@@ -0,0 +1,90 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test078.tcl,v 1.9 2000/12/11 17:24:55 sue Exp $
+#
+# DB Test 78: Test of DBC->c_count(). [#303]
+proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
+ source ./include.tcl
+ global alphabet rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: Test of key counts."
+
+ berkdb srand $rand_init
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest0$tnum.a: No duplicates, trivial answer."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test078: skipping for specific pagesizes"
+ return
+ }
+
+ set db [eval {berkdb_open -create -truncate -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ error_check_good put.a($i) [$db put $i\
+ [pad_data $method $alphabet$i]] 0
+ error_check_good count.a [$db count $i] 1
+ }
+ error_check_good db_close.a [$db close] 0
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts \
+ "\tTest0$tnum.b: Duplicates not supported in $method, skipping."
+ return
+ }
+
+ foreach tuple {{b sorted "-dup -dupsort"} {c unsorted "-dup"}} {
+ set letter [lindex $tuple 0]
+ set dupopt [lindex $tuple 2]
+
+ puts "\tTest0$tnum.$letter: Duplicates ([lindex $tuple 1])."
+
+ puts "\t\tTest0$tnum.$letter.1: Populating database."
+
+ set db [eval {berkdb_open -create -truncate -mode 0644\
+ -pagesize $pagesize} $dupopt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ for { set j 0 } { $j < $i } { incr j } {
+ error_check_good put.$letter,$i [$db put $i\
+ [pad_data $method $j$alphabet]] 0
+ }
+ }
+
+ puts -nonewline "\t\tTest0$tnum.$letter.2: "
+ puts "Verifying dup counts on first dup."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ error_check_good count.$letter,$i \
+ [$db count $i] $i
+ }
+
+ puts -nonewline "\t\tTest0$tnum.$letter.3: "
+ puts "Verifying dup counts on random dup."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ set key [berkdb random_int 1 $nkeys]
+ error_check_good count.$letter,$i \
+ [$db count $key] $key
+ }
+ error_check_good db_close.$letter [$db close] 0
+ }
+}
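
The method being exercised is DB->count (DBC->c_count underneath), exposed through the Tcl wrapper as "$db count"; a minimal hypothetical sketch:

    # count returns the number of data items stored under one key.
    set db [berkdb_open -create -btree -dup -dupsort /tmp/count.db]
    foreach d {x y z} {
        error_check_good put($d) [$db put samekey $d] 0
    }
    error_check_good count [$db count samekey] 3
    error_check_good db_close [$db close] 0
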
diff --git a/bdb/test/test079.tcl b/bdb/test/test079.tcl
new file mode 100644
index 00000000000..fe7b978a3dd
--- /dev/null
+++ b/bdb/test/test079.tcl
@@ -0,0 +1,18 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test079.tcl,v 11.5 2000/11/16 23:56:18 ubell Exp $
+#
+# DB Test 79 {access method}
+# Check that delete operations work in large btrees. 10000 entries and
+# a pagesize of 512 push this out to a four-level btree, with a small fraction
+# of the entries going on overflow pages.
+proc test079 { method {nentries 10000} {pagesize 512} {tnum 79} args} {
+ if { [ is_queueext $method ] == 1 } {
+ set method "queue";
+ lappend args "-extent" "20"
+ }
+ eval {test006 $method $nentries 1 $tnum -pagesize $pagesize} $args
+}
diff --git a/bdb/test/test080.tcl b/bdb/test/test080.tcl
new file mode 100644
index 00000000000..02a6a7242cd
--- /dev/null
+++ b/bdb/test/test080.tcl
@@ -0,0 +1,41 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test080.tcl,v 11.7 2000/10/19 23:15:22 ubell Exp $
+#
+# DB Test 80 {access method}
+# Test of dbremove
+proc test080 { method {tnum 80} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: Test of DB->remove()"
+
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ puts "\tTest0$tnum: Skipping in the presence of an environment"
+ return
+ }
+ cleanup $testdir NULL
+
+ set testfile $testdir/test0$tnum.db
+ set db [eval {berkdb_open -create -truncate -mode 0644} $omethod \
+ $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ for {set i 1} { $i < 1000 } {incr i} {
+ $db put $i $i
+ }
+ error_check_good db_close [$db close] 0
+
+ error_check_good file_exists_before [file exists $testfile] 1
+
+ error_check_good db_remove [berkdb dbremove $testfile] 0
+ error_check_good file_exists_after [file exists $testfile] 0
+
+ puts "\tTest0$tnum succeeded."
+}
diff --git a/bdb/test/test081.tcl b/bdb/test/test081.tcl
new file mode 100644
index 00000000000..44e708c5d49
--- /dev/null
+++ b/bdb/test/test081.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test081.tcl,v 11.3 2000/03/01 15:13:59 krinsky Exp $
+#
+# Test 81.
+# Test off-page duplicates and overflow pages together with
+# very large keys (key/data as file contents).
+#
+proc test081 { method {ndups 13} {tnum 81} args} {
+ source ./include.tcl
+
+ eval {test017 $method 1 $ndups $tnum} $args
+}
diff --git a/bdb/test/test082.tcl b/bdb/test/test082.tcl
new file mode 100644
index 00000000000..e8bd4f975dd
--- /dev/null
+++ b/bdb/test/test082.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test082.tcl,v 11.1 2000/04/30 05:05:26 krinsky Exp $
+#
+# Test 82.
+# Test of DB_PREV_NODUP
+proc test082 { method {dir -prevnodup} {pagesize 512} {nitems 100}\
+ {tnum 82} args} {
+ source ./include.tcl
+
+ eval {test074 $method $dir $pagesize $nitems $tnum} $args
+}
diff --git a/bdb/test/test083.tcl b/bdb/test/test083.tcl
new file mode 100644
index 00000000000..7565a5a74f5
--- /dev/null
+++ b/bdb/test/test083.tcl
@@ -0,0 +1,136 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test083.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
+#
+# Test 83.
+# Test of DB->key_range
+proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test083 $method ($args): Test of DB->key_range"
+ if { [is_btree $method] != 1 } {
+ puts "\tTest083: Skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test083: skipping for specific pagesizes"
+ return
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test083.db
+ set env NULL
+ } else {
+ set testfile test083.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+
+ # We assume that numbers will be at most six digits wide
+ error_check_bad maxitems_range [expr $maxitems > 999999] 1
+
+ # We want to test key_range on a variety of sizes of btree.
+ # Start at ten keys and work up to $maxitems keys, at each step
+ # multiplying the number of keys by $step.
+ for { set nitems 10 } { $nitems <= $maxitems }\
+ { set nitems [expr $nitems * $step] } {
+
+ puts "\tTest083.a: Opening new database"
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -truncate -mode 0644} \
+ -pagesize $pgsz $omethod $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ t83_build $db $nitems
+ t83_test $db $nitems
+
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc t83_build { db nitems } {
+ source ./include.tcl
+
+ puts "\tTest083.b: Populating database with $nitems keys"
+
+ set keylist {}
+ puts "\t\tTest083.b.1: Generating key list"
+ for { set i 0 } { $i < $nitems } { incr i } {
+ lappend keylist $i
+ }
+
+ # With randomly ordered insertions, the range of errors we
+ # get from key_range can be unpredictably high [#2134]. For now,
+ # just skip the randomization step.
+ #puts "\t\tTest083.b.2: Randomizing key list"
+ #set keylist [randomize_list $keylist]
+
+ #puts "\t\tTest083.b.3: Populating database with randomized keys"
+
+ puts "\t\tTest083.b.2: Populating database"
+ set data [repeat . 50]
+
+ foreach keynum $keylist {
+ error_check_good db_put [$db put key[format %6d $keynum] \
+ $data] 0
+ }
+}
+
+proc t83_test { db nitems } {
+ # Look at the first key, then at keys about 1/4, 1/2, 3/4, and
+ # all the way through the database. Make sure the key_ranges
+ # aren't off by more than 10%.
+
+ set dbc [$db cursor]
+ error_check_good dbc [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest083.c: Verifying ranges..."
+
+ for { set i 0 } { $i < $nitems } \
+ { incr i [expr $nitems / [berkdb random_int 3 16]] } {
+ puts "\t\t...key $i"
+ error_check_bad key0 [llength [set dbt [$dbc get -first]]] 0
+
+ for { set j 0 } { $j < $i } { incr j } {
+ error_check_bad key$j \
+ [llength [set dbt [$dbc get -next]]] 0
+ }
+
+ set ranges [$db keyrange [lindex [lindex $dbt 0] 0]]
+
+ #puts $ranges
+ error_check_good howmanyranges [llength $ranges] 3
+
+ set lessthan [lindex $ranges 0]
+ set morethan [lindex $ranges 2]
+
+ set rangesum [expr $lessthan + [lindex $ranges 1] + $morethan]
+
+ roughly_equal $rangesum 1 0.05
+
+ # Wild guess.
+ if { $nitems < 500 } {
+ set tol 0.3
+ } else {
+ set tol 0.15
+ }
+
+ roughly_equal $lessthan [expr $i * 1.0 / $nitems] $tol
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+}
+
+proc roughly_equal { a b tol } {
+ error_check_good "$a =~ $b" [expr {abs($a - $b) < $tol}] 1
+}
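
As a worked note on what t83_test checks: "$db keyrange" returns three estimated proportions -- the fraction of keys less than, equal to, and greater than the given key -- which should sum to roughly 1, and the "less than" share should track the key's position in the tree. Hypothetical numbers for the 25th of 100 keys, assuming $db is the handle t83_test receives:

    # keyrange might return something close to {0.24 0.01 0.75}:
    # ~24% of keys sort before it, ~1% are equal (the key itself),
    # ~75% sort after it, and 0.24 + 0.01 + 0.75 = 1.0 within tolerance.
    set ranges [$db keyrange key[format %6d 25]]
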
diff --git a/bdb/test/test084.tcl b/bdb/test/test084.tcl
new file mode 100644
index 00000000000..0efd0d17c00
--- /dev/null
+++ b/bdb/test/test084.tcl
@@ -0,0 +1,48 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test084.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
+#
+# Test 84.
+# Basic sanity test (test001) with large (64K) pages.
+#
+proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
+ source ./include.tcl
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-empty.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-empty.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test084: skipping for specific pagesizes"
+ return
+ }
+
+ cleanup $testdir $env
+
+ set args "-pagesize $pagesize $args"
+
+ eval {test001 $method $nentries 0 $tnum} $args
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ # For good measure, create a second database that's empty
+ # with the large page size. (There was a verifier bug that
+ # choked on empty 64K pages. [#2408])
+ set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile]
+ error_check_good empty_db [is_valid_db $db] TRUE
+ error_check_good empty_db_close [$db close] 0
+}
diff --git a/bdb/test/test085.tcl b/bdb/test/test085.tcl
new file mode 100644
index 00000000000..09134a00f65
--- /dev/null
+++ b/bdb/test/test085.tcl
@@ -0,0 +1,274 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test085.tcl,v 1.4 2000/12/11 17:24:55 sue Exp $
+#
+# DB Test 85: Test of cursor behavior when a cursor is pointing to a deleted
+# btree key which then has duplicates added.
+proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test085: skipping for specific pagesizes"
+ return
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "AA"
+ set key "BBB"
+ set postkey "CCCC"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set datum $alphabet
+ set postdatum "0987654321"
+
+ append args " -pagesize $pagesize -dup"
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+
+ # Skip for all non-btrees. (Rbtrees don't count as btrees, for
+ # now, since they don't support dups.)
+ if { [is_btree $method] != 1 } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Duplicates w/ deleted item cursor."
+ }
+
+ # Repeat the test with both on-page and off-page numbers of dups.
+ foreach ndups "$onp $offp" {
+ # Put operations we want to test on a cursor set to the
+ # deleted item, the key to use with them, and what should
+ # come before and after them given a placement of
+ # the deleted item at the beginning or end of the dupset.
+ set final [expr $ndups - 1]
+ set putops {
+ {{-before} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-before} "" {[test085_ddatum $final]} $postdatum end}
+ {{-current} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-current} "" {[test085_ddatum $final]} $postdatum end}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} beginning}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} end}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum beginning}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum end}
+ {{-after} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-after} "" {[test085_ddatum $final]} $postdatum end}
+ }
+
+ # Get operations we want to test on a cursor set to the
+ # deleted item, any args to get, and the expected key/data pair.
+ set getops {
+ {{-current} "" "" "" beginning}
+ {{-current} "" "" "" end}
+ {{-next} "" $key {[test085_ddatum 0]} beginning}
+ {{-next} "" $postkey $postdatum end}
+ {{-prev} "" $prekey $predatum beginning}
+ {{-prev} "" $key {[test085_ddatum $final]} end}
+ {{-first} "" $prekey $predatum beginning}
+ {{-first} "" $prekey $predatum end}
+ {{-last} "" $postkey $postdatum beginning}
+ {{-last} "" $postkey $postdatum end}
+ {{-nextdup} "" $key {[test085_ddatum 0]} beginning}
+ {{-nextdup} "" EMPTYLIST "" end}
+ {{-nextnodup} "" $postkey $postdatum beginning}
+ {{-nextnodup} "" $postkey $postdatum end}
+ {{-prevnodup} "" $prekey $predatum beginning}
+ {{-prevnodup} "" $prekey $predatum end}
+ }
+
+ foreach pair $getops {
+ set op [lindex $pair 0]
+ puts "\tTest0$tnum: Get ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -truncate -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ set dbc [test085_setup $db]
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [$db put $key [test085_ddatum $i]] 0
+ } else {
+ set c [$db cursor]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ set gargs [lindex $pair 1]
+ set ekey ""
+ set edata ""
+ eval set ekey [lindex $pair 2]
+ eval set edata [lindex $pair 3]
+
+ set dbt [eval $dbc get $op $gargs]
+ if { [string compare $ekey EMPTYLIST] == 0 } {
+ error_check_good dbt($op,$ndups) \
+ [llength $dbt] 0
+ } else {
+ error_check_good dbt($op,$ndups) $dbt \
+ [list [list $ekey $edata]]
+ }
+ error_check_good "dbc close" [$dbc close] 0
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t"
+ }
+
+ foreach pair $putops {
+ # Open and set up database.
+ set op [lindex $pair 0]
+ puts "\tTest0$tnum: Put ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -truncate -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ set dbc [test085_setup $db]
+
+ # Put duplicates.
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [$db put $key [test085_ddatum $i]] 0
+ } else {
+ set c [$db cursor]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ # Set up cursors for stability test.
+ set pre_dbc [$db cursor]
+ error_check_good pre_set [$pre_dbc get -set $prekey] \
+ [list [list $prekey $predatum]]
+ set post_dbc [$db cursor]
+ error_check_good post_set [$post_dbc get -set $postkey]\
+ [list [list $postkey $postdatum]]
+ set first_dbc [$db cursor]
+ error_check_good first_set \
+ [$first_dbc get -get_both $key [test085_ddatum 0]] \
+ [list [list $key [test085_ddatum 0]]]
+ set last_dbc [$db cursor]
+ error_check_good last_set \
+ [$last_dbc get -get_both $key [test085_ddatum \
+ [expr $ndups - 1]]] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ set k [lindex $pair 1]
+ set d_before ""
+ set d_after ""
+ eval set d_before [lindex $pair 2]
+ eval set d_after [lindex $pair 3]
+ set newdatum "NewDatum"
+ error_check_good dbc_put($op,$ndups) \
+ [eval $dbc put $op $k $newdatum] 0
+ error_check_good dbc_prev($op,$ndups) \
+ [lindex [lindex [$dbc get -prev] 0] 1] \
+ $d_before
+ error_check_good dbc_current($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $newdatum
+
+ error_check_good dbc_next($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $d_after
+
+ # Verify stability of pre- and post- cursors.
+ error_check_good pre_stable [$pre_dbc get -current] \
+ [list [list $prekey $predatum]]
+ error_check_good post_stable [$post_dbc get -current] \
+ [list [list $postkey $postdatum]]
+ error_check_good first_stable \
+ [$first_dbc get -current] \
+ [list [list $key [test085_ddatum 0]]]
+ error_check_good last_stable \
+ [$last_dbc get -current] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+
+ foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" {
+ error_check_good ${c}_close [$c close] 0
+ }
+
+ error_check_good "dbc close" [$dbc close] 0
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t"
+ }
+ }
+}
+
+
+# Set up the test database; put $prekey, $key, and $postkey with their
+# respective data, and then delete $key with a new cursor. Return that
+# cursor, still pointing to the deleted item.
+proc test085_setup { db } {
+ upvar key key
+ upvar prekey prekey
+ upvar postkey postkey
+ upvar predatum predatum
+ upvar postdatum postdatum
+
+ # no one else should ever see this one!
+ set datum "bbbbbbbb"
+
+ error_check_good pre_put [$db put $prekey $predatum] 0
+ error_check_good main_put [$db put $key $datum] 0
+ error_check_good post_put [$db put $postkey $postdatum] 0
+
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good dbc_getset [$dbc get -get_both $key $datum] \
+ [list [list $key $datum]]
+
+ error_check_good dbc_del [$dbc del] 0
+
+ return $dbc
+}
+
+proc test085_ddatum { a } {
+ global alphabet
+ return $a$alphabet
+}
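+
+# A minimal invocation sketch (an assumption about the usual workflow,
+# not part of the test itself): with the harness sourced from a build
+# directory so include.tcl and the helper procs are available, the
+# defaults exercise 3 on-page and 10 off-page duplicates:
+#
+#	% source ./test.tcl
+#	% test085 btree
+#	% test085 btree 512 3 10 85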
diff --git a/bdb/test/test086.tcl b/bdb/test/test086.tcl
new file mode 100644
index 00000000000..dc30de8ec37
--- /dev/null
+++ b/bdb/test/test086.tcl
@@ -0,0 +1,162 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test086.tcl,v 11.2 2000/08/25 14:21:58 sue Exp $
+
+# Test086: Cursor stability across btree splits w/ subtransaction abort [#2373].
+proc test086 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set tstn 086
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across aborted\
+ btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then this test won't work.
+ if { $eindex == -1 } {
+ # But we will be using our own env...
+ set testfile test0$tstn.db
+ } else {
+ puts "\tTest$tstn: Environment provided; skipping test."
+ return
+ }
+ set t1 $testdir/t1
+ env_cleanup $testdir
+
+ set env [berkdb env -create -home $testdir -txn]
+ error_check_good berkdb_env [is_valid_env $env] TRUE
+
+ puts "\tTest$tstn.a: Create $method database."
+ set oflags "-create -env $env -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ set ret [$db put -txn $txn key000$i $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good commit [$txn commit] 0
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for {set i 0; set ret [$db get -txn $txn key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get -txn $txn key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [$db cursor -txn $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # Create child txn.
+ set ctxn [$env txn -parent $txn]
+	error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
+ puts "\tTest$tstn.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $ctxn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $ctxn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $ctxn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+
+ puts "\tTest$tstn.f: Check and see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ # Put (and this time keep) the keys that caused the split.
+ # We'll delete them to test reverse splits.
+ puts "\tTest$tstn.g: Put back added keys."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $txn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $txn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $txn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.h: Delete added keys to force reverse split."
+ set ctxn [$env txn -parent $txn]
+	error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i [$db del -txn $ctxn key0$i] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key00$i] 0
+ } else {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key000$i] 0
+ }
+ }
+
+ puts "\tTest$tstn.i: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tstn.j: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+	puts "\tTest$tstn.k: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+
+ error_check_good commit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/bdb/test/test087.tcl b/bdb/test/test087.tcl
new file mode 100644
index 00000000000..7096e6c1cb9
--- /dev/null
+++ b/bdb/test/test087.tcl
@@ -0,0 +1,278 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test087.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
+#
+# DB Test 87: Test of cursor stability on duplicate pages w/aborts.
+# Does the following:
+# a. Initialize things by DB->putting ndups dups and
+# setting a reference cursor to point to each.
+# b. c_put ndups dups (and correspondingly expanding
+# the set of reference cursors) after the last one, making sure
+# after each step that all the reference cursors still point to
+# the right item.
+# c. Ditto, but before the first one.
+# d. Ditto, but after each one in sequence first to last.
+#    e. Ditto, but after each one in sequence from last to first
+#       (in each case checking where the reference cursors now
+#       occur relative to the new datum).
+# f. Ditto for the two sequence tests, only doing a
+# DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# new one.
+proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum $omethod ($args): "
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then return
+ if { $eindex != -1 } {
+ puts "Environment specified; skipping."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test087: skipping for specific pagesizes"
+ return
+ }
+ env_cleanup $testdir
+ set testfile test0$tnum.db
+ set key "the key"
+ append args " -pagesize $pagesize -dup"
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Cursor stability on dup. pages w/ aborts."
+ }
+
+ set env [berkdb env -create -home $testdir -txn]
+ error_check_good env_create [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env \
+ -create -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys 0
+
+ puts "\tTest0$tnum.a.1: Initializing put loop; $ndups dups, short data."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ error_check_good "db put ($i)" [$db put -txn $txn $key $datum] 0
+
+ set is_long($i) 0
+ incr keys
+ }
+ error_check_good txn_commit [$txn commit] 0
+
+ puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $keys } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+ }
+
+ puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ # We can't do a verification while a child txn is active,
+ # or we'll run into trouble when DEBUG_ROP is enabled.
+ # If this test has trouble, though, uncommenting this
+ # might be illuminating--it makes things a bit more rigorous
+ # and works fine when DEBUG_ROP is not enabled.
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+	# verify_t73 is_long dbc $keys $key
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ set is_long($i) 1
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set is_long($i) 0
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Now delete the first item, abort the deletion, and make sure
+ # we're still sane.
+ puts "\tTest0$tnum.g: Cursor delete first item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for the last item.
+ puts "\tTest0$tnum.h: Cursor delete last item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 [expr $keys - 1] 0]
+ error_check_good "c_get(DB_GET_BOTH, [expr $keys - 1])"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for all the items.
+ puts "\tTest0$tnum.i: Cursor delete all items, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ for { set i 1 } { $i < $keys } { incr i } {
+ error_check_good "c_get(DB_NEXT, $i)"\
+ [$curs get -next] [list [list $key [makedatum_t73 $i 0]]]
+ error_check_good "c_del($i)" [$curs del] 0
+ }
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest0$tnum.j: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good "db close" [$db close] 0
+ error_check_good "env close" [$env close] 0
+}
diff --git a/bdb/test/test088.tcl b/bdb/test/test088.tcl
new file mode 100644
index 00000000000..d7b0f815a00
--- /dev/null
+++ b/bdb/test/test088.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test088.tcl,v 11.4 2000/12/11 17:24:55 sue Exp $
+#
+# Test088: Cursor stability across btree splits with very deep trees.
+# (Variant of test048, SR #2514.)
+proc test088 { method args } {
+ global errorCode alphabet
+ source ./include.tcl
+
+ set tstn 088
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test088: skipping for specific pagesizes"
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+ set key "key$alphabet$alphabet$alphabet"
+ set data "data$alphabet$alphabet$alphabet"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tstn.db
+ set env NULL
+ } else {
+ set testfile test$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set ps 512
+ set oflags "-create -pagesize $ps -truncate -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ key/data pairs.
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ set ret [$db put ${key}00000$i $data$i]
+ error_check_good dbput $ret 0
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ for {set i 0; set ret [$db get ${key}00000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get ${key}00000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [$db cursor]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 30000
+ puts "\tTest$tstn.d: Add $mkeys pairs to force splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [$db put ${key}0$i $data$i]
+ } elseif { $i >= 1000 } {
+ set ret [$db put ${key}00$i $data$i]
+ } elseif { $i >= 100 } {
+ set ret [$db put ${key}000$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put ${key}0000$i $data$i]
+ } else {
+ set ret [$db put ${key}00000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Make sure splits happened."
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+
+ puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.g: Delete added keys to force reverse splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ error_check_good db_del:$i [$db del ${key}0$i] 0
+ } elseif { $i >= 1000 } {
+ error_check_good db_del:$i [$db del ${key}00$i] 0
+ } elseif { $i >= 100 } {
+ error_check_good db_del:$i [$db del ${key}000$i] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i [$db del ${key}0000$i] 0
+ } else {
+ error_check_good db_del:$i [$db del ${key}00000$i] 0
+ }
+ }
+
+ puts "\tTest$tstn.h: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.i: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/bdb/test/test090.tcl b/bdb/test/test090.tcl
new file mode 100644
index 00000000000..ed6ec9632f5
--- /dev/null
+++ b/bdb/test/test090.tcl
@@ -0,0 +1,20 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test090.tcl,v 11.4 2000/12/11 17:24:56 sue Exp $
+#
+# DB Test 90 {access method}
+# Check for functionality near the end of the queue.
+#
+#
+proc test090 { method {nentries 1000} {txn -txn} {tnum "90"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test001 $method $nentries 4294967000 $tnum} $args
+ eval {test025 $method $nentries 4294967000 $tnum} $args
+ eval {test070 $method 4 2 $nentries WAIT 4294967000 $txn $tnum} $args
+}
diff --git a/bdb/test/test091.tcl b/bdb/test/test091.tcl
new file mode 100644
index 00000000000..9420b571ce3
--- /dev/null
+++ b/bdb/test/test091.tcl
@@ -0,0 +1,21 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test091.tcl,v 11.4 2000/12/01 04:28:36 ubell Exp $
+#
+# DB Test 91 {access method}
+# Check for CONSUME_WAIT functionality
+#
+proc test091 { method {nconsumers 4} \
+ {nproducers 2} {nitems 1000} {start 0 } {tnum "91"} args} {
+ if { [is_queue $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -txn $tnum } $args
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -cdb $tnum } $args
+}
diff --git a/bdb/test/testparams.tcl b/bdb/test/testparams.tcl
new file mode 100644
index 00000000000..2def6a9d0d8
--- /dev/null
+++ b/bdb/test/testparams.tcl
@@ -0,0 +1,115 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: testparams.tcl,v 11.39 2001/01/11 17:29:42 sue Exp $
+
+set deadtests 3
+set envtests 8
+set recdtests 13
+set rsrctests 3
+set runtests 93
+set subdbtests 10
+set rpctests 2
+
+set parms(subdb001) ""
+set parms(subdb002) 10000
+set parms(subdb003) 1000
+set parms(subdb004) ""
+set parms(subdb005) 100
+set parms(subdb006) 100
+set parms(subdb007) 10000
+set parms(subdb008) 10000
+set parms(subdb009) ""
+set parms(subdb010) ""
+set parms(test001) {10000 0 "01"}
+set parms(test002) 10000
+set parms(test003) ""
+set parms(test004) {10000 4 0}
+set parms(test005) 10000
+set parms(test006) {10000 0 6}
+set parms(test007) {10000 7}
+set parms(test008) {10000 8 0}
+set parms(test009) 10000
+set parms(test010) {10000 5 10}
+set parms(test011) {10000 5 11}
+set parms(test012) ""
+set parms(test013) 10000
+set parms(test014) 10000
+set parms(test015) {7500 0}
+set parms(test016) 10000
+set parms(test017) {0 19 17}
+set parms(test018) 10000
+set parms(test019) 10000
+set parms(test020) 10000
+set parms(test021) 10000
+set parms(test022) ""
+set parms(test023) ""
+set parms(test024) 10000
+set parms(test025) {10000 0 25}
+set parms(test026) {2000 5 26}
+set parms(test027) {100}
+set parms(test028) ""
+set parms(test029) 10000
+set parms(test030) 10000
+set parms(test031) {10000 5 31}
+set parms(test032) {10000 5 32}
+set parms(test033) {10000 5 33}
+set parms(test034) 10000
+set parms(test035) 10000
+set parms(test036) 10000
+set parms(test037) 100
+set parms(test038) {10000 5 38}
+set parms(test039) {10000 5 39}
+set parms(test040) 10000
+set parms(test041) 10000
+set parms(test042) 1000
+set parms(test043) 10000
+set parms(test044) {5 10 0}
+set parms(test045) 1000
+set parms(test046) ""
+set parms(test047) ""
+set parms(test048) ""
+set parms(test049) ""
+set parms(test050) ""
+set parms(test051) ""
+set parms(test052) ""
+set parms(test053) ""
+set parms(test054) ""
+set parms(test055) ""
+set parms(test056) ""
+set parms(test057) ""
+set parms(test058) ""
+set parms(test059) ""
+set parms(test060) ""
+set parms(test061) ""
+set parms(test062) {200 200 62}
+set parms(test063) ""
+set parms(test064) ""
+set parms(test065) ""
+set parms(test066) ""
+set parms(test067) {1000 67}
+set parms(test068) ""
+set parms(test069) {50 69}
+set parms(test070) {4 2 1000 CONSUME 0 -txn 70}
+set parms(test071) {1 1 10000 CONSUME 0 -txn 71}
+set parms(test072) {512 20 72}
+set parms(test073) {512 50 73}
+set parms(test074) {-nextnodup 512 100 74}
+set parms(test075) {75}
+set parms(test076) {1000 76}
+set parms(test077) {1000 512 77}
+set parms(test078) {100 512 78}
+set parms(test079) {10000 512 79}
+set parms(test080) {80}
+set parms(test081) {13 81}
+set parms(test082) {-prevnodup 512 100 82}
+set parms(test083) {512 5000 2}
+set parms(test084) {10000 84 65536}
+set parms(test085) {512 3 10 85}
+set parms(test086) ""
+set parms(test087) {512 50 87}
+set parms(test088) ""
+set parms(test090) {1000 -txn 90}
+set parms(test091) {4 2 1000 0 91}
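+
+# Sketch of how a driver might consume these entries (the proc name
+# run_one below is hypothetical; the real drivers live in test.tcl):
+# each parms() value is simply the positional-argument tail for the
+# corresponding test proc.
+#
+#	proc run_one { method test } {
+#		global parms
+#		eval $test $method $parms($test)
+#	}
+#	# run_one btree test085 expands to: test085 btree 512 3 10 85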
diff --git a/bdb/test/testutils.tcl b/bdb/test/testutils.tcl
new file mode 100644
index 00000000000..c5edaef7f6a
--- /dev/null
+++ b/bdb/test/testutils.tcl
@@ -0,0 +1,2380 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: testutils.tcl,v 11.86 2001/01/18 23:21:14 krinsky Exp $
+#
+# Test system utilities
+#
+# Timestamp -- print time along with elapsed time since last invocation
+# of timestamp.
+proc timestamp {{opt ""}} {
+ global __timestamp_start
+
+ if {[string compare $opt "-r"] == 0} {
+ clock seconds
+ } elseif {[string compare $opt "-t"] == 0} {
+ # -t gives us the current time in the format expected by
+ # db_recover -t.
+ return [clock format [clock seconds] -format "%y%m%d%H%M.%S"]
+ } else {
+ set now [clock seconds]
+
+ if {[catch {set start $__timestamp_start}] != 0} {
+ set __timestamp_start $now
+ }
+ set start $__timestamp_start
+
+ set elapsed [expr $now - $start]
+ set the_time [clock format $now -format ""]
+ set __timestamp_start $now
+
+ format "%02d:%02d:%02d (%02d:%02d:%02d)" \
+ [__fix_num [clock format $now -format "%H"]] \
+ [__fix_num [clock format $now -format "%M"]] \
+ [__fix_num [clock format $now -format "%S"]] \
+ [expr $elapsed / 3600] \
+ [expr ($elapsed % 3600) / 60] \
+ [expr ($elapsed % 3600) % 60]
+ }
+}
+
+proc __fix_num { num } {
+ set num [string trimleft $num "0"]
+ if {[string length $num] == 0} {
+ set num "0"
+ }
+ return $num
+}
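+
+# Illustrative calls (output values are examples only; they depend on
+# the current clock):
+#
+#	timestamp -r	;# => 979862474 (raw [clock seconds])
+#	timestamp -t	;# => 0101181701.14 (format for db_recover -t)
+#	timestamp	;# => 17:01:14 (00:00:05)  wall time (elapsed)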
+
+# Add a {key,data} pair to the specified database where
+# key=filename and data=file contents.
+proc put_file { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+
+ set ret [eval {$db put} $txn $flags {$file $data}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=filename and data=file contents and then write the
+# data to the specified file.
+proc get_file { db txn flags file outfile } {
+ source ./include.tcl
+
+ set fid [open $outfile w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $txn $flags {$file}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+}
+
+# Add a {key,data} pair to the specified database where
+# key=file contents and data=file name.
+proc put_file_as_key { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+ # Use not the file contents, but the file name concatenated
+ # before the file contents, as a key, to ensure uniqueness.
+ set data $file$filecont
+
+ set ret [eval {$db put} $txn $flags {$data $file}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=file contents and data=file name
+proc get_file_as_key { db txn flags file} {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+ set data $file$filecont
+
+ return [eval {$db get} $txn $flags {$data}]
+}
+
+# open file and call dump_file to dumpkeys to tempfile
+proc open_and_dump_file {
+ dbname dbenv txn outfile checkfunc dump_func beg cont} {
+ source ./include.tcl
+ if { $dbenv == "NULL" } {
+ set db [berkdb open -rdonly -unknown $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [berkdb open -env $dbenv -rdonly -unknown $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ error_check_good db_close [$db close] 0
+}
+
+# open file and call dump_file to dumpkeys to tempfile
+proc open_and_dump_subfile {
+ dbname dbenv txn outfile checkfunc dump_func beg cont subdb} {
+ source ./include.tcl
+
+ if { $dbenv == "NULL" } {
+ set db [berkdb open -rdonly -unknown $dbname $subdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [berkdb open -env $dbenv -rdonly -unknown $dbname $subdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ error_check_good db_close [$db close] 0
+}
+
+# Sequentially read a file and call checkfunc on each key/data pair.
+# Dump the keys out to the file specified by outfile.
+proc dump_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+proc dump_file_direction { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ set outf [open $outfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ for {set d [$c get $start] } { [llength $d] != 0 } {
+ set d [$c get $continue] } {
+ set kd [lindex $d 0]
+ set k [lindex $kd 0]
+ set d2 [lindex $kd 1]
+ $checkfunc $k $d2
+ puts $outf $k
+ # XXX: Geoff Mainland
+ # puts $outf "$k $d2"
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
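+
+# Usage sketch (assumes $db is an open handle; nop, defined later in
+# this file, is a checkfunc that accepts any key/data pair):
+#
+#	dump_file $db "" $testdir/t1 nop
+#	dump_file_direction $db "" $testdir/t2 nop "-last" "-prev"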
+
+proc dump_binkey_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_binkey_file_direction $db $txn $outfile $checkfunc \
+ "-first" "-next"
+}
+proc dump_bin_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_bin_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+# Note: the following procedure assumes that the binary-file-as-keys were
+# inserted into the database by put_file_as_key, and consist of the file
+# name followed by the file contents as key, to ensure uniqueness.
+proc dump_binkey_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ set inf $d1
+ for {set d [$c get $begin] } { [llength $d] != 0 } \
+ {set d [$c get $cont] } {
+ set kd [lindex $d 0]
+ set keyfile [lindex $kd 0]
+ set data [lindex $kd 1]
+
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+
+ # Chop off the first few bytes--that's the file name,
+ # added for uniqueness in put_file_as_key, which we don't
+ # want in the regenerated file.
+ set namelen [string length $data]
+ set keyfile [string range $keyfile $namelen end]
+ puts -nonewline $ofid $keyfile
+ close $ofid
+
+ $checkfunc $data $d1
+ puts $outf $data
+ flush $outf
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove $d1
+}
+
+proc dump_bin_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+
+ for {set d [$c get $begin] } \
+ { [llength $d] != 0 } {set d [$c get $cont] } {
+ set k [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $k $d1
+ puts $outf $k
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove -f $d1
+}
+
+proc make_data_str { key } {
+ set datastr ""
+ for {set i 0} {$i < 10} {incr i} {
+ append datastr $key
+ }
+ return $datastr
+}
+
+proc error_check_bad { func result bad {txn 0}} {
+ if { [binary_compare $result $bad] == 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp] $func returned error value $bad"
+ }
+}
+
+proc error_check_good { func result desired {txn 0} } {
+ if { [binary_compare $desired $result] != 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp]\
+ $func: expected $desired, got $result"
+ }
+}
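+
+# Typical assertions built on the two procs above; the optional fourth
+# argument is a transaction handle to abort before raising the failure:
+#
+#	error_check_good db_open [is_valid_db $db] TRUE
+#	error_check_good db_put [$db put $key $datum] 0
+#	error_check_bad db_get:missing [llength [$db get $key]] 0
+#	error_check_good db_put:txn [$db put -txn $t $key $datum] 0 $t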
+
+# Locks have the prefix of their manager.
+proc is_substr { l mgr } {
+ if { [string first $mgr $l] == -1 } {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+proc release_list { l } {
+
+ # Now release all the locks
+ foreach el $l {
+ set ret [$el put]
+ error_check_good lock_put $ret 0
+ }
+}
+
+proc debug { {stop 0} } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+
+ set __debug_on 1
+ set __debug_print 1
+ set __debug_test $stop
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_check { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+		# Some tests add an extra dup (like overflow entries);
+		# check its id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Parse duplicate data entries of the form N:data. Data_of returns
+# the data part; id_of returns the numerical part
+proc data_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+ return [ string range $str [expr $ndx + 1] end]
+}
+
+proc id_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+
+ return [ string range $str 0 [expr $ndx - 1]]
+}
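+
+# For example, for the dup entry "3:juliet":
+#	id_of "3:juliet"	returns 3
+#	data_of "3:juliet"	returns juliet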
+
+proc nop { {args} } {
+ return
+}
+
+# Partial put test procedure.
+# Munges a data val through three different partial puts. Stores
+# the final munged string in the dvals array so that you can check
+# it later (dvals should be global). We take the characters that
+# are being replaced, make them capitals and then replicate them
+# some number of times (n_add). We do this at the beginning of the
+# data, at the middle and at the end. The parameters are:
+# db, txn, key -- as per usual. Data is the original data element
+# from which we are starting. n_replace is the number of characters
+# that we will replace. n_add is the number of times we will add
+# the replaced string back in.
+proc partial_put { method db txn gflags key data n_replace n_add } {
+ global dvals
+ source ./include.tcl
+
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+
+ eval {$db put} $txn {$key [chop_data $method $data]}
+
+ # Beginning change
+ set s [string range $data 0 [ expr $n_replace - 1 ] ]
+ set repl [ replicate [string toupper $s] $n_add ]
+
+ # This is gross, but necessary: if this is a fixed-length
+ # method, and the chopped length of $repl is zero,
+ # it's because the original string was zero-length and our data item
+ # is all nulls. Set repl to something non-NULL.
+ if { [is_fixed_length $method] && \
+ [string length [chop_data $method $repl]] == 0 } {
+ set repl [replicate "." $n_add]
+ }
+
+ set newstr [chop_data $method $repl[string range $data $n_replace end]]
+ set ret [eval {$db put} $txn {-partial [list 0 $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # End Change
+ set len [string length $newstr]
+ set spl [expr $len - $n_replace]
+ # Handle case where $n_replace > $len
+ if { $spl < 0 } {
+ set spl 0
+ }
+
+ set s [string range $newstr [ expr $len - $n_replace ] end ]
+ # Handle zero-length keys
+ if { [string length $s] == 0 } { set s "A" }
+
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method \
+ [string range $newstr 0 [expr $spl - 1 ] ]$repl]
+
+ set ret [eval {$db put} $txn \
+ {-partial [list $spl $n_replace] $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # Middle Change
+ set len [string length $newstr]
+ set mid [expr $len / 2 ]
+ set beg [expr $mid - [expr $n_replace / 2] ]
+ set end [expr $beg + $n_replace - 1]
+ set s [string range $newstr $beg $end]
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method [string range $newstr 0 \
+ [expr $beg - 1 ] ]$repl[string range $newstr [expr $end + 1] end]]
+
+ set ret [eval {$db put} $txn {-partial [list $beg $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ set dvals($key) [pad_data $method $newstr]
+}
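+
+# Worked trace (assuming a variable-length method, so chop_data and
+# pad_data leave the strings alone), with data "abcdef", n_replace 2,
+# n_add 2:
+#	initial put:		abcdef
+#	beginning change:	ABABcdef	(put -partial {0 2} ... ABAB)
+#	end change:		ABABcdEFEF	(put -partial {6 2} ... EFEF)
+#	middle change:		ABABCDCDEFEF	(put -partial {4 2} ... CDCD)
+# and dvals($key) ends up holding ABABCDCDEFEF.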
+
+proc replicate { str times } {
+ set res $str
+ for { set i 1 } { $i < $times } { set i [expr $i * 2] } {
+ append res $res
+ }
+ return $res
+}
+
+proc repeat { str n } {
+ set ret ""
+ while { $n > 0 } {
+ set ret $str$ret
+ incr n -1
+ }
+ return $ret
+}
+
+proc isqrt { l } {
+ set s [expr sqrt($l)]
+ set ndx [expr [string first "." $s] - 1]
+ return [string range $s 0 $ndx]
+}
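+
+# Note the difference between the helpers above: replicate doubles its
+# result until the counter reaches $times, so it yields a power-of-two
+# number of copies (e.g. [replicate ab 3] is "abababab"), while repeat
+# concatenates exactly $n copies ([repeat ab 3] is "ababab");
+# isqrt truncates ([isqrt 10] is 3).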
+
+# If we run watch_procs multiple times without an intervening
+# testdir cleanup, it's possible that old sentinel files will confuse
+# us. Make sure they're wiped out before we spawn any other processes.
+proc sentinel_init { } {
+ source ./include.tcl
+
+ set filelist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set filelist $result
+ }
+
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set filelist [concat $filelist $result]
+ }
+
+ foreach f $filelist {
+ fileremove $f
+ }
+}
+
+proc watch_procs { {delay 30} {max 3600} } {
+ source ./include.tcl
+
+ set elapsed 0
+ while { 1 } {
+
+ tclsleep $delay
+ incr elapsed $delay
+
+		# Find the list of processes with outstanding sentinel
+ # files (i.e. a begin.pid and no end.pid).
+ set beginlist {}
+ set endlist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set beginlist $result
+ }
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set endlist $result
+ }
+
+ set bpids {}
+ catch {unset epids}
+ foreach begfile $beginlist {
+ lappend bpids [string range $begfile \
+ [string length $testdir/begin.] end]
+ }
+ foreach endfile $endlist {
+ set epids([string range $endfile \
+ [string length $testdir/end.] end]) 1
+ }
+
+ # The set of processes that we still want to watch, $l,
+ # is the set of pids that have begun but not ended
+ # according to their sentinel files.
+ set l {}
+ foreach p $bpids {
+ if { [info exists epids($p)] == 0 } {
+ lappend l $p
+ }
+ }
+
+ set rlist {}
+ foreach i $l {
+ set r [ catch { exec $KILL -0 $i } result ]
+ if { $r == 0 } {
+ lappend rlist $i
+ }
+ }
+ if { [ llength $rlist] == 0 } {
+ break
+ } else {
+ puts "[timestamp] processes running: $rlist"
+ }
+
+ if { $elapsed > $max } {
+ # We have exceeded the limit; kill processes
+ # and report an error
+ set rlist {}
+ foreach i $l {
+ set r [catch { exec $KILL $i } result]
+ if { $r == 0 } {
+ lappend rlist $i
+ }
+ }
+ error_check_good "Processes still running" \
+ [llength $rlist] 0
+ }
+ }
+ puts "All processes have exited."
+}
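+
+# The sentinel convention assumed by sentinel_init/watch_procs: each
+# spawned test process creates $testdir/begin.$pid when it starts and
+# $testdir/end.$pid when it exits cleanly, so a pid with a begin file
+# but no end file is still considered running.  Typical caller sketch:
+#
+#	sentinel_init
+#	# ... spawn child tclsh processes that follow the convention ...
+#	watch_procs 10 1200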
+
+# These routines are all used from within the dbscript.tcl tester.
+proc db_init { dbp do_data } {
+ global a_keys
+ global l_keys
+ source ./include.tcl
+
+ set txn ""
+ set nk 0
+ set lastkey ""
+
+ set a_keys() BLANK
+ set l_keys ""
+
+ set c [$dbp cursor]
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ incr nk
+ if { $do_data == 1 } {
+ if { [info exists a_keys($k)] } {
+				lappend a_keys($k) $d2
+ } else {
+ set a_keys($k) $d2
+ }
+ }
+
+ lappend l_keys $k
+ }
+ error_check_good curs_close [$c close] 0
+
+ return $nk
+}
+
+proc pick_op { min max n } {
+ if { $n == 0 } {
+ return add
+ }
+
+ set x [berkdb random_int 1 12]
+ if {$n < $min} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8} {
+ return get
+ } else {
+ return add
+ }
+ } elseif {$n > $max} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8 } {
+ return get
+ } else {
+ return del
+ }
+
+ } elseif { $x <= 3 } {
+ return del
+ } elseif { $x <= 6 } {
+ return get
+ } elseif { $x <= 9 } {
+ return put
+ } else {
+ return add
+ }
+}
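+
+# With the 1-12 roll above, the resulting operation mix is:
+# below $min: put/get/add each 1/3; above $max: put/get/del each 1/3;
+# otherwise del/get/put/add each 1/4 (and always "add" when n == 0).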
+
+# random_data: Generate a string of random characters.
+# If recno is 0 - Use average to pick a length between 1 and 2 * avg.
+# If recno is non-0, generate a number between 1 and 2 ^ (avg * 2 - 1),
+# capped so that it fits into a 32-bit integer.
+# If the unique flag is 1, then make sure that the string is unique
+# in the array "where".
+proc random_data { avg unique where {recno 0} } {
+ upvar #0 $where arr
+ global debug_on
+ set min 1
+ set max [expr $avg+$avg-1]
+ if { $recno } {
+ #
+ # Tcl seems to have problems with values > 30.
+ #
+ if { $max > 30 } {
+ set max 30
+ }
+ set maxnum [expr int(pow(2, $max))]
+ }
+ while {1} {
+ set len [berkdb random_int $min $max]
+ set s ""
+ if {$recno} {
+ set s [berkdb random_int 1 $maxnum]
+ } else {
+ for {set i 0} {$i < $len} {incr i} {
+ append s [int_to_char [berkdb random_int 0 25]]
+ }
+ }
+
+ if { $unique == 0 || [info exists arr($s)] == 0 } {
+ break
+ }
+ }
+
+ return $s
+}
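+
+# Usage sketch (the array name passed as $where must be a global, since
+# it is resolved with upvar #0; the caller records new values in it when
+# uniqueness matters):
+#
+#	array set seen {}
+#	set s [random_data 10 1 seen]	;# 1-19 random letters, unique
+#	set seen($s) 1
+#	set r [random_data 10 0 seen 1]	;# random recno in 1..2^19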
+
+proc random_key { } {
+ global l_keys
+ global nkeys
+ set x [berkdb random_int 0 [expr $nkeys - 1]]
+ return [lindex $l_keys $x]
+}
+
+proc is_err { desired } {
+ set x [berkdb random_int 1 100]
+ if { $x <= $desired } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc pick_cursput { } {
+ set x [berkdb random_int 1 4]
+ switch $x {
+ 1 { return "-keylast" }
+ 2 { return "-keyfirst" }
+ 3 { return "-before" }
+ 4 { return "-after" }
+ }
+}
+
+proc random_cursor { curslist } {
+ global l_keys
+ global nkeys
+
+ set x [berkdb random_int 0 [expr [llength $curslist] - 1]]
+ set dbc [lindex $curslist $x]
+
+ # We want to randomly set the cursor. Pick a key.
+ set k [random_key]
+ set r [$dbc get "-set" $k]
+ error_check_good cursor_get:$k [is_substr Error $r] 0
+
+ # Now move forward or backward some hops to randomly
+ # position the cursor.
+ set dist [berkdb random_int -10 10]
+
+ set dir "-next"
+ set boundary "-first"
+ if { $dist < 0 } {
+ set dir "-prev"
+ set boundary "-last"
+ set dist [expr 0 - $dist]
+ }
+
+ for { set i 0 } { $i < $dist } { incr i } {
+ set r [ record $dbc get $dir $k ]
+		if { [llength $r] == 0 } {
+ set r [ record $dbc get $k $boundary ]
+ }
+ error_check_bad dbcget [llength $r] 0
+ }
+	return [linsert $r 0 $dbc]
+}
+
+proc record { args } {
+# Recording every operation makes tests ridiculously slow on
+# NT, so we are commenting this out; for debugging purposes,
+# it will undoubtedly be useful to uncomment this.
+# puts $args
+# flush stdout
+ return [eval $args]
+}
+
+proc newpair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+ lappend l_keys $k
+ incr nkeys
+}
+
+proc rempair { k } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ unset a_keys($k)
+ set n [lsearch $l_keys $k]
+ error_check_bad rempair:$k $n -1
+ set l_keys [lreplace $l_keys $n $n]
+ incr nkeys -1
+}
+
+proc changepair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+}
+
+proc changedup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ error_check_bad changedup:$k [llength $d] 0
+
+ set n [lsearch $d $olddata]
+ error_check_bad changedup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n $newdata]
+}
+
+# Insert a dup into the a_keys array with DB_KEYFIRST.
+proc adddup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ if { [llength $d] == 0 } {
+ lappend l_keys $k
+ incr nkeys
+		set a_keys($k) [list $newdata]
+ }
+
+ set ndx 0
+
+	set d [linsert $d $ndx $newdata]
+ set a_keys($k) $d
+}
+
+proc remdup { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+	set d $a_keys($k)
+ error_check_bad changedup:$k [llength $d] 0
+
+	set n [lsearch $d $data]
+ error_check_bad changedup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n]
+}
+
+proc dump_full_file { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ set outf [open $outfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good dbcursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get $start] } { [string length $d] != 0 } {
+ set d [$c get $continue] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ $checkfunc $k $d2
+ puts $outf "$k\t$d2"
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+proc int_to_char { i } {
+ global alphabet
+
+ return [string index $alphabet $i]
+}
+
+proc dbcheck { key data } {
+ global l_keys
+ global a_keys
+ global nkeys
+ global check_array
+
+ if { [lsearch $l_keys $key] == -1 } {
+ error "FAIL: Key |$key| not in list of valid keys"
+ }
+
+ set d $a_keys($key)
+
+ if { [info exists check_array($key) ] } {
+ set check $check_array($key)
+ } else {
+ set check {}
+ }
+
+ if { [llength $d] > 1 } {
+ if { [llength $check] != [llength $d] } {
+ # Make the check array the right length
+ for { set i [llength $check] } { $i < [llength $d] } \
+ {incr i} {
+ lappend check 0
+ }
+ set check_array($key) $check
+ }
+
+ # Find this data's index
+ set ndx [lsearch $d $data]
+ if { $ndx == -1 } {
+ error "FAIL: \
+ Data |$data| not found for key $key. Found |$d|"
+ }
+
+ # Set the bit in the check array
+ set check_array($key) [lreplace $check_array($key) $ndx $ndx 1]
+ } elseif { [string compare $d $data] != 0 } {
+ error "FAIL: \
+ Invalid data |$data| for key |$key|. Expected |$d|."
+ } else {
+ set check_array($key) 1
+ }
+}
+
+# Dump out the file and verify it
+proc filecheck { file txn } {
+ global check_array
+ global l_keys
+ global nkeys
+ global a_keys
+ source ./include.tcl
+
+ if { [info exists check_array] == 1 } {
+ unset check_array
+ }
+
+ open_and_dump_file $file NULL $txn $file.dump dbcheck dump_full_file \
+ "-first" "-next"
+
+ # Check that everything we checked had all its data
+ foreach i [array names check_array] {
+ set count 0
+ foreach j $check_array($i) {
+ if { $j != 1 } {
+ puts -nonewline "Key |$i| never found datum"
+ puts " [lindex $a_keys($i) $count]"
+ }
+ incr count
+ }
+ }
+
+ # Check that all keys appeared in the checked array
+ set count 0
+ foreach k $l_keys {
+ if { [info exists check_array($k)] == 0 } {
+ puts "filecheck: key |$k| not found. Data: $a_keys($k)"
+ }
+ incr count
+ }
+
+ if { $count != $nkeys } {
+ puts "filecheck: Got $count keys; expected $nkeys"
+ }
+}
+
+proc esetup { dir } {
+ source ./include.tcl
+
+ set ret [berkdb envremove -home $dir]
+
+ fileremove -f $dir/file0 $dir/file1 $dir/file2 $dir/file3
+ set mp [memp $dir 0644 -create -cachesize { 0 10240 }]
+ set lp [lock_open "" -create 0644]
+ error_check_good memp_close [$mp close] 0
+ error_check_good lock_close [$lp close] 0
+}
+
+proc cleanup { dir env } {
+ global gen_upgrade
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ source ./include.tcl
+
+ if { $gen_upgrade == 1 } {
+ set vers [berkdb version]
+ set maj [lindex $vers 0]
+ set min [lindex $vers 1]
+
+ if { $upgrade_be == 1 } {
+ set version_dir "$maj.${min}be"
+ } else {
+ set version_dir "$maj.${min}le"
+ }
+
+ set dest $upgrade_dir/$version_dir/$upgrade_method/$upgrade_name
+
+ catch {exec mkdir -p $dest}
+ catch {exec sh -c "mv $dir/*.db $dest"}
+ catch {exec sh -c "mv $dir/__dbq.* $dest"}
+ }
+
+# check_handles
+ set remfiles {}
+ set ret [catch { glob $dir/* } result]
+ if { $ret == 0 } {
+ foreach file $result {
+ #
+ # We:
+ # - Ignore any env-related files, which are
+ # those that have __db.* or log.* if we are
+ # running in an env.
+ # - Call 'dbremove' on any databases.
+ # Remove any remaining temp files.
+ #
+ switch -glob -- $file {
+ */__db.* -
+ */log.* {
+ if { $env != "NULL" } {
+ continue
+ } else {
+ lappend remfiles $file
+ }
+ }
+ *.db {
+ set envargs ""
+ if { $env != "NULL"} {
+ set file [file tail $file]
+ set envargs " -env $env "
+ }
+
+ # If a database is left in a corrupt
+ # state, dbremove might not be able to handle
+ # it (it does an open before the remove).
+ # Be prepared for this, and if necessary,
+ # just forcibly remove the file with a warning
+ # message.
+ set ret [catch \
+ {eval {berkdb dbremove} $envargs $file} res]
+ if { $ret != 0 } {
+ puts \
+ "FAIL: dbremove in cleanup failed: $res"
+ lappend remfiles $file
+ }
+ }
+ default {
+ lappend remfiles $file
+ }
+ }
+ }
+ if {[llength $remfiles] > 0} {
+ eval fileremove -f $remfiles
+ }
+ }
+}
+
+proc log_cleanup { dir } {
+ source ./include.tcl
+
+ set files [glob -nocomplain $dir/log.*]
+ if { [llength $files] != 0} {
+ foreach f $files {
+ fileremove -f $f
+ }
+ }
+}
+
+proc env_cleanup { dir } {
+ source ./include.tcl
+
+ set stat [catch {berkdb envremove -home $dir} ret]
+ #
+ # If something failed and we are left with a region entry
+ # in /dev/shmem that is zero-length, the envremove will
+ # succeed, and the shm_unlink will succeed, but it will not
+ # remove the zero-length entry from /dev/shmem. Remove it
+ # using fileremove or else all other tests using an env
+ # will immediately fail.
+ #
+ if { $is_qnx_test == 1 } {
+ set region_files [glob -nocomplain /dev/shmem/$dir*]
+ if { [llength $region_files] != 0 } {
+ foreach f $region_files {
+ fileremove -f $f
+ }
+ }
+ }
+ log_cleanup $dir
+ cleanup $dir NULL
+}
+
+proc remote_cleanup { server dir localdir } {
+ set home [file tail $dir]
+ error_check_good cleanup:remove [berkdb envremove -home $home \
+ -server $server] 0
+ catch {exec rsh $server rm -f $dir/*} ret
+ cleanup $localdir NULL
+}
+
+proc help { cmd } {
+ if { [info command $cmd] == $cmd } {
+ set is_proc [lsearch [info procs $cmd] $cmd]
+ if { $is_proc == -1 } {
+ # Not a procedure; must be a C command
+ # Let's hope that it takes some parameters
+ # and that it prints out a message
+ puts "Usage: [eval $cmd]"
+ } else {
+ # It is a tcl procedure
+ puts -nonewline "Usage: $cmd"
+ set args [info args $cmd]
+ foreach a $args {
+ set is_def [info default $cmd $a val]
+ if { $is_def != 0 } {
+ # Default value
+ puts -nonewline " $a=$val"
+ } elseif {$a == "args"} {
+ # Print out flag values
+ puts " options"
+ args
+ } else {
+ # No default value
+ puts -nonewline " $a"
+ }
+ }
+ puts ""
+ }
+ } else {
+ puts "$cmd is not a command"
+ }
+}
+
+# Run a recovery test for a particular operation
+# Notice that we catch the return from CP and do not do anything with it.
+# This is because Solaris CP seems to exit non-zero on occasion, but
+# everything else seems to run just fine.
+proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ source ./include.tcl
+
+ #puts "op_recover: $encodedop $dir $env_cmd $dbfile $cmd $msg"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ set op ""
+ set op2 ""
+ if { $encodedop == "prepare-abort" } {
+ set op "prepare"
+ set op2 "abort"
+ } elseif { $encodedop == "prepare-commit" } {
+ set op "prepare"
+ set op2 "commit"
+ } else {
+ set op $encodedop
+ }
+
+ puts "\t$msg $encodedop"
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ # Save the initial file and open the environment and the file
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
+ copy_extent_file $dir $dbfile init
+
+ set env [eval $env_cmd]
+ set db [berkdb open -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Dump out file contents for initial case
+ set tflags ""
+ open_and_dump_file $dbfile $env $tflags $init_file nop \
+ dump_file_direction "-first" "-next"
+
+ set t [$env txn]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_substr $t "txn"] 1
+
+ # Now substitute the ENV, TXNID, and DB placeholders in the command
+ set exec_cmd $cmd
+
+ set i [lsearch $cmd ENV]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $env]
+ }
+
+ set i [lsearch $cmd TXNID]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $t]
+ }
+
+ set i [lsearch $exec_cmd DB]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $db]
+ }
+
+ # To test DB_CONSUME, we need to expect a record return, not "0".
+ set i [lsearch $exec_cmd "-consume"]
+ if { $i != -1 } {
+ set record_exec_cmd_ret 1
+ } else {
+ set record_exec_cmd_ret 0
+ }
+
+ # For the DB_APPEND test, we need to expect a return other than
+ # 0; set this flag to be more lenient in the error_check_good.
+ set i [lsearch $exec_cmd "-append"]
+ if { $i != -1 } {
+ set lenient_exec_cmd_ret 1
+ } else {
+ set lenient_exec_cmd_ret 0
+ }
+
+ # Execute command and commit/abort it.
+ set ret [eval $exec_cmd]
+ if { $record_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [llength [lindex $ret 0]] 2
+ } elseif { $lenient_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [expr $ret > 0] 1
+ } else {
+ error_check_good "\"$exec_cmd\"" $ret 0
+ }
+
+ set record_exec_cmd_ret 0
+ set lenient_exec_cmd_ret 0
+
+ # Sync the file so that we can capture a snapshot to test
+ # recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ #set tflags "-txn $t"
+ open_and_dump_file $dir/$dbfile.afterop NULL $tflags \
+ $afterop_file nop dump_file_direction \
+ "-first" "-next"
+ #puts "\t\t\tExecuting txn_$op:$t"
+ error_check_good txn_$op:$t [$t $op] 0
+ if { $op2 != "" } {
+ #puts "\t\t\tExecuting txn_$op2:$t"
+ error_check_good txn_$op2:$t [$t $op2] 0
+ }
+
+ switch $encodedop {
+ "commit" { puts "\t\tCommand executed and committed." }
+ "abort" { puts "\t\tCommand executed and aborted." }
+ "prepare" { puts "\t\tCommand executed and prepared." }
+ "prepare-commit" {
+ puts "\t\tCommand executed, prepared, and committed."
+ }
+ "prepare-abort" {
+ puts "\t\tCommand executed, prepared, and aborted."
+ }
+ }
+
+ # Dump out file and save a copy.
+ error_check_good sync:$db [$db sync] 0
+ open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
+ dump_file_direction "-first" "-next"
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
+ copy_extent_file $dir $dbfile final
+
+ # If this is an abort or prepare-abort, it should match the
+ # original file.
+ # If this was a commit or prepare-commit, then this file should
+ # match the afterop file.
+ # If this was a prepare without an abort or commit, we still
+ # have transactions active, and peering at the database from
+ # another environment will show data from uncommitted transactions.
+ # Thus we just skip this in the prepare-only case; what
+ # we care about are the results of a prepare followed by a
+ # recovery, which we test later.
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } elseif { $op == "abort" || $op2 == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ # Make sure this really is a prepare-only
+ error_check_good assert:prepare-only $encodedop "prepare"
+ }
+
+ # Running recovery on this database should not do anything.
+ # Flush all data to disk, close the environment and save the
+ # file.
+ error_check_good close:$db [$db close] 0
+
+ # If all we've done is a prepare, then there's still a
+ # transaction active, and an env close will return DB_RUNRECOVERY
+ if { $encodedop == "prepare" } {
+ catch {$env close} ret
+ error_check_good env_close \
+ [is_substr $ret DB_RUNRECOVERY] 1
+ } else {
+ reset_env $env
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\tRunning recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
+
+ puts "verified"
+
+ berkdb debug_check
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+ open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # Now close the environment, substitute a file that will need
+ # recovery and try running recovery again.
+ reset_env $env
+ if { $op == "commit" || $op2 == "commit" } {
+ catch { file copy -force $dir/$dbfile.init $dir/$dbfile } res
+ move_file_extent $dir $dbfile init copy
+ } else {
+ catch { file copy -force $dir/$dbfile.afterop $dir/$dbfile } res
+ move_file_extent $dir $dbfile afterop copy
+ }
+
+ berkdb debug_check
+ puts -nonewline \
+ "\t\tRunning recovery on pre-op database ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ error_check_good db_verify_preop [verify_dir $testdir "\t\t" 0 1] 0
+
+ puts "verified"
+
+ set env [eval $env_cmd]
+
+ open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $final_file $final_file.sort
+ filesort $afterop_file $afterop_file.sort
+ error_check_good \
+ diff(post-$op,recovered):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # This should just close the environment, not blow it away.
+ reset_env $env
+}
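+
+# A minimal op_recover usage sketch (illustrative only; the dbfile, key,
+# data, and message names below are hypothetical, not taken from this file).
+# The ENV, TXNID, and DB placeholders in the command are substituted by
+# op_recover before execution:
+#
+#	op_recover abort $testdir $env_cmd recd.db \
+#	    {DB put -txn TXNID recd_key recd_data} "Recd.a:"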
+
+proc populate { db method txn n dups bigdata } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } elseif { $dups == 1 } {
+ set key duplicate_key
+ } else {
+ set key $str
+ }
+ if { $bigdata == 1 && [berkdb random_int 1 3] == 1} {
+ set str [replicate $str 1000]
+ }
+
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+proc big_populate { db txn n } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ set key [replicate $str 50]
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+proc unpopulate { db txn num } {
+ source ./include.tcl
+
+ set c [eval {$db cursor} "-txn $txn"]
+ error_check_bad $db:cursor $c NULL
+ error_check_good $db:cursor [is_substr $c $db] 1
+
+ set i 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ $c del
+ incr i
+ if { $num != 0 && $i >= $num } {
+ break
+ }
+ }
+ error_check_good cursor_close [$c close] 0
+ return 0
+}
+
+proc reset_env { env } {
+ error_check_good env_close [$env close] 0
+}
+
+# This routine will let us obtain a ring of deadlocks.
+# Each locker will get a lock on obj_id, then sleep, and
+# then try to lock (obj_id + 1) % num.
+# When the lock is finally granted, we release our locks and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker deadlocks and the
+# rest all finish successfully.
+proc ring { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
+ puts $errorInfo
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+ set nextobj [expr ($obj_id + 1) % $num]
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$obj_id $lock2 NULL
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+}
+
+# This routine will create massive deadlocks.
+# Each locker will get a readlock on obj_id, then sleep, and
+# then try to upgrade the readlock to a write lock.
+# When the lock is finally granted, we release our first lock and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker succeeds in getting all
+# the locks and everyone else deadlocks.
+proc clump { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ set obj_id 10
+ if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
+ puts $errorInfo
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id \
+ [is_valid_lock $lock1 $myenv] TRUE
+ }
+
+ tclsleep 30
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+}
+
+proc dead_check { t procs dead clean other } {
+ error_check_good $t:$procs:other $other 0
+ switch $t {
+ ring {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ clump {
+ error_check_good $t:$procs:deadlocks $dead \
+ [expr $procs - 1]
+ error_check_good $t:$procs:success $clean 1
+ }
+ default {
+ error "Test $t not implemented"
+ }
+ }
+}
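+
+# A minimal sketch of how ring/clump results might feed dead_check
+# (illustrative only; locker_results and nprocs are hypothetical names --
+# the real deadlock tests run each locker in its own process and parse
+# its printed result):
+#
+#	set dead 0; set clean 0; set other 0
+#	foreach res $locker_results {
+#		switch $res {
+#			DEADLOCK { incr dead }
+#			1 { incr clean }
+#			default { incr other }
+#		}
+#	}
+#	dead_check ring $nprocs $dead $clean $other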
+
+proc rdebug { id op where } {
+ global recd_debug
+ global recd_id
+ global recd_op
+
+ set recd_debug $where
+ set recd_id $id
+ set recd_op $op
+}
+
+proc rtag { msg id } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { $id == $tag } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc zero_list { n } {
+ set ret ""
+ while { $n > 0 } {
+ lappend ret 0
+ incr n -1
+ }
+ return $ret
+}
+
+proc check_dump { k d } {
+ puts "key: $k data: $d"
+}
+
+proc reverse { s } {
+ set res ""
+ for { set i 0 } { $i < [string length $s] } { incr i } {
+ set res "[string index $s $i]$res"
+ }
+
+ return $res
+}
+
+proc is_valid_widget { w expected } {
+ # First N characters must match "expected"
+ set l [string length $expected]
+ incr l -1
+ if { [string compare [string range $w 0 $l] $expected] != 0 } {
+ return $w
+ }
+
+ # Remaining characters must be digits
+ incr l 1
+ for { set i $l } { $i < [string length $w] } { incr i} {
+ set c [string index $w $i]
+ if { $c < "0" || $c > "9" } {
+ return $w
+ }
+ }
+
+ return TRUE
+}
+
+proc is_valid_db { db } {
+ return [is_valid_widget $db db]
+}
+
+proc is_valid_env { env } {
+ return [is_valid_widget $env env]
+}
+
+proc is_valid_cursor { dbc db } {
+ return [is_valid_widget $dbc $db.c]
+}
+
+proc is_valid_lock { lock env } {
+ return [is_valid_widget $lock $env.lock]
+}
+
+proc is_valid_mpool { mpool env } {
+ return [is_valid_widget $mpool $env.mp]
+}
+
+proc is_valid_page { page mpool } {
+ return [is_valid_widget $page $mpool.pg]
+}
+
+proc is_valid_txn { txn env } {
+ return [is_valid_widget $txn $env.txn]
+}
+
+proc is_valid_mutex { m env } {
+ return [is_valid_widget $m $env.mutex]
+}
+
+proc send_cmd { fd cmd {sleep 2}} {
+ source ./include.tcl
+
+ puts $fd "set v \[$cmd\]"
+ puts $fd "puts \$v"
+ puts $fd "flush stdout"
+ flush $fd
+ berkdb debug_check
+ tclsleep $sleep
+
+ set r [rcv_result $fd]
+ return $r
+}
+
+proc rcv_result { fd } {
+ set r [gets $fd result]
+ error_check_bad remote_read $r -1
+
+ return $result
+}
+
+proc send_timed_cmd { fd rcv_too cmd } {
+ set c1 "set start \[timestamp -r\]; "
+ set c2 "puts \[expr \[timestamp -r\] - \$start\]"
+ set full_cmd [concat $c1 $cmd ";" $c2]
+
+ puts $fd $full_cmd
+ puts $fd "flush stdout"
+ flush $fd
+ return 0
+}
+
+#
+# The rationale behind why we have *two* "data padding" routines is outlined
+# below:
+#
+# Both pad_data and chop_data truncate data that is too long. However,
+# pad_data also adds the pad character to pad data out to the fixed length
+# record length.
+#
+# Which routine you call does not depend on the length of the data you're
+# using, but on whether you're doing a put or a get. When we do a put, we
+# have to make sure the data isn't longer than the size of a record because
+# otherwise we'll get an error (use chop_data). When we do a get, we want to
+# check that db padded everything correctly (use pad_data on the value against
+# which we are comparing).
+#
+# We don't want to just use the pad_data routine for both purposes, because
+# we want to be able to test whether or not db is padding correctly. For
+# example, the queue access method had a bug where when a record was
+# overwritten (*not* a partial put), only the first n bytes of the new entry
+# were written, n being the new entry's (unpadded) length. So, if we did
+# a put with key,value pair (1, "abcdef") and then a put (1, "z"), we'd get
+# back (1,"zbcdef"). If we had used pad_data instead of chop_data, we would
+# have gotten the "correct" result, but we wouldn't have found this bug.
+proc chop_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1 && \
+ [string length $data] > $fixed_len} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
+
+proc pad_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
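+
+# A short usage sketch for the two padding routines (a sketch only, assuming
+# a fixed-length method; db, key, and data names are illustrative): chop the
+# data on a put so it fits in a record, and pad the expected value on a get
+# before comparing:
+#
+#	set ret [$db put $key [chop_data $method $data]]
+#	set pair [lindex [$db get $key] 0]
+#	error_check_good get:$key [lindex $pair 1] [pad_data $method $data]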
+
+proc make_fixed_length {method data {pad 0}} {
+ global fixed_len
+ global fixed_pad
+
+ if {[is_fixed_length $method] == 1} {
+ if {[string length $data] > $fixed_len } {
+ error_check_bad make_fixed_len:TOO_LONG 1 1
+ }
+ while { [string length $data] < $fixed_len } {
+ set data [format $data%c $fixed_pad]
+ }
+ }
+ return $data
+}
+
+# shift data for partial
+# pad with fixed pad (which is NULL)
+proc partial_shift { data offset direction} {
+ global fixed_len
+
+ set len [expr $fixed_len - 1]
+
+ if { [string compare $direction "right"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [binary format x1a$len $data]
+ }
+ } elseif { [string compare $direction "left"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [string range $data 1 end]
+ set data [binary format a$len $data]
+ }
+ }
+ return $data
+}
+
+# string compare does not always work to compare
+# this data, nor does expr (==)
+# specialized routine for comparison
+# (for use in fixed len recno and q)
+proc binary_compare { data1 data2 } {
+ if { [string length $data1] != [string length $data2] || \
+ [string compare -length \
+ [string length $data1] $data1 $data2] != 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc convert_method { method } {
+ switch -- $method {
+ -btree -
+ -dbtree -
+ -ddbtree -
+ -rbtree -
+ BTREE -
+ DB_BTREE -
+ DB_RBTREE -
+ RBTREE -
+ bt -
+ btree -
+ db_btree -
+ db_rbtree -
+ rbt -
+ rbtree { return "-btree" }
+
+ -dhash -
+ -hash -
+ DB_HASH -
+ HASH -
+ db_hash -
+ h -
+ hash { return "-hash" }
+
+ -queue -
+ DB_QUEUE -
+ QUEUE -
+ db_queue -
+ q -
+ qam -
+ queue { return "-queue" }
+
+ -queueextent -
+ QUEUEEXTENT -
+ qe -
+ qamext -
+ -queueext -
+ queueextent -
+ queueext { return "-queue" }
+
+ -frecno -
+ -recno -
+ -rrecno -
+ DB_FRECNO -
+ DB_RECNO -
+ DB_RRECNO -
+ FRECNO -
+ RECNO -
+ RRECNO -
+ db_frecno -
+ db_recno -
+ db_rrecno -
+ frec -
+ frecno -
+ rec -
+ recno -
+ rrec -
+ rrecno { return "-recno" }
+
+ default { error "FAIL:[timestamp] $method: unknown method" }
+ }
+}
+
+# If recno-with-renumbering or btree-with-renumbering is specified, then
+# fix the arguments to specify the DB_RENUMBER/DB_RECNUM option for the
+# -flags argument.
+proc convert_args { method {largs ""} } {
+ global fixed_len
+ global fixed_pad
+ global gen_upgrade
+ global upgrade_be
+ source ./include.tcl
+
+ if { [string first - $largs] == -1 &&\
+ [string compare $largs ""] != 0 } {
+ set errstring "args must contain a hyphen; does this test\
+ have no numeric args?"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return
+ }
+
+ if { $gen_upgrade == 1 && $upgrade_be == 1 } {
+ append largs " -lorder 4321 "
+ } elseif { $gen_upgrade == 1 && $upgrade_be != 1 } {
+ append largs " -lorder 1234 "
+ }
+
+ if { [is_rrecno $method] == 1 } {
+ append largs " -renumber "
+ } elseif { [is_rbtree $method] == 1 } {
+ append largs " -recnum "
+ } elseif { [is_dbtree $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddbtree $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_dhash $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_queueext $method] == 1 } {
+ append largs " -extent 2 "
+ }
+
+ if {[is_fixed_length $method] == 1} {
+ append largs " -len $fixed_len -pad $fixed_pad "
+ }
+ return $largs
+}
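+
+# For example (the extra argument string is illustrative), with gen_upgrade
+# off:
+#	convert_args -rrecno "-cachesize {0 1048576 0}"
+# returns the argument string with " -renumber " appended; fixed-length
+# methods additionally get " -len $fixed_len -pad $fixed_pad " appended.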
+
+proc is_btree { method } {
+ set names { -btree BTREE DB_BTREE bt btree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dbtree { method } {
+ set names { -dbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddbtree { method } {
+ set names { -ddbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rbtree { method } {
+ set names { -rbtree rbtree RBTREE db_rbtree DB_RBTREE rbt }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_recno { method } {
+ set names { -recno DB_RECNO RECNO db_recno rec recno}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rrecno { method } {
+ set names { -rrecno rrecno RRECNO db_rrecno DB_RRECNO rrec }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_frecno { method } {
+ set names { -frecno frecno frec FRECNO db_frecno DB_FRECNO}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_hash { method } {
+ set names { -hash DB_HASH HASH db_hash h hash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dhash { method } {
+ set names { -dhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queue { method } {
+ if { [is_queueext $method] == 1 } {
+ return 1
+ }
+
+ set names { -queue DB_QUEUE QUEUE db_queue q queue qam }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queueext { method } {
+ set names { -queueextent queueextent QUEUEEXTENT qe qamext \
+ queueext -queueext }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_record_based { method } {
+ if { [is_recno $method] || [is_frecno $method] ||
+ [is_rrecno $method] || [is_queue $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_fixed_length { method } {
+ if { [is_queue $method] || [is_frecno $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Sort lines in file $in and write results to file $out.
+# This is a more portable alternative to execing the sort command,
+# which has assorted issues on NT [#1576].
+# The addition of a "-n" argument will sort numerically.
+proc filesort { in out { arg "" } } {
+ set i [open $in r]
+
+ set ilines {}
+ while { [gets $i line] >= 0 } {
+ lappend ilines $line
+ }
+
+ if { [string compare $arg "-n"] == 0 } {
+ set olines [lsort -integer $ilines]
+ } else {
+ set olines [lsort $ilines]
+ }
+
+ close $i
+
+ set o [open $out w]
+ foreach line $olines {
+ puts $o $line
+ }
+
+ close $o
+}
+
+# Print lines up to the nth line of infile out to outfile, inclusive.
+# The optional beg argument tells us where to start.
+proc filehead { n infile outfile { beg 0 } } {
+ set in [open $infile r]
+ set out [open $outfile w]
+
+ # Sed uses 1-based line numbers, and so we do too.
+ for { set i 1 } { $i < $beg } { incr i } {
+ if { [gets $in junk] < 0 } {
+ break
+ }
+ }
+
+ for { } { $i <= $n } { incr i } {
+ if { [gets $in line] < 0 } {
+ break
+ }
+ puts $out $line
+ }
+
+ close $in
+ close $out
+}
+
+# Remove file (this replaces $RM).
+# Usage: fileremove filenames =~ rm; fileremove -f filenames =~ rm -rf.
+proc fileremove { args } {
+ set forceflag ""
+ foreach a $args {
+ if { [string first - $a] == 0 } {
+ # It's a flag. Better be f.
+ if { [string first f $a] != 1 } {
+ return -code error "bad flag to fileremove"
+ } else {
+ set forceflag "-force"
+ }
+ } else {
+ eval {file delete $forceflag $a}
+ }
+ }
+}
+
+proc findfail { args } {
+ foreach a $args {
+ if { [file exists $a] == 0 } {
+ continue
+ }
+ set f [open $a r]
+ while { [gets $f line] >= 0 } {
+ if { [string first FAIL $line] == 0 } {
+ close $f
+ return 1
+ }
+ }
+ close $f
+ }
+ return 0
+}
+
+# Sleep for s seconds.
+proc tclsleep { s } {
+ # On Windows, the system time-of-day clock may update as much
+ # as 55 ms late due to interrupt timing. Don't take any
+ # chances; sleep extra-long so that when tclsleep 1 returns,
+ # it's guaranteed to be a new second.
+ after [expr $s * 1000 + 56]
+}
+
+# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical.
+proc filecmp { file_a file_b } {
+ set fda [open $file_a r]
+ set fdb [open $file_b r]
+
+ set nra 0
+ set nrb 0
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ while { $nra >= 0 && $nrb >= 0 } {
+ set nra [gets $fda aline]
+ set nrb [gets $fdb bline]
+
+ if { $nra != $nrb || [string compare $aline $bline] != 0} {
+ close $fda
+ close $fdb
+ return 1
+ }
+ }
+
+ close $fda
+ close $fdb
+ return 0
+}
+
+# Verify all .db files in the specified directory.
+proc verify_dir { \
+ {directory "./TESTDIR"} { pref "" } { noredo 0 } { quiet 0 } } {
+ # If we're doing database verification between tests, we don't
+ # want to do verification twice without an intervening cleanup--some
+ # test was skipped. Always verify by default (noredo == 0) so
+ # that explicit calls to verify_dir during tests don't require
+ # cleanup commands.
+ if { $noredo == 1 } {
+ if { [file exists $directory/NOREVERIFY] == 1 } {
+ if { $quiet == 0 } {
+ puts "Skipping verification."
+ }
+ return
+ }
+ set f [open $directory/NOREVERIFY w]
+ close $f
+ }
+
+ if { [catch {glob $directory/*.db} dbs] != 0 } {
+ # No files matched
+ return
+ }
+ if { [file exists /dev/stderr] == 1 } {
+ set errfilearg "-errfile /dev/stderr "
+ } else {
+ set errfilearg ""
+ }
+ set errpfxarg {-errpfx "FAIL: verify" }
+ set errarg $errfilearg$errpfxarg
+ set ret 0
+ foreach db $dbs {
+ if { [catch {eval {berkdb dbverify} $errarg $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Verification of $db failed."
+ set ret 1
+ } else {
+ error_check_good verify:$db $res 0
+ if { $quiet == 0 } {
+ puts "${pref}Verification of $db succeeded."
+ }
+ }
+ }
+ return $ret
+}
+
+# Generate randomly ordered, guaranteed-unique four-character strings that can
+# be used to differentiate duplicates without creating duplicate duplicates.
+# (test031 & test032) randstring_init is required before the first call to
+# randstring and initializes things for up to $i distinct strings; randstring
+# gets the next string.
+proc randstring_init { i } {
+ global rs_int_list alphabet
+
+ # Fail if we can't generate sufficient unique strings.
+ if { $i > [expr 26 * 26 * 26 * 26] } {
+ set errstring\
+ "Duplicate set too large for random string generator"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set rs_int_list {}
+
+ # generate alphabet array
+ for { set j 0 } { $j < 26 } { incr j } {
+ set a($j) [string index $alphabet $j]
+ }
+
+ # Generate a list with $i elements, { aaaa, aaab, ... aaaz, aaba ...}
+ for { set d1 0 ; set j 0 } { $d1 < 26 && $j < $i } { incr d1 } {
+ for { set d2 0 } { $d2 < 26 && $j < $i } { incr d2 } {
+ for { set d3 0 } { $d3 < 26 && $j < $i } { incr d3 } {
+ for { set d4 0 } { $d4 < 26 && $j < $i } \
+ { incr d4 } {
+ lappend rs_int_list \
+ $a($d1)$a($d2)$a($d3)$a($d4)
+ incr j
+ }
+ }
+ }
+ }
+
+ # Randomize the list.
+ set rs_int_list [randomize_list $rs_int_list]
+}
+
+# Randomize a list. Returns a randomly-reordered copy of l.
+proc randomize_list { l } {
+ set i [llength $l]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ # Pick a random element from $j to the end
+ set k [berkdb random_int $j [expr $i - 1]]
+
+ # Swap it with element $j
+ set t1 [lindex $l $j]
+ set t2 [lindex $l $k]
+
+ set l [lreplace $l $j $j $t2]
+ set l [lreplace $l $k $k $t1]
+ }
+
+ return $l
+}
+
+proc randstring {} {
+ global rs_int_list
+
+ if { [info exists rs_int_list] == 0 || [llength $rs_int_list] == 0 } {
+ set errstring "randstring uninitialized or used too often"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set item [lindex $rs_int_list 0]
+ set rs_int_list [lreplace $rs_int_list 0 0]
+
+ return $item
+}
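+
+# Example usage (the count and variable name are illustrative): initialize
+# for 100 unique strings, then draw them one at a time:
+#
+#	randstring_init 100
+#	for { set i 0 } { $i < 100 } { incr i } {
+#		lappend dupdata [randstring]
+#	}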
+
+# Takes a variable-length arg list, and returns a list containing the list of
+# the non-hyphenated-flag arguments, followed by a list of each alphanumeric
+# flag it finds.
+proc extractflags { args } {
+ set inflags 1
+ set flags {}
+ while { $inflags == 1 } {
+ set curarg [lindex $args 0]
+ if { [string first "-" $curarg] == 0 } {
+ set i 1
+ while {[string length [set f \
+ [string index $curarg $i]]] > 0 } {
+ incr i
+ if { [string compare $f "-"] == 0 } {
+ set inflags 0
+ break
+ } else {
+ lappend flags $f
+ }
+ }
+ set args [lrange $args 1 end]
+ } else {
+ set inflags 0
+ }
+ }
+ return [list $args $flags]
+}
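+
+# For example, [extractflags -ab -c foo bar] returns {{foo bar} {a b c}};
+# a "--" argument ends flag processing early.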
+
+# Wrapper for berkdb open, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_open { args } {
+ set errargs {}
+ if { [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L "
+ }
+
+ eval {berkdb open} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_open_noerr { args } {
+ eval {berkdb open} $args
+}
+
+proc check_handles { {outf stdout} } {
+ global ohandles
+
+ set handles [berkdb handles]
+ if {[llength $handles] != [llength $ohandles]} {
+ puts $outf "WARNING: Open handles during cleanup: $handles"
+ }
+ set ohandles $handles
+}
+
+proc open_handles { } {
+ return [llength [berkdb handles]]
+}
+
+proc move_file_extent { dir dbfile tag op } {
+ set files [get_extfiles $dir $dbfile $tag]
+ foreach extfile $files {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ #
+ # We can either copy or rename
+ #
+ file $op -force $extfile $dbq
+ }
+}
+
+proc copy_extent_file { dir dbfile tag { op copy } } {
+ set files [get_extfiles $dir $dbfile ""]
+ foreach extfile $files {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ file $op -force $extfile $dir/__dbq.$dbfile.$tag.$extnum
+ }
+}
+
+proc get_extfiles { dir dbfile tag } {
+ if { $tag == "" } {
+ set filepat $dir/__dbq.$dbfile.\[0-9\]*
+ } else {
+ set filepat $dir/__dbq.$dbfile.$tag.\[0-9\]*
+ }
+ return [glob -nocomplain -- $filepat]
+}
+
+proc make_ext_filename { dir dbfile extnum } {
+ return $dir/__dbq.$dbfile.$extnum
+}
+
+# All pids for Windows 9X are negative values. When we want to have
+# unsigned int values, unique to the process, we'll take the absolute
+# value of the pid. This avoids unsigned/signed mistakes, yet
+# guarantees uniqueness, since each system has pids that are all
+# either positive or negative.
+#
+proc sanitized_pid { } {
+ set mypid [pid]
+ if { $mypid < 0 } {
+ set mypid [expr - $mypid]
+ }
+ puts "PID: [pid] $mypid\n"
+ return $mypid
+}
+
+#
+# Extract the page size field from a stat record. Return -1 if
+# none is found.
+#
+proc get_pagesize { stat } {
+ foreach field $stat {
+ set title [lindex $field 0]
+ if {[string compare $title "Page size"] == 0} {
+ return [lindex $field 1]
+ }
+ }
+ return -1
+}
diff --git a/bdb/test/txn.tcl b/bdb/test/txn.tcl
new file mode 100644
index 00000000000..904ef5fdca0
--- /dev/null
+++ b/bdb/test/txn.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn.tcl,v 11.12 2000/12/31 19:26:23 bostic Exp $
+#
+# Options are:
+# -dir <directory in which to run the test>
+# -max <max number of concurrent transactions>
+# -iterations <iterations>
+# -stat
+proc txn_usage {} {
+ puts "txn -dir <directory> -iterations <number of ops> \
+ -max <max number of transactions> -stat"
+}
+
+proc txntest { args } {
+ source ./include.tcl
+
+ # Set defaults
+ set iterations 50
+ set max 1024
+ set dostat 0
+ set flags ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -f.* { incr i; set flags [lindex $args $i] }
+ -i.* { incr i; set iterations [lindex $args $i] }
+ -m.* { incr i; set max [lindex $args $i] }
+ -s.* { set dostat 1 }
+ default {
+ puts -nonewline "FAIL:[timestamp] Usage: "
+ txn_usage
+ return
+ }
+ }
+ }
+ if { $max < $iterations } {
+ set max $iterations
+ }
+
+ # Now run the various functionality tests
+ txn001 $testdir $max $iterations $flags
+ txn002 $testdir $max $iterations
+}
+
+proc txn001 { dir max ntxns flags} {
+ source ./include.tcl
+
+ puts "Txn001: Basic begin, commit, abort"
+
+ # Open environment
+ env_cleanup $dir
+
+ set env [eval {berkdb \
+ env -create -mode 0644 -txn -txn_max $max -home $dir} $flags]
+ error_check_good env_open [is_valid_env $env] TRUE
+ txn001_suba $ntxns $env
+ txn001_subb $ntxns $env
+ txn001_subc $ntxns $env
+ # Close and unlink the file
+ error_check_good env_close:$env [$env close] 0
+}
+
+proc txn001_suba { ntxns env } {
+ source ./include.tcl
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "Txn001.a: Beginning/Committing $ntxns Transactions in $env"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+}
+
+proc txn001_subb { ntxns env } {
+ # We will create a bunch of transactions and abort them.
+ set txn_list {}
+ set tid_list {}
+ puts "Txn001.b: Beginning/Aborting Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now abort them all
+ foreach t $txn_list {
+ error_check_good txn_abort:$t [$t abort] 0
+ }
+}
+
+proc txn001_subc { ntxns env } {
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "Txn001.c: Beginning/Prepare/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now prepare them all
+ foreach t $txn_list {
+ error_check_good txn_prepare:$t [$t prepare] 0
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+}
+
+# Verify that read-only transactions do not create any log records
+proc txn002 { dir max ntxns } {
+ source ./include.tcl
+
+ puts "Txn002: Read-only transaction test"
+
+ env_cleanup $dir
+ set env [berkdb \
+ env -create -mode 0644 -txn -txn_max $max -home $dir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "Txn002.a: Beginning/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Now verify that there aren't any log records.
+ set r [$env log_get -first]
+ error_check_good log_get:$r [llength $r] 0
+
+ error_check_good env_close:$r [$env close] 0
+}
diff --git a/bdb/test/update.tcl b/bdb/test/update.tcl
new file mode 100644
index 00000000000..81fc9ba9e2c
--- /dev/null
+++ b/bdb/test/update.tcl
@@ -0,0 +1,92 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: update.tcl,v 11.9 2000/10/27 13:23:56 sue Exp $
+source ./include.tcl
+global update_dir
+set update_dir "$test_path/update_test"
+
+proc update { } {
+ source ./include.tcl
+ global update_dir
+
+ foreach version [glob $update_dir/*] {
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $update_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $update_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+ foreach endianness {"le" "be"} {
+ puts "Update:\
+ $version $method $name $endianness"
+ set ret [catch {_update $update_dir $testdir $version $method $name $endianness 1 1} message]
+ if { $ret != 0 } {
+ puts $message
+ }
+ }
+ }
+ }
+ }
+}
+
+proc _update { source_dir temp_dir \
+ version method file endianness do_db_load_test do_update_test } {
+ source include.tcl
+ global errorInfo
+
+ cleanup $temp_dir NULL
+
+ exec sh -c \
+"gzcat $source_dir/$version/$method/$file.tar.gz | (cd $temp_dir && tar xf -)"
+
+ if { $do_db_load_test } {
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update load: $version $method $file $message" $ret 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f "$temp_dir/update.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update dump: $version $method $file $message" $ret 0
+
+ error_check_good "Update diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.1.2: $version $method $file" $ret ""
+ }
+
+ if { $do_update_test } {
+ set ret [catch \
+ {berkdb open -update "$temp_dir/$file-$endianness.db"} db]
+ if { $ret == 1 } {
+ if { ![is_substr $errorInfo "version upgrade"] } {
+ set fnl [string first "\n" $errorInfo]
+ set theError \
+ [string range $errorInfo 0 [expr $fnl - 1]]
+ error $theError
+ }
+ } else {
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f \
+ "$temp_dir/update.dump" \
+ "$temp_dir/$file-$endianness.db"} message]
+ error_check_good "Update\
+ dump: $version $method $file $message" $ret 0
+
+ error_check_good \
+ "Update diff.2: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" \
+ "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.2: $version $method $file" $ret ""
+ }
+ }
+}
diff --git a/bdb/test/upgrade.tcl b/bdb/test/upgrade.tcl
new file mode 100644
index 00000000000..0d2f656bcf9
--- /dev/null
+++ b/bdb/test/upgrade.tcl
@@ -0,0 +1,279 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999, 2000
+# Sleepycat Software. All rights reserved.
+#
+# $Id: upgrade.tcl,v 11.16 2000/10/27 13:23:56 sue Exp $
+
+source ./include.tcl
+
+global upgrade_dir
+# set upgrade_dir "$test_path/upgrade_test"
+set upgrade_dir "$test_path/upgrade/databases"
+
+global gen_upgrade
+set gen_upgrade 0
+
+global upgrade_dir
+global upgrade_be
+global upgrade_method
+
+proc upgrade { { archived_test_loc "DEFAULT" } } {
+ source ./include.tcl
+ global upgrade_dir
+
+ set saved_upgrade_dir $upgrade_dir
+
+ puts -nonewline "Upgrade test: "
+ if { $archived_test_loc == "DEFAULT" } {
+ puts "using default archived databases in $upgrade_dir."
+ } else {
+ set upgrade_dir $archived_test_loc
+ puts "using archived databases in $upgrade_dir."
+ }
+
+ foreach version [glob $upgrade_dir/*] {
+ if { [string first CVS $version] != -1 } { continue }
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $upgrade_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $upgrade_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+
+ cleanup $testdir NULL
+ #puts "$upgrade_dir/$version/$method/$name.tar.gz"
+ set curdir [pwd]
+ cd $testdir
+ set tarfd [open "|tar xf -" w]
+ cd $curdir
+
+ catch {exec gunzip -c "$upgrade_dir/$version/$method/$name.tar.gz" >@$tarfd}
+ close $tarfd
+
+ set f [open $testdir/$name.tcldump {RDWR CREAT}]
+ close $f
+
+ # It may seem suboptimal to exec a separate
+ # tclsh for each subtest, but this is
+ # necessary to keep the testing process
+ # from consuming a tremendous amount of
+ # memory.
+ if { [file exists $testdir/$name-le.db] } {
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _upgrade_test $testdir $version\
+ $method\
+ $name le"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+ }
+
+ if { [file exists $testdir/$name-be.db] } {
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _upgrade_test $testdir $version\
+ $method\
+ $name be"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+ }
+
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _db_load_test $testdir $version $method\
+ $name"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+
+ }
+ }
+ }
+ set upgrade_dir $saved_upgrade_dir
+
+ # Don't provide a return value.
+ return
+}
+
+proc _upgrade_test { temp_dir version method file endianness } {
+ source include.tcl
+ global errorInfo
+
+ puts "Upgrade: $version $method $file $endianness"
+
+ set ret [berkdb upgrade "$temp_dir/$file-$endianness.db"]
+ error_check_good dbupgrade $ret 0
+
+ upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.$endianness: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc _db_load_test { temp_dir version method file } {
+ source include.tcl
+ global errorInfo
+
+ puts "db_load: $version $method $file"
+
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/upgrade.db"} message]
+ error_check_good \
+ "Upgrade load: $version $method $file $message" $ret 0
+
+ upgrade_dump "$temp_dir/upgrade.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc gen_upgrade { dir } {
+ global gen_upgrade
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global runtests
+ source ./include.tcl
+
+ set gen_upgrade 1
+ set upgrade_dir $dir
+
+ foreach upgrade_be { 0 1 } {
+ foreach i "btree rbtree hash recno rrecno queue frecno" {
+ puts "Running $i tests"
+ set upgrade_method $i
+ set start 1
+ for { set j $start } { $j <= $runtests } {incr j} {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl;\
+ global upgrade_be;\
+ set upgrade_be $upgrade_be;\
+ run_method -$i $j $j"} res] {
+ puts "FAIL: [format "test%03d" $j] $i"
+ }
+ puts $res
+ cleanup $testdir NULL
+ }
+ }
+ }
+
+ set gen_upgrade 0
+}
+
+proc upgrade_dump { database file {stripnulls 0} } {
+ global errorInfo
+
+ set db [berkdb open $database]
+ set dbc [$db cursor]
+
+ set f [open $file w+]
+ fconfigure $f -encoding binary -translation binary
+
+ #
+ # Get a sorted list of keys
+ #
+ set key_list ""
+ set pair [$dbc get -first]
+
+ while { 1 } {
+ if { [llength $pair] == 0 } {
+ break
+ }
+ set k [lindex [lindex $pair 0] 0]
+ lappend key_list $k
+ set pair [$dbc get -next]
+ }
+
+ # Discard duplicated keys; we now have a key for each
+ # duplicate, not each unique key, and we don't want to get each
+ # duplicate multiple times when we iterate over key_list.
+ set uniq_keys ""
+ foreach key $key_list {
+ if { [info exists existence_list($key)] == 0 } {
+ lappend uniq_keys $key
+ }
+ set existence_list($key) 1
+ }
+ set key_list $uniq_keys
+
+ set key_list [lsort -command _comp $key_list]
+
+ #
+ # Get the data for each key
+ #
+ set i 0
+ foreach key $key_list {
+ set pair [$dbc get -set $key]
+ if { $stripnulls != 0 } {
+ # the Tcl interface to db versions before 3.X
+ # added nulls at the end of all keys and data, so
+ # we provide functionality to strip that out.
+ set key [strip_null $key]
+ }
+ set data_list {}
+ catch { while { [llength $pair] != 0 } {
+ set data [lindex [lindex $pair 0] 1]
+ if { $stripnulls != 0 } {
+ set data [strip_null $data]
+ }
+ lappend data_list [list $data]
+ set pair [$dbc get -nextdup]
+ } }
+ #lsort -command _comp data_list
+ set data_list [lsort -command _comp $data_list]
+ puts -nonewline $f [binary format i [string length $key]]
+ puts -nonewline $f $key
+ puts -nonewline $f [binary format i [llength $data_list]]
+ for { set j 0 } { $j < [llength $data_list] } { incr j } {
+ puts -nonewline $f [binary format i [string length [concat [lindex $data_list $j]]]]
+ puts -nonewline $f [concat [lindex $data_list $j]]
+ }
+ if { [llength $data_list] == 0 } {
+ puts "WARNING: zero-length data list"
+ }
+ incr i
+ }
+
+ close $f
+}
+
+proc _comp { a b } {
+ if { 0 } {
+ # XXX
+ set a [strip_null [concat $a]]
+ set b [strip_null [concat $b]]
+ #return [expr [concat $a] < [concat $b]]
+ } else {
+ set an [string first "\0" $a]
+ set bn [string first "\0" $b]
+
+ if { $an != -1 } {
+ set a [string range $a 0 [expr $an - 1]]
+ }
+ if { $bn != -1 } {
+ set b [string range $b 0 [expr $bn - 1]]
+ }
+ }
+ #puts "$a $b"
+ return [string compare $a $b]
+}
+
+proc strip_null { str } {
+ set len [string length $str]
+ set last [expr $len - 1]
+
+ set termchar [string range $str $last $last]
+ if { [string compare $termchar \0] == 0 } {
+ set ret [string range $str 0 [expr $last - 1]]
+ } else {
+ set ret $str
+ }
+
+ return $ret
+}
diff --git a/bdb/test/upgrade/README b/bdb/test/upgrade/README
new file mode 100644
index 00000000000..1afada2ecf4
--- /dev/null
+++ b/bdb/test/upgrade/README
@@ -0,0 +1,85 @@
+ The Berkeley DB Upgrade Tests
+
+Quick ref:
+
+ Running the tests:
+ (in tclsh)
+ % source ../test/test.tcl
+ % upgrade
+
+ Generating the test databases:
+ (in tclsh)
+ % source ../test/test.tcl
+ % gen_upgrade /where/you/want/them
+
+ (in your shell)
+ $ cd /where/you/want/them
+ $ perl $db_dir/upgrade/scripts/pack-3.0.pl
+ $ mv 3.0 $db_dir/upgrade/databases
+
+What they are:
+
+The DB upgrade tests are a framework for testing two main features of
+Berkeley DB: the db_dump utility, and the "DB_UPGRADE" flag to DB->open.
+They work by taking a tarred, gzipped set of test databases and dumps and
+verifying that the set of items in the original database (as dumped by the
+version of DB that created it) matches both the set in the upgraded database
+and the set in a new database generated by db_loading a db_dump.
+
+In db 3.X and higher, the upgrade test is repeated on a database with
+the opposite endianness to the system the database was generated on.
+
+How to generate test databases:
+
+Ordinarily, this is something that only very rarely has to occur;
+an archive of upgrade test databases can and should be kept, so ideally
+the generation step only needs to be done once for each major DB release.
+
+To generate the test databases, execute the command "gen_upgrade <dir>"
+inside a tclsh. The method tests will run twice, once for each endianness,
+and all the databases will be saved in a hierarchy named by <dir>.
+
+Once the databases have been built, the archives expected by the upgrade tests
+must be built using the "pack" script, in upgrade/scripts/pack-<version>.pl.
+This script must be edited slightly to specify the location on a given system
+of the DB source tree and utilities; it then converts the set of databases
+under the current working directory into a set of .tar.gz files containing
+the databases as well as flat files with their contents in item-by-item and
+db_dump formats.
+
+How to run the upgrade tests:
+
+Run "upgrade" from tclsh in the DB build directory. By default, this
+looks in upgrade/databases, in the DB source tree. An optional first argument
+can be used to specify an alternate directory.
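+
+For example, to run against an alternate archive location (the path below is
+illustrative):
+
+ (in tclsh)
+ % source ../test/test.tcl
+ % upgrade /work/my_upgrade_databases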
+
+A note on 2.X tests:
+
+The 2.X packing script, as well as a patch against a 2.6.6 test directory
+to allow it to generate test databases, is in upgrade/generate-2.X.
+
+Note that the upgrade tests can be *run* on the 2.X test archives
+without anything in this directory. It is provided only for
+archival reasons, in case there is ever reason to generate a new
+set of test databases.
+
+XXX: Note also that it quite likely has paths hard-coded for a specific
+system that is not yours.
+
+Known Issues:
+
+1. The following 2.X databases trigger a bug in the db 2.X hash code.
+This bug affects only empty and near-empty databases, and has been
+corrected in db 3.X, but it will prevent the following from passing
+the db_dump test. (They have been removed from the canonical database
+collection.)
+
+ 2.X hash -- test026
+ 2.X hash -- test038
+ 2.X hash -- test039
+ 2.X hash -- test040
+
+2. The 2.X recno versions of test043 cannot be made to pass the db_dump
+test because the 2.X version of db_dump has no -k flag and cannot preserve
+sparsely populated databases.
diff --git a/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl b/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
new file mode 100644
index 00000000000..f031d46ca62
--- /dev/null
+++ b/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl
+
+use strict;
+use Archive::Tar;
+
+my $subdir;
+my $file;
+my $archive_name;
+
+my $version = "2.6.6";
+my $build_dir = "/work/db/upgrade/db-2.6.6/build_unix";
+my $db_dump_path = "$build_dir/db_dump";
+my $pwd = `pwd`;
+
+$| = 1;
+
+chomp( $pwd );
+
+opendir( DIR, $version . "le" ) || die;
+while( $subdir = readdir( DIR ) )
+{
+ if( $subdir !~ m{^\.\.?$} )
+ {
+ opendir( SUBDIR, $version . "le/$subdir" ) || die;
+ while( $file = readdir( SUBDIR ) )
+ {
+ if( $file !~ m{^\.\.?$} )
+ {
+ print "[" . localtime() . "] " . "$subdir $file", "\n";
+
+ eval
+ {
+ my $data;
+ my $archive;
+
+ system( "mkdir", "-p", "$version/$subdir" );
+ $file =~ m{(.*)\.};
+ $archive_name = "$1";
+ $archive_name =~ s{Test}{test};
+ $archive = Archive::Tar->new();
+ $archive->add_data( "$archive_name-le.db",
+ read_file( $version . "le/$subdir/$file" ) );
+# $archive->add_data( "$archive_name-be.db",
+# read_file( $version . "be/$subdir/$file" ) );
+ $archive->add_data( "$archive_name.dump",
+ db_dump( "$pwd/$version" . "le/$subdir/$file" ) );
+ $data = tcl_dump( "$pwd/$version" . "le/$subdir/$file" );
+ $archive->add_data( "$archive_name.tcldump", $data );
+ $archive->write( "$version/$subdir/$archive_name.tar.gz", 9 );
+ };
+ if( $@ )
+ {
+ print( "Could not process $file: $@\n" );
+ }
+ }
+ }
+ }
+}
+
+sub read_file
+{
+ my ($file) = @_;
+ my $data;
+
+ open( FILE, "<$file" ) || die;
+ read( FILE, $data, -s $file );
+ close( FILE );
+
+ return $data;
+}
+
+sub db_dump
+{
+ my ($file) = @_;
+
+ #print $file, "\n";
+ unlink( "temp.dump" );
+ system( "sh", "-c", "$db_dump_path $file >temp.dump" ) && die;
+ if( -e "temp.dump" )
+ {
+ return read_file( "temp.dump" );
+ }
+ else
+ {
+ die "db_dump failure: $file\n";
+ }
+}
+
+sub tcl_dump
+{
+ my ($file) = @_;
+ my $up_dump_args = "";
+
+ if ($file =~ /test012/) {
+ $up_dump_args .= "1";
+ }
+
+ unlink( "temp.dump" );
+ open( TCL, "|$build_dir/dbtest" );
+print TCL <<END;
+cd $build_dir
+source ../test/test.tcl
+upgrade_dump $file $pwd/temp.dump $up_dump_args
+END
+ close( TCL );
+ if( -e "temp.dump" )
+ {
+ return read_file( "temp.dump" );
+ }
+ else
+ {
+ die "TCL dump failure: $file\n";
+ }
+}
diff --git a/bdb/test/upgrade/generate-2.X/test-2.6.patch b/bdb/test/upgrade/generate-2.X/test-2.6.patch
new file mode 100644
index 00000000000..557e8061eae
--- /dev/null
+++ b/bdb/test/upgrade/generate-2.X/test-2.6.patch
@@ -0,0 +1,379 @@
+diff -crN test.orig/test.tcl test/test.tcl
+*** test.orig/test.tcl Fri Dec 11 14:56:26 1998
+--- test/test.tcl Mon Oct 4 15:26:16 1999
+***************
+*** 8,13 ****
+--- 8,14 ----
+ source ./include.tcl
+ source ../test/testutils.tcl
+ source ../test/byteorder.tcl
++ source ../test/upgrade.tcl
+
+ set testdir ./TESTDIR
+ if { [file exists $testdir] != 1 } {
+***************
+*** 114,119 ****
+--- 115,124 ----
+ global debug_print
+ global debug_on
+ global runtests
++
++ global __method
++ set __method $method
++
+ if { $stop == 0 } {
+ set stop $runtests
+ }
+diff -crN test.orig/testutils.tcl test/testutils.tcl
+*** test.orig/testutils.tcl Tue Dec 15 07:58:51 1998
+--- test/testutils.tcl Wed Oct 6 17:40:45 1999
+***************
+*** 680,690 ****
+--- 680,698 ----
+
+ proc cleanup { dir } {
+ source ./include.tcl
++ global __method
++ global errorInfo
+ # Remove the database and environment.
+ txn_unlink $dir 1
+ memp_unlink $dir 1
+ log_unlink $dir 1
+ lock_unlink $dir 1
++
++ catch { exec mkdir -p /work/upgrade/2.6/$__method } res
++ puts $res
++ catch { exec sh -c "mv $dir/*.db /work/upgrade/2.6/$__method" } res
++ puts $res
++
+ set ret [catch { glob $dir/* } result]
+ if { $ret == 0 } {
+ eval exec $RM -rf $result
+diff -crN test.orig/upgrade.tcl test/upgrade.tcl
+*** test.orig/upgrade.tcl Wed Dec 31 19:00:00 1969
+--- test/upgrade.tcl Mon Oct 18 21:22:39 1999
+***************
+*** 0 ****
+--- 1,322 ----
++ # See the file LICENSE for redistribution information.
++ #
++ # Copyright (c) 1999
++ # Sleepycat Software. All rights reserved.
++ #
++ # @(#)upgrade.tcl 11.1 (Sleepycat) 8/23/99
++ #
++ source ./include.tcl
++ global gen_upgrade
++ set gen_upgrade 0
++ global upgrade_dir
++ set upgrade_dir "/work/upgrade/DOTEST"
++ global upgrade_be
++ global upgrade_method
++
++ proc upgrade { } {
++ source ./include.tcl
++ global upgrade_dir
++
++ foreach version [glob $upgrade_dir/*] {
++ regexp \[^\/\]*$ $version version
++ foreach method [glob $upgrade_dir/$version/*] {
++ regexp \[^\/\]*$ $method method
++ foreach file [glob $upgrade_dir/$version/$method/*] {
++ puts $file
++ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
++ foreach endianness {"le" "be"} {
++ puts "Update: $version $method $name $endianness"
++ set ret [catch {_upgrade $upgrade_dir $testdir $version $method $name $endianness 1 1} message]
++ if { $ret != 0 } {
++ puts $message
++ }
++ }
++ }
++ }
++ }
++ }
++
++ proc _upgrade { source_dir temp_dir version method file endianness do_db_load_test do_upgrade_test } {
++ source include.tcl
++ global errorInfo
++
++ cleanup $temp_dir
++
++ exec tar zxf "$source_dir/$version/$method/$file.tar.gz" -C $temp_dir
++
++ if { $do_db_load_test } {
++ set ret [catch \
++ {exec ./db_load -f "$temp_dir/$file.dump" \
++ "$temp_dir/upgrade.db"} message]
++ error_check_good \
++ "Update load: $version $method $file $message" $ret 0
++
++ set ret [catch \
++ {exec ./db_dump -f "$temp_dir/upgrade.dump" \
++ "$temp_dir/upgrade.db"} message]
++ error_check_good \
++ "Update dump: $version $method $file $message" $ret 0
++
++ error_check_good "Update diff.1.1: $version $method $file" \
++ [catch { exec $CMP "$temp_dir/$file.dump" "$temp_dir/upgrade.dump" } ret] 0
++ error_check_good "Update diff.1.2: $version $method $file" $ret ""
++ }
++
++ if { $do_upgrade_test } {
++ set ret [catch {berkdb open -upgrade "$temp_dir/$file-$endianness.db"} db]
++ if { $ret == 1 } {
++ if { ![is_substr $errorInfo "version upgrade"] } {
++ set fnl [string first "\n" $errorInfo]
++ set theError [string range $errorInfo 0 [expr $fnl - 1]]
++ error $theError
++ }
++ } else {
++ error_check_good dbopen [is_valid_db $db] TRUE
++ error_check_good dbclose [$db close] 0
++
++ set ret [catch \
++ {exec ./db_dump -f "$temp_dir/upgrade.dump" \
++ "$temp_dir/$file-$endianness.db"} message]
++ error_check_good \
++ "Update dump: $version $method $file $message" $ret 0
++
++ error_check_good "Update diff.2: $version $method $file" \
++ [catch { exec $CMP "$temp_dir/$file.dump" "$temp_dir/upgrade.dump" } ret] 0
++ error_check_good "Update diff.2: $version $method $file" $ret ""
++ }
++ }
++ }
++
++ proc gen_upgrade { dir } {
++ global gen_upgrade
++ global upgrade_dir
++ global upgrade_be
++ global upgrade_method
++ global __method
++ global runtests
++ source ./include.tcl
++ set tclsh_path "/work/db/upgrade/db-2.6.6/build_unix/dbtest"
++
++ set gen_upgrade 1
++ set upgrade_dir $dir
++
++ foreach upgrade_be { 0 1 } {
++ foreach i "rrecno" {
++ # "hash btree rbtree hash recno rrecno"
++ puts "Running $i tests"
++ set upgrade_method $i
++ for { set j 1 } { $j <= $runtests } {incr j} {
++ if [catch {exec $tclsh_path \
++ << "source ../test/test.tcl; \
++ run_method $i $j $j"} res] {
++ puts "FAIL: [format "test%03d" $j] $i"
++ }
++ puts $res
++ set __method $i
++ cleanup $testdir
++ }
++ }
++ }
++
++ set gen_upgrade 0
++ }
++
++ proc upgrade_dump { database file {with_binkey 0} } {
++ source ./include.tcl
++ global errorInfo
++
++ set is_recno 0
++
++ set db [dbopen $database 0 0600 DB_UNKNOWN]
++ set dbc [$db cursor 0]
++
++ set f [open $file w+]
++ fconfigure $f -encoding binary -translation binary
++
++ #
++ # Get a sorted list of keys
++ #
++ set key_list ""
++ if { [catch {set pair [$dbc get "" $DB_FIRST]}] != 0 } {
++ set pair [$dbc get 0 $DB_FIRST]
++ set is_recno 1
++ }
++
++ while { 1 } {
++ if { [llength $pair] == 0 } {
++ break
++ }
++ lappend key_list [list [lindex $pair 0]]
++ set pair [$dbc get 0 $DB_NEXT]
++ }
++
++
++ # Discard duplicated keys; we now have a key for each
++ # duplicate, not each unique key, and we don't want to get each
++ # duplicate multiple times when we iterate over key_list.
++ set uniq_keys {}
++ foreach key $key_list {
++ if { [info exists existence_list($key)] == 0 } {
++ lappend uniq_keys [list $key]
++ }
++ set existence_list($key) 1
++ }
++ set key_list $uniq_keys
++
++ set key_list [lsort -command _comp $key_list]
++
++ #foreach llave $key_list {
++ # puts $llave
++ #}
++
++ #
++ # Get the data for each key
++ #
++
++ for { set i 0 } { $i < [llength $key_list] } { incr i } {
++ set key [concat [lindex $key_list $i]]
++ # XXX Gross awful hack. We want to DB_SET in the vast
++ # majority of cases, but DB_SET can't handle binary keys
++ # in the 2.X Tcl interface. So we look manually and linearly
++ # for the key we want if with_binkey == 1.
++ if { $with_binkey != 1 } {
++ set pair [$dbc get $key $DB_SET]
++ } else {
++ set pair [_search_binkey $key $dbc]
++ }
++ if { $is_recno != 1 } {
++ set key [upgrade_convkey $key $dbc]
++ }
++ #puts "pair:$pair:[lindex $pair 1]"
++ set data [lindex $pair 1]
++ set data [upgrade_convdata $data $dbc]
++ set data_list [list $data]
++ catch { while { $is_recno == 0 } {
++ set pair [$dbc get 0 $DB_NEXT_DUP]
++ if { [llength $pair] == 0 } {
++ break
++ }
++
++ set data [lindex $pair 1]
++ set data [upgrade_convdata $data $dbc]
++ lappend data_list [list $data]
++ } }
++ set data_list [lsort -command _comp $data_list]
++ puts -nonewline $f [binary format i [string length $key]]
++ puts -nonewline $f $key
++ puts -nonewline $f [binary format i [llength $data_list]]
++ for { set j 0 } { $j < [llength $data_list] } { incr j } {
++ puts -nonewline $f [binary format i [string length [concat [lindex $data_list $j]]]]
++ puts -nonewline $f [concat [lindex $data_list $j]]
++ }
++ }
++
++ close $f
++ }
++
++ proc _comp { a b } {
++ # return expr [[concat $a] < [concat $b]]
++ return [string compare [concat $a] [concat $b]]
++ }
++
++ # Converts a key to the format of keys in the 3.X Tcl interface
++ proc upgrade_convkey { key dbc } {
++ source ./include.tcl
++
++ # Stick a null on the end.
++ set k "$key\0"
++
++ set tmp $testdir/gb0
++
++ # Attempt a dbc getbinkey to get any additional parts of the key.
++ set dbt [$dbc getbinkey $tmp 0 $DB_CURRENT]
++
++ set tmpid [open $tmp r]
++ fconfigure $tmpid -encoding binary -translation binary
++ set cont [read $tmpid]
++
++ set k $k$cont
++
++ close $tmpid
++
++ exec $RM -f $tmp
++
++ return $k
++ }
++
++ # Converts a datum to the format of data in the 3.X Tcl interface
++ proc upgrade_convdata { data dbc } {
++ source ./include.tcl
++ set is_partial 0
++
++ # Get the datum out of "data"
++ if { [llength $data] == 1 } {
++ set d [lindex $data 0]
++ } elseif { [llength $data] == 2 } {
++ # It was a partial return; the first element is the number of leading NULs
++ set d [lindex $data 1]
++ set numnul [lindex $data 0]
++ while { $numnul > 0 } {
++ set d "\0$d"
++ incr numnul -1
++ }
++
++ # The old Tcl getbin and the old Tcl partial put
++ # interface are incompatible; we'll wind up returning
++ # the datum twice if we try a getbin now. So
++ # set a flag to avoid it.
++ set is_partial 1
++
++ } else {
++ set d $data
++ }
++
++
++ if { $is_partial != 1 } {
++
++ # Stick a null on the end.
++ set d "$d\0"
++
++ set tmp $testdir/gb1
++
++ # Attempt a dbc getbin to get any additional parts of the datum
++ # the Tcl interface has neglected.
++ set dbt [$dbc getbin $tmp 0 $DB_CURRENT]
++
++ set tmpid [open $tmp r]
++ fconfigure $tmpid -encoding binary -translation binary
++ set cont [read $tmpid]
++
++ set d $d$cont
++
++ #puts "$data->$d"
++
++ close $tmpid
++ }
++
++ return [list $d]
++ }
++
++ # Implement the DB_SET functionality, stupidly, in terms of DB_NEXT and
++ # manual comparisons. We have to use this instead of DB_SET for binary
++ # keys: DB_SET requires passing the key, and the old Tcl interface can't
++ # handle binary keys. So instead, we page through using DB_NEXT, which returns
++ # the binary keys only up to the first null, and compare to our specified
++ # key, which is similarly truncated.
++ #
++ # This is really slow, but is seldom used.
++ proc _search_binkey { key dbc } {
++ #puts "doing _search_binkey $key $dbc"
++ source ./include.tcl
++ set dbt [$dbc get 0 $DB_FIRST]
++ while { [llength $dbt] != 0 } {
++ set curkey [lindex $dbt 0]
++ if { [string compare $key $curkey] == 0 } {
++ return $dbt
++ }
++ set dbt [$dbc get 0 $DB_NEXT]
++ }
++
++ # We didn't find it. Return an empty list.
++ return {}
++ }
diff --git a/bdb/test/wordlist b/bdb/test/wordlist
new file mode 100644
index 00000000000..03ea15f7277
--- /dev/null
+++ b/bdb/test/wordlist
@@ -0,0 +1,10001 @@
+cooperate
+benighted
+apologist's
+addresser
+cataract
+colonially
+atoned
+avow
+bathroom
+anaesthesia
+columnated
+bogs
+astral
+barbed
+captives
+acclaims
+adjutants
+affidavits
+baptisms
+bubbling
+classic
+allaying
+component
+battlement
+backtrack
+
+courage
+bore
+advertisement
+attests
+bunny's
+airlifts
+cajole
+cataloging
+airily
+collected
+abridged
+compel
+aftermath
+barrow
+approve
+chillier
+bequest
+attendant
+abjures
+adjudication
+banished
+asymptotes
+borrower
+caustic
+claim
+cohabitation
+corporacies
+buoy
+benchmark's
+averting
+anecdote's
+caress
+annihilate
+cajoles
+anywhere
+apparitions
+coves
+bribed
+casually
+clue's
+asserted
+architects
+abstained
+attitude
+accumulating
+coalesced
+angelic
+agnostic
+breathed
+bother
+congregating
+amatory
+caging
+countryside
+chapel
+buttonhole
+bartenders
+bridging
+bombardment
+accurately
+confirmed
+alleviated
+acquiring
+bruise
+antelope
+albums
+allusive
+corker
+cavity's
+compliment
+climb
+caterpillar
+almond
+authenticated
+balkan
+assembly's
+acidity
+abases
+bonny
+been
+abbots
+abductor's
+aerials
+cancels
+chalked
+beeps
+affirms
+contrariness
+clearest
+appropriations
+critiquing
+affluence
+bouts
+abiding
+comprises
+brunches
+biology
+conceptualization's
+assaying
+abutter
+adorable
+beatable
+appenders
+aggressors
+agrarian
+bottleneck
+angled
+beholds
+bereaved
+creation
+animated
+candied
+bar
+aeronautics
+cousin's
+cleaver
+alienation
+billet
+bungler
+contention
+businessman
+braids
+assert
+boisterous
+consolidate
+breathing
+ballot
+averted
+conscientiously
+bellow
+brazenness
+coaches
+bulldog
+classify
+checksum
+almond's
+cornered
+caskets
+capacitors
+beefer
+connoisseurs
+consisted
+adore
+circumvented
+colonels
+addenda
+boost
+compatibility's
+bumblebee
+commonest
+containment
+active
+absorption's
+creaks
+administer
+beset
+aborted
+aforesaid
+aridity
+broken
+azimuths
+aerial
+addition's
+aggrieve
+anthology
+circuitous
+checks
+alley's
+beam
+boss
+corrupting
+absolutes
+asteroid's
+bandstands
+beatitude's
+analogue's
+busts
+confession
+bedstead
+affairs
+blackmailers
+collared
+buckboard
+assassin
+accessor
+adjudging
+binders
+constituent's
+blister
+aromas
+approved
+absorbent
+barbarously
+cat's
+builder
+brandish
+assailing
+constitute
+christening
+acutely
+amount
+blurry
+blocks
+advertise
+chain
+brigade's
+confusion
+beds
+arrangers
+colonizers
+beautifying
+bankruptcy
+bedazzles
+candidates
+clearness
+admonishment's
+behind
+abbreviations
+basting
+ballasts
+amateurism
+celled
+constituted
+bonfire
+bugled
+advisee's
+battled
+budded
+burners
+causeway's
+calibrate
+brambly
+befuddles
+azure
+busiest
+admiringly
+appropriator
+accumulator
+cables
+abhor
+civil
+botulinus
+creaked
+bismuth
+astronomical
+abscissas
+bodice
+aunt
+cascades
+cares
+comradeship
+assemblages
+boater
+bellmen
+admission's
+ambitious
+baldness
+abortive
+controlled
+chinked
+coded
+courtrooms
+arteriolar
+cooler's
+cared
+brewer
+christians
+barbecues
+contacts
+blackjack's
+buzzing
+blasters
+accords
+braziers
+allegretto
+catered
+breveting
+cleaning
+amicably
+bummed
+consulted
+allegro's
+accumulator's
+compartmented
+condemned
+concludes
+bitwise
+cheered
+appropriator's
+accessors
+casting
+carolina's
+accompanying
+budding
+correspond
+bach's
+angel's
+bearing
+arresters
+biweekly
+character
+badgering
+cantankerous
+avalanching
+adjudges
+barometer
+append
+continuations
+burped
+boxtop's
+abstention
+amp
+axiomatized
+bimonthlies
+aghast
+arresting
+breakwater's
+continuing
+bridle
+bobbin's
+antagonistically
+blindly
+biochemical
+biologically
+antifundamentalist
+confer
+cloudiness
+bonded
+comfortingly
+caption
+blackmailed
+bidders
+breakpoint
+brigadier
+criminals
+coyotes
+casserole's
+annex
+cereals
+breadboxes
+belgian
+conductivity
+counterexample
+anarchist
+couches
+atavistic
+clipped
+button
+axiomatic
+capping
+correcting
+chase
+chastise
+angle
+burnished
+beauteously
+antipodes
+crippling
+crowns
+amends
+bah
+brigadiers
+alleged
+correctives
+bristles
+buzzards
+barbs
+bagel
+adaptation
+caliber
+browner
+apprehensions
+bonnet
+anachronistically
+composites
+bothered
+assurer
+arc
+chaser
+bastards
+calmed
+bunches
+apocalypse
+countably
+crowned
+contrivance
+boomerang's
+airplane's
+boarded
+consumption
+attuning
+blamed
+cooing
+annihilation
+abused
+absence
+coin
+coronaries
+applicatively
+binomial
+ablates
+banishes
+boating
+companions
+bilking
+captivate
+comment
+claimants
+admonish
+ameliorated
+bankruptcies
+author
+cheat
+chocolates
+botch
+averring
+beneath
+crudely
+creeping
+acolytes
+ass's
+cheese's
+checksum's
+chillers
+bracelet
+archenemy
+assistantship
+baroque
+butterfly
+coolie's
+anecdote
+coring
+cleansing
+accreditation
+ceaselessly
+attitudes
+bag
+belong
+assented
+aped
+constrains
+balalaikas
+consent
+carpeting
+conspiracy
+allude
+contradictory
+adverb's
+constitutive
+arterial
+admirable
+begot
+affectation
+antiquate
+attribution
+competition's
+bovine
+commodores
+alerters
+abatements
+corks
+battlements
+cave
+buoys
+credible
+bowdlerizes
+connector
+amorphously
+boredom
+bashing
+creams
+arthropods
+amalgamated
+ballets
+chafe
+autograph
+age
+aid
+colleague's
+atrocious
+carbonizing
+chutes
+barbecued
+circuits
+bandages
+corporations
+beehive
+bandwagon
+accommodated
+councillor's
+belted
+airdrop
+confrontations
+chieftain's
+canonicalization
+amyl
+abjectness
+choke
+consider
+adjuster
+crossover's
+agreeing
+consolations
+capitalizers
+binges
+annihilating
+callers
+coordinate
+banshees
+biscuits
+absorbency
+corollary
+corresponded
+aristocrat's
+banally
+cruiser
+bathtub's
+abbreviated
+balkiness
+crew
+acidulous
+air
+birdies
+canvassing
+concretion
+blackjacks
+controller's
+aquarius
+charm
+clip
+awarder
+consistently
+calibrated
+bushwhacking
+avaricious
+ceaselessness
+basically
+accolades
+adduction
+commending
+consulates
+certifiable
+admire
+bankers
+appropriateness
+bandlimits
+chill
+adds
+constable
+chirping
+cologne
+cowardice
+baklava
+amusedly
+blackberry
+crises
+bedeviling
+botching
+backbend
+attaining
+continuity
+artistry
+beginner
+cleaner's
+adores
+commemorating
+amusement
+burial
+bungalow's
+abstinence
+contractually
+advancement's
+conjecture
+buckling
+conferrer
+cherub's
+belonged
+classifications
+baseball
+carbonation
+craved
+bans
+aphid
+arbor
+ague
+acropolis
+applied
+aspired
+calibrating
+abundance
+appeased
+chanted
+ascent
+convenes
+beep
+bottles
+aborigines
+clips
+acquainting
+aiming
+creditor's
+abolitionists
+cloves
+containments
+bungling
+bunt
+anchors
+brazed
+communicator's
+brew
+accumulate
+addicting
+actively
+befog
+anachronisms
+bumblers
+closest
+calculators
+absurdity
+colleagues
+college
+assesses
+conflicted
+associational
+betide
+conceptualization
+adjutant
+alliances
+corresponding
+barometers
+cot
+brooch's
+coiled
+arboreal
+convicted
+artless
+certificates
+bourbon
+astonish
+bust
+correlate
+amounts
+anal
+abstraction's
+corns
+conqueror's
+boldly
+bob's
+beer
+blanks
+corpses
+contingent
+blackly
+backed
+appearances
+cancers
+actuating
+apprehension's
+colorings
+anglicanism
+armament
+armer
+bizarre
+begotten
+actions
+archly
+capriciously
+clue
+contractor
+contributions
+agendas
+coached
+blamable
+annoyers
+coupons
+brooked
+assortment
+axes
+celebrates
+courageously
+baroqueness
+blasphemous
+asserter
+contents
+correctly
+challenged
+bulldoze
+casement
+acknowledge
+bitterness
+belongs
+allotments
+chalice's
+bequest's
+adjacent
+consumer's
+conservatively
+coalition
+background's
+backache
+befouls
+brushfire's
+analysts
+branch
+airways
+awaiting
+breakfast
+anoints
+baying
+contrary
+bilge
+chasm's
+babes
+afresh
+centerpiece's
+barked
+coffin
+assumed
+actresses
+accentuating
+aching
+abet
+balancers
+consumptively
+cagers
+backing
+angiography
+chord's
+cheapened
+bewailed
+arson
+begged
+convergent
+bowlers
+conflicting
+confiscated
+bitch
+bloody
+brushfires
+bleach
+computation's
+choppers
+circuitously
+chancing
+bunker
+concept's
+alacrity
+boyhood
+ammo
+bobwhites
+carter
+ardent
+bier
+airway's
+brownies
+aura
+cannibalizing
+confirms
+australian
+barrage
+closures
+assertive
+abstainer
+bicarbonate
+clone
+back
+cipher
+crown
+cannibalizes
+away
+crafty
+airings
+amtrak
+comical
+burnish
+continuum
+apparition
+apologizing
+blot
+blacker
+characters
+built
+apparent
+applicative
+assiduous
+attorneys
+affectionately
+bobbing
+baggy
+comic's
+attempt
+appealers
+amortize
+bonanza
+backwards
+bowers
+anemometer
+ambulance's
+creeps
+abduction's
+coal
+chiller
+adjudications
+clogging
+ascending
+bookkeeper
+crawlers
+battery's
+artifacts
+attributions
+amusements
+aftermost
+allophones
+bemoaned
+comptroller
+bugger's
+buoyancy
+booboo
+award
+amplifying
+certify
+bivariate
+attunes
+asteroidal
+chant
+collectively
+chasteness
+chapels
+copiousness
+benign
+armies
+competing
+buss
+awakened
+breakpoint's
+conceptualizing
+cleansers
+acorns
+conveyance's
+bluer
+battle
+budges
+characteristically
+be
+contour
+beguiling
+awarding
+armhole
+airship's
+bathtub
+breathable
+crowded
+compiles
+certain
+brutalizing
+bacteria
+baronies
+abode
+blacksmith
+brinkmanship
+capitalizations
+cousin
+botany
+avionic
+companion
+consists
+connoisseur's
+avalanched
+claimant's
+backstitches
+affixes
+bikes
+atomically
+cowed
+asleep
+becomingly
+acorn's
+complainers
+appreciated
+cross
+cringed
+booting
+attitudinal
+broadcasting
+childishly
+breeze's
+craven
+boll
+clause's
+burden
+appendages
+atemporal
+allah
+carnival's
+anchorage
+adjures
+besought
+abounding
+crucifying
+arrangements
+antiquarians
+burrows
+antipode
+canvas
+constable's
+coopers
+ascended
+companionship
+bakery's
+bayonets
+conclusively
+boasters
+beneficiaries
+conspicuous
+contriver
+architecture
+breakthroughs
+brownie's
+blur
+academics
+antagonist
+contemplates
+arena
+caravan's
+administers
+comprehensively
+convey
+bigot
+blitz
+bibliography's
+coerced
+assail
+amazons
+banned
+alabaster
+concluding
+bouquet
+barks
+acquaintances
+astonishment
+constraint
+backpack's
+breakthroughes
+blocking
+accomplishers
+catastrophe
+bushels
+algae
+ailment's
+anemometers
+beginning's
+chefs
+converse
+cornerstone
+astound
+assuring
+adornment
+anyone
+alumni
+club
+bestselling
+businessmen
+constructed
+attendee's
+cooped
+ablute
+chronicler
+alaska
+clam
+canonicals
+concerned
+aligned
+creek
+burrow
+allay
+admirals
+blackens
+compressing
+confirm
+cows
+battleship's
+belched
+affixing
+chalices
+choirs
+absentee's
+baseboard's
+apportionment
+adheres
+accounts
+chef
+access
+clearings
+accompanists
+concentrating
+ado
+bathos
+bailiff
+continuance
+ball
+bearer
+congress
+cites
+can't
+balloon
+crams
+consults
+bungled
+bike's
+apes
+assassinations
+colt's
+consecrate
+ancients
+chick
+analyst
+adsorbing
+burntly
+accompanist's
+apprehensive
+bengal
+boughs
+ankles
+anchored
+benefits
+accommodation
+amiss
+brink
+chewers
+blueberry's
+chairs
+adjoin
+bivalve
+autobiography's
+automated
+comparisons
+climbed
+artists
+congruent
+cold
+atonement
+cashier
+armageddon
+allocations
+bereavements
+bumblebees
+blew
+busboys
+bottoming
+alternations
+apprenticed
+bestial
+cinder's
+consumption's
+abbey's
+amended
+continued
+birefringent
+barbados
+ability's
+compulsory
+antler
+centerpieces
+accountant's
+arrogant
+ballads
+ascenders
+appliers
+adjustment's
+blabbed
+baits
+activity's
+clod's
+adjudicating
+bleak
+commutes
+bumming
+beating
+cohesiveness
+branded
+acknowledger
+communications
+blockhouses
+booklets
+consenters
+creek's
+consulting
+binary
+coaster
+ascription
+bushwhack
+boggles
+affidavit's
+arrangement's
+congressionally
+convenient
+avoider
+abaft
+bootlegger's
+befriending
+ceases
+carbonizes
+clumps
+commented
+competence
+conversing
+butting
+astonishing
+armful
+allegory's
+crisis
+critiques
+concurred
+conservative
+aristotelian
+blizzard's
+corner
+amateur's
+compare
+affiliations
+bestseller
+batch
+cleanly
+assayed
+bravos
+bowls
+conceptualized
+babe's
+algorithm's
+baptist
+cheeks
+conquerer
+bidder's
+behaving
+briefcase's
+analogues
+amply
+attitude's
+apple
+crossable
+ambushed
+besmirches
+creditors
+bandwagons
+continentally
+adjuncts
+concerns
+agers
+cop
+amoebas
+bisected
+bombing
+appendices
+cocking
+bused
+babied
+compounds
+asserts
+believably
+alert
+apostate
+catalysts
+aureomycin
+convex
+beetle's
+banishing
+agitating
+bystanders
+bow
+connotes
+blanch
+charmingly
+animal's
+baritones
+brier
+astronomer
+company's
+balding
+actually
+aunt's
+avalanches
+acquisition
+base
+compilations
+bathtubs
+actualization
+chanced
+atom
+banged
+befuddled
+apologized
+componentwise
+britisher
+began
+conservationist
+actuate
+crosser
+appended
+bitten
+ambivalence
+acetate
+conversions
+buzzwords
+askance
+abolishing
+birdied
+creeds
+anglers
+colossal
+bereft
+chock
+apprentice
+cooper
+besmirching
+allocating
+antiques
+bikini's
+bonders
+afflictive
+augmentation
+atheist
+bucket
+bibliophile
+annexes
+beguiles
+birdbaths
+amendments
+animators
+asymptotically
+communally
+barber
+biographers
+arguable
+confidant
+apologies
+adorns
+contacting
+coarsest
+artichokes
+arraign
+absorbing
+alden
+commercially
+cabbage's
+coincides
+clumping
+cents
+alleviater
+buzzard
+braked
+anesthetized
+bugling
+capitalist
+befriended
+appreciatively
+boomtown's
+cozier
+critic's
+correspondent
+bard
+attenuator
+bake
+brings
+chews
+anechoic
+brutal
+colder
+buckshot
+canvassers
+analytic
+allies
+alloys
+awake
+alienates
+bin's
+crimes
+constructible
+classifiers
+bulb
+cream
+banquet
+axiomatize
+adjourn
+converted
+auditioned
+comfortably
+bandwidth
+cannibalize
+ascensions
+bussing
+balloons
+contenders
+commemoration
+aspersions
+consultation
+cashes
+belting
+augurs
+architectural
+bluebird's
+breastworks
+absconded
+bullets
+bloodstain's
+blunder
+astronautics
+coo
+approves
+authority
+assure
+amsterdam
+acquitted
+adversity
+celebrate
+bred
+bridged
+bloc's
+bullied
+affinity
+breezes
+baptistry's
+constitutions
+avouch
+amazingly
+consolation
+abnormality
+clashes
+buttes
+buzzard's
+breathers
+chipmunk
+contented
+carol's
+armers
+amazedly
+comprehends
+canonicalize
+breakthrough
+arbitrator
+butterfat
+cases
+besiegers
+affianced
+amelia
+bush
+airplane
+annulled
+bike
+alternated
+attackers
+convene
+aficionado
+anachronism's
+crude
+carelessness
+akin
+combated
+assisting
+clocker
+attacked
+briefed
+antic's
+attendants
+attracting
+cope
+allotting
+bandwidths
+add
+assaulting
+breakage
+climes
+arrival's
+burp
+accelerator
+capacitance
+arabians
+bankruptcy's
+archeological
+coins
+browbeating
+chasm
+cardinalities
+compartmentalize
+courter
+assess
+abreaction
+brakes
+compatibly
+compression
+characterizable
+briefing's
+alto's
+classifiable
+contrast
+correlation
+colonial
+applying
+authorizers
+contesters
+basely
+cherries
+clicking
+cornfield's
+alarmingly
+conferences
+business's
+banker
+bloomed
+airfield
+attracts
+building
+commutative
+atomization
+competitions
+boatsmen
+acquirable
+arkansas
+command
+beings
+compactors
+anodize
+arguments
+conforming
+adsorption
+accustomed
+blends
+bowstring's
+blackout
+appender
+buggy
+bricklaying
+chart
+calmer
+cage
+attractive
+causation's
+athenian
+advise
+cranks
+containers
+besotter
+beret
+attender
+cone
+bills
+aligns
+brushlike
+brownest
+bosom's
+berth
+accountably
+bequeathed
+affirmatively
+boundless
+alleyways
+commute
+bendable
+abhors
+calculation
+affidavit
+answerable
+bellicose
+counterfeiting
+admiral's
+chisel
+bridesmaids
+believers
+aggregated
+conspicuously
+abased
+armenian
+conspirator
+canonical
+assignable
+barrage's
+clearance's
+casts
+administratively
+befoul
+chaffer
+amazer
+colorer
+broaching
+crevice
+aniline
+coursing
+compassionate
+adhesive
+bibliographies
+corrects
+augments
+between
+causer
+amorist
+cellist's
+acoustical
+baseless
+cigarettes
+astuteness
+appropriators
+convincing
+bellhop's
+bemoaning
+calmingly
+chronologically
+castles
+algebraically
+appointees
+academic
+blunderings
+assassins
+barrel
+accuracy
+amortized
+ballpark
+acrobat's
+brazier's
+abortively
+coarser
+airfields
+contester
+circus's
+creased
+amorphous
+accomplisher
+blabs
+butchers
+crackles
+bachelor
+aviators
+chariot's
+circumflex
+binocular
+alienating
+artificially
+agreement's
+aglow
+afghan
+abrupt
+annihilates
+apologetic
+barge
+betters
+algorithms
+conjurer
+chargeable
+brindle
+alphabetizes
+coder
+availing
+bandpass
+arrogance
+convent's
+advertiser
+connected
+basso
+breakfaster
+comic
+congenial
+beau
+courters
+adapters
+abruptly
+chemicals
+bringed
+creaming
+butterer
+attained
+actuals
+averred
+brainwash
+centerpiece
+blabbermouth
+byproduct's
+adaptable
+automata
+art
+cheery
+beheld
+beehive's
+claimed
+crucial
+brokenness
+agility
+combating
+cleft
+amenity
+after
+configuration
+contrasting
+coarsely
+brass
+barnstormed
+bowel
+bridesmaid's
+cornfield
+crazing
+autocracies
+adult
+conceptualizations
+corroboration
+bedders
+arroyo
+alarmist
+boatman
+chests
+burglary
+budgets
+canary's
+arraigning
+chin
+barnstorms
+blamers
+brimful
+calculate
+cellular
+contended
+challenges
+brusque
+bikinis
+arithmetics
+chairpersons
+class
+aircraft
+capably
+centralize
+awhile
+compacting
+courteous
+archaeologist's
+cram
+adagio
+affronts
+amplitude's
+bureau's
+audaciously
+autism
+blueberries
+an
+chips
+confiner
+chopper's
+chronology
+breaching
+bead
+amass
+camouflage
+compensation
+aspect
+broker
+atrophy
+balk
+bloodless
+barnyard
+benefactor's
+airdrops
+caused
+anthem
+activist's
+bottomless
+arrogates
+avoided
+bouncy
+clarified
+articulate
+almoner
+communists
+blokes
+butternut
+clockings
+barium
+blows
+criticism's
+associations
+brute
+bleeds
+alliteration's
+bluestocking
+boxwood
+clearer
+allegiance
+conceptualizes
+captivating
+bolshevik's
+belabored
+biographic
+contaminates
+chanticleer's
+adjusted
+childhood
+arguing
+cape
+conversantly
+compensating
+collaborations
+arraignment's
+blasted
+charging
+aggregation
+apprentices
+bird
+codifiers
+ballistic
+breve
+bells
+carolina
+chalk
+buckles
+boyfriend's
+adorn
+accoutrements
+availability
+antisymmetry
+blades
+alluded
+asterisks
+bookcases
+additive
+consents
+advanced
+balalaika
+coders
+caliph
+alundum
+are
+controllable
+blazing
+clattered
+asiatic
+axiomatizes
+ace
+coining
+column
+auditor's
+carol
+concatenated
+arrayed
+capital
+cautioner
+clan
+beauteous
+abbreviate
+asteroids
+canal's
+consolidation
+closets
+concealer
+crevices
+abed
+complex
+conviction's
+abide
+arrests
+begrudges
+adolescent
+conceals
+cells
+circles
+bravest
+compromiser
+bagels
+areas
+afore
+allergies
+arrangement
+attraction's
+amulets
+abstraction
+captured
+crouched
+brothers
+cash
+achieving
+bastard
+compete
+boiling
+beaching
+amphetamines
+clerking
+congestion
+alleviates
+angry
+bared
+comprehended
+bloodstain
+constituency's
+automating
+aerial's
+counterfeit
+besotted
+basses
+biofeedback
+compilation's
+band
+consulate
+appellant
+cough
+antennae
+contend
+anniversary
+boor
+artifactually
+aerobics
+booths
+chubbiest
+consumable
+assignments
+bromide's
+confined
+breakers
+alongside
+courtier
+boisterously
+bilaterally
+alternation
+auspiciously
+arbitrated
+condemning
+burns
+correspondents
+composition
+cavalierly
+coverlets
+capacities
+clatter
+apotheoses
+cartography
+ceased
+capitalized
+auditor
+appendicitis
+chops
+barony
+anemometry
+befouled
+briefer
+chest
+begetting
+bloats
+bookseller's
+commitment
+confides
+carcass's
+battering
+altruistically
+ballots
+adornments
+broaden
+angularly
+coefficient
+cataloged
+brae
+advantage
+anthems
+calculated
+counseling
+agitate
+accentuated
+camel
+ambivalent
+bedposts
+beacons
+chubbier
+cheerer
+assumes
+concord
+autumns
+convention's
+alpha
+adulterates
+arbiters
+archaically
+criteria
+achilles
+cheaper
+bulling
+associators
+bloater
+brawler
+ability
+adherents
+commonwealth
+coyote's
+centrally
+bequeathing
+abandonment
+circumstantially
+courteously
+borrow
+countermeasure's
+capricious
+allied
+anagram's
+absorptive
+assuage
+asset
+booked
+aspects
+commits
+crates
+capacitive
+condones
+assimilates
+carriage
+competitor's
+cocoons
+aggravated
+caravans
+arbitrator's
+baked
+balanced
+annihilated
+addressable
+autonomous
+bandwagon's
+contesting
+burrowing
+coroutines
+abjection
+correctable
+applauded
+bragged
+code
+aggressiveness
+cluttered
+attacking
+chide
+am
+coasters
+blizzard
+contentment
+altruism
+certifier
+capturing
+combinators
+carefree
+activate
+blindfolding
+assassinating
+approximate
+biplane's
+aplenty
+arteriosclerosis
+concentrates
+antisymmetric
+assurances
+anarchist's
+ascend
+advancing
+atrocities
+butt's
+bearable
+craftiness
+categorized
+barn
+contributor's
+arises
+bushy
+bisque
+coasted
+bargaining
+area's
+couples
+cabs
+barter
+bulletin
+chisels
+broadcasters
+contingency
+bywords
+antimicrobial
+coexisted
+blinding
+arithmetize
+coweringly
+convince
+competed
+bauble's
+crab
+boggling
+advocacy
+atlas
+assembled
+ancient
+bloodstream
+balking
+bin
+bully
+affirm
+cruelest
+atone
+conserved
+confession's
+bat
+captive
+aster
+blames
+colonel's
+bones
+borderline
+cleanses
+classified
+crudest
+contiguity
+bailing
+ablaze
+bender
+attendee
+clobbers
+aliasing
+autopilot
+coolers
+cache
+allayed
+barnyards
+britons
+appointment
+adaptor
+blockers
+abridges
+bloodiest
+betrothal
+bombards
+bony
+bus
+canary
+antinomy
+awash
+comrades
+ablating
+collectible
+boats
+brand
+church
+bandy
+adhering
+barred
+ammunition
+chime
+accompaniment's
+battleground's
+composing
+caveats
+armor
+amoeba
+composure
+collides
+avowed
+banding
+counsels
+asymmetric
+abbreviates
+balky
+adjudicates
+anointing
+accursed
+copse
+action
+construction's
+accents
+ambition's
+caressing
+cosmetic
+accession
+clutters
+censures
+allusions
+belittled
+armchair
+abode's
+conception's
+ascribe
+aliases
+ancestry
+ax
+companionable
+aright
+boxed
+brighteners
+alloy's
+checkable
+arraignments
+bed
+bunkhouses
+abbeys
+ceasing
+companies
+cherishing
+chunk's
+barony's
+chinning
+burdens
+briskness
+beggarly
+beloved
+clambered
+constitutionality
+beguiled
+archers
+alleyway
+apostle's
+consulate's
+antiformant
+categories
+construct
+aliments
+acquired
+blotted
+alterations
+adolescent's
+cranes
+bluntest
+accusation
+chafer
+airstrips
+abolished
+bothersome
+churchly
+airy
+bedded
+awareness
+alliterative
+arose
+amputates
+civilization's
+arenas
+certifying
+aspirators
+carbon's
+bunching
+aerates
+bilked
+checking
+cloned
+administrations
+canvasses
+colorless
+chamber
+circumspectly
+benedictine
+advisedly
+classifier
+approachable
+banners
+concurrently
+chores
+agape
+convention
+bindings
+budget
+comedies
+ants
+ambassadors
+chroniclers
+carrots
+colorful
+bulkhead's
+coherence
+buyer
+aggressions
+congressional
+commoners
+cheapen
+concealed
+columnates
+anarchy
+actress's
+baseboards
+creature's
+centuries
+barbarian
+concrete
+bicycles
+acceptably
+acclimating
+biceps
+bloodhound's
+becalmed
+apostle
+bible
+conjunctive
+comb
+ballers
+bickering
+adulterous
+austrian
+applicable
+blackberries
+creasing
+catalogs
+avert
+asparagus
+cambridge
+bird's
+belgians
+admonished
+admirations
+conscientious
+crescent's
+connectives
+blissful
+commenting
+bagged
+assimilate
+abounded
+copyright's
+advancement
+axiom's
+compilation
+circumlocution's
+catheter
+chances
+concretely
+codification
+browned
+clustering
+bum's
+clauses
+boundlessness
+arteriole's
+alfresco
+begrudged
+blustered
+anglican
+adjoined
+bamboo
+bathed
+consortium
+carrot's
+cloak
+album
+bunglers
+approbate
+colored
+aim
+cowboy
+alienate
+cleverest
+ambiguous
+confrontation's
+clear
+africa
+bowline's
+astronauts
+belayed
+censorship
+animation
+bedrooms
+chasms
+compared
+cogitated
+barbarians
+accomplices
+columnizes
+beaming
+busied
+counterpointing
+aluminum
+coconut's
+acclamation
+chokers
+biomedicine
+basalt
+buckwheat
+cardinality's
+bafflers
+arid
+chap's
+abound
+biblical
+backbone
+anticipation
+condemner
+angular
+advisability
+believing
+boiler
+arclike
+abetter
+bespeaks
+axiomatically
+coarse
+auditions
+bludgeoning
+clam's
+chief
+arrow
+cementing
+anxiety
+aberrations
+brushes
+cherub
+corollary's
+bunters
+beefers
+barbiturate
+circumlocution
+conjoined
+charities
+coverage
+campaigner
+burrowed
+barracks
+bristling
+accomplice
+abandoned
+bull
+caked
+century's
+bantu
+bristled
+airer
+bench
+bevy
+chamberlain's
+attention
+cloning
+camouflaging
+alder
+counter
+credibly
+approvingly
+breakup
+artillery
+celestially
+bail
+baker
+bullish
+canvass
+conversationally
+bringers
+augment
+creditably
+butterers
+botswana
+contemptible
+bribing
+adumbrate
+barb
+calico
+alludes
+amplified
+chills
+cloak's
+aver
+arthropod's
+budgeter
+bereavement
+cellars
+crewing
+blackmailer
+ayes
+bedsteads
+breachers
+bazaar
+centered
+celebrity
+blameless
+abscissa
+aerators
+awaited
+british
+adversary
+cowslip
+buttons
+confusing
+buggy's
+belts
+canceled
+addresses
+bribes
+condoning
+bonneted
+coarsen
+amazement
+angels
+chemise
+carbonates
+apostolic
+bandit's
+contending
+consummate
+counterclockwise
+beneficence
+benefitted
+contradicts
+comfortabilities
+anemone
+conductive
+articles
+bookcase
+burst
+baptizes
+countless
+costs
+agonizes
+byte
+creeper
+begs
+bunnies
+attract
+able
+calories
+baskets
+american
+brunt
+cognition
+closing
+chef's
+backbone's
+complicates
+cloister
+bedsprings
+arrays
+brigs
+archbishop
+buckler
+clove
+catholic's
+bellboys
+chairmen
+clap
+clarifications
+ambuscade
+bight
+bellyfull
+allowance's
+academy's
+acquiescence
+ambush
+catches
+at
+billion
+contact
+bees
+adopters
+approximately
+chiseled
+attributively
+criers
+codification's
+cowslips
+contradictions
+buttock's
+categorically
+counterpart's
+confessor
+appreciably
+adjusts
+altitude
+construe
+cancer
+bay
+aristocratic
+alleviaters
+binoculars
+axiomatizing
+changer
+bustle
+civic
+bostonians
+crops
+authorizations
+cogitation
+baptize
+caressed
+abase
+ariser
+axiomatization
+aggravates
+confiscation
+bowdlerize
+backspaced
+alters
+clarity
+blots
+bland
+belligerent's
+burgher
+cardinally
+bookcase's
+buggers
+byte's
+avarice
+crowding
+beriberi
+allegories
+coronets
+cell
+calculative
+adduce
+amperes
+bladders
+adages
+contests
+cognizant
+actuates
+ambiguity
+brighten
+concert
+conviction
+booty
+ashtray
+braves
+blouses
+avoiders
+confederate
+bombings
+couplings
+convictions
+attractiveness
+chronicled
+corers
+anger
+covertly
+aural
+asynchrony
+arrowheads
+breakdown's
+bulletins
+ceremonialness
+clipper
+bracelets
+anthropomorphically
+benedict
+connecting
+bacterium
+achievers
+abutter's
+autocorrelate
+coupling
+blanketer
+continental
+assignment
+conundrum
+arab
+besides
+cheerful
+blowup
+bastion
+arrive
+combines
+agar
+cookie
+astronaut's
+constraint's
+article's
+confiscations
+bounded
+adjudicate
+belligerently
+boron
+brownness
+adept
+creep
+abduction
+accosting
+asylum
+autographed
+clash
+chiseler
+clumsily
+capitally
+braking
+absenting
+bagatelle's
+comet
+basked
+anything
+buffeted
+absentia
+bounty
+carols
+characteristic's
+constructive
+comforting
+aflame
+brainwashed
+booby
+aspirations
+adjudge
+behaviorism
+computability
+assessment
+consultations
+bowstring
+acknowledgment
+arranger
+chancellor
+attest
+compresses
+concessions
+asymmetrically
+administering
+clamoring
+arraigned
+archived
+admonition
+actor's
+aimers
+colorers
+booklet
+calibers
+affix
+bushel's
+atomizes
+creeks
+bleedings
+casuals
+archives
+certainly
+animate
+cons
+affiliate
+answered
+coyote
+coughed
+alligator's
+antagonized
+arousal
+assisted
+aerated
+competently
+conquering
+acclaimed
+assign
+announcer
+controllers
+amalgamation
+comfort
+antihistorical
+availed
+balsa
+annoyed
+basted
+asymptomatically
+cropped
+combinational
+barging
+conversant
+causality
+botches
+bedspread
+considerately
+bookstores
+climate
+blessing
+accordion's
+cdr
+bonanza's
+construing
+bearings
+bluster
+backspaces
+babyish
+countermeasure
+crime
+battered
+audit
+associating
+corps
+application
+archangel's
+aided
+breasted
+compelled
+acrobats
+breakfasts
+chronologies
+beet's
+averts
+convergence
+attributable
+adverbial
+churns
+arrest
+breastwork
+beefs
+brownie
+create
+contradistinctions
+coordinators
+abandoning
+byline
+beatitude
+autosuggestibility
+bipartite
+annals
+assents
+conceives
+amalgams
+cleft's
+clicked
+appointers
+bible's
+boots
+caret
+attaches
+controversy's
+combinatorial
+bazaars
+cardinals
+bored
+catering
+christian's
+ashman
+consequence's
+austere
+clay
+birthday's
+amongst
+arbitrariness
+brainstorms
+chateaus
+coaxer
+applause
+cautiousness
+adorned
+compromises
+creatures
+compliance
+apartheid
+archiving
+amoeba's
+communal
+comedian's
+aggressive
+crop
+ante
+better
+chalice
+aristocrats
+circling
+belittle
+abortion's
+coldly
+certification
+befriends
+courthouse
+anesthesia
+accorder
+athletic
+blithe
+bedder
+abasements
+councils
+beware
+abductor
+assonant
+clench
+aspersion
+abortion
+abating
+birches
+breakpoints
+acyclic
+ablate
+canners
+cistern
+boxtop
+composite
+cloudless
+computation
+chastely
+abusing
+bunker's
+compounding
+alveolar
+chaplains
+bias
+audiological
+capability's
+bangle
+barren
+antidote's
+cranking
+baptizing
+bond
+borders
+automobile's
+allegoric
+chargers
+baltic
+autumn
+columns
+absolute
+connoisseur
+cranberry
+contiguous
+consoled
+confirmations
+argot
+blouse
+annotated
+callous
+astounded
+crashed
+autonavigators
+chivalry
+columnating
+beefed
+convincer
+allegorical
+bagger
+assume
+containable
+artistically
+calibration
+architectonic
+campaigns
+addressability
+crazier
+buy
+brightener
+bastion's
+blurb
+awaits
+commands
+chocolate
+bleaching
+antenna
+blowers
+chorused
+composers
+assigners
+aspires
+coils
+bid
+application's
+clamped
+bedding
+awkwardly
+coppers
+costumes
+borax
+caged
+candler
+badges
+clutches
+consign
+apprised
+buys
+adiabatically
+aggregately
+canned
+abstract
+acrimony
+coax
+analytically
+absurd
+alluring
+contradicted
+aspersion's
+bribe
+boos
+chattererz
+backache's
+complying
+continent
+cohabitate
+causation
+astronomer's
+cities
+bookie
+bleating
+cracking
+bicameral
+convoluted
+adjustable
+ambulance
+can
+boulders
+consideration
+announces
+briars
+antipode's
+bartered
+ancestor
+biplanes
+characterize
+crested
+bum
+bridling
+consolable
+bungles
+coffee
+buffets
+congratulation
+commitment's
+adequately
+clown
+capacitor's
+broomsticks
+agglutinate
+activations
+asians
+canon's
+authenticity
+complexities
+cripple
+bracket
+counselor's
+beatably
+bounced
+baton's
+crankiest
+barbell's
+caster
+casseroles
+ballad's
+bob
+batched
+attenuated
+beakers
+biologist
+bleary
+condescend
+blondes
+augustness
+boldface
+battlefronts
+acumen
+bolting
+articulatory
+butyrate
+bowel's
+backwater's
+colonel
+creating
+authorized
+bijection
+accruing
+admirably
+correctness
+citadels
+clasps
+bandlimit
+bib
+appalachia
+contrives
+bundle
+audiology
+circumventing
+blinker
+choked
+bilks
+clears
+affirmations
+arbitrating
+bites
+bootstraps
+capitals
+commuters
+billeted
+authentication
+choice
+attentively
+aggressor
+arterioles
+crowds
+chestnut
+backstitched
+attachments
+assimilating
+bewilderment
+atrophied
+chintz
+blackjack
+armadillos
+bonfire's
+ballast
+agonies
+busier
+coefficient's
+adventurous
+ballet's
+coil
+chewed
+come
+bonder
+catalogue
+coursed
+arise
+biennium
+ceremony's
+blanching
+appraisers
+acolyte
+argues
+beholden
+appanage
+astatine
+banana's
+coons
+civilians
+bodyguard
+archipelago
+bug's
+candles
+antique's
+accidently
+blighted
+belgium
+besieged
+burned
+abuse
+asian
+chute
+awkwardness
+abasing
+bottler
+ardently
+blab
+breakwater
+cavity
+cheated
+befall
+according
+chronicle
+airframes
+bats
+choring
+authorize
+consumed
+chatter
+annunciated
+capers
+anomalous
+clustered
+burner
+acquaintance's
+badger's
+basic
+affectations
+buzzy
+coast
+attendances
+activating
+beams
+cohesive
+attainable
+barbecueing
+beautiful
+acronyms
+communion
+client
+atypical
+antagonists
+conservations
+arguers
+agglomerate
+antigen
+battalion
+ambition
+countered
+assistant
+classed
+arming
+alveoli
+buff's
+backplanes
+busted
+bermuda
+converting
+brutish
+boot
+acidities
+confrontation
+chapel's
+berlin
+ascender
+behead
+buddy's
+commandment
+actuated
+brilliancy
+chance
+bedrock's
+bridgeheads
+arable
+avid
+arteries
+caresser
+ballyhoo
+attested
+african
+comradely
+consciences
+commencing
+antennas
+annulments
+bobolink's
+advisee
+acceptance
+crack
+ascendent
+appendage's
+accommodates
+accumulated
+clones
+apocryphal
+ages
+cluster
+capitols
+camper
+beading
+amble
+buffeting
+circumspect
+advances
+analyzes
+courier's
+aperiodic
+appealer
+atonally
+attentive
+conspire
+appropriating
+armed
+allergic
+agglomeration
+consternation
+blinks
+audibly
+aspirins
+bunions
+adverbs
+armload
+bet's
+caring
+carryover
+coordinator's
+afterthoughts
+allays
+abided
+brownish
+baiting
+capitalism
+coined
+conspirators
+automatic
+contradistinction
+conductor's
+backstitching
+conjure
+casings
+accountant
+clinched
+constrain
+alcohol
+bee
+anticompetitive
+britain
+bade
+camera's
+antimony
+activated
+burglarizes
+compatible
+cotyledon's
+artificiality
+bath
+citadel
+archivist
+chandelier
+addiction
+ampersand
+bitterer
+constructively
+afield
+bing
+attractor's
+cringe
+allergy's
+bigots
+assimilation
+ate
+capitalization
+abridge
+buzzword
+befit
+bandlimited
+commandant
+alabama
+acculturated
+brightening
+bulldozing
+cooky
+bunks
+centers
+bespectacled
+adherent's
+abducts
+another's
+condensation
+billeting
+bye
+chess
+craziest
+ballgown's
+archaism
+consorted
+chinned
+cowl
+beat
+bootlegger
+bravado
+classically
+bulging
+browbeat
+accommodate
+borne
+bronzed
+artifice
+arcade
+become
+backlog
+addressers
+amphitheaters
+befogging
+crochet
+aiding
+celebrated
+conversational
+backbends
+authentications
+advertisement's
+blockade
+bulldozes
+contraction's
+bricklayer's
+brain
+conveying
+anemia
+chronology's
+channeling
+caution
+commanding
+crosses
+artisan
+conditions
+admired
+authenticator
+airships
+blunter
+bridesmaid
+counseled
+cheeriness
+chiefs
+boils
+clerical
+atrocity's
+balls
+ambled
+canvases
+consoles
+abscessed
+abetting
+blitzkrieg
+bottlers
+beveled
+condemn
+alumna
+cords
+admittance
+annotates
+citing
+corrector
+appreciative
+branching
+betrays
+buttoned
+ailment
+boulevards
+bottlenecks
+chamberlains
+bedbug
+covenant's
+crispness
+considering
+broadcasts
+audubon
+arousing
+correction
+barrack
+closure
+contrastingly
+brittleness
+assassin's
+bursa
+bungalows
+balked
+conceptual
+carcasses
+arabia
+blueprint's
+affectingly
+consorting
+buses
+auger
+appointed
+brute's
+bosoms
+anyway
+arrowed
+anaphorically
+clarify
+approachability
+assistance
+buzzes
+commonplace
+bluebonnet's
+adroitness
+availers
+aquifers
+architecture's
+action's
+backgrounds
+abduct
+attired
+briber
+admissibility
+cease
+beck
+auctioneers
+birdbath's
+atomic
+crossing
+considerate
+biconvex
+bulge
+bedridden
+arising
+aggression's
+cherish
+bureaucratic
+abater
+amputating
+atop
+climber
+clutched
+afford
+bisections
+bonnets
+commendations
+bloke
+abundant
+clamp
+aloes
+aboard
+atheistic
+advantageously
+buffs
+chimney's
+cheerily
+benefactor
+ample
+bushwhacked
+captain
+buckskins
+contextually
+antiquarian's
+browns
+bubble
+ban's
+brine
+acculturates
+anhydrously
+beaver's
+advantaged
+bibliographic
+clasping
+clattering
+coerce
+colorado
+airmen
+bandlimiting
+balks
+boners
+attached
+chosen
+convened
+bordello
+composer
+botanist
+backtracks
+civilization
+commutativity
+bloodshed
+cohere
+bunkhouse
+archdiocese
+boycotted
+crosswords
+bedspread's
+anteaters
+cove
+apothecary
+chute's
+addressee
+climatically
+blower
+bane
+cask's
+beetling
+ambiguities
+before
+abstain
+arachnids
+bucket's
+amateurs
+blackouts
+adverb
+butchery
+conjunction's
+barricade
+audiologists
+aphorism
+complete
+butts
+bishops
+allotment's
+confusingly
+channeller's
+blanches
+bragging
+bathe
+comedians
+celestial
+citizens
+couple
+backpack
+aphasic
+brothels
+axles
+cancellations
+bonus's
+consolidates
+authoritative
+axle's
+acclimatization
+carolinas
+chime's
+antibiotic
+bisons
+biographically
+achieve
+bleachers
+bicentennial
+behavioral
+accomplish
+concealment
+biddies
+antitoxins
+arriving
+apprehend
+affluent
+cliffs
+bleached
+astronomers
+connection
+bride
+backs
+bog's
+casket's
+continual
+ampere
+cat
+alternator
+cotton
+athletes
+communicant's
+best
+befuddling
+benefactors
+appease
+annoyingly
+context
+astonished
+cracked
+amnesty
+autumn's
+binder
+babying
+contributory
+assumption
+cowls
+cocks
+airless
+consummated
+atypically
+beneficially
+chairing
+accusative
+commanded
+bufferrer's
+alerter
+arbiter
+civilly
+charms
+backscattering
+cheater
+bushes
+caverns
+chieftain
+calf
+comparing
+aurora
+butyl
+cower
+bemoans
+baptistry
+carpenter's
+capes
+bordered
+arrows
+blocker
+crest
+appeal
+arabic
+conventions
+axis
+brains
+bookkeeper's
+circle
+cooks
+circumlocutions
+adventists
+barringer
+affording
+anatomically
+basements
+barbarities
+configuration's
+contributes
+collaborating
+beach
+comet's
+bakes
+assigns
+ballerina
+cheapens
+clinging
+conquered
+bisecting
+closenesses
+bugle
+boatmen
+beatings
+complicator
+bight's
+banister's
+archaic
+anthropologists
+clams
+beginners
+committee's
+communicants
+alone
+bounteously
+bastes
+ascertain
+alphabetical
+bringing
+batters
+amazon's
+constituent
+benders
+being
+constitutionally
+audiometric
+blast
+copings
+bailiffs
+colts
+coolies
+airlift's
+boomerang
+bifocal
+clothes
+cashiers
+congenially
+billows
+boilerplate
+biochemistry
+betting
+brimmed
+complementers
+breading
+bragger
+adducting
+bisectors
+abrogates
+criticized
+comrade
+bucolic
+birthright
+blurs
+challenger
+complicated
+bluebonnet
+biscuit's
+classmates
+campus's
+boundary
+bedbug's
+adjustor's
+acre
+bicycling
+awe
+additions
+baiter
+authorizes
+beautify
+copier
+buffet
+belfries
+acquisitions
+brooch
+crickets
+caterpillars
+beefsteak
+complicating
+bedpost
+criminal
+celebrity's
+bookseller
+christened
+coerces
+clamors
+all
+boatyard's
+canoe's
+begin
+anaerobic
+bushing
+agreers
+concedes
+countermeasures
+beg
+agglutinin
+bunted
+ammonium
+aspiration's
+bathrobes
+changeable
+beached
+bestowal
+beaner
+catsup
+admires
+clockwise
+agile
+alarms
+ached
+chinks
+buffer's
+cartesian
+annunciate
+chanticleer
+avenue
+anchor
+alliterations
+blanking
+bargained
+breathtaking
+crime's
+assiduity
+argentina
+contiguously
+aqua
+bested
+borderlands
+appetite
+captive's
+bipolar
+conceal
+counters
+costumed
+arrestingly
+bunting
+blight
+champagne
+brusquely
+address
+bloodhounds
+associative
+creed
+arithmetical
+balustrade's
+belabors
+complementing
+checkout
+archivers
+badlands
+behaviors
+ampoules
+bridgehead's
+antiquarian
+clumsiness
+considerable
+apportions
+anglicans
+appealingly
+barfly's
+absorptions
+awards
+congregates
+cloister's
+armour
+avoid
+correctively
+chucks
+burps
+bums
+berry
+batches
+administration
+atones
+bishop's
+blonde's
+casualty's
+cores
+bodied
+alter
+assonance
+apprise
+antitoxin
+avariciously
+checkpoint's
+affirmative
+conjures
+angstrom
+aesthetically
+canyon
+binge
+crazed
+breastwork's
+aids
+boston
+conceits
+announcement's
+beechen
+accessory
+authorities
+constrained
+automation
+anaplasmosis
+commander
+commendation's
+belabor
+cornfields
+artemis
+asphalt
+contracted
+brochure
+crafted
+allegedly
+alien's
+auditory
+blowfish
+adducible
+confederations
+annuals
+britches
+acquaintance
+appallingly
+abounds
+burglarproof
+crossers
+bayous
+brisk
+authority's
+covetousness
+averse
+accomplished
+aromatic
+admiral
+bijective
+avenging
+bran
+boatyards
+beseeching
+challenging
+bares
+acts
+abductions
+compendium
+compulsion's
+calendar's
+clad
+blockage
+conventional
+craze
+cajoling
+acceptability
+bungalow
+buff
+cramps
+attackable
+calculator's
+asp
+braved
+colors
+balling
+contaminate
+crackling
+comes
+complimenters
+across
+astronomy
+aborigine
+bobwhite's
+autopilot's
+chattered
+appall
+autonavigator
+bashed
+acoustics
+beachhead's
+apartments
+convenience
+blackout's
+bands
+autonomously
+amounters
+centripetal
+achievable
+astringency
+attuned
+concatenating
+copyright
+coding
+assumption's
+anastomoses
+confiscate
+asking
+beneficial
+adhesions
+busboy
+bronzes
+audacity
+bruises
+crash
+beau's
+circuit's
+aborts
+baubles
+beliefs
+assuaged
+costed
+blinking
+characterized
+bowled
+block
+conquests
+confesses
+amusers
+ceiling
+berets
+berliner
+abstentions
+child
+authoritatively
+closeness
+bushel
+considered
+communicates
+cheerlessly
+autofluorescence
+aquarium
+affects
+appurtenances
+airbag
+approaches
+admonishments
+bets
+bounden
+courtly
+bodybuilder's
+campus
+brainstorm
+americans
+chairperson's
+botanical
+askew
+amazon
+bleed
+clime's
+cooperations
+commonness
+boatloads
+blinked
+courtyard
+adapted
+aforethought
+backwater
+burr
+cathode
+awaking
+buzzed
+bridgeable
+arrives
+adventuring
+beseech
+attrition
+copied
+colon
+client's
+bandstand's
+advice
+baptistries
+antithetical
+alcohol's
+contradicting
+ambidextrous
+belches
+category
+bluntness
+coupon's
+assimilations
+comfortable
+caller
+affliction's
+attends
+compactest
+baler
+beacon
+blind
+bleakness
+beseeches
+courts
+couch
+consequential
+adulterers
+craving
+biggest
+astray
+bigoted
+barfly
+charges
+ambiguity's
+commentary
+crankily
+cowerer
+carnival
+bachelor's
+bituminous
+continuance's
+calamities
+claws
+apiece
+century
+ascendancy
+charts
+animations
+aggression
+chickadee's
+carve
+confidence
+actor
+bubbled
+becalming
+convulsion
+chivalrous
+brightest
+centralized
+beautifies
+amateurishness
+birthrights
+alligator
+circumstantial
+constructors
+conceptions
+arranging
+cart
+cent
+ager
+congruence
+carrot
+chariots
+cloudier
+captivity
+conquerers
+compartmentalizes
+condensing
+celebrities
+chalks
+accordance
+chilled
+conversations
+apples
+conceiving
+average
+blessed
+creator
+ant
+cling
+annoyer
+aviation
+cohesively
+correspondences
+boor's
+apprehended
+bessel
+both
+characterizes
+bards
+cots
+acculturating
+cemeteries
+carting
+alcohols
+bitterest
+ascetic's
+conducts
+caking
+airspace
+autocrats
+ashes
+chimes
+broadcaster
+commuter
+basket
+borderland's
+broadened
+boyish
+allegretto's
+ban
+bidder
+christen
+blessings
+bury
+arranged
+choir's
+apathetic
+boring
+aryan
+appearing
+binds
+cooperates
+bounces
+airspeed
+complicators
+adapting
+babbled
+agglomerates
+bedraggled
+addictions
+bolt
+calmly
+blur's
+boatload's
+anesthetic
+bugs
+colt
+completing
+boxer
+billers
+affronting
+absurdity's
+chides
+comparatively
+braided
+clipper's
+cot's
+calves
+articulations
+branchings
+attraction
+concatenates
+alligators
+cake
+boom
+crashing
+afar
+abler
+beamed
+adverse
+adrenaline
+agriculture
+beehives
+crankier
+courthouses
+advises
+consigns
+bisect
+azimuth's
+carpets
+arthropod
+brewery's
+commonalities
+altruist
+astride
+appreciate
+carved
+briefs
+admitter
+celery
+congregate
+clocking
+assassinated
+adding
+canvasser
+civics
+contemptuously
+calculates
+advisees
+bumbling
+algorithmically
+cloudy
+algebras
+addiction's
+cop's
+assurers
+confidently
+affector
+analyzers
+chimneys
+burdening
+antitrust
+admix
+avoidance
+choking
+coexists
+accustoms
+cellar
+anchovy
+constructor's
+confinements
+consequently
+accelerations
+accoutrement
+churchman
+biller
+affected
+brigades
+cremating
+corridor's
+bagging
+ah
+berating
+collective
+acuteness
+arrestors
+cab's
+border
+agitation
+animism
+arches
+alveolus
+cessation's
+averrer
+abash
+counterrevolution
+attesting
+animateness
+bawdy
+americana
+bloodstained
+applicator
+annotating
+annunciator
+clamored
+acting
+aerosols
+axiomatization's
+brags
+coalesces
+avocation
+combining
+crazily
+bravery
+burying
+adored
+airfield's
+accounting
+broadeners
+anise
+chimney
+added
+avenges
+bellicosity
+cranberries
+arsenic
+communities
+comparable
+bunkered
+architect
+alphabetically
+beautified
+apogees
+communist
+anatomical
+complexity
+accost
+autographing
+browsing
+ameliorate
+bookers
+bandaging
+clinical
+appellants
+counteract
+clairvoyantly
+bootstrap's
+canner
+boastful
+attainer
+ash
+beaded
+brake
+barest
+befriend
+burglarproofing
+allegorically
+bunts
+believes
+accession's
+buck
+boathouse's
+byword's
+anthracite
+accuse
+conjunction
+burping
+commandant's
+creativity
+affirming
+bark
+amuses
+balcony's
+auditors
+counsel
+clamber
+borates
+cowboy's
+bickered
+boors
+combing
+biting
+breeze
+crowder
+corn
+bloke's
+bombast
+bookstore
+blared
+bedlam
+carbohydrate
+coops
+bundles
+blistering
+antarctic
+anterior
+bilinear
+chocolate's
+context's
+alternating
+annoyance
+constancy
+ambivalently
+buddy
+brutalize
+bobbin
+alleles
+commotion
+attributes
+airborne
+creed's
+bolstering
+coaxed
+airframe
+breaker
+accept
+abashes
+attentional
+contributor
+comparability
+auscultating
+cocked
+computationally
+buffered
+career's
+analyzable
+absently
+courtyard's
+buildups
+apportioned
+balkanized
+annulling
+cremation
+buffetings
+conditional
+confided
+airliner
+bulldozer
+approaching
+anagram
+apollonian
+canaries
+bloat
+bluebird
+collision
+cool
+connectedness
+abasement
+artisan's
+avoidably
+clerks
+afflict
+briton
+corroborates
+cameras
+counted
+boldest
+burglars
+brutes
+brows
+abhorrent
+configuring
+averaged
+ace's
+buying
+abandon
+bayou
+cottons
+auditioning
+amplifies
+clippers
+brainstorm's
+alto
+brutalities
+bunch
+agricultural
+bursts
+blunting
+archer
+activity
+carefulness
+bedroom's
+concomitant
+balm's
+artificer
+barking
+breathy
+babies
+acacia
+bodies
+cap's
+criticised
+conversed
+crewed
+ascendant
+budgeting
+coroutine's
+charmed
+bellboy's
+conservatism
+butler
+acculturation
+conclusion's
+adapt
+cellist
+contempt
+adumbrates
+borrowed
+confounds
+allegiance's
+blabbermouths
+accrues
+captor
+coop
+baseballs
+cottages
+apartment's
+assertiveness
+assent
+artfully
+bagger's
+abolishment
+acetylene
+accessory's
+blackbird
+baptist's
+consist
+cavern
+buttock
+corporal's
+autoregressive
+bailiff's
+birds
+corder
+bracketing
+antlered
+barbiturates
+county's
+addicted
+agglutinated
+abashed
+competitively
+captains
+bloating
+accepts
+choose
+ashamed
+backyard's
+apiary
+contradiction
+balalaika's
+arctic
+broom
+anvils
+coffee's
+alliance's
+agitator's
+change
+adjusters
+cremates
+complexes
+bodyguard's
+burl
+antithyroid
+ambient
+airfoil
+apricots
+athleticism
+abjectly
+bankrupts
+answerers
+alternatively
+confronter
+breaking
+baronial
+cannibalized
+appetites
+breaded
+blackboard's
+battlegrounds
+cosine
+barrenness
+abbreviation
+budging
+boolean
+acrobatics
+again
+ashtrays
+clashed
+contingent's
+compulsion
+bedazzled
+collapsing
+comparison's
+businesses
+compassionately
+achievement
+buffering
+candlesticks
+austerely
+awls
+associate
+absolved
+annexed
+airway
+clipping
+counselors
+conscience
+attempters
+constructing
+biases
+cautioners
+comma's
+cosines
+char
+auscultates
+afire
+comely
+amity
+beverage's
+anew
+ballplayer's
+adulterated
+authorship
+alterers
+burdened
+attributive
+afflictions
+blinded
+barrier's
+attachment
+brotherhood
+bridegroom
+atoms
+cobweb's
+copes
+controversies
+complexion
+crawling
+atomized
+adjust
+accuracies
+concern
+cinders
+authorization
+appraisingly
+bladder's
+cooked
+cowers
+batter
+commissioner
+close
+burglar's
+allocated
+anvil
+aftershock
+abrogating
+chemistries
+advisable
+conduct
+committee
+blaring
+appalling
+braveness
+alertly
+artificialities
+brevet
+collision's
+arizona
+bower
+creamers
+awnings
+arsenals
+crane
+city
+contemplative
+catheters
+administrators
+attorney
+churned
+attractions
+columnation
+bobbed
+centipedes
+bostonian's
+apprises
+buries
+allege
+botulism
+adobe
+ambassador's
+covenants
+boon
+asynchronously
+bigness
+axial
+chaffing
+battleships
+ant's
+anthropological
+accent
+brushing
+brassy
+consumptions
+battleship
+absorb
+beckons
+brook
+connectors
+clinches
+accesses
+beaters
+archaicness
+bursitis
+chided
+bomb
+assimilated
+addicts
+convening
+arianists
+counting
+altar's
+confusions
+attachment's
+clipping's
+amazing
+corset
+bossed
+attach
+commandingly
+animatedly
+allegations
+assuages
+annulment
+compress
+aptitude
+absurdities
+autobiographic
+aspect's
+concentrator
+burgesses
+anagrams
+bedeviled
+assemblers
+convinced
+commentary's
+agglomerated
+biological
+callousness
+axolotl's
+atmospheres
+authoritarian
+cancer's
+above
+charting
+aldermen
+battler
+cistern's
+bouncer
+amassed
+conquest
+altering
+arrogantly
+brokenly
+comparator
+counsellor's
+attenders
+cackle
+criticize
+authored
+ably
+believed
+compelling
+accepter
+cleansed
+afflicted
+backslash
+computed
+almighty
+attache
+braes
+carriage's
+benediction
+brigadier's
+contemporariness
+boomtown
+amplitudes
+breakwaters
+clod
+catch
+bar's
+activist
+caves
+assenting
+camp
+attainments
+brotherliness
+continuances
+appearance
+applicator's
+browbeats
+banjos
+addendum
+became
+adduces
+armadillo
+brothel
+almanac
+courageous
+assault
+chunk
+coaching
+atheist's
+blunted
+aperiodicity
+congresses
+boastfully
+burglarproofed
+broadest
+bashfulness
+affect
+acne
+bottleneck's
+criticisms
+corrupts
+colonized
+closeted
+canonicalizing
+auditorium
+antenna's
+awfully
+anti
+consumes
+agonize
+algebra's
+championing
+blush
+bugger
+antagonize
+beethoven
+blase
+boycotts
+compensatory
+bugged
+boroughs
+anatomic
+batons
+arguably
+affricates
+appreciations
+cavalry
+alumna's
+arcing
+backpacks
+braces
+contextual
+coupon
+chillingly
+allocates
+abuts
+contribution
+commodity
+admonishing
+coolly
+cabinet's
+collapsed
+confessions
+adjured
+capriciousness
+chastising
+babe
+aerodynamics
+accepting
+concept
+contour's
+consequentialities
+birthday
+bankrupted
+birthed
+benefit
+concentrations
+azalea
+channels
+chestnuts
+contenting
+antedate
+censors
+contagious
+abbot's
+channellers
+apt
+commend
+avocation's
+admonition's
+abolition
+confederation
+carried
+clumsy
+coincidences
+bumper
+burr's
+bugles
+bribers
+attainably
+consume
+comma
+creativeness
+accuser
+bombs
+abbey
+baffled
+aside
+clip's
+appeases
+compass
+bundling
+abstractionism
+confide
+creases
+apropos
+confronted
+corrective
+concurrencies
+autocratic
+alien
+attending
+antagonistic
+broadcast
+asymptote's
+belied
+breasts
+contrapositives
+coiner
+accordingly
+cohering
+computers
+cow
+bibs
+ancestral
+controller
+attacker
+alerts
+coconut
+agency
+alerted
+alcoholism
+ammoniac
+actinometers
+acquitter
+bud
+cessation
+alleging
+centralizes
+articulators
+council's
+carvings
+arduously
+blown
+anode's
+arrogate
+bisects
+centimeters
+burgeoning
+course
+appointee's
+ascribable
+communicate
+contrivance's
+adoptions
+attune
+acres
+abyss's
+corporal
+certifiers
+analyze
+augusta
+bestseller's
+checkpoint
+coexist
+attainers
+argon
+bearded
+crudeness
+averaging
+brick
+adducing
+annulment's
+chicks
+blocked
+cisterns
+afoul
+affiliates
+briskly
+adhesion
+ascertainable
+appeasement
+blueprints
+agreements
+blindfolds
+communicator
+characterization
+annoyances
+breeches
+brushed
+clinic
+competes
+chuckled
+cradled
+balmy
+antisubmarine
+alternate
+armpits
+barn's
+conjuncts
+adhere
+allows
+counteracted
+appetizer
+capturers
+cleanse
+avant
+abbe
+corpse's
+arduousness
+badge
+begets
+contemplated
+caveat
+copiously
+athena
+aggrieving
+alibi
+accumulation
+basket's
+aftershocks
+bass
+conjuncted
+chaps
+brunch
+colonials
+bibbed
+clusters
+antagonizing
+constituencies
+combings
+bearish
+continuously
+adequacy
+brow's
+catalog
+alderman
+comedic
+chemists
+concernedly
+conceded
+alarm
+arced
+buckle
+confidingly
+coherent
+closes
+buffoon
+brace
+adjustably
+crackers
+contamination
+burgess's
+aerobic
+constitutes
+baptismal
+broadness
+blimps
+concatenation
+claiming
+bard's
+aerosolize
+adjoins
+copies
+coats
+boggle
+corroborated
+concreteness
+bill
+cautions
+bantam
+bearably
+armchair's
+birthright's
+cravat's
+cone's
+courtiers
+asunder
+bulletin's
+biopsies
+alley
+contrive
+blasphemies
+amuser
+ballerinas
+blushed
+causticly
+brandy
+blinkers
+complimenting
+crimsoning
+angola
+apprehensiveness
+bolster
+columnate
+byproducts
+berths
+accusal
+chubby
+arrived
+camps
+blemish's
+anaconda
+cook
+airfoils
+atlantic
+boosted
+converge
+availer
+appalachians
+coffin's
+boarding
+alga
+crouch
+columnizing
+consul's
+chastises
+angling
+apple's
+billiard
+attentiveness
+adroit
+apprehensible
+cereal
+blouse's
+browning
+bodybuilder
+coaxing
+assertion's
+connective's
+commemorated
+accountability
+crooked
+blips
+chandeliers
+aristocracy
+bangs
+coke
+abutment
+community
+calculus
+congregated
+crepe
+compromised
+airlines
+contributing
+contingencies
+coordinated
+alginate
+batted
+contender
+alma
+antagonisms
+accompanied
+airport
+administrator's
+appraisal
+breadbox
+condemnation
+backlog's
+available
+consequents
+crooks
+commonwealths
+barring
+channeller
+crucially
+archaeological
+charming
+adventist
+credits
+appetizing
+breads
+clients
+climbing
+aloneness
+abstractness
+appearer
+astute
+clockers
+antagonizes
+agonized
+bastard's
+conjectured
+aqueducts
+aureole
+boatswains
+conjured
+chauffeur
+complementer
+behold
+bustards
+bivouac
+cluck
+anus
+bless
+catastrophic
+bounty's
+allowed
+answer
+concealers
+brainchild's
+coercion
+buzzword's
+bordellos
+appertain
+applier
+couriers
+aesthetic's
+craft
+capacitances
+capped
+coupler
+category's
+anvil's
+conquest's
+checksums
+clucking
+bronchus
+acrimonious
+changeably
+accenting
+argued
+conditioning
+brewing
+backwardness
+cascaded
+atomize
+contours
+arianist
+apart
+conflict
+carefully
+banshee's
+conveys
+arbitrates
+amphitheater's
+amen
+alimony
+bound
+buzz
+courtroom
+apparently
+coalescing
+circulating
+amounter
+bypasses
+breadth
+choral
+completion
+arisen
+anticipating
+bilges
+contractions
+bedspring
+commune
+blacklisted
+beagle
+alkaline
+atolls
+carelessly
+blimp
+corking
+brevity
+alterable
+canada
+bear
+bluntly
+cartridges
+connoted
+countries
+corroborate
+consecration
+corrupted
+appreciating
+combatant's
+alkalis
+affecting
+blues
+casserole
+ballad
+bewitches
+common
+as
+because
+bathroom's
+anchorages
+beguile
+connect
+convenience's
+counteracting
+assorted
+care
+contains
+centimeter
+ancestors
+briefings
+busses
+churchyards
+breakable
+amortizing
+courthouse's
+click
+courses
+ajar
+county
+covet
+confidences
+capitalizer
+agog
+backtracking
+copious
+bestsellers
+chilliness
+bringer
+browse
+centipede
+bawled
+bricklayer
+breath
+assailants
+abysses
+command's
+characterizer
+calculating
+america's
+aurally
+contain
+alias
+commentators
+confounded
+appending
+accidents
+chatters
+coordinates
+bleeder
+blueness
+badger
+bolsters
+astounding
+capitalist's
+conservation's
+commences
+aimed
+bun
+comparators
+competition
+bauble
+backbend's
+bled
+assassinate
+chop
+anemometer's
+cobbler
+coldness
+audiometry
+affinity's
+amalgamates
+cowardly
+consolidating
+beads
+brackish
+bookings
+accuses
+bog
+compartmentalizing
+clutching
+calming
+collars
+clambers
+banqueting
+beaked
+authoring
+correspondence
+apostrophes
+affirmation's
+bespeak
+costing
+brought
+complainer
+battalions
+asymmetry
+boathouse
+canyon's
+awarded
+amplitude
+anarchical
+anticipatory
+bolder
+cooperatives
+caterer
+adviser
+balkanizing
+augur
+cannibal's
+balustrades
+attaching
+collector's
+commercials
+capaciously
+coincidence's
+bumps
+ascot
+bale
+blackmail
+baby
+aftereffect
+bloomers
+buttresses
+avenues
+climaxes
+aqueduct
+cater
+brainchild
+avail
+bypassed
+bowl
+california
+cements
+boxes
+brained
+bedevils
+captors
+acuity
+ascends
+breakthrough's
+assigner
+caner
+bequests
+ceilings
+axers
+bookshelf
+autistic
+celebrations
+axons
+chiding
+asterisk
+allophonic
+blindingly
+cherubim
+boaster
+confining
+anxious
+clowning
+advisement
+approach
+anesthetic's
+crescent
+alertedly
+birdbath
+beardless
+bras
+auspices
+choosers
+approval's
+afflicts
+corrosion
+arpeggio's
+bodyweight
+cranky
+battlefront
+affirmation
+churchyard's
+aeroacoustic
+anders
+adjustment
+baneful
+citation's
+acetone
+blend
+binuclear
+boner
+annotation
+announce
+claimable
+contemporary
+clothing
+acquitting
+choosing
+attacher
+bananas
+binaural
+arrestor's
+aches
+conclude
+collaborators
+await
+blaspheme
+bequeaths
+crows
+balconies
+begging
+conducting
+abstracts
+assignee's
+causations
+approximation
+articulated
+considerably
+apricot's
+afferent
+assertively
+bonding
+calms
+cranberry's
+cost
+captaining
+agenda
+corridors
+complaint
+christens
+aggravate
+countess
+arbitrators
+ascribing
+breech's
+bellwether's
+burglarized
+confinement's
+animating
+adjectives
+cannister's
+bemoan
+cleanest
+acme
+cheapest
+activities
+allophone
+boy
+belaboring
+captions
+compactor's
+actuator's
+befouling
+arachnid's
+computerizes
+compile
+absorption
+bridled
+absorber
+convicts
+birch
+alkaloid's
+cannot
+bacilli
+charitableness
+abated
+ceaseless
+beavers
+bookshelves
+commensurate
+appreciates
+basil
+cartoons
+aides
+buxom
+cages
+cantor's
+acceptances
+antiquated
+amalgamate
+babyhood
+beers
+conforms
+bouquets
+canner's
+baste
+cashed
+argue
+butcher
+backbones
+absolve
+crib's
+cafes
+abstracted
+book
+committees
+authentically
+conference
+antisera
+bourgeoisie
+attribute
+biddy
+autobiographies
+chivalrousness
+coverlet
+ambiguously
+calorie
+anhydrous
+alignments
+around
+archfool
+advance
+bedpost's
+affective
+contained
+amain
+bromides
+clogs
+bricker
+arduous
+consistent
+amidst
+confess
+complain
+anniversaries
+coasting
+cobwebs
+aries
+benchmark
+aviaries
+bombard
+boxers
+ashtray's
+assyriology
+blaze
+ablative
+chaos
+burro
+arguer
+ashamedly
+crier
+allocator's
+aggressively
+carts
+advisory
+airship
+alkali's
+backup
+chaining
+continue
+cartoon
+circumference
+breadwinners
+autonomy
+banking
+armored
+cabin
+chunks
+antigens
+blistered
+airers
+breakaway
+belief's
+belays
+coveting
+auburn
+careful
+anybody
+bumbled
+cautious
+adopter
+ballplayers
+anteater
+citadel's
+avails
+agent's
+caliphs
+bridgehead
+already
+caterpillar's
+coachman
+centralizing
+alphabet
+concede
+barbell
+breadboard
+ballast's
+activators
+attendance
+blandly
+calculator
+codeword
+addressee's
+avenue's
+alcoves
+alternately
+admonishes
+concentrate
+crossbars
+adjoining
+basset
+carbons
+beast
+blonde
+castle
+clarification
+bitch's
+abrasion's
+books
+amputate
+bicycler
+aphonic
+arraigns
+acquiesce
+buster
+chaperon
+advisements
+buyer's
+attack
+birthdays
+blazed
+confuser
+crag
+ballet
+airports
+bison
+counterexamples
+arteriole
+colony's
+adamantly
+blunders
+chivalrously
+adult's
+authors
+amplifiers
+counterfeited
+complicity
+astrophysical
+axolotl
+bash
+battleground
+butterfly's
+axioms
+allegory
+blitzes
+blindfold
+bufferrers
+approximating
+byways
+computations
+alight
+avoiding
+assurance's
+barrages
+canonicalized
+callously
+auditing
+authenticating
+bag's
+asters
+artistic
+bonanzas
+applaud
+certainties
+auto's
+concession's
+cascade
+chubbiness
+churchyard
+afternoons
+antigen's
+baron's
+amphibian
+banister
+capitalize
+approval
+appropriated
+bureaucrat's
+covets
+cloisters
+circulate
+bivalve's
+beta
+collector
+among
+cane
+birdlike
+attenuating
+conjunctions
+appliance's
+coral
+crucify
+abnormal
+combined
+classroom
+buckskin
+commissions
+abolishments
+arching
+croak
+americium
+associates
+car's
+assuringly
+agreer
+anticoagulation
+closure's
+corkers
+attend
+alphabet's
+awakening
+composedly
+attracted
+construed
+cricket's
+applicability
+autonavigator's
+chloroplast's
+ashen
+beggars
+corporation
+another
+conflicts
+bootlegs
+archeologist
+alcove's
+agitates
+cargoes
+creditor
+cops
+advisably
+coronation
+bourgeois
+crochets
+cropper's
+cramp's
+adulterer's
+corroborations
+changing
+combinatorics
+calm
+comprehensible
+blooms
+coolness
+copying
+blacksmiths
+commodore
+compulsions
+clump
+afterward
+crucified
+brooder
+buckets
+accelerating
+accented
+boat
+adventitious
+baseline's
+courier
+calamity's
+atoll's
+brutalizes
+bundled
+chairperson
+cheeses
+continuation
+celebrating
+apologists
+behest
+bumpers
+consonants
+circulation
+betraying
+commuting
+breezily
+circumstance
+coughing
+benefiting
+conquerors
+chemically
+commencement
+adjustors
+angel
+congratulate
+conspired
+causally
+bud's
+conquers
+augmented
+bereaving
+advisor
+articulation
+angler
+admission
+bide
+competitors
+amusement's
+collecting
+adder
+arithmetized
+cheek's
+apostrophe
+blockages
+clockwork
+bubbly
+apricot
+adjudicated
+banter
+amused
+breacher
+bracketed
+aimer
+comprehending
+bunkers
+canton
+arcane
+absent
+capitol
+consequence
+cognitive
+abjuring
+clever
+coronet
+anathema
+artichoke
+controls
+credulous
+acid
+crawled
+coupled
+boomtowns
+aspen
+acted
+anyhow
+burdensome
+backdrop's
+apocalyptic
+cornerstone's
+cautiously
+blisters
+conveniences
+arbor's
+accessories
+alleges
+clubs
+accompaniment
+blazes
+annually
+clique's
+beamers
+ballgown
+autumnal
+acreage
+conjunct
+balances
+consoling
+canvas's
+competent
+aggrieves
+although
+afraid
+clearly
+cognizance
+acoustic
+colleague
+causing
+absences
+closers
+airs
+cinder
+adversaries
+altruistic
+brews
+ceremonially
+appraisal's
+commissioners
+army's
+assists
+acceptor
+comparison
+cooling
+conveniently
+couching
+changes
+clinic's
+confronting
+adjunct's
+blandness
+alternates
+bunter
+consequent
+clean
+autos
+accumulators
+carver
+aprons
+awful
+bobbins
+blasphemy
+assuming
+abscess
+assemble
+cabinet
+atomics
+blacklists
+audacious
+assay
+anthropology
+barnstorm
+awl
+bumping
+assembles
+capture
+compensates
+coverable
+amend
+array
+continually
+absented
+cigarette
+antiresonance
+backspace
+branched
+appellate
+courtroom's
+alienated
+austerity
+cement
+asked
+antelopes
+cottager
+bluebonnets
+booze
+amendment's
+backslashes
+begun
+bijections
+cafe's
+boatload
+collect
+appeals
+belittles
+befit's
+beauty
+arrogated
+academia
+contagion
+blemishes
+coverlet's
+comfortability
+antecedent
+controllably
+congressman
+complicate
+coincide
+arrears
+clumped
+credited
+buffoon's
+catholic
+accompanist
+beauty's
+aster's
+blatantly
+bothering
+bewilder
+canceling
+carbonizer
+accentuation
+backstairs
+anticipations
+bestowed
+civilian
+blooming
+blunts
+airlocks
+argo
+blueprint
+aristocrat
+cakes
+complements
+ale
+camping
+army
+adrift
+bengali
+barely
+blasphemes
+briefcase
+brooches
+ailments
+blazers
+crevice's
+bankrupt
+archiver
+articulator
+alphabets
+bonds
+colliding
+candidate
+cashier's
+bellwethers
+airstrip
+announcers
+calendars
+corrupter
+aqueduct's
+axiom
+bathing
+blusters
+ascribed
+admittedly
+angrily
+analytical
+contraption
+convertibility
+abysmal
+cathedral's
+aversion's
+algol
+articulately
+breveted
+bickers
+chatterer
+adoptive
+bijectively
+cloudiest
+coarseness
+carted
+cocktail's
+capacious
+anion
+buffoons
+bleeding
+bedrock
+adventurer
+compositions
+camouflages
+brittle
+chip's
+aloe
+chorus
+cargo
+critical
+biographer's
+abject
+blasphemousness
+charmer
+betray
+blacking
+awoke
+allele
+bags
+claimant
+clover
+biographies
+confound
+advertises
+crafter
+cripples
+bygone
+concentric
+couldn't
+contentions
+acrid
+costume
+aft
+aesthetic
+bandits
+adducts
+constellations
+coffer's
+created
+commercial
+art's
+cookie's
+ammonia
+adjunct
+articulateness
+congratulated
+crags
+brandishes
+annual
+byword
+affection's
+college's
+aboriginal
+bikini
+buttering
+allotter
+console
+advent
+activates
+beverage
+april
+acceptable
+barrel's
+boys
+attractor
+azimuth
+critics
+ballooner
+aren't
+adulterating
+criticise
+abeyance
+automatically
+collaborative
+capabilities
+crawls
+anomaly's
+climaxed
+animately
+aroma
+belie
+attires
+argumentation
+baseboard
+bluebirds
+cactus
+byproduct
+balancer
+beholder
+conservationist's
+betrayer
+agony
+accusingly
+convict
+coaxes
+breeds
+agitated
+championship
+brevets
+auscultate
+counselling
+cornerstones
+america
+canoes
+aspirator
+compensate
+antiseptic
+bereave
+absinthe
+compose
+collide
+alabamian
+candid
+civilized
+clamps
+authoritarianism
+colonist
+bugging
+bins
+abashing
+battlers
+canning
+berate
+assembler
+amateurish
+boasted
+angriest
+bluffs
+colonize
+balcony
+bleat
+bustard's
+attenuate
+contagiously
+bicep
+babel
+beatniks
+brush
+analogy's
+audiologist
+assessment's
+camera
+arbitrary
+alleyway's
+concession
+constructions
+accompanies
+accretion's
+aroused
+charcoaled
+belated
+bottom
+bloodshot
+bisques
+advocate
+arabs
+cathodes
+adamant
+challenge
+absurdly
+abolitionist
+cleavers
+bludgeons
+bassinet
+clause
+coiling
+cask
+boob
+azalea's
+afghanistan
+carriages
+blade's
+bobby
+asinine
+acclaiming
+absorbed
+blacken
+cheating
+bootleg
+anonymous
+addict
+astonishes
+awry
+adequate
+categorization
+casks
+blaster
+aspirants
+abscesses
+airing
+assumptions
+capitalists
+board
+asynchronism
+body
+aye
+contraction
+athens
+arsine
+cohabitations
+below
+bows
+aviator's
+ampoule
+connective
+adapter
+authenticate
+blackboard
+brilliant
+appoints
+attics
+conquer
+boning
+comestible
+camped
+blonds
+aisle
+coals
+billboards
+characterizers
+crow
+clout
+admirer
+actuarially
+abstruse
+accessing
+bonfires
+clenched
+characteristic
+catching
+chars
+canons
+barrier
+championed
+butterflies
+completely
+calendar
+artwork
+abjections
+burgher's
+correlates
+arrivals
+accepters
+circuses
+breadboards
+accomplishment
+analyzed
+appropriates
+cancel
+bordering
+aperture
+civilizing
+assortments
+blackest
+blitz's
+copy
+commenced
+admirers
+cheers
+croppers
+cliff's
+circumstance's
+bibles
+buttressed
+consecutively
+birefringence
+automaton
+cheerless
+chopping
+ballooned
+convent
+acknowledgers
+appointing
+belies
+comeliness
+bangle's
+communication
+bisector
+avocations
+clique
+brainstem
+campusses
+allocators
+bramble's
+assaults
+commemorate
+appendix
+agent
+apportioning
+bottled
+artifact's
+block's
+archery
+bagatelles
+candies
+catched
+cognitively
+creepers
+concentrated
+bout
+balustrade
+abodes
+carrying
+confirming
+cannibal
+chinners
+carbonate
+anguish
+butt
+colons
+ablated
+corporation's
+cock
+convincers
+beret's
+bluish
+compressive
+authenticates
+commemorative
+bureaucracies
+coinage
+coach
+assigning
+concentrators
+capitalizing
+appraisals
+belaying
+candy
+blossomed
+bricks
+atonal
+analogue
+caters
+barbaric
+applique
+clink
+audio
+actress
+assyrian
+apprehension
+conversation
+apsis
+bedevil
+comics
+affricate
+comings
+buttress
+angering
+buckboards
+bombed
+adversely
+adequacies
+commended
+causeways
+adherers
+codes
+aquaria
+ape
+bulks
+compactly
+brainwashes
+bleats
+commandants
+conditionally
+adjourns
+clobbering
+allowances
+buildings
+complemented
+blanker
+algeria
+brief
+creak
+adductor
+categorizer
+approacher
+argument's
+clocked
+bedazzle
+cause
+coordinator
+buildup
+countenance
+abhorrer
+backtracked
+bogus
+closer
+broilers
+chirps
+adjournment
+belles
+bitingly
+befogged
+contexts
+amorous
+breeding
+abortions
+blockage's
+alternatives
+bouncing
+beryl
+ballistics
+banters
+carpenters
+auction
+bowdlerizing
+brazen
+bonuses
+circulated
+adultery
+archival
+bears
+baptized
+burglaries
+borrowing
+barbarous
+casher
+adolescents
+atrophic
+busily
+aerating
+coatings
+athenians
+casing
+consuming
+alphanumeric
+beaches
+bisection's
+conjecturing
+aspirate
+biography's
+accompany
+bureaucrat
+broomstick's
+colony
+coalesce
+clock
+bequeath
+collaborates
+belonging
+configured
+burlesques
+anode
+consenter
+bug
+counterpoint
+counts
+bangladesh
+analogical
+accident
+bulky
+affinities
+abysmally
+boorish
+assiduously
+cannisters
+autocollimator
+bassinet's
+barrelling
+blurts
+carbonize
+candle
+act
+addressees
+constraints
+boast
+complaining
+coziness
+avocado
+coolest
+blank
+beadles
+anytime
+covetous
+appellant's
+angers
+academies
+ageless
+chased
+constitution
+consonant's
+boosting
+ascetics
+aerosol
+apse
+blushes
+clang
+confers
+confidentiality
+coolie
+colon's
+chickadees
+badminton
+argonaut
+constituting
+aloha
+contracts
+broomstick
+brackets
+attendant's
+connection's
+conciseness
+abstractor's
+composes
+chaste
+assures
+conjuring
+barbital
+bunion
+bases
+clowns
+barrelled
+audience
+auctioneer
+complexly
+aviator
+conjectures
+backscatters
+cheerfulness
+communicating
+agreement
+bricklayers
+bilabial
+abstruseness
+cobol
+cooperating
+admit
+blundering
+accelerates
+assaulted
+concealing
+anachronism
+bowels
+butane
+anniversary's
+converts
+convoyed
+climates
+barriers
+clubbing
+additives
+bask
+confessing
+caravan
+colonizes
+continuous
+cheerlessness
+boggled
+armpit's
+bridgework
+allegro
+cricket
+cannon
+adoption
+clanging
+auscultations
+billowed
+alphabetize
+airlift
+appointee
+boyfriend
+chaotic
+corrections
+bonus
+contrasted
+convulsion's
+confessors
+adumbrating
+autocrat's
+coronary
+authentic
+barley
+brawling
+aegis
+appends
+bolshevism
+charted
+applicant
+aileron
+considers
+chin's
+alkyl
+amendment
+boulevard's
+avian
+breather
+canyons
+cannon's
+apportion
+badgered
+augers
+advisers
+censuses
+beveling
+aught
+arthogram
+anonymity
+appliance
+atmospheric
+anesthetizing
+ambulances
+blustering
+burnt
+chestnut's
+collects
+aliment
+anxieties
+championship's
+channeled
+arrival
+amassing
+corpse
+bedtime
+blackbirds
+cats
+constants
+chemistry
+brewery
+brother's
+boasts
+accentual
+bellwether
+bely
+courted
+baroness
+configure
+collection
+aviary
+achieves
+belfry's
+beech
+baseman
+bacterial
+contestable
+blond
+contracting
+comparably
+consultation's
+booster
+conspiracies
+belief
+candidate's
+boardinghouses
+connectivity
+check
+crazy
+collided
+assistant's
+critic
+bilateral
+cheapening
+appalled
+autopsy
+balled
+abnormally
+acquires
+aloofness
+backwaters
+combative
+computerizing
+craters
+contributorily
+behaved
+comers
+axiomatizations
+analogously
+banjo's
+cleanser
+capitalizes
+chamberlain
+aggregates
+amenorrhea
+begins
+condone
+cleaved
+bustard
+adsorb
+airedale
+bridles
+audited
+could
+amour
+checkbooks
+admiring
+arrested
+commerce
+asbestos
+can's
+clamping
+bathers
+acknowledgments
+census
+acrobat
+bargains
+apogee
+creaking
+busboy's
+additional
+chants
+circumvents
+afloat
+anyplace
+alumnae
+anions
+classroom's
+ballerina's
+convents
+angered
+climbers
+citation
+cools
+clamor
+capaciousness
+beatific
+abrades
+advocating
+coverings
+claims
+brethren
+advertised
+atrophies
+coffer
+beagle's
+brazenly
+bitterly
+clergyman
+braiding
+compressible
+convicting
+agreeableness
+antithesis
+cogently
+botanist's
+bidirectional
+bewilders
+airlock
+costumer
+blamelessness
+agglutinins
+catalyst's
+allocation
+annunciates
+borderings
+accomplishes
+confronters
+clinically
+breadbox's
+canvassed
+communicative
+coercing
+backpointer's
+bramble
+congregations
+crave
+courtesy's
+cocoon's
+admitting
+chieftains
+acclimate
+consequences
+cones
+contradict
+axolotls
+contractual
+artist
+atrociously
+consecutive
+berated
+bluing
+attacks
+choruses
+blatant
+balance
+amplifier
+assist
+analyst's
+ambler
+conveyance
+compromising
+baffler
+corridor
+bed's
+condoned
+boulevard
+anomie
+averages
+basics
+apologia
+cabbages
+concretes
+alcoholic
+aliased
+chocks
+balsam
+collies
+censor
+arouses
+conundrum's
+academically
+bent
+codings
+coastal
+allots
+acclaim
+citations
+cantor
+circularly
+boarder
+caribou
+biologist's
+cowling
+connects
+chasing
+bootstrap
+backscatter
+abstractly
+corrupt
+alleviating
+biasing
+abrade
+arraignment
+beaten
+blanketing
+compactness
+adage
+coincided
+borate
+bra's
+concepts
+bootleger
+christian
+argos
+basal
+abate
+campuses
+abridging
+confusers
+cabin's
+audition's
+amphibians
+attractively
+adhesive's
+ascendency
+beforehand
+ache
+brokers
+bowler
+criminally
+american's
+chock's
+artillerist
+appropriation
+characterization's
+artifices
+annoys
+constituents
+bottle
+beaned
+consisting
+beholding
+ceremony
+carpeted
+absolutely
+anorexia
+accredited
+azaleas
+amaze
+commit
+afflicting
+contriving
+adventure
+blood
+blabbing
+absoluteness
+appreciable
+approachers
+bumptious
+behavioristic
+anticipates
+adults
+barnyard's
+banging
+banana
+bilge's
+aware
+coheres
+bronchi
+commissioned
+arrogation
+confines
+core
+attenuation
+afterwards
+clearing
+applies
+alphabetized
+cemetery's
+campaigning
+abolishes
+brig
+cheer
+combers
+backtracker
+clinker
+clouds
+clog
+berries
+advising
+childish
+clobbered
+bride's
+astrophysics
+canker
+concatenate
+bite
+chagrin
+bodybuilders
+calamity
+admiralty
+councillors
+competitive
+assessments
+copper's
+cabling
+casket
+conducted
+backplane
+boyfriends
+bingo
+broader
+confiscates
+communicated
+baton
+cocktails
+albanians
+boardinghouse's
+brats
+akimbo
+categorizers
+comparator's
+blackbird's
+accidentally
+companion's
+clippings
+accosted
+bell's
+burly
+aggregations
+boathouses
+airmails
+abreactions
+changers
+carbon
+cleaners
+bookkeeping
+correlations
+backer
+conclusions
+brainstem's
+anecdotes
+chateau
+cogitating
+amphibious
+compounded
+completeness
+comptroller's
+boatswain's
+bolstered
+acquiescing
+actors
+calorie's
+adaptability
+abstractor
+bimolecular
+belly's
+automobile
+automotive
+analyticities
+awesome
+colonizer
+approximated
+chemist
+coronet's
+classmate
+anteater's
+altars
+adulthood
+amid
+assails
+blizzards
+corroborative
+biographer
+compartment
+blooded
+bipartisan
+bluff
+aloof
+bronchiole
+clincher
+congratulations
+ablation
+caught
+collier
+chooses
+antidotes
+artery
+clearance
+civility
+basketball
+auscultated
+behaviorally
+crowning
+autobiographical
+cheaply
+brutally
+agonizing
+clerk
+comprising
+baller
+confuses
+acquiesced
+astonishingly
+birthplace
+covered
+chopper
+combinator
+benignly
+bedside
+blasts
+billboard
+appraise
+aboveground
+comforter
+credulousness
+battlefield
+barefoot
+cleverness
+apparatus
+bartering
+bromine
+aerodynamic
+crabs
+chains
+airflow
+allegrettos
+armchairs
+blacklist
+approvals
+bait
+collections
+antecedent's
+airbags
+casted
+content
+conferrer's
+crouching
+coughs
+canal
+amphetamine
+augustly
+bedraggle
+arithmetic
+cataloger
+alluding
+credulity
+coffees
+crueler
+beautifully
+caresses
+correlative
+consul
+criticizing
+couched
+baths
+alchemy
+bargain
+accomplishments
+conveyer
+benevolence
+broil
+chilling
+axed
+attire
+collisions
+categorizes
+cited
+aeration
+accommodating
+coordinations
+boxcar
+cattle
+bullion
+afternoon's
+captures
+afghans
+comets
+component's
+ark
+bounds
+adjusting
+bravely
+capability
+chap
+absolving
+aspirating
+arcs
+conspires
+collaborated
+admonishment
+astounds
+brasses
+compromise
+changed
+consumers
+connoting
+buttonholes
+cordial
+anionic
+chastisers
+archive
+alleviate
+burglarize
+acquainted
+copiers
+cashers
+antisocial
+creations
+bookie's
+censure
+beadle's
+banded
+circled
+bulged
+cheapness
+attorney's
+chewer
+bookshelf's
+councillor
+assertion
+broom's
+contemplations
+club's
+balkans
+cherubs
+alas
+chair
+apologizes
+compartments
+beyond
+aptly
+censured
+allegros
+boosts
+card
+arithmetizes
+attainment's
+arrester
+anding
+asker
+compatibilities
+confidentially
+commissioning
+cleaner
+aversion
+cooperative
+battalion's
+cemented
+charity's
+conceited
+capable
+anymore
+computing
+aping
+chiefly
+affair
+beaners
+allying
+caption's
+antipathy
+causal
+abyss
+botchers
+burglarizing
+confidant's
+activator
+continent's
+census's
+brat's
+antagonism
+bedspring's
+antiserum
+charge
+connector's
+alike
+believable
+belfry
+cast's
+bureaus
+beneficiary
+abolisher
+artichoke's
+broadly
+concurrent
+alteration
+bookies
+crafts
+bays
+ass
+bouquet's
+ave
+chords
+crazes
+anemic
+appoint
+beets
+billing
+contest
+assassination
+allot
+brindled
+acute
+absolves
+adsorbed
+auxiliaries
+belatedly
+businesslike
+assassinates
+bookkeepers
+bevel
+adders
+automate
+archangels
+breakfasted
+changeability
+contested
+cradles
+combatants
+besieging
+certainty
+attempts
+bankrupting
+compiler's
+complications
+banquets
+ancestor's
+ail
+abbreviating
+compacter
+approvers
+acknowledges
+comically
+almonds
+counsellors
+calmness
+assailed
+crane's
+baser
+big
+corruption
+circuitry
+briefness
+community's
+banquetings
+alms
+bass's
+bellowing
+adoption's
+blockading
+compellingly
+builders
+befallen
+bombproof
+cartons
+chore
+crimson
+anther
+clucks
+assemblies
+beatitudes
+aspiration
+compels
+angst
+balancing
+bowstrings
+bayonet's
+butte
+biomedical
+casualness
+accolade
+blackberry's
+bunched
+affright
+clung
+burlesque
+bare
+corrected
+arbitrate
+cropping
+coherently
+bloodhound
+circularity
+courtesies
+articulating
+concluded
+analogy
+brutalized
+airmail
+cooperator
+cousins
+centralization
+bibbing
+beside
+bravo
+abductors
+cars
+bovines
+bump
+absconding
+chins
+chasers
+boundary's
+antecedents
+awed
+counselled
+aback
+attenuator's
+blazer
+bettered
+awaken
+abreast
+beagles
+artisans
+buckled
+credence
+control's
+bewhiskered
+calloused
+breathe
+collaring
+blossoms
+bring
+actualities
+bivalves
+animals
+cowboys
+constituency
+affordable
+acrobatic
+attiring
+boatswain
+concurrence
+abrasions
+babel's
+cowerers
+chiffon
+bostonian
+criterion
+blinds
+cased
+affections
+conditioners
+clutter
+accrued
+attractors
+botcher
+compunction
+bludgeoned
+censored
+allah's
+chronic
+burrs
+commodity's
+appraiser
+asserters
+cheaters
+besting
+anchorite
+combine
+afforded
+cigarette's
+bathrooms
+apostles
+chloroplast
+bootlegging
+bibliographical
+beans
+bylaw
+benefited
+brochure's
+cordially
+brashly
+beastly
+bologna
+alderman's
+burning
+billow
+convert
+buffaloes
+comparatives
+assistances
+camouflaged
+announcement
+bobwhite
+brawl
+adducted
+cavern's
+affectation's
+bandying
+brunette
+architect's
+aphorisms
+cremate
+bray
+billed
+conception
+battlefield's
+bandaged
+broaches
+bazaar's
+beatification
+bigotry
+clergy
+abstains
+befits
+bantering
+conceivable
+attachers
+analogies
+bimonthly
+august
+additionally
+confirmation's
+ballooning
+cardboard
+belle's
+counterparts
+candor
+bishop
+comprehension
+affronted
+bravura
+courting
+antidote
+buggies
+arisings
+appendix's
+bright
+categorize
+cooking
+agnostic's
+billets
+amok
+bewitching
+audiograms
+column's
+bussed
+checkbook
+alteration's
+atherosclerosis
+broached
+based
+cacti
+boardinghouse
+bowdlerized
+anchoritism
+achievement's
+bald
+cover
+codifications
+capacitor
+brashness
+causes
+acyclically
+argument
+boarders
+audiometer
+compute
+contribute
+crisply
+bitters
+circumvent
+assailant
+bosun
+buyers
+alibis
+blurting
+coasts
+bivouacs
+arrogating
+albanian
+attempted
+acquisitiveness
+applauding
+alfalfa
+cantors
+canonicalizes
+alkaloid
+bruising
+associativity
+budgetary
+carbolic
+clashing
+buffalo
+acorn
+analyzing
+backyards
+comedian
+betwixt
+aces
+chartered
+additivity
+becalm
+combat
+characterizations
+clinics
+bulbs
+bloc
+amenable
+civilian's
+breech
+attainment
+bounding
+compiler
+cotyledons
+billboard's
+caper
+aphasia
+chester
+combats
+biddable
+articulates
+caps
+assignees
+bifocals
+beady
+chinese
+assertions
+allegation
+championships
+accrue
+containment's
+croaking
+classifying
+annum
+brightened
+bits
+appointer
+besieger
+citizen's
+cerebral
+canto
+bakers
+capitol's
+authorizer
+blockaded
+anodizes
+alarmed
+buttressing
+attenuates
+bumptiously
+chronological
+colleges
+coward
+contraption's
+abstractions
+controversial
+boric
+bids
+agents
+backpointer
+bumped
+bottoms
+bowlines
+captivated
+article
+cliche's
+chases
+choker
+bremsstrahlung
+consult
+adjudged
+auctioneer's
+covers
+accurateness
+clues
+bugler
+bareness
+cedar
+alleviation
+anesthetically
+backpointers
+arched
+administered
+arrowhead
+continues
+asks
+confessor's
+allure
+backlogs
+childishness
+appointive
+covering
+conscience's
+bellows
+blanked
+considerations
+appalachian
+aerate
+budged
+city's
+accordion
+cliche
+collectors
+comprehensive
+boomed
+chariot
+baffling
+bunkmate's
+bumbles
+contaminating
+corroborating
+applications
+bursting
+cabbage
+befalling
+acquittal
+compromisers
+components
+arpeggio
+brothel's
+credibility
+begrudge
+confirmation
+academy
+appertains
+calibrates
+bureaucrats
+bawl
+costuming
+biography
+adoration
+cloaks
+aggregating
+business
+aphorism's
+carters
+admixture
+coexistence
+anomalously
+adapts
+amide
+affiliation
+capillary
+biscuit
+brainy
+bellhops
+chartings
+cohered
+austria
+champions
+basin's
+cascading
+consultants
+bison's
+admixed
+arithmetically
+clothed
+betterments
+conspirator's
+addition
+adolescence
+bolsheviks
+abominable
+breathless
+cozy
+arouse
+bumble
+about
+apace
+astronaut
+asteroid
+cable
+crab's
+beachhead
+assets
+analyses
+bisection
+coconuts
+alleys
+armament's
+bloodstains
+arpeggios
+apologist
+blithely
+anabaptist's
+beadle
+channelled
+confuse
+annoy
+beautifiers
+cheats
+clenches
+amuse
+bewail
+constitutional
+birth
+appendixes
+amazed
+berry's
+bilingual
+blustery
+amplification
+clogged
+blackmailing
+breakables
+adduct
+bondsmen
+conferred
+codewords
+bequeathal
+abundantly
+banner's
+atrocity
+congested
+closely
+absolution
+concatenations
+anarchic
+crag's
+communicators
+cavities
+comptrollers
+backstage
+bewailing
+charcoal
+conveyances
+collar
+bores
+briefest
+comments
+awning's
+associator's
+antarctica
+correspondingly
+bidden
+ad
+clings
+bit's
+apollo
+bulldogs
+chateau's
+amounting
+cogitates
+bellhop
+bookish
+bout's
+cannister
+bicep's
+asses
+beef
+battlefields
+consort
+auspicious
+breezy
+buried
+beverages
+approximates
+conduction
+bleakly
+blanketers
+ascertained
+absentminded
+bolivia
+births
+behave
+bilk
+breaths
+charter
+abstaining
+appareled
+boulder's
+breadwinner's
+correct
+accessed
+befitted
+adulterer
+axe
+activation
+betrothed
+asymptote
+bullet's
+clusterings
+baud
+bustling
+ballplayer
+constraining
+cleared
+brown
+affirmed
+agencies
+churches
+backyard
+burntness
+bronchioles
+charmers
+backscattered
+abridgment
+claw
+blow
+adjourning
+constantly
+brightens
+autobiography
+cards
+bypassing
+alcibiades
+concurrency
+chuckles
+bests
+belligerents
+adjustments
+bolshevik
+cabins
+astronomically
+cartridge
+boxcars
+boned
+bottomed
+burgeoned
+adjourned
+apprenticeship
+chastiser
+breached
+boycott
+butchered
+coordinating
+cottage
+brainwashing
+confinement
+bandies
+absentee
+collapses
+cruel
+along
+alloy
+convoying
+assignment's
+crisp
+ambidextrously
+blindfolded
+chilly
+condenses
+avers
+broiler
+anesthetics
+beaker
+cholera
+brag
+coffins
+cranked
+allocator
+brutality
+acquire
+blushing
+briar
+abolish
+crossovers
+broiling
+consolers
+beatify
+almanac's
+cooled
+commencements
+clasp
+committing
+condemnations
+altar
+by
+bombastic
+confederates
+bong
+concerted
+compilers
+counterproductive
+brig's
+accurate
+avidity
+cleavage
+blame
+conceive
+assessor
+consolingly
+concise
+computes
+alliance
+clucked
+axon's
+annunciating
+baseball's
+allusion
+brays
+auras
+blond's
+bronchitis
+ciphers
+blowing
+broth
+canonically
+baseness
+byline's
+appetite's
+colonists
+condensed
+cawing
+beaning
+broadening
+colonist's
+apocrypha
+chauffeured
+cored
+branding
+carrier
+assessed
+collegiate
+chirped
+accounted
+clubbed
+antibodies
+behalf
+alphabetizing
+conqueror
+alpine
+budgeters
+casements
+appropriate
+compliments
+cast
+accountancy
+cathedral
+conserve
+accorders
+arbitrarily
+cowing
+bars
+bagel's
+climax
+attention's
+cautioning
+centipede's
+almost
+abstractionist
+carpenter
+containing
+arab's
+courtesy
+carton
+accelerated
+bowman
+boastings
+banal
+bucking
+accomplishment's
+classification
+baldly
+abruptness
+calibrations
+blocs
+biking
+assenter
+adversities
+compartmentalized
+chemical
+attic
+audiogram's
+applauds
+crests
+bad
+bounce
+accelerators
+contemptuous
+attentions
+cancellation
+battles
+aging
+advantages
+anthologies
+answers
+bruised
+castes
+any
+coped
+arcade's
+adaptively
+arsenal's
+confessed
+controllability
+acceptor's
+abrogated
+abutted
+amusingly
+apology
+broils
+court
+boundaries
+bode
+collie
+adiabatic
+ambitions
+charged
+awfulness
+consorts
+botanists
+blurring
+absents
+batten
+backwoods
+breaks
+certified
+chattering
+admitted
+bathrobe's
+analogous
+corporacy
+bijection's
+combatant
+checked
+condition
+amoral
+bayed
+bedroom
+chanting
+antics
+charity
+blip's
+biped
+brilliance
+catchers
+booted
+anabaptist
+clothe
+comforted
+complaints
+coacher
+admissible
+bang
+concisely
+cookery
+capita
+assurance
+codifying
+benchmarks
+aunts
+commentaries
+anon
+applicators
+constructor
+associated
+abuses
+choicest
+confiding
+antislavery
+apron
+ashore
+cheerfully
+betterment
+administration's
+campaign
+cremated
+ambulatory
+bleacher
+afterthought
+barkers
+choir
+crossly
+conducive
+cache's
+battery
+actinium
+countryman
+cajoled
+appeasing
+beamer
+cleaves
+anthem's
+clearing's
+cooperated
+barker
+crowing
+apprising
+accusation's
+beginning
+associator
+booking
+caved
+amicable
+codify
+clairvoyant
+bevels
+becalms
+brawn
+bunkhouse's
+arms
+antiredeposition
+belt
+antiphonal
+cried
+brae's
+bridal
+acronym
+clay's
+checkers
+auxiliary
+bind
+compares
+agilely
+askers
+blankly
+antagonist's
+bimodal
+captivation
+creditable
+concentration
+calling
+bartender's
+autopsied
+correspondent's
+carnivals
+abjure
+bystander's
+bungle
+chanticleers
+conceding
+burghers
+boards
+accessions
+compensations
+arabian
+churn
+crowed
+centering
+abnormalities
+courtier's
+congregation
+aberrant
+annexing
+blockhouse
+anthropomorphic
+bedder's
+abutting
+conundrums
+affiliated
+cancellation's
+bolts
+ballgowns
+augmenting
+bureaucracy's
+bootlegged
+audiometers
+blueberry
+affliction
+appreciation
+codifier
+amasses
+countering
+crackle
+canoe
+consuls
+breathes
+broiled
+amalgam's
+bodes
+ballooners
+coating
+corollaries
+amphibology
+agenda's
+chafing
+alcoholics
+accredit
+anisotropy
+anchovies
+carriers
+acceptors
+betrayed
+buttocks
+busy
+bunny
+cropper
+accreditations
+bumblebee's
+adhesives
+civilize
+accedes
+abroad
+arch
+crept
+cotyledon
+alphabetic
+braille
+amateur
+adjure
+ascertaining
+budge
+adulterate
+additive's
+cardiac
+born
+brewed
+borneo
+bun's
+blue
+cackled
+acclimates
+airline
+blinder
+brokerage
+communicant
+central
+aggrieved
+asynchronous
+bough's
+acidly
+archaeology
+complementary
+animator's
+bodyguards
+climbs
+apathy
+constellation's
+acculturate
+archaeologists
+contingents
+control
+anglophilia
+billings
+corporate
+athlete
+accusing
+appear
+announcing
+accordions
+computerize
+combinations
+bile
+abut
+charger
+columnize
+computer
+blacks
+converges
+blamer
+bulked
+convincingly
+checker
+correspondence's
+accelerate
+accessible
+conceivably
+abscissa's
+adsorbs
+anglophobia
+anomic
+casters
+churning
+crease
+brood
+appendage
+bulwark
+bombers
+arcaded
+breadboard's
+aphrodite
+color
+commodore's
+answerer
+bobolink
+cloth
+conversion
+clime
+artery's
+birthplaces
+compiled
+arrack
+beetles
+bobs
+compatibility
+cocoon
+counterpart
+audible
+colonies
+airport's
+beige
+cogent
+bromide
+begrudging
+acids
+crucifies
+beggary
+archipelagoes
+availably
+counterfeiter
+blanketed
+amending
+accelerometer's
+advisors
+byway
+alignment
+amber
+austin
+copyrights
+beaus
+brigantine
+comforts
+appointment's
+crawler
+bangles
+contemplation
+concur
+characterizing
+censoring
+charters
+catalogues
+appropriately
+builds
+aeronautic
+confused
+comber
+axially
+cackler
+coercive
+ambassador
+arcades
+brash
+amorality
+belittling
+battling
+bloodied
+acrylic
+bantered
+clasped
+carcass
+archangel
+annunciators
+aristotle
+boulder
+burglarproofs
+chooser
+abilities
+calmest
+bach
+always
+blaspheming
+crossover
+bakeries
+clocks
+ankle's
+accidental
+arbitration
+chirp
+aeronautical
+boy's
+acidic
+bowline
+anonymously
+cod
+couplers
+beautifications
+bluffing
+backarrows
+brow
+covenant
+acronym's
+banning
+albeit
+ascetic
+burn
+animator
+beatnik's
+coveted
+cipher's
+broke
+cap
+bellman
+bulldozed
+clarifies
+bathes
+blip
+availabilities
+booth
+clangs
+audiences
+cathedrals
+confounding
+bigot's
+beecher
+arts
+company
+attributed
+avenged
+bawling
+caustics
+alee
+bordello's
+banks
+affords
+complied
+commas
+collaborate
+aquatic
+ambitiously
+burro's
+beard
+bittersweet
+candlestick
+bylaws
+broadcastings
+believe
+barrels
+braying
+certifications
+contrasts
+crashes
+audition
+confine
+bucks
+abates
+bureaucracy
+ambles
+besiege
+broccoli
+antibiotics
+attenuators
+accelerometer
+caste
+bib's
+browbeaten
+appurtenance
+bauxite
+asceticism
+case
+chewing
+aerator
+achievements
+barricade's
+agglutinates
+bewildering
+cartridge's
+children
+bufferrer
+actuator
+converging
+bolted
+chat
+combs
+chemist's
+adduced
+algebraic
+circular
+bloated
+conclusion
+burgess
+certifies
+absconds
+comprise
+benzedrine
+bumbler
+banjo
+allow
+appealing
+cooperation
+abraded
+chaperoned
+biracial
+braced
+censurer
+acoustician
+appraised
+benefitting
+constructs
+convertible
+administrative
+asocial
+area
+creature
+besetting
+crater
+begrudgingly
+blanket
+ablest
+alba
+airplanes
+allowing
+briefly
+beneficences
+concurring
+adjective's
+cork
+aerospace
+anomalies
+asher
+auger's
+boilers
+abhorring
+broadenings
+bladder
+belay
+approver
+abdominal
+commends
+cringing
+billiards
+beater
+auspice
+contrasters
+bights
+absentees
+atoll
+cooler
+activator's
+basement
+burgeon
+allusiveness
+codeword's
+bandage
+contemplate
+adopted
+coping
+carving
+baptism
+colds
+altos
+background
+closet
+commuted
+acre's
+aliens
+council
+cans
+cheese
+ally
+aseptic
+belgian's
+crossbar
+addressed
+commons
+call
+careers
+breakfasting
+brazilian
+catholics
+bachelors
+consultant
+brighter
+crossword's
+burglar
+avoidable
+batting
+cigar
+amps
+axiological
+combed
+comforters
+albumin
+cookies
+booming
+archaize
+canton's
+bunkmate
+combination
+bondsman
+anxiously
+affixed
+associatively
+cigar's
+backstitch
+calls
+captivates
+commodities
+atmosphere's
+asserting
+beaver
+beatnik
+container
+activists
+consoler
+commoner
+buttonhole's
+abhorred
+aggregate
+cliff
+antidisestablishmentarianism
+broach
+ambling
+comer
+bited
+advocated
+behaves
+bosom
+continents
+conserves
+bashful
+ago
+backarrow
+circumventable
+avocados
+briar's
+annuls
+barnstorming
+aired
+carry
+crossbar's
+aspire
+beards
+abides
+cliques
+completes
+brassiere
+absorbs
+annul
+chairman
+baron
+battens
+africans
+abatement
+colonization
+carries
+borough
+allurement
+breakfasters
+alkali
+acoustically
+corners
+capturer
+casualties
+asphyxia
+animized
+administrator
+belying
+basketballs
+bylines
+bandit
+autopsies
+braining
+contradiction's
+antic
+butted
+bacillus
+blurt
+conditioned
+backers
+agreeable
+almanacs
+cider
+chicken
+chambers
+clutch
+assailant's
+conveyers
+amazers
+beribboned
+breeder
+caveat's
+buffers
+combination's
+ampersand's
+crafting
+clanged
+caving
+aspirant
+butlers
+adjective
+auckland
+announced
+creators
+caches
+baseline
+codifies
+baptism's
+coarsened
+cohesion
+airman
+avenge
+backaches
+budgeted
+armpit
+bicycled
+converged
+besmirched
+autonomic
+coming
+assemblage's
+chained
+admissions
+alcoholic's
+branches
+bunk
+anciently
+bloods
+adventurers
+amazes
+coloring
+abstractors
+adaptation's
+boar
+amulet
+agglutination
+conquerable
+booker
+confronts
+barometer's
+bedbugs
+barricades
+cheap
+bewitch
+circus
+backward
+archeology
+automobiles
+bending
+amino
+beckoning
+admits
+berliners
+borer
+clambering
+atomizing
+banner
+blissfully
+catchable
+breakdown
+abjured
+computerized
+chaplain's
+amphitheater
+ballot's
+craziness
+croaks
+counties
+adopting
+breast
+airstrip's
+basin
+contemplating
+commitments
+critique
+appears
+bellies
+baccalaureate
+abducted
+blackened
+animosity
+appraising
+antiquity
+assistants
+asthma
+bootstrapping
+bounties
+agleam
+advertisements
+benches
+artful
+broadens
+chuck's
+betrayal
+blasphemed
+brooms
+castled
+coroutine
+conscious
+beetle
+banshee
+advertising
+baring
+awakens
+balm
+billions
+compromisingly
+ballroom's
+burrower
+bayou's
+ambiance
+beheading
+bought
+adagios
+adornment's
+anointed
+abolishment's
+anesthetizes
+badly
+boyishness
+consultant's
+cheek
+cannibals
+breakdowns
+assured
+agates
+bicker
+appliances
+cafe
+bagpipes
+adrenal
+combinatorially
+belligerence
+bricked
+adjacency
+aimless
+crook
+cherry's
+assessing
+brushfire
+cormorant
+captained
+blundered
+conceptually
+congress's
+contraster
+ambushes
+bronze
+autotransformer
+corded
+brisker
+contently
+announcements
+bullet
+apportionments
+columnized
+canon
+conservation
+algaecide
+blackening
+compassion
+beaks
+constructibility
+chapter
+abscond
+costly
+bacon
+coldest
+aptness
+billionth
+altercation
+approbation
+alternator's
+criticizes
+befell
+canopy
+buoyant
+brazil
+anticipate
+absenteeism
+champion
+aesthetics
+cadence
+betroth
+confidants
+bean
+braid
+aphids
+cluttering
+cantankerously
+bloom
+barbarity
+clawing
+bogged
+agreed
+asia
+abrasion
+corporals
+baselines
+box
+chartering
+apotheosis
+ampersands
+conceit
+creamer
+adhered
+circuit
+carpet
+accompaniments
+boomerangs
+blindness
+chipmunks
+bewitched
+allocate
+bicycle
+compacted
+cab
+calcium
+cellists
+apex
+borrows
+completed
+brightly
+constables
+ascertains
+conspiracy's
+badgers
+bunion's
+anabaptists
+broadband
+clefts
+accepted
+benched
+catalogued
+cadenced
+alliteration
+acquiesces
+boxcar's
+athlete's
+bracing
+cremations
+analysis
+crossings
+assorts
+apologize
+brazier
+configurable
+basking
+craves
+belle
+conversation's
+belligerent
+anesthetize
+brewers
+cackles
+adventures
+airlock's
+booklet's
+apply
+anecdotal
+bewails
+computer's
+autographs
+acclimated
+coefficients
+avidly
+beckoned
+broadener
+bulk
+blacklisting
+belly
+acquit
+convoy
+achiever
+aversions
+advisor's
+captor's
+camel's
+asset's
+advantageous
+basement's
+confident
+crescents
+compiling
+butler's
+cartoon's
+adaptive
+chlorine
+abets
+cruelly
+amiable
+baleful
+ceiling's
+adumbrated
+cherry
+aspirant's
+cashing
+candidly
+chaff
+bitter
+brim
+alcove
+bulb's
+carbonizers
+citizen
+attic's
+breed
+consumer
+conferrers
+accommodations
+contrapositive
+beget
+brilliantly
+attentionality
+continuation's
+bosses
+brave
+configurations
+benediction's
+conferring
+accessor's
+bobolinks
+bulled
+cleanness
+algorithm
+advancements
+altogether
+accumulations
+albacore
+bowing
+belching
+apical
+consequentiality
+bagpipe's
+ambrosial
+bullying
+cleans
+attendance's
+complimenter
+blink
+cager
+assembling
+coat
+allowable
+astringent
+antiresonator
+cardinal
+clicks
+commentator's
+blossom
+categorizing
+amphibian's
+commonality
+consonant
+classics
+affable
+accorded
+aimlessly
+archetype
+administerings
+boldness
+anatomy
+apprehensively
+absence's
+actuality
+attempting
+categorical
+checkpoints
+allemande
+corer
+behoove
+bleaches
+bough
+blended
+blotting
+baptists
+courtship
+benevolent
+bumptiousness
+chum
+anguished
+auto
+career
+bookstore's
+carbonized
+autocratically
+cherishes
+attendees
+contends
+anastomotic
+attributing
+abbot
+came
+blunt
+battlement's
+affection
+coordination
+annotate
+besets
+bucked
+boasting
+benedictions
+adherent
+blimp's
+acknowledging
+cleverly
+applejack
+annexation
+bat's
+cantons
+beetled
+closed
+country
+creatively
+bakery
+blasphemously
+chalking
+bold
+attended
+crasher
+backtrackers
+artist's
+bracelet's
+allowably
+affiliating
+arrant
+brayed
+barbells
+consigned
+abolishers
+climatic
+atrophying
+amigo
+arsenal
+ascribes
+converses
+aura's
+allotted
+bliss
+classical
+bigger
+ahead
+chopped
+blade
+casualty
+acceded
+bottling
+axon
+casement's
+battlefront's
+convinces
+alerting
+advertisers
+blemish
+agglutinating
+commonplaces
+autocorrelation
+armistice
+crediting
+besmirch
+amplify
+auscultation
+befalls
+called
+alnico
+arbiter's
+abort
+argonauts
+cessations
+cribs
+blare
+aforementioned
+condemners
+contaminated
+complained
+bootstrapped
+criticism
+cooperatively
+binding
+bullies
+basins
+contrived
+assort
+adulterously
+booms
+abandons
+also
+appealed
+count
+contributed
+beet
+crashers
+carryovers
+clays
+blackness
+cosmetics
+awkward
+blurted
+bothers
+analyzer
+backups
+alarming
+bicyclers
+credit
+abrogate
+audience's
+architecturally
+alibi's
+complicator's
+chuckle
+corporately
+banishment
+communist's
+birdie
+asymptotic
+break
+braze
+benzene
+bridgework's
+beak
+agitators
+collateral
+arranges
+bayonet
+breathlessly
+counsellor
+creates
+convulsions
+backdrops
+applicants
+altercation's
+commission
+breathtakingly
+corresponds
+backdrop
+armaments
+build
+biannual
+buttoning
+computational
+chaired
+bather
+critically
+amanuensis
+bantus
+confidential
+annoyance's
+carder
+authorizing
+acquits
+bipeds
+cocktail
+cinnamon
+burros
+brocade
+abdomen's
+creative
+acquisition's
+abdomen
+baited
+aristocratically
+alive
+committed
+arrestor
+cleaving
+comedy's
+baggage
+bra
+adaptors
+afoot
+bulls
+contoured
+amalgam
+comprehensibility
+amortizes
+biographical
+confront
+covert
+cravat
+animates
+booksellers
+bypass
+bootleggers
+bedfast
+affair's
+buzzer
+bellowed
+aligning
+bystander
+acclimatized
+accomplishing
+against
+blankness
+adopt
+addressing
+croaked
+boaters
+behooves
+audits
+boatyard
+cruise
+agnostics
+ailing
+anchorage's
+adaptations
+conceptualize
+advised
+cries
+bank
+actuators
+brazing
+catalyst
+beachheads
+aplomb
+compressed
+amputated
+contractor's
+bedspreads
+bowed
+coon
+chaplain
+cannons
+coffers
+assembly
+bouffant
+converters
+ampoule's
+borderland
+archaeologist
+blankets
+conserving
+avalanche
+assortment's
+aspic
+axle
+bereaves
+allowance
+carbonization
+bartender
+clawed
+coincidental
+appeared
+chipmunk's
+countable
+authenticators
+bestow
+alps
+caw
+aniseikonic
+avows
+blackmails
+controlling
+correlating
+audiologist's
+bit
+approving
+collapse
+coon's
+cleave
+atheists
+brigade
+autopilots
+bounteous
+commercialness
+accede
+cavalierness
+accustoming
+burnishing
+clobber
+aspirates
+brochures
+cellar's
+communes
+berkelium
+chickadee
+cobweb
+circumstances
+chose
+comprehend
+baritone's
+aggravation
+adopts
+cruelty
+and
+axer
+cautioned
+carbonic
+babbles
+bet
+charitable
+computable
+cardinality
+amenities
+confiscating
+catcher
+audaciousness
+complaint's
+cooperator's
+buddies
+baking
+constant
+classmate's
+accentuate
+choices
+crop's
+authorization's
+comedy
+brushy
+brotherly
+canals
+ads
+causeway
+abrading
+cemetery
+autocrat
+briefing
+abdomens
+apparition's
+consummately
+alkaloids
+bulkheads
+cravats
+bales
+campaigners
+bagpipe
+accentuates
+arm
+barometric
+bas
+agitator
+behavior
+abutters
+blockades
+alertness
+civilizes
+chinner
+anthropologist
+artificialness
+balkanize
+automates
+cackling
+anarchists
+amounted
+cereal's
+anodized
+cobblers
+acknowledgment's
+blear
+copper
+alphabetics
+blackboards
+apish
+answering
+afternoon
+arbors
+accused
+chickens
+agency's
+contractors
+contraptions
+cosmology
+anomaly
+bandstand
+attempter
+account
+challengers
+admiration
+calculations
+autocracy
+analyticity
+accord
+buildup's
+commonly
+babbling
+adjudication's
+attain
+ameliorating
+candlestick's
+chronicles
+align
+consensus
+agate
+adulation
+aspirated
+conclusive
+biologists
+cracks
+conform
+chambered
+beryllium
+connote
+amusing
+aquifer
+ankle
+batteries
+conservationists
+accountants
+apiaries
+actinometer
+beckon
+clearances
+clouded
+antitoxin's
+consolation's
+collectives
+boxtops
+bombarded
+bombarding
+bluest
+allusion's
+construction
+ballpark's
+codified
+coincidence
+celebration
+chip
+beginner's
+algerian
+boo
+athletics
+condenser
+bytes
+beauties
+concerts
+conductors
+awl's
+agitations
+buttered
+codifier's
+armory
+ascii
+aspirin
+arthritis
+bylaw's
+conformity
+blasting
+coinciding
+aphid's
+ceremonial
+banisters
+bristle
+bid's
+buckboard's
+bandied
+biopsy
+ballrooms
+chloroplasts
+bidding
+boil
+algebra
+constellation
+chuck
+cringes
+cleanliness
+apron's
+cosmopolitan
+bashes
+abusive
+believer
+conductor
+butters
+breweries
+allotment
+artfulness
+bunkmates
+blares
+connections
+anticipated
+classifies
+commandments
+beginnings
+bend
+brambles
+blacked
+basketball's
+affectionate
+cocoa
+anacondas
+busing
+bone
+birchen
+creamed
+aged
+commemorates
+brother
+aberration
+crawl
+actuarial
+apology's
+alumnus
+adversary's
+anaphoric
+aspiring
+consciousness
+cokes
+assignee
+boxing
+blanched
+camels
+contemporaries
+carnivorous
+assigned
+apologetically
+corpus
+accusations
+beefing
+champaign
+claps
+adherence
+aloft
+complication
+citizenship
+becomes
+compound
+arabesque
+bronchiole's
+appraises
+breach
+collection's
+botched
+bitches
+biblically
+bronchial
+amalgamating
+commoner's
+barbarian's
+arrange
+cradle
+conformed
+complimentary
+anodes
+cowering
+anoint
+brocaded
+bedazzling
+avionics
+burnishes
+bulkhead
+chink
+consciously
+contract
+clinch
+applicant's
+awning
+aloud
+chandelier's
+cathode's
+babble
+arachnid
+biplane
+clamorous
+assuredly
+consented
+axing
+avenger
+commence
+braving
+brandishing
+careless
+burningly
+boatsman
+channelling
+clarifying
+beggar
+berates
+cite
+cowered
+buffer
+condescending
+admixes
+bettering
+bedazzlement
+cord
+burglary's
+characteristics
+aptitudes
+adieu
+agree
+bends
+ceremonies
+accustom
+accessibly
+commanders
+ask
+cavalier
+brayer
+affront
+courser
+becoming
+carves
+configures
+beasts
+biters
+conditionals
+bodybuilding
+accretions
+chapter's
+cleverer
+corning
+brat
+classes
+almsman
+consumptive
+antique
+comprised
+beholders
+anthropologically
+buns
+bridge
+accretion
+acceptance's
+confederacy
+armorer
+argumentative
+crossword
+cowslip's
+analog
+counselor
+chastised
+barters
+clerked
+americas
+cloud
+aide
+alternators
+admitters
+bagatelle
+bridges
+civilizations
+anion's
+briton's
+apartment
+acquaints
+consummation
+chord
+coated
+barer
+carnivorously
+cheering
+allergy
+capacity
+classrooms
+assistantships
+complimented
+amphibiously
+commandment's
+audiogram
+corked
+badness
+bewildered
+assemblage
+backplane's
+asterisk's
+blob
+coexisting
+approximations
+counteractive
+barns
+adherer
+aborigine's
+brooding
+conceived
+adjustor
+cabled
+belongings
+breadwinner
+blot's
+brightness
+consigning
+barflies
+bisector's
+basing
+complement
+conditioner
+brazes
+crank
+antinomian
+crowd
+accelerometers
+befitting
+backlash
+bastions
+acceleration
+briefcases
+correlated
+baffle
+chew
+accosts
+agreeably
+bassinets
+cogitate
+concerning
+contouring
+broadside
+compact
+brainstems
+atom's
+bondage
+biter
+archdioceses
+basis
+bellboy
+blobs
+barons
+clods
+campaigned
+assessors
+bubbles
+annal
+casual
+altercations
+clog's
+biased
+arianism
+ancillary
+collaborator
+butter
+bureau
+blending
+antiquities
+brands
+activism
+crews
+beats
+broad
+buds
+baggers
+cobbler's
+condemns
+cabinets
+bomber
+blinders
+center
+contacted
+bewilderingly
+circulates
+burnings
+achieved
+belch
+barbecue
+angles
+comparative
+befuddle
+cherished
+chapters
+chanter
+allegation's
+armstrong
+converter
+combinatoric
+angrier
+brooks
+clinked
+blubber
+appointments
+compactor
+cleaned
+car
+contention's
+artificial
+cramp
+consistency
+aborting
+collaboration
+awarders
+crippled
+anaphora
+creamy
+buoyed
+baptistery
+altered
+anchoring
+alterer
+adjuring
+beacon's
+commencement's
+ascension
+candidness
+clouding
+cigars
+boiled
+christmas
+contingency's
+alum
+apparel
+contributors
+anisotropic
+annotations
+bushwhacks
+brides
+continuities
+carton's
+blurred
+antibody
+aorta
+blankest
+combinator's
+banish
+breaches
+accumulates
+bowling
+braver
+antibacterial
+cooperators
+banked
+compensated
+chartable
+conjunctively
+antelope's
+bluefish
+annoying
+composed
+barges
+biconcave
+australia
+ballparks
+bearers
+acknowledged
+advocates
+crossed
+competitor
+blaming
+andorra
+baritone
+collaborator's
+accessibility
+complains
+commentator
+bibliography
+conference's
+atmosphere
+agrees
+bedstead's
+ardor
+character's
+conventionally
+arena's
+chokes
+channel
+bludgeon
+convoys
+condense
+beautifier
+ailerons
+compacts
+black
+bell
+completions
+ballroom
+besotting
+conservatives
+adventured
+bulldog's
+conversely
+arroyos
+compositional
+alternative
+association
+broods
+beefy
+consolidated
+balms
+acquaint
+animal
+certificate
+combustion
+aims
+cracker
+abetted
+cautionings
+bread
+attains
+agriculturally
+courtyards
+bawls
+country's
+creator's
+checkbook's
+cliches
+colonizing
+biennial
+aqueous
+craftsman
+contrivances
+algorithmic
+crate
+barefooted
+bodily
+anthropologist's
+but
+climate's
+campers
+crackled
+awakes
+conveyed
+borrowers
+approached
+avoids
+crib
+albania
+bathrobe
+admonitions
+architectures
+consenting
+anastomosis
+blob's
+actual
+arrowhead's
+accountable
+allegiances
+commendation
+appearers
+comply
+concurs
+controversy
+abstracting
+artifact
diff --git a/bdb/test/wrap.tcl b/bdb/test/wrap.tcl
new file mode 100644
index 00000000000..4a5c825d8f0
--- /dev/null
+++ b/bdb/test/wrap.tcl
@@ -0,0 +1,58 @@
+# Sentinel file wrapper for multi-process tests.
+# This is designed to avoid a set of nasty bugs, primarily on Windows,
+# where pid reuse causes watch_procs to sit around waiting for some
+# random process that's not DB's and is not exiting.
+
+source ./include.tcl
+
+# Arguments: script log scriptargs
+# (script runs in a child tclsh, its output is redirected to log, and the remaining scriptargs are passed through to it as argv)
+if { $argc < 3 } {
+ puts "FAIL: wrap.tcl: Usage: wrap.tcl script log scriptargs"
+ exit
+}
+
+set script [lindex $argv 0]
+set logfile [lindex $argv 1]
+set args [lrange $argv 2 end]
+
+# Create a sentinel file to mark our creation and signal that watch_procs
+# should look for us.
+set parentpid [pid]
+set parentsentinel $testdir/begin.$parentpid
+set f [open $parentsentinel w]
+close $f
+
+# Create a Tcl subprocess that will actually run the test.
+set t [open "|$tclsh_path >& $logfile" w]
+
+# Create a sentinel for the subprocess.
+set childpid [pid $t]
+puts "Script watcher process $parentpid launching $script process $childpid."
+set childsentinel $testdir/begin.$childpid
+set f [open $childsentinel w]
+close $f
+
+# Set up argv for the subprocess, since the args aren't passed in as true
+# arguments thanks to the pipe structure.
+puts $t "set argc [llength $args]"
+puts $t "set argv [list $args]"
+
+# Command the test to run.
+puts $t "source $test_path/$script"
+
+# Close the pipe. This will flush the above commands and actually run the
+# test, and will also return an error a la exec if anything bad happens
+# to the subprocess. The magic here is that closing a pipe blocks
+# and waits for the exit of processes in the pipeline, at least according
+# to Ousterhout (p. 115).
+
+set ret [catch {close $t} res]
+
+# Write ending sentinel files--we're done.
+set f [open $testdir/end.$childpid w]
+close $f
+set f [open $testdir/end.$parentpid w]
+close $f
+
+exit $ret
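For context on how a driver is expected to use this wrapper, the sketch below shows one plausible way to launch a test through wrap.tcl and then wait on the end.* sentinel instead of the child pid. This is an assumption for illustration only: the suite's actual driver and watch_procs are not part of this diff, the script name, argument value, and polling loop are invented, and only $testdir, $tclsh_path and $test_path are taken from include.tcl, exactly as wrap.tcl itself does.

# Hypothetical driver sketch; not part of the Berkeley DB test suite.
source ./include.tcl

set script test001.tcl                  ;# assumed test script name
set log $testdir/test001.log
set scriptargs 1000                     ;# wrap.tcl requires at least one script arg

# Launch wrap.tcl in the background; it immediately writes begin.<pid>.
set pids [exec $tclsh_path $test_path/wrap.tcl $script $log $scriptargs &]
set pid [lindex $pids 0]

# Wait on the end.<pid> sentinel rather than the process itself, so that
# pid reuse (notably on Windows) cannot leave us watching the wrong process.
while { ![file exists $testdir/end.$pid] } {
	after 1000
}
puts "wrap.tcl watcher $pid finished; log is in $log"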
diff --git a/bdb/txn/txn.c b/bdb/txn/txn.c
new file mode 100644
index 00000000000..0f6d894c19b
--- /dev/null
+++ b/bdb/txn/txn.c
@@ -0,0 +1,869 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn.c,v 11.61 2001/01/10 18:18:52 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_shash.h"
+#include "txn.h"
+#include "lock.h"
+#include "log.h"
+#include "db_dispatch.h"
+#include "db_page.h"
+#include "db_ext.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __txn_begin __P((DB_TXN *));
+static int __txn_isvalid __P((const DB_TXN *, TXN_DETAIL **, u_int32_t));
+static int __txn_undo __P((DB_TXN *));
+
+/*
+ * txn_begin --
+ * This is a wrapper to the actual begin process. Normal txn_begin()
+ * allocates a DB_TXN structure for the caller, while txn_xa_begin() does
+ * not. Other than that, both call into the common __txn_begin() code.
+ *
+ * Internally, we use TXN_DETAIL structures, but the DB_TXN structure
+ * provides access to the transaction ID and the offset in the transaction
+ * region of the TXN_DETAIL structure.
+ */
+int
+txn_begin(dbenv, parent, txnpp, flags)
+ DB_ENV *dbenv;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+{
+ DB_TXN *txn;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_txn_begin(dbenv, parent, txnpp, flags));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, DB_INIT_TXN);
+
+ if ((ret = __db_fchk(dbenv,
+ "txn_begin", flags,
+ DB_TXN_NOWAIT | DB_TXN_NOSYNC | DB_TXN_SYNC)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv,
+ "txn_begin", flags, DB_TXN_NOSYNC, DB_TXN_SYNC)) != 0)
+ return (ret);
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+
+ txn->mgrp = dbenv->tx_handle;
+ txn->parent = parent;
+ TAILQ_INIT(&txn->kids);
+ txn->flags = TXN_MALLOC;
+ if (LF_ISSET(DB_TXN_NOSYNC))
+ F_SET(txn, TXN_NOSYNC);
+ if (LF_ISSET(DB_TXN_SYNC))
+ F_SET(txn, TXN_SYNC);
+ if (LF_ISSET(DB_TXN_NOWAIT))
+ F_SET(txn, TXN_NOWAIT);
+
+ if ((ret = __txn_begin(txn)) != 0) {
+ __os_free(txn, sizeof(DB_TXN));
+ txn = NULL;
+ }
+
+ if (txn != NULL && parent != NULL)
+ TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
+
+ *txnpp = txn;
+ return (ret);
+}
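+
+/*
+ * A minimal usage sketch of the txn_begin()/txn_commit() interface defined
+ * in this file, assuming "dbenv" is a DB_ENV that was opened with
+ * DB_INIT_TXN.  The guard macro TXN_USAGE_SKETCH is hypothetical and only
+ * keeps the sketch out of a real build.
+ */
+#ifdef TXN_USAGE_SKETCH
+static int
+__txn_usage_sketch(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_TXN *txn;
+	int ret;
+
+	/* Begin a top-level transaction: no parent, default (sync) flags. */
+	if ((ret = txn_begin(dbenv, NULL, &txn, 0)) != 0)
+		return (ret);
+
+	/* ... transactional database operations performed under "txn" ... */
+
+	/*
+	 * Commit; passing DB_TXN_NOSYNC here (or at begin) would skip the
+	 * log flush, trading durability for speed.
+	 */
+	if ((ret = txn_commit(txn, 0)) != 0)
+		return (ret);
+	return (0);
+}
+#endif /* TXN_USAGE_SKETCH */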
+
+/*
+ * __txn_xa_begin --
+ * XA version of txn_begin.
+ *
+ * PUBLIC: int __txn_xa_begin __P((DB_ENV *, DB_TXN *));
+ */
+int
+__txn_xa_begin(dbenv, txn)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+{
+ PANIC_CHECK(dbenv);
+
+ memset(txn, 0, sizeof(DB_TXN));
+
+ txn->mgrp = dbenv->tx_handle;
+
+ return (__txn_begin(txn));
+}
+
+/*
+ * __txn_begin --
+ * Normal DB version of txn_begin.
+ */
+static int
+__txn_begin(txn)
+ DB_TXN *txn;
+{
+ DB_ENV *dbenv;
+ DB_LSN begin_lsn;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ TXN_DETAIL *td;
+ size_t off;
+ u_int32_t id;
+ int ret;
+
+ mgr = txn->mgrp;
+ dbenv = mgr->dbenv;
+ region = mgr->reginfo.primary;
+
+ /*
+ * We do not have to write begin records (and if we do not, then we
+ * need never write records for read-only transactions). However,
+ * we do need to find the current LSN so that we can store it in the
+ * transaction structure, so we can know where to take checkpoints.
+ */
+ if (LOGGING_ON(dbenv) &&
+ (ret = log_put(dbenv, &begin_lsn, NULL, DB_CURLSN)) != 0)
+ goto err2;
+
+ R_LOCK(dbenv, &mgr->reginfo);
+
+ /* Make sure that last_txnid is not going to wrap around. */
+ if (region->last_txnid == TXN_INVALID) {
+ __db_err(dbenv,
+"txn_begin: transaction ID wrapped. Exit the database environment\nand restart the application as if application failure had occurred");
+ ret = EINVAL;
+ goto err1;
+ }
+
+ /* Allocate a new transaction detail structure. */
+ if ((ret =
+ __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for transaction detail");
+ goto err1;
+ }
+
+ /* Place transaction on active transaction list. */
+ SH_TAILQ_INSERT_HEAD(&region->active_txn, td, links, __txn_detail);
+
+ id = ++region->last_txnid;
+ ++region->nbegins;
+ if (++region->nactive > region->maxnactive)
+ region->maxnactive = region->nactive;
+
+ td->txnid = id;
+ td->begin_lsn = begin_lsn;
+ ZERO_LSN(td->last_lsn);
+ td->status = TXN_RUNNING;
+ if (txn->parent != NULL)
+ td->parent = txn->parent->off;
+ else
+ td->parent = INVALID_ROFF;
+
+ off = R_OFFSET(&mgr->reginfo, td);
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ ZERO_LSN(txn->last_lsn);
+ txn->txnid = id;
+ txn->off = off;
+
+ /*
+ * If this is a transaction family, we must link the child to the
+ * maximal grandparent in the lock table for deadlock detection.
+ */
+ if (txn->parent != NULL && LOCKING_ON(dbenv))
+ if ((ret = __lock_addfamilylocker(dbenv,
+ txn->parent->txnid, txn->txnid)) != 0)
+ goto err2;
+
+ if (F_ISSET(txn, TXN_MALLOC)) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ TAILQ_INSERT_TAIL(&mgr->txn_chain, txn, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+ }
+
+ return (0);
+
+err1: R_UNLOCK(dbenv, &mgr->reginfo);
+
+err2: return (ret);
+}
+
+/*
+ * txn_commit --
+ * Commit a transaction.
+ */
+int
+txn_commit(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_TXN *kid;
+ int is_commit, ret, t_ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_txn_commit(txnp, flags));
+#endif
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, NULL, TXN_COMMITTED)) != 0)
+ return (ret);
+
+ /*
+ * We clear flags that are incorrect, ignoring any flag errors, and
+ * default to synchronous operations. By definition, transaction
+ * handles are dead when we return, and this error should never
+	 * happen, but we don't want to fail in the field because the app is
+ * specifying the wrong flag for some reason.
+ */
+ if (__db_fchk(dbenv,
+ "txn_commit", flags, DB_TXN_NOSYNC | DB_TXN_SYNC) != 0)
+ flags = DB_TXN_SYNC;
+ if (__db_fcchk(dbenv,
+ "txn_commit", flags, DB_TXN_NOSYNC, DB_TXN_SYNC) != 0)
+ flags = DB_TXN_SYNC;
+ if (LF_ISSET(DB_TXN_NOSYNC)) {
+ F_CLR(txnp, TXN_SYNC);
+ F_SET(txnp, TXN_NOSYNC);
+ }
+ if (LF_ISSET(DB_TXN_SYNC)) {
+ F_CLR(txnp, TXN_NOSYNC);
+ F_SET(txnp, TXN_SYNC);
+ }
+
+ /*
+ * Commit any unresolved children. If there's an error, abort any
+ * unresolved children and the parent.
+ */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = txn_commit(kid, flags)) != 0) {
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ (void)txn_abort(kid);
+ (void)txn_abort(txnp);
+ goto err;
+ }
+
+ /*
+ * If there are any log records, write a log record and sync the log,
+ * else do no log writes. If the commit is for a child transaction,
+ * we do not need to commit the child synchronously since it may still
+ * abort (if its parent aborts), and otherwise its parent or ultimate
+ * ancestor will write synchronously.
+ *
+ * I'd rather return a logging error than a flag-wrong error, so if
+ * the log routines fail, set "ret" without regard to previous value.
+ */
+ if (LOGGING_ON(dbenv) && !IS_ZERO_LSN(txnp->last_lsn)) {
+ if (txnp->parent == NULL) {
+ if ((t_ret = __txn_regop_log(dbenv,
+ txnp, &txnp->last_lsn,
+ (F_ISSET(dbenv, DB_ENV_TXN_NOSYNC) &&
+ !F_ISSET(txnp, TXN_SYNC)) ||
+ F_ISSET(txnp, TXN_NOSYNC) ? 0 : DB_FLUSH,
+ TXN_COMMIT, (int32_t)time(NULL))) != 0) {
+ ret = t_ret;
+ goto err;
+ }
+ } else {
+ /* Log the commit in the parent! */
+ if ((t_ret = __txn_child_log(dbenv,
+ txnp->parent, &txnp->parent->last_lsn,
+ 0, txnp->txnid, &txnp->last_lsn)) != 0) {
+ ret = t_ret;
+ goto err;
+ }
+
+ F_SET(txnp->parent, TXN_CHILDCOMMIT);
+ }
+ }
+
+ is_commit = 1;
+ if (0) {
+err: is_commit = 0;
+ }
+ if ((t_ret = __txn_end(txnp, is_commit)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
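+
+/*
+ * A minimal sketch of nested transactions, assuming an open, transactional
+ * DB_ENV.  A child's commit is written into the parent's log trail (via
+ * __txn_child_log() in txn_commit() above) and is not synced, so the child
+ * only becomes durable when the parent commits and is undone if the parent
+ * aborts.  The guard macro TXN_NESTED_SKETCH is hypothetical.
+ */
+#ifdef TXN_NESTED_SKETCH
+static int
+__txn_nested_sketch(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_TXN *parent, *child;
+	int ret;
+
+	if ((ret = txn_begin(dbenv, NULL, &parent, 0)) != 0)
+		return (ret);
+	if ((ret = txn_begin(dbenv, parent, &child, 0)) != 0) {
+		(void)txn_abort(parent);
+		return (ret);
+	}
+
+	/* ... work done under "child" ... */
+
+	/*
+	 * Resolve the child; a child left unresolved would be committed
+	 * (or, on error, aborted) by the parent's txn_commit() above.
+	 */
+	if ((ret = txn_commit(child, 0)) != 0) {
+		(void)txn_abort(parent);
+		return (ret);
+	}
+	return (txn_commit(parent, 0));
+}
+#endif /* TXN_NESTED_SKETCH */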
+
+/*
+ * txn_abort --
+ * Abort a transaction.
+ */
+int
+txn_abort(txnp)
+ DB_TXN *txnp;
+{
+ DB_ENV *dbenv;
+ DB_TXN *kid;
+ int ret, t_ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_txn_abort(txnp));
+#endif
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, NULL, TXN_ABORTED)) != 0)
+ return (ret);
+
+ /* Abort any unresolved children. */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((t_ret = txn_abort(kid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = __txn_undo(txnp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = __txn_end(txnp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * txn_prepare --
+ * Flush the log so a future commit is guaranteed to succeed.
+ */
+int
+txn_prepare(txnp)
+ DB_TXN *txnp;
+{
+ DBT xid;
+ DB_ENV *dbenv;
+ DB_TXN *kid;
+ TXN_DETAIL *td;
+ int ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_txn_prepare(txnp));
+#endif
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, &td, TXN_PREPARED)) != 0)
+ return (ret);
+
+ /* Prepare any unresolved children. */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = txn_prepare(kid)) != 0)
+ return (ret);
+
+ /*
+ * We indicate that a transaction is an XA transaction by putting
+	 * a valid size in the xid.size field.  XA requires that the transaction
+	 * be either ENDED or SUSPENDED when prepare is called, so if the
+	 * xa_status is not in one of those states when we get here, we know
+	 * that this is not an XA transaction.
+ */
+
+ if (LOGGING_ON(dbenv)) {
+ memset(&xid, 0, sizeof(xid));
+ xid.data = td->xid;
+ xid.size = td->xa_status != TXN_XA_ENDED &&
+ td->xa_status != TXN_XA_SUSPENDED ? 0 : sizeof(td->xid);
+ if ((ret = __txn_xa_regop_log(dbenv, txnp, &txnp->last_lsn,
+ (F_ISSET(dbenv, DB_ENV_TXN_NOSYNC) &&
+ !F_ISSET(txnp, TXN_SYNC)) ||
+ F_ISSET(txnp, TXN_NOSYNC) ? 0 : DB_FLUSH, TXN_PREPARE,
+ &xid, td->format, td->gtrid, td->bqual,
+ &td->begin_lsn)) != 0) {
+ __db_err(dbenv, "txn_prepare: log_write failed %s",
+ db_strerror(ret));
+ return (ret);
+ }
+ if (txnp->parent != NULL)
+ F_SET(txnp->parent, TXN_CHILDCOMMIT);
+ }
+
+ MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp);
+ td->status = TXN_PREPARED;
+ MUTEX_THREAD_UNLOCK(dbenv, txnp->mgrp->mutexp);
+ return (0);
+}
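+
+/*
+ * A minimal two-phase sketch using txn_prepare() above, assuming an open,
+ * transactional DB_ENV.  The prepare forces the log, so a commit issued
+ * after a successful prepare is guaranteed to succeed; if the process dies
+ * between the two calls, recovery sees the prepare record (see
+ * __txn_xa_regop_recover in txn_rec.c).  The guard macro TXN_PREPARE_SKETCH
+ * is hypothetical.
+ */
+#ifdef TXN_PREPARE_SKETCH
+static int
+__txn_prepare_sketch(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_TXN *txn;
+	int ret;
+
+	if ((ret = txn_begin(dbenv, NULL, &txn, 0)) != 0)
+		return (ret);
+
+	/* ... transactional work ... */
+
+	/* Phase 1: flush the log so the later commit cannot fail. */
+	if ((ret = txn_prepare(txn)) != 0) {
+		(void)txn_abort(txn);
+		return (ret);
+	}
+	/* Phase 2: commit (or txn_abort() if the coordinator rolls back). */
+	return (txn_commit(txn, 0));
+}
+#endif /* TXN_PREPARE_SKETCH */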
+
+/*
+ * txn_id --
+ * Return the transaction ID.
+ */
+u_int32_t
+txn_id(txnp)
+ DB_TXN *txnp;
+{
+ return (txnp->txnid);
+}
+
+/*
+ * __txn_isvalid --
+ * Return 0 if the txnp is reasonable, otherwise panic.
+ */
+static int
+__txn_isvalid(txnp, tdp, op)
+ const DB_TXN *txnp;
+ TXN_DETAIL **tdp;
+ u_int32_t op;
+{
+ DB_TXNMGR *mgrp;
+ TXN_DETAIL *tp;
+
+ mgrp = txnp->mgrp;
+
+ /* Check for live cursors. */
+ if (txnp->cursors != 0) {
+ __db_err(mgrp->dbenv, "transaction has active cursors");
+ goto err;
+ }
+
+ /* Check transaction's status. */
+ tp = (TXN_DETAIL *)R_ADDR(&mgrp->reginfo, txnp->off);
+ if (tdp != NULL)
+ *tdp = tp;
+
+ switch (tp->status) {
+ case TXN_ABORTED:
+ case TXN_COMMITTED:
+ default:
+ __db_err(mgrp->dbenv, "transaction already %s",
+ tp->status == TXN_COMMITTED ? "committed" : "aborted");
+ goto err;
+ case TXN_PREPARED:
+ if (op == TXN_PREPARED) {
+ __db_err(mgrp->dbenv, "transaction already prepared");
+ goto err;
+ }
+ case TXN_RUNNING:
+ break;
+ }
+
+ return (0);
+
+err: /*
+ * If there's a serious problem with the transaction, panic. TXN
+ * handles are dead by definition when we return, and if you use
+ * a cursor you forgot to close, we have no idea what will happen.
+ */
+ return (__db_panic(mgrp->dbenv, EINVAL));
+}
+
+/*
+ * __txn_end --
+ * Internal transaction end routine.
+ *
+ * PUBLIC: int __txn_end __P((DB_TXN *, int));
+ */
+int
+__txn_end(txnp, is_commit)
+ DB_TXN *txnp;
+ int is_commit;
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ request;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ TXN_DETAIL *tp;
+ int ret;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+ region = mgr->reginfo.primary;
+
+ /* Release the locks. */
+ request.op = txnp->parent == NULL ||
+ is_commit == 0 ? DB_LOCK_PUT_ALL : DB_LOCK_INHERIT;
+
+ if (LOCKING_ON(dbenv)) {
+ ret = lock_vec(dbenv, txnp->txnid, 0, &request, 1, NULL);
+ if (ret != 0 && (ret != DB_LOCK_DEADLOCK || is_commit)) {
+ __db_err(dbenv, "%s: release locks failed %s",
+ is_commit ? "txn_commit" : "txn_abort",
+ db_strerror(ret));
+ __db_panic(dbenv, ret);
+ }
+ }
+
+ /* End the transaction. */
+ R_LOCK(dbenv, &mgr->reginfo);
+
+ tp = (TXN_DETAIL *)R_ADDR(&mgr->reginfo, txnp->off);
+ SH_TAILQ_REMOVE(&region->active_txn, tp, links, __txn_detail);
+ __db_shalloc_free(mgr->reginfo.addr, tp);
+
+ if (is_commit)
+ region->ncommits++;
+ else
+ region->naborts++;
+ --region->nactive;
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * The transaction cannot get more locks, remove its locker info.
+ */
+ if (LOCKING_ON(dbenv))
+ __lock_freefamilylocker(dbenv->lk_handle, txnp->txnid);
+ if (txnp->parent != NULL)
+ TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
+
+ /* Free the space. */
+ if (F_ISSET(txnp, TXN_MALLOC)) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+
+ __os_free(txnp, sizeof(*txnp));
+ }
+
+ return (0);
+}
+
+/*
+ * __txn_undo --
+ * Undo the transaction with id txnid. Returns 0 on success and
+ * errno on failure.
+ */
+static int
+__txn_undo(txnp)
+ DB_TXN *txnp;
+{
+ DBT rdbt;
+ DB_ENV *dbenv;
+ DB_LSN key_lsn;
+ DB_TXNMGR *mgr;
+ void *txnlist;
+ int ret, threaded;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+ txnlist = NULL;
+
+ if (!LOGGING_ON(dbenv))
+ return (0);
+
+ /*
+ * This is the simplest way to code this, but if the mallocs during
+ * recovery turn out to be a performance issue, we can do the
+ * allocation here and use DB_DBT_USERMEM.
+ */
+ memset(&rdbt, 0, sizeof(rdbt));
+ threaded = F_ISSET(dbenv, DB_ENV_THREAD) ? 1 : 0;
+ if (threaded)
+ F_SET(&rdbt, DB_DBT_MALLOC);
+
+ key_lsn = txnp->last_lsn;
+
+ /* Allocate a transaction list for children or aborted page creates. */
+ if ((ret = __db_txnlist_init(dbenv, &txnlist)) != 0)
+ return (ret);
+
+ if (F_ISSET(txnp, TXN_CHILDCOMMIT) &&
+ (ret = __db_txnlist_lsninit(dbenv,
+ txnlist, &txnp->last_lsn)) != 0)
+ return (ret);
+
+ for (ret = 0; ret == 0 && !IS_ZERO_LSN(key_lsn);) {
+ /*
+ * The dispatch routine returns the lsn of the record
+ * before the current one in the key_lsn argument.
+ */
+ if ((ret = log_get(dbenv, &key_lsn, &rdbt, DB_SET)) == 0) {
+ ret = __db_dispatch(dbenv,
+ &rdbt, &key_lsn, DB_TXN_ABORT, txnlist);
+ if (threaded && rdbt.data != NULL) {
+ __os_free(rdbt.data, rdbt.size);
+ rdbt.data = NULL;
+ }
+ if (F_ISSET(txnp, TXN_CHILDCOMMIT))
+ (void)__db_txnlist_lsnadd(dbenv,
+ txnlist, &key_lsn, 0);
+ }
+ if (ret != 0) {
+ __db_err(txnp->mgrp->dbenv,
+ "txn_abort: Log undo failed for LSN: %lu %lu: %s",
+ (u_long)key_lsn.file, (u_long)key_lsn.offset,
+ db_strerror(ret));
+ if (txnlist != NULL)
+ __db_txnlist_end(dbenv, txnlist);
+ return (ret);
+ }
+ }
+
+ if (txnlist != NULL) {
+ __db_do_the_limbo(dbenv, txnlist);
+ __db_txnlist_end(dbenv, txnlist);
+ }
+
+ return (ret);
+}
+
+/*
+ * Transaction checkpoint.
+ * If either kbytes or minutes is non-zero, then we only take the checkpoint
+ * if more than "minutes" minutes have passed since the last checkpoint or if
+ * more than "kbytes" of log data have been written since the last checkpoint.
+ * When taking a checkpoint, find the oldest active transaction and figure out
+ * its first LSN. This is the lowest LSN we can checkpoint, since any record
+ * written after that point may be involved in a transaction and may
+ * therefore need to be undone in the case of an abort.
+ */
+int
+txn_checkpoint(dbenv, kbytes, minutes, flags)
+ DB_ENV *dbenv;
+ u_int32_t kbytes, minutes, flags;
+{
+ DB_LOG *dblp;
+ DB_LSN ckp_lsn, sync_lsn, last_ckp;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ LOG *lp;
+ TXN_DETAIL *txnp;
+ time_t last_ckp_time, now;
+ u_int32_t bytes, mbytes;
+ int interval, ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_txn_checkpoint(dbenv, kbytes, minutes));
+#endif
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, DB_INIT_TXN);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * Check if we need to checkpoint.
+ */
+ ZERO_LSN(ckp_lsn);
+
+ if (LF_ISSET(DB_FORCE))
+ goto do_ckp;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ mbytes = lp->stat.st_wc_mbytes;
+ /*
+ * We add the current buffer offset so as to count bytes that
+ * have not yet been written, but are sitting in the log buffer.
+ */
+ bytes = lp->stat.st_wc_bytes + lp->b_off;
+ ckp_lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Don't checkpoint a quiescent database. */
+ if (bytes == 0 && mbytes == 0)
+ return (0);
+
+ if (kbytes != 0 && mbytes * 1024 + bytes / 1024 >= (u_int32_t)kbytes)
+ goto do_ckp;
+
+ if (minutes != 0) {
+ (void)time(&now);
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ last_ckp_time = region->time_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (now - last_ckp_time >= (time_t)(minutes * 60))
+ goto do_ckp;
+ }
+
+ /*
+ * If we checked time and data and didn't go to checkpoint,
+ * we're done.
+ */
+ if (minutes != 0 || kbytes != 0)
+ return (0);
+
+do_ckp:
+ if (IS_ZERO_LSN(ckp_lsn)) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ ckp_lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ /*
+ * We have to find an LSN such that all transactions begun
+ * before that LSN are complete.
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+
+ if (IS_ZERO_LSN(region->pending_ckp)) {
+ for (txnp =
+ SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ txnp != NULL;
+ txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) {
+
+ /*
+ * Look through the active transactions for the
+ * lowest begin lsn.
+ */
+ if (!IS_ZERO_LSN(txnp->begin_lsn) &&
+ log_compare(&txnp->begin_lsn, &ckp_lsn) < 0)
+ ckp_lsn = txnp->begin_lsn;
+ }
+ region->pending_ckp = ckp_lsn;
+ } else
+ ckp_lsn = region->pending_ckp;
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * Try three times to sync the mpool buffers up to the specified LSN,
+ * sleeping 1, 2 and 4 seconds between attempts.
+ */
+ if (MPOOL_ON(dbenv))
+ for (interval = 1;;) {
+ /*
+ * memp_sync may change the lsn you pass it, so don't
+ * pass it the actual ckp_lsn, pass it a local instead.
+ */
+ sync_lsn = ckp_lsn;
+ if ((ret = memp_sync(dbenv, &sync_lsn)) == 0)
+ break;
+
+ /*
+ * ret == DB_INCOMPLETE means there are still buffers
+ * to flush, the checkpoint is not complete.
+ */
+ if (ret == DB_INCOMPLETE) {
+ if (interval > 4)
+ return (ret);
+
+ (void)__os_sleep(dbenv, interval, 0);
+ interval *= 2;
+ } else {
+ __db_err(dbenv,
+ "txn_checkpoint: failure in memp_sync %s",
+ db_strerror(ret));
+ return (ret);
+ }
+ }
+
+ if (LOGGING_ON(dbenv)) {
+ R_LOCK(dbenv, &mgr->reginfo);
+ last_ckp = region->last_ckp;
+ ZERO_LSN(region->pending_ckp);
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if ((ret = __txn_ckp_log(dbenv,
+ NULL, &ckp_lsn, DB_CHECKPOINT, &ckp_lsn,
+ &last_ckp, (int32_t)time(NULL))) != 0) {
+ __db_err(dbenv,
+ "txn_checkpoint: log failed at LSN [%ld %ld] %s",
+ (long)ckp_lsn.file, (long)ckp_lsn.offset,
+ db_strerror(ret));
+ return (ret);
+ }
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ region->last_ckp = ckp_lsn;
+ (void)time(&region->time_ckp);
+ R_UNLOCK(dbenv, &mgr->reginfo);
+ }
+ return (0);
+}
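+
+/*
+ * A minimal checkpointing sketch, assuming an open, transactional DB_ENV.
+ * With non-zero kbytes/minutes arguments the call is cheap to repeat: a
+ * checkpoint is only written once 64KB of log or 5 minutes have accumulated
+ * since the last one.  DB_INCOMPLETE means memp_sync could not yet flush
+ * every buffer and the caller should simply try again later.  The guard
+ * macro TXN_CKP_SKETCH is hypothetical.
+ */
+#ifdef TXN_CKP_SKETCH
+static int
+__txn_ckp_sketch(dbenv)
+	DB_ENV *dbenv;
+{
+	int ret;
+
+	/* Checkpoint if >= 64KB of log or >= 5 minutes since the last one. */
+	if ((ret = txn_checkpoint(dbenv, 64, 5, 0)) == DB_INCOMPLETE)
+		ret = 0;
+	return (ret);
+}
+#endif /* TXN_CKP_SKETCH */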
+
+/*
+ * __txn_activekids --
+ * Return if this transaction has any active children.
+ *
+ * PUBLIC: int __txn_activekids __P((DB_ENV *, u_int32_t, DB_TXN *));
+ */
+int
+__txn_activekids(dbenv, rectype, txnp)
+ DB_ENV *dbenv;
+ u_int32_t rectype;
+ DB_TXN *txnp;
+{
+ /*
+	 * On a child commit, we know that there are children (i.e., the
+	 * committing child, at the least).  In that case, skip this check.
+ */
+ if (rectype == DB_txn_child)
+ return (0);
+
+ if (TAILQ_FIRST(&txnp->kids) != NULL) {
+ __db_err(dbenv, "Child transaction is active");
+ return (EPERM);
+ }
+ return (0);
+}
diff --git a/bdb/txn/txn.src b/bdb/txn/txn.src
new file mode 100644
index 00000000000..b1e131c2bd7
--- /dev/null
+++ b/bdb/txn/txn.src
@@ -0,0 +1,114 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: txn.src,v 11.10 2001/01/02 00:58:33 margo Exp $
+ */
+
+PREFIX txn
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "db_page.h"
+INCLUDE #include "db_dispatch.h"
+INCLUDE #include "db_am.h"
+INCLUDE #include "txn.h"
+INCLUDE
+
+/*
+ * DEPRECATED in 3.1 to add timestamps.
+ */
+DEPRECATED old_regop 6
+ARG opcode u_int32_t lu
+END
+
+/*
+ * This is the standard log operation for commit.
+ * Note that we are using an int32_t for the timestamp. This means that
+ * in 2038 we will need to deprecate this log record and create one that
+ * either changes the Epoch or has a 64-bit offset.
+ */
+BEGIN regop 10
+ARG opcode u_int32_t lu
+ARG timestamp int32_t ld
+END
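+
+/*
+ * A sketch of the generated interface (derived from txn_auto.c in this
+ * directory): gen_rec.awk expands each BEGIN ... END description into _log,
+ * _print and _read routines, plus entries in the __txn_init_print and
+ * __txn_init_recover dispatch tables.  Records are serialized as a common
+ * header (record type, transaction ID, previous LSN) followed by the
+ * declared fields in order, so the regop description above yields:
+ *
+ *	int __txn_regop_log(DB_ENV *, DB_TXN *, DB_LSN *ret_lsnp,
+ *	    u_int32_t flags, u_int32_t opcode, int32_t timestamp);
+ */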
+
+DEPRECATED old_ckp 7
+POINTER ckp_lsn DB_LSN * lu
+POINTER last_ckp DB_LSN * lu
+END
+
+/*
+ * This is the checkpoint record. It contains the lsn that the checkpoint
+ * guarantees and a pointer to the last checkpoint so we can walk backwards
+ * by checkpoint.
+ *
+ * ckp_lsn:
+ *	The LSN in the log such that all transactions begun before it are
+ *	complete (see txn_checkpoint() in txn.c).  This is the point for
+ *	which the checkpoint is relevant.
+ * last_ckp:
+ * The previous checkpoint.
+ * timestamp:
+ * See comment in commit about timestamps.
+ */
+BEGIN ckp 11
+POINTER ckp_lsn DB_LSN * lu
+POINTER last_ckp DB_LSN * lu
+ARG timestamp int32_t ld
+END
+
+/*
+ * This is the standard log operation for prepare (since right now
+ * we only use prepare in an XA environment).
+ */
+DEPRECATED xa_regop_old 8
+ARG opcode u_int32_t lu
+DBT xid DBT s
+ARG formatID int32_t ld
+ARG gtrid u_int32_t u
+ARG bqual u_int32_t u
+END
+
+/*
+ * This is the standard log operation for prepare (since right now
+ * we only use prepare in an XA environment).
+ */
+BEGIN xa_regop 13
+ARG opcode u_int32_t lu
+DBT xid DBT s
+ARG formatID int32_t ld
+ARG gtrid u_int32_t u
+ARG bqual u_int32_t u
+POINTER begin_lsn DB_LSN * lu
+END
+
+/*
+ * This is the log operation for a child commit.
+ */
+DEPRECATED child_old 9
+ARG opcode u_int32_t lu
+ARG parent u_int32_t lx
+END
+
+/*
+ * This is the (new) log operation for a child commit. It is
+ * logged as a record in the PARENT. The child field contains
+ * the transaction ID of the child committing and the c_lsn is
+ * the last LSN of the child's log trail.
+ */
+BEGIN child 12
+ARG child u_int32_t lx
+POINTER c_lsn DB_LSN * lu
+END
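+
+/*
+ * A usage sketch: txn_commit() in txn.c writes this record into the
+ * parent's log trail when a child transaction commits, for example:
+ *
+ *	ret = __txn_child_log(dbenv, txnp->parent, &txnp->parent->last_lsn,
+ *	    0, txnp->txnid, &txnp->last_lsn);
+ *
+ * so the child's changes stay undoable until the parent itself commits.
+ */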
diff --git a/bdb/txn/txn_auto.c b/bdb/txn/txn_auto.c
new file mode 100644
index 00000000000..cbfa536733e
--- /dev/null
+++ b/bdb/txn/txn_auto.c
@@ -0,0 +1,893 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_dispatch.h"
+#include "db_am.h"
+#include "txn.h"
+
+int
+__txn_old_regop_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_old_regop_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_old_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]txn_old_regop: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__txn_old_regop_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_old_regop_args **argpp;
+{
+ __txn_old_regop_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__txn_old_regop_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__txn_regop_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, timestamp)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ int32_t timestamp;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_txn_regop;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(timestamp);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ memcpy(bp, &timestamp, sizeof(timestamp));
+ bp += sizeof(timestamp);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__txn_regop_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_regop_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]txn_regop: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\ttimestamp: %ld\n", (long)argp->timestamp);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__txn_regop_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_regop_args **argpp;
+{
+ __txn_regop_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__txn_regop_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->timestamp, bp, sizeof(argp->timestamp));
+ bp += sizeof(argp->timestamp);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__txn_old_ckp_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_old_ckp_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_old_ckp_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]txn_old_ckp: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tckp_lsn: [%lu][%lu]\n",
+ (u_long)argp->ckp_lsn.file, (u_long)argp->ckp_lsn.offset);
+ printf("\tlast_ckp: [%lu][%lu]\n",
+ (u_long)argp->last_ckp.file, (u_long)argp->last_ckp.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__txn_old_ckp_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_old_ckp_args **argpp;
+{
+ __txn_old_ckp_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__txn_old_ckp_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->ckp_lsn, bp, sizeof(argp->ckp_lsn));
+ bp += sizeof(argp->ckp_lsn);
+ memcpy(&argp->last_ckp, bp, sizeof(argp->last_ckp));
+ bp += sizeof(argp->last_ckp);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__txn_ckp_log(dbenv, txnid, ret_lsnp, flags,
+ ckp_lsn, last_ckp, timestamp)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ DB_LSN * ckp_lsn;
+ DB_LSN * last_ckp;
+ int32_t timestamp;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_txn_ckp;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(*ckp_lsn)
+ + sizeof(*last_ckp)
+ + sizeof(timestamp);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ if (ckp_lsn != NULL)
+ memcpy(bp, ckp_lsn, sizeof(*ckp_lsn));
+ else
+ memset(bp, 0, sizeof(*ckp_lsn));
+ bp += sizeof(*ckp_lsn);
+ if (last_ckp != NULL)
+ memcpy(bp, last_ckp, sizeof(*last_ckp));
+ else
+ memset(bp, 0, sizeof(*last_ckp));
+ bp += sizeof(*last_ckp);
+ memcpy(bp, &timestamp, sizeof(timestamp));
+ bp += sizeof(timestamp);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__txn_ckp_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_ckp_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_ckp_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]txn_ckp: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tckp_lsn: [%lu][%lu]\n",
+ (u_long)argp->ckp_lsn.file, (u_long)argp->ckp_lsn.offset);
+ printf("\tlast_ckp: [%lu][%lu]\n",
+ (u_long)argp->last_ckp.file, (u_long)argp->last_ckp.offset);
+ printf("\ttimestamp: %ld\n", (long)argp->timestamp);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__txn_ckp_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_ckp_args **argpp;
+{
+ __txn_ckp_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__txn_ckp_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->ckp_lsn, bp, sizeof(argp->ckp_lsn));
+ bp += sizeof(argp->ckp_lsn);
+ memcpy(&argp->last_ckp, bp, sizeof(argp->last_ckp));
+ bp += sizeof(argp->last_ckp);
+ memcpy(&argp->timestamp, bp, sizeof(argp->timestamp));
+ bp += sizeof(argp->timestamp);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__txn_xa_regop_old_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_xa_regop_old_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_xa_regop_old_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]txn_xa_regop_old: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\txid: ");
+ for (i = 0; i < argp->xid.size; i++) {
+ ch = ((u_int8_t *)argp->xid.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tformatID: %ld\n", (long)argp->formatID);
+ printf("\tgtrid: %u\n", argp->gtrid);
+ printf("\tbqual: %u\n", argp->bqual);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__txn_xa_regop_old_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_xa_regop_old_args **argpp;
+{
+ __txn_xa_regop_old_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__txn_xa_regop_old_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memset(&argp->xid, 0, sizeof(argp->xid));
+ memcpy(&argp->xid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->xid.data = bp;
+ bp += argp->xid.size;
+ memcpy(&argp->formatID, bp, sizeof(argp->formatID));
+ bp += sizeof(argp->formatID);
+ memcpy(&argp->gtrid, bp, sizeof(argp->gtrid));
+ bp += sizeof(argp->gtrid);
+ memcpy(&argp->bqual, bp, sizeof(argp->bqual));
+ bp += sizeof(argp->bqual);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__txn_xa_regop_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, xid, formatID, gtrid, bqual, begin_lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ const DBT *xid;
+ int32_t formatID;
+ u_int32_t gtrid;
+ u_int32_t bqual;
+ DB_LSN * begin_lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_txn_xa_regop;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(opcode)
+ + sizeof(u_int32_t) + (xid == NULL ? 0 : xid->size)
+ + sizeof(formatID)
+ + sizeof(gtrid)
+ + sizeof(bqual)
+ + sizeof(*begin_lsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &opcode, sizeof(opcode));
+ bp += sizeof(opcode);
+ if (xid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &xid->size, sizeof(xid->size));
+ bp += sizeof(xid->size);
+ memcpy(bp, xid->data, xid->size);
+ bp += xid->size;
+ }
+ memcpy(bp, &formatID, sizeof(formatID));
+ bp += sizeof(formatID);
+ memcpy(bp, &gtrid, sizeof(gtrid));
+ bp += sizeof(gtrid);
+ memcpy(bp, &bqual, sizeof(bqual));
+ bp += sizeof(bqual);
+ if (begin_lsn != NULL)
+ memcpy(bp, begin_lsn, sizeof(*begin_lsn));
+ else
+ memset(bp, 0, sizeof(*begin_lsn));
+ bp += sizeof(*begin_lsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__txn_xa_regop_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_xa_regop_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_xa_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]txn_xa_regop: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\txid: ");
+ for (i = 0; i < argp->xid.size; i++) {
+ ch = ((u_int8_t *)argp->xid.data)[i];
+ if (isprint(ch) || ch == 0xa)
+ putchar(ch);
+ else
+ printf("%#x ", ch);
+ }
+ printf("\n");
+ printf("\tformatID: %ld\n", (long)argp->formatID);
+ printf("\tgtrid: %u\n", argp->gtrid);
+ printf("\tbqual: %u\n", argp->bqual);
+ printf("\tbegin_lsn: [%lu][%lu]\n",
+ (u_long)argp->begin_lsn.file, (u_long)argp->begin_lsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__txn_xa_regop_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_xa_regop_args **argpp;
+{
+ __txn_xa_regop_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__txn_xa_regop_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memset(&argp->xid, 0, sizeof(argp->xid));
+ memcpy(&argp->xid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->xid.data = bp;
+ bp += argp->xid.size;
+ memcpy(&argp->formatID, bp, sizeof(argp->formatID));
+ bp += sizeof(argp->formatID);
+ memcpy(&argp->gtrid, bp, sizeof(argp->gtrid));
+ bp += sizeof(argp->gtrid);
+ memcpy(&argp->bqual, bp, sizeof(argp->bqual));
+ bp += sizeof(argp->bqual);
+ memcpy(&argp->begin_lsn, bp, sizeof(argp->begin_lsn));
+ bp += sizeof(argp->begin_lsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__txn_child_old_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_child_old_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_child_old_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]txn_child_old: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\topcode: %lu\n", (u_long)argp->opcode);
+ printf("\tparent: 0x%lx\n", (u_long)argp->parent);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__txn_child_old_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_child_old_args **argpp;
+{
+ __txn_child_old_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__txn_child_old_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->opcode, bp, sizeof(argp->opcode));
+ bp += sizeof(argp->opcode);
+ memcpy(&argp->parent, bp, sizeof(argp->parent));
+ bp += sizeof(argp->parent);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__txn_child_log(dbenv, txnid, ret_lsnp, flags,
+ child, c_lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t child;
+ DB_LSN * c_lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_txn_child;
+ if (txnid != NULL &&
+ TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid == NULL ? 0 : txnid->txnid;
+ if (txnid == NULL) {
+ ZERO_LSN(null_lsn);
+ lsnp = &null_lsn;
+ } else
+ lsnp = &txnid->last_lsn;
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(child)
+ + sizeof(*c_lsn);
+ if ((ret = __os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)
+ return (ret);
+
+ bp = logrec.data;
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(bp, &child, sizeof(child));
+ bp += sizeof(child);
+ if (c_lsn != NULL)
+ memcpy(bp, c_lsn, sizeof(*c_lsn));
+ else
+ memset(bp, 0, sizeof(*c_lsn));
+ bp += sizeof(*c_lsn);
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) == logrec.size);
+ ret = log_put(dbenv, ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL)
+ txnid->last_lsn = *ret_lsnp;
+ __os_free(logrec.data, logrec.size);
+ return (ret);
+}
+
+int
+__txn_child_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_child_args *argp;
+ u_int32_t i;
+ u_int ch;
+ int ret;
+
+ i = 0;
+ ch = 0;
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_child_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ printf("[%lu][%lu]txn_child: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ printf("\tchild: 0x%lx\n", (u_long)argp->child);
+ printf("\tc_lsn: [%lu][%lu]\n",
+ (u_long)argp->c_lsn.file, (u_long)argp->c_lsn.offset);
+ printf("\n");
+ __os_free(argp, 0);
+ return (0);
+}
+
+int
+__txn_child_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_child_args **argpp;
+{
+ __txn_child_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ ret = __os_malloc(dbenv, sizeof(__txn_child_args) +
+ sizeof(DB_TXN), NULL, &argp);
+ if (ret != 0)
+ return (ret);
+ argp->txnid = (DB_TXN *)&argp[1];
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+ memcpy(&argp->child, bp, sizeof(argp->child));
+ bp += sizeof(argp->child);
+ memcpy(&argp->c_lsn, bp, sizeof(argp->c_lsn));
+ bp += sizeof(argp->c_lsn);
+ *argpp = argp;
+ return (0);
+}
+
+int
+__txn_init_print(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_old_regop_print, DB_txn_old_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_regop_print, DB_txn_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_old_ckp_print, DB_txn_old_ckp)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_ckp_print, DB_txn_ckp)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_xa_regop_old_print, DB_txn_xa_regop_old)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_xa_regop_print, DB_txn_xa_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_child_old_print, DB_txn_child_old)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_child_print, DB_txn_child)) != 0)
+ return (ret);
+ return (0);
+}
+
+int
+__txn_init_recover(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_txn_old_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_regop_recover, DB_txn_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_txn_old_ckp)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_ckp_recover, DB_txn_ckp)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_txn_xa_regop_old)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_xa_regop_recover, DB_txn_xa_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __deprecated_recover, DB_txn_child_old)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv,
+ __txn_child_recover, DB_txn_child)) != 0)
+ return (ret);
+ return (0);
+}
+
diff --git a/bdb/txn/txn_rec.c b/bdb/txn/txn_rec.c
new file mode 100644
index 00000000000..bed20d98e1e
--- /dev/null
+++ b/bdb/txn/txn_rec.c
@@ -0,0 +1,339 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_rec.c,v 11.15 2001/01/11 18:19:55 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "txn.h"
+#include "db_am.h"
+#include "db_dispatch.h"
+#include "log.h"
+#include "common_ext.h"
+
+static int __txn_restore_txn __P((DB_ENV *, DB_LSN *, __txn_xa_regop_args *));
+
+#define IS_XA_TXN(R) (R->xid.size != 0)
+
+/*
+ * PUBLIC: int __txn_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * These records are only ever written for commits. Normally, we redo any
+ * committed transaction; however, if we are doing recovery to a timestamp,
+ * we may treat transactions that committed after the timestamp as aborted.
+ */
+int
+__txn_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_regop_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_regop_print(dbenv, dbtp, lsnp, op, info);
+#endif
+
+ if ((ret = __txn_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ if (argp->opcode != TXN_COMMIT) {
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (op == DB_TXN_FORWARD_ROLL)
+ ret = __db_txnlist_remove(info, argp->txnid->txnid);
+ else if (dbenv->tx_timestamp == 0 ||
+ argp->timestamp <= (int32_t)dbenv->tx_timestamp)
+ /*
+ * We know this is the backward roll case because we
+ * are never called during ABORT or OPENFILES.
+ */
+ ret = __db_txnlist_add(dbenv, info, argp->txnid->txnid, 0);
+ else
+ /*
+		 * This is a commit record, but we failed the timestamp check
+ * so we should treat it as an abort and add it to the list
+ * as an aborted record.
+ */
+ ret = __db_txnlist_add(dbenv, info, argp->txnid->txnid, 1);
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+err: __os_free(argp, 0);
+
+ return (ret);
+}
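+
+/*
+ * A minimal sketch of "recovery to a timestamp", which is what the timestamp
+ * check above implements.  set_tx_timestamp() is defined in txn_region.c and
+ * must be called before the environment is opened; the open flags
+ * (DB_RECOVER and friends) belong to the general DB_ENV API and are only
+ * assumed here.  The guard macro TXN_TIMESTAMP_SKETCH is hypothetical.
+ */
+#ifdef TXN_TIMESTAMP_SKETCH
+static int
+__txn_timestamp_sketch(dbenv, home, when)
+	DB_ENV *dbenv;
+	char *home;
+	time_t when;
+{
+	int ret;
+
+	/* Commits logged after "when" are treated as aborts during recovery. */
+	if ((ret = dbenv->set_tx_timestamp(dbenv, &when)) != 0)
+		return (ret);
+
+	/* Open with recovery so the backward/forward roll actually runs. */
+	return (dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK |
+	    DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER, 0));
+}
+#endif /* TXN_TIMESTAMP_SKETCH */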
+
+/*
+ * PUBLIC: int __txn_xa_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * These records are only ever written for prepares.
+ */
+int
+__txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_xa_regop_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_xa_regop_print(dbenv, dbtp, lsnp, op, info);
+#endif
+
+ if ((ret = __txn_xa_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ if (argp->opcode != TXN_PREPARE) {
+ ret = EINVAL;
+ goto err;
+ }
+
+ ret = __db_txnlist_find(info, argp->txnid->txnid);
+
+ /*
+ * If we are rolling forward, then an aborted prepare
+ * indicates that this is the last record we'll see for
+ * this transaction ID and we should remove it from the
+ * list.
+ */
+
+ if (op == DB_TXN_FORWARD_ROLL && ret == 1)
+ ret = __db_txnlist_remove(info, argp->txnid->txnid);
+ else if (op == DB_TXN_BACKWARD_ROLL && ret != 0) {
+ /*
+ * On the backward pass, we have three possibilities:
+ * 1. The transaction is already committed, no-op.
+		 *  2. The transaction is not committed and we are XA: treat
+		 *     it as committed and roll it forward so that it can be
+		 *     committed or aborted later.
+		 *  3. The transaction is not committed and we are not XA:
+		 *     mark the transaction as aborted.
+ *
+ * Cases 2 and 3 are handled here.
+ */
+
+ /*
+		 * We should never have seen this transaction unless it was
+		 * committed.
+ */
+ DB_ASSERT(ret == DB_NOTFOUND);
+
+ if (IS_XA_TXN(argp)) {
+ /*
+ * This is an XA prepared, but not yet committed
+ * transaction. We need to add it to the
+ * transaction list, so that it gets rolled
+ * forward. We also have to add it to the region's
+ * internal state so it can be properly aborted
+ * or recovered.
+ */
+ if ((ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid, 0)) == 0)
+ ret = __txn_restore_txn(dbenv, lsnp, argp);
+ } else
+ ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid, 1);
+ } else
+ ret = 0;
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+err: __os_free(argp, 0);
+
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_ckp_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_ckp_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_ckp_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ __txn_ckp_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = __txn_ckp_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ /*
+ * Check for 'restart' checkpoint record. This occurs when the
+ * checkpoint lsn is equal to the lsn of the checkpoint record
+ * and means that we could set the transaction ID back to 1, so
+ * that we don't exhaust the transaction ID name space.
+ */
+ if (argp->ckp_lsn.file == lsnp->file &&
+ argp->ckp_lsn.offset == lsnp->offset)
+ __db_txnlist_gen(info, DB_REDO(op) ? -1 : 1);
+
+ *lsnp = argp->last_ckp;
+ __os_free(argp, 0);
+ return (DB_TXN_CKP);
+}
+
+/*
+ * __txn_child_recover
+ * Recover a commit record for a child transaction.
+ *
+ * PUBLIC: int __txn_child_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_child_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_child_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_child_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ if ((ret = __txn_child_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ /*
+ * This is a record in a PARENT's log trail indicating that a
+	 * child committed.  If we are aborting, we need to update the
+	 * parent's LSN array.  If we are in recovery, then if the
+	 * parent is committing, we set ourselves up to commit, else
+ * we do nothing.
+ */
+ if (op == DB_TXN_ABORT) {
+		/*
+		 * Note that __db_txnlist_lsnadd rewrites its LSN parameter,
+		 * so you cannot reuse the argp->c_lsn field.
+		 */
+ ret = __db_txnlist_lsnadd(dbenv,
+ info, &argp->c_lsn, TXNLIST_NEW);
+ } else if (op == DB_TXN_BACKWARD_ROLL) {
+ if (__db_txnlist_find(info, argp->txnid->txnid) == 0)
+ ret = __db_txnlist_add(dbenv, info, argp->child, 0);
+ else
+ ret = __db_txnlist_add(dbenv, info, argp->child, 1);
+ } else
+ ret = __db_txnlist_remove(info, argp->child);
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+ __os_free(argp, 0);
+
+ return (ret);
+}
+
+/*
+ * __txn_restore_txn --
+ * Used only during XA recovery.  If we find any transactions that are
+ * prepared, but not yet committed, then we need to restore the transaction's
+ * state into the shared region, because the TM is going to issue a txn_abort
+ * or txn_commit and we need to respond correctly.
+ *
+ * lsnp is the LSN of the prepare record being recovered
+ * argp is the prepare record (in an appropriate structure)
+ */
+static int
+__txn_restore_txn(dbenv, lsnp, argp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ __txn_xa_regop_args *argp;
+{
+ DB_TXNMGR *mgr;
+ TXN_DETAIL *td;
+ DB_TXNREGION *region;
+ int ret;
+
+ if (argp->xid.size == 0)
+ return (0);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ R_LOCK(dbenv, &mgr->reginfo);
+
+ /* Allocate a new transaction detail structure. */
+ if ((ret =
+ __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0)
+ return (ret);
+
+ /* Place transaction on active transaction list. */
+ SH_TAILQ_INSERT_HEAD(&region->active_txn, td, links, __txn_detail);
+
+ td->txnid = argp->txnid->txnid;
+ td->begin_lsn = argp->begin_lsn;
+ td->last_lsn = *lsnp;
+ td->parent = 0;
+ td->status = TXN_PREPARED;
+ td->xa_status = TXN_XA_PREPARED;
+ memcpy(td->xid, argp->xid.data, argp->xid.size);
+ td->bqual = argp->bqual;
+ td->gtrid = argp->gtrid;
+ td->format = argp->formatID;
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+ return (0);
+}
diff --git a/bdb/txn/txn_region.c b/bdb/txn/txn_region.c
new file mode 100644
index 00000000000..77ce3d08f89
--- /dev/null
+++ b/bdb/txn/txn_region.c
@@ -0,0 +1,393 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_region.c,v 11.36 2001/01/11 18:19:55 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "log.h" /* for __log_lastckp */
+#include "txn.h"
+#include "db_am.h"
+
+#ifdef HAVE_RPC
+#include "gen_client_ext.h"
+#include "rpc_client_ext.h"
+#endif
+
+static int __txn_init __P((DB_ENV *, DB_TXNMGR *));
+static int __txn_set_tx_max __P((DB_ENV *, u_int32_t));
+static int __txn_set_tx_recover __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+static int __txn_set_tx_timestamp __P((DB_ENV *, time_t *));
+
+/*
+ * __txn_dbenv_create --
+ * Transaction specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: void __txn_dbenv_create __P((DB_ENV *));
+ */
+void
+__txn_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->tx_max = DEF_MAX_TXNS;
+
+ dbenv->set_tx_max = __txn_set_tx_max;
+ dbenv->set_tx_recover = __txn_set_tx_recover;
+ dbenv->set_tx_timestamp = __txn_set_tx_timestamp;
+
+#ifdef HAVE_RPC
+ /*
+ * If we have a client, overwrite what we just setup to point to
+ * client functions.
+ */
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_tx_max = __dbcl_set_tx_max;
+ dbenv->set_tx_recover = __dbcl_set_tx_recover;
+ dbenv->set_tx_timestamp = __dbcl_set_tx_timestamp;
+ }
+#endif
+}
+
+/*
+ * __txn_set_tx_max --
+ * Set the size of the transaction table.
+ */
+static int
+__txn_set_tx_max(dbenv, tx_max)
+ DB_ENV *dbenv;
+ u_int32_t tx_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_max");
+
+ dbenv->tx_max = tx_max;
+ return (0);
+}
+
+/*
+ * __txn_set_tx_recover --
+ * Set the transaction abort recover function.
+ */
+static int
+__txn_set_tx_recover(dbenv, tx_recover)
+ DB_ENV *dbenv;
+ int (*tx_recover) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ dbenv->tx_recover = tx_recover;
+ return (0);
+}
+
+/*
+ * __txn_set_tx_timestamp --
+ * Set the transaction recovery timestamp.
+ */
+static int
+__txn_set_tx_timestamp(dbenv, timestamp)
+ DB_ENV *dbenv;
+ time_t *timestamp;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_timestamp");
+
+ dbenv->tx_timestamp = *timestamp;
+ return (0);
+}
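+
+/*
+ * A minimal configuration sketch, assuming a DB_ENV handle that has not yet
+ * been opened (set_tx_max is ENV_ILLEGAL_AFTER_OPEN).  tx_max sizes the
+ * transaction region created in __txn_open() below via TXN_REGION_SIZE(),
+ * so it bounds the number of simultaneously active transactions.  The guard
+ * macro TXN_CONF_SKETCH is hypothetical.
+ */
+#ifdef TXN_CONF_SKETCH
+static int
+__txn_conf_sketch(dbenv)
+	DB_ENV *dbenv;
+{
+	/* Allow up to 500 simultaneously active transactions. */
+	return (dbenv->set_tx_max(dbenv, 500));
+}
+#endif /* TXN_CONF_SKETCH */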
+
+/*
+ * __txn_open --
+ * Open a transaction region.
+ *
+ * PUBLIC: int __txn_open __P((DB_ENV *));
+ */
+int
+__txn_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ /* Create/initialize the transaction manager structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXNMGR), &tmgrp)) != 0)
+ return (ret);
+ TAILQ_INIT(&tmgrp->txn_chain);
+ tmgrp->dbenv = dbenv;
+
+ /* Join/create the txn region. */
+ tmgrp->reginfo.type = REGION_TYPE_TXN;
+ tmgrp->reginfo.id = INVALID_REGION_ID;
+ tmgrp->reginfo.mode = dbenv->db_mode;
+ tmgrp->reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&tmgrp->reginfo, REGION_CREATE_OK);
+ if ((ret = __db_r_attach(dbenv,
+ &tmgrp->reginfo, TXN_REGION_SIZE(dbenv->tx_max))) != 0)
+ goto err;
+
+ /* If we created the region, initialize it. */
+ if (F_ISSET(&tmgrp->reginfo, REGION_CREATE))
+ if ((ret = __txn_init(dbenv, tmgrp)) != 0)
+ goto err;
+
+ /* Set the local addresses. */
+ tmgrp->reginfo.primary =
+ R_ADDR(&tmgrp->reginfo, tmgrp->reginfo.rp->primary);
+
+ /* Acquire a mutex to protect the active TXN list. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ if ((ret = __db_mutex_alloc(
+ dbenv, &tmgrp->reginfo, &tmgrp->mutexp)) != 0)
+ goto err;
+ if ((ret = __db_mutex_init(
+ dbenv, tmgrp->mutexp, 0, MUTEX_THREAD)) != 0)
+ goto err;
+ }
+
+ R_UNLOCK(dbenv, &tmgrp->reginfo);
+
+ dbenv->tx_handle = tmgrp;
+ return (0);
+
+err: if (tmgrp->reginfo.addr != NULL) {
+ if (F_ISSET(&tmgrp->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+ R_UNLOCK(dbenv, &tmgrp->reginfo);
+
+ (void)__db_r_detach(dbenv, &tmgrp->reginfo, 0);
+ }
+ if (tmgrp->mutexp != NULL)
+ __db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp);
+ __os_free(tmgrp, sizeof(*tmgrp));
+ return (ret);
+}
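
__txn_open runs on behalf of DB_ENV->open when DB_INIT_TXN is specified; once the region is attached, applications drive it through the txn_begin/txn_commit/txn_abort entry points. A small sketch, assuming dbenv was opened with DB_INIT_TXN as above (do_one_txn is a made-up name):

#include <db.h>

int
do_one_txn(DB_ENV *dbenv)
{
    DB_TXN *txn;
    int ret;

    if ((ret = txn_begin(dbenv, NULL, &txn, 0)) != 0)
        return (ret);

    /* ... perform DB->get/DB->put calls here, passing txn ... */

    if ((ret = txn_commit(txn, 0)) != 0)
        return (ret);
    return (0);
}
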
+
+/*
+ * __txn_init --
+ * Initialize a transaction region in shared memory.
+ */
+static int
+__txn_init(dbenv, tmgrp)
+ DB_ENV *dbenv;
+ DB_TXNMGR *tmgrp;
+{
+ DB_LSN last_ckp;
+ DB_TXNREGION *region;
+ int ret;
+
+ ZERO_LSN(last_ckp);
+ /*
+ * If possible, fetch the last checkpoint LSN from the log system
+ * so that the backwards chain of checkpoints is unbroken when
+ * the environment is removed and recreated. [#2865]
+ */
+ if (LOGGING_ON(dbenv) && (ret = __log_lastckp(dbenv, &last_ckp)) != 0)
+ return (ret);
+
+ if ((ret = __db_shalloc(tmgrp->reginfo.addr,
+ sizeof(DB_TXNREGION), 0, &tmgrp->reginfo.primary)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for the transaction region");
+ return (ret);
+ }
+ tmgrp->reginfo.rp->primary =
+ R_OFFSET(&tmgrp->reginfo, tmgrp->reginfo.primary);
+ region = tmgrp->reginfo.primary;
+ memset(region, 0, sizeof(*region));
+
+ region->maxtxns = dbenv->tx_max;
+ region->last_txnid = TXN_MINIMUM;
+ ZERO_LSN(region->pending_ckp);
+ region->last_ckp = last_ckp;
+ region->time_ckp = time(NULL);
+
+ /*
+ * XXX
+ * If we ever do more types of locking and logging, this changes.
+ */
+ region->logtype = 0;
+ region->locktype = 0;
+ region->naborts = 0;
+ region->ncommits = 0;
+ region->nbegins = 0;
+ region->nactive = 0;
+ region->maxnactive = 0;
+
+ SH_TAILQ_INIT(&region->active_txn);
+
+ return (0);
+}
+
+/*
+ * __txn_close --
+ * Close a transaction region.
+ *
+ * PUBLIC: int __txn_close __P((DB_ENV *));
+ */
+int
+__txn_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXN *txnp;
+ DB_TXNMGR *tmgrp;
+ u_int32_t txnid;
+ int ret, t_ret;
+
+ ret = 0;
+ tmgrp = dbenv->tx_handle;
+
+ /*
+ * This function can only be called once per process (i.e., not
+ * once per thread), so no synchronization is required.
+ *
+ * The caller is doing something wrong if close is called with
+ * active transactions. Try to abort any active transactions;
+ * it's quite likely the aborts will fail because recovery won't
+ * find open files. If we can't abort a transaction, we panic,
+ * since recovery has to be run to get back to a known state.
+ */
+ if (TAILQ_FIRST(&tmgrp->txn_chain) != NULL) {
+ __db_err(dbenv,
+ "Error: closing the transaction region with active transactions\n");
+ ret = EINVAL;
+ while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL) {
+ txnid = txnp->txnid;
+ if ((t_ret = txn_abort(txnp)) != 0) {
+ __db_err(dbenv,
+ "Unable to abort transaction 0x%x: %s\n",
+ txnid, db_strerror(t_ret));
+ ret = __db_panic(dbenv, t_ret);
+ }
+ }
+ }
+
+ /* Flush the log. */
+ if (LOGGING_ON(dbenv) &&
+ (t_ret = log_flush(dbenv, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the per-thread lock. */
+ if (tmgrp->mutexp != NULL)
+ __db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp);
+
+ /* Detach from the region. */
+ if ((t_ret = __db_r_detach(dbenv, &tmgrp->reginfo, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(tmgrp, sizeof(*tmgrp));
+
+ dbenv->tx_handle = NULL;
+ return (ret);
+}
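
Because __txn_close treats leftover active transactions as an application error, a well-behaved caller resolves every transaction before closing the environment. A hedged sketch of that shutdown order (shutdown_env and the commit flag are illustrative names):

#include <db.h>

int
shutdown_env(DB_ENV *dbenv, DB_TXN *txn, int commit)
{
    int ret, t_ret;

    /*
     * Resolve the open transaction first; leaving it active makes
     * __txn_close report an error and attempt the abort itself.
     */
    ret = commit ? txn_commit(txn, 0) : txn_abort(txn);

    if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}
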
+
+int
+txn_stat(dbenv, statp, db_malloc)
+ DB_ENV *dbenv;
+ DB_TXN_STAT **statp;
+ void *(*db_malloc) __P((size_t));
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ DB_TXN_STAT *stats;
+ TXN_DETAIL *txnp;
+ size_t nbytes;
+ u_int32_t nactive, ndx;
+ int ret, slop;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
+ return (__dbcl_txn_stat(dbenv, statp, db_malloc));
+#endif
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, DB_INIT_TXN);
+
+ *statp = NULL;
+
+ slop = 200;
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+retry: R_LOCK(dbenv, &mgr->reginfo);
+ nactive = region->nactive;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * Allocate extra active structures to handle any transactions that
+ * are created while we have the region unlocked.
+ */
+ nbytes = sizeof(DB_TXN_STAT) + sizeof(DB_TXN_ACTIVE) * (nactive + slop);
+ if ((ret = __os_malloc(dbenv, nbytes, db_malloc, &stats)) != 0)
+ return (ret);
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ stats->st_last_txnid = region->last_txnid;
+ stats->st_last_ckp = region->last_ckp;
+ stats->st_maxtxns = region->maxtxns;
+ stats->st_naborts = region->naborts;
+ stats->st_nbegins = region->nbegins;
+ stats->st_ncommits = region->ncommits;
+ stats->st_pending_ckp = region->pending_ckp;
+ stats->st_time_ckp = region->time_ckp;
+ stats->st_nactive = region->nactive;
+ if (stats->st_nactive > nactive + slop) {
+ R_UNLOCK(dbenv, &mgr->reginfo);
+ slop *= 2;
+ goto retry;
+ }
+ stats->st_maxnactive = region->maxnactive;
+ stats->st_txnarray = (DB_TXN_ACTIVE *)&stats[1];
+
+ ndx = 0;
+ for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ txnp != NULL;
+ txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) {
+ stats->st_txnarray[ndx].txnid = txnp->txnid;
+ if (txnp->parent == INVALID_ROFF)
+ stats->st_txnarray[ndx].parentid = TXN_INVALID_ID;
+ else
+ stats->st_txnarray[ndx].parentid =
+ ((TXN_DETAIL *)R_ADDR(&mgr->reginfo,
+ txnp->parent))->txnid;
+ stats->st_txnarray[ndx].lsn = txnp->begin_lsn;
+ ndx++;
+
+ if (ndx >= stats->st_nactive)
+ break;
+ }
+
+ stats->st_region_wait = mgr->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = mgr->reginfo.rp->mutex.mutex_set_nowait;
+ stats->st_regsize = mgr->reginfo.rp->size;
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ *statp = stats;
+ return (0);
+}
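
A caller might use txn_stat as in the sketch below to snapshot the region; the print format is arbitrary, and freeing the structure with free() assumes no db_malloc override was supplied:

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

int
print_txn_stats(DB_ENV *dbenv)
{
    DB_TXN_STAT *sp;
    u_int32_t i;
    int ret;

    /* NULL: let the library allocate the structure with malloc(3). */
    if ((ret = txn_stat(dbenv, &sp, NULL)) != 0)
        return (ret);

    printf("active %lu of %lu, commits %lu, aborts %lu\n",
        (unsigned long)sp->st_nactive, (unsigned long)sp->st_maxtxns,
        (unsigned long)sp->st_ncommits, (unsigned long)sp->st_naborts);
    for (i = 0; i < sp->st_nactive; i++)
        printf("active txn 0x%lx\n",
            (unsigned long)sp->st_txnarray[i].txnid);

    free(sp);        /* Assumes no db_malloc override was supplied. */
    return (0);
}
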
diff --git a/bdb/xa/xa.c b/bdb/xa/xa.c
new file mode 100644
index 00000000000..b13a6d503b3
--- /dev/null
+++ b/bdb/xa/xa.c
@@ -0,0 +1,661 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: xa.c,v 11.10 2000/12/14 07:39:14 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "log.h"
+#include "txn.h"
+#include "db_am.h"
+#include "db_dispatch.h"
+
+static int __db_xa_close __P((char *, int, long));
+static int __db_xa_commit __P((XID *, int, long));
+static int __db_xa_complete __P((int *, int *, int, long));
+static int __db_xa_end __P((XID *, int, long));
+static int __db_xa_forget __P((XID *, int, long));
+static int __db_xa_open __P((char *, int, long));
+static int __db_xa_prepare __P((XID *, int, long));
+static int __db_xa_recover __P((XID *, long, int, long));
+static int __db_xa_rollback __P((XID *, int, long));
+static int __db_xa_start __P((XID *, int, long));
+static void __xa_txn_end __P((DB_ENV *));
+static void __xa_txn_init __P((DB_ENV *, TXN_DETAIL *, size_t));
+
+/*
+ * Possible flag values:
+ * Dynamic registration 0 => no dynamic registration
+ * TMREGISTER => dynamic registration
+ * Asynchronous operation 0 => no support for asynchrony
+ * TMUSEASYNC => async support
+ * Migration support 0 => migration of transactions across
+ * threads is possible
+ * TMNOMIGRATE => no migration across threads
+ */
+const struct xa_switch_t db_xa_switch = {
+ "Berkeley DB", /* name[RMNAMESZ] */
+ TMNOMIGRATE, /* flags */
+ 0, /* version */
+ __db_xa_open, /* xa_open_entry */
+ __db_xa_close, /* xa_close_entry */
+ __db_xa_start, /* xa_start_entry */
+ __db_xa_end, /* xa_end_entry */
+ __db_xa_rollback, /* xa_rollback_entry */
+ __db_xa_prepare, /* xa_prepare_entry */
+ __db_xa_commit, /* xa_commit_entry */
+ __db_xa_recover, /* xa_recover_entry */
+ __db_xa_forget, /* xa_forget_entry */
+ __db_xa_complete /* xa_complete_entry */
+};
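
A transaction manager normally picks these entry points up from the switch itself. The sketch below exercises the switch directly, the way a TM's open call would; the DB_HOME path "/home/dbhome" and rmid value 1 are placeholders, and "xa.h" is assumed to provide the XID, TM* flag and xa_switch_t definitions used here:

#include "xa.h"        /* XID, TM* flags, struct xa_switch_t, XA_OK */

extern const struct xa_switch_t db_xa_switch;

int
smoke_test_open(void)
{
    int ret;

    /* xa_info carries the DB_HOME directory; rmid 1 is arbitrary. */
    ret = db_xa_switch.xa_open_entry((char *)"/home/dbhome", 1, TMNOFLAGS);
    if (ret != XA_OK)
        return (ret);
    return (db_xa_switch.xa_close_entry((char *)"", 1, TMNOFLAGS));
}
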
+
+/*
+ * __db_xa_open --
+ * The open call in the XA protocol. The rmid field is an id number
+ * that the TM assigned us and will pass us on every xa call. We need to
+ * map that rmid number into a dbenv structure that we create during
+ * initialization. Since this id number is thread-specific, we do not
+ * need to store it in shared memory. The file xa_map.c implements all
+ * such xa->db mappings.
+ * The xa_info field is instance-specific information. We require
+ * that the value of DB_HOME be passed in xa_info. Since xa_info is the
+ * only thing we get to pass to db_env_create, any other configuration
+ * information has to come from a config file rather than from the
+ * db_env_create call.
+ */
+static int
+__db_xa_open(xa_info, rmid, flags)
+ char *xa_info;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /* Check whether we already have this environment open. */
+ if (__db_rmid_to_env(rmid, &env) == 0)
+ return (XA_OK);
+
+ /* Open a new environment. */
+#define XA_FLAGS \
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN
+ if (db_env_create(&env, 0) != 0)
+ return (XAER_RMERR);
+ if (env->open(env, xa_info, XA_FLAGS, 0) != 0)
+ goto err;
+
+ /* Create the mapping. */
+ if (__db_map_rmid(rmid, env) != 0)
+ goto err;
+
+ /* Allocate space for the current transaction. */
+ if (__os_calloc(env, 1, sizeof(DB_TXN), &env->xa_txn) != 0)
+ goto err;
+ env->xa_txn->txnid = TXN_INVALID;
+
+ return (XA_OK);
+
+err: (void)env->close(env, 0);
+
+ return (XAER_RMERR);
+}
+
+/*
+ * __db_xa_close --
+ * The close call of the XA protocol. The only trickiness here
+ * is that if there are any active transactions, we must fail. It is
+ * *not* an error to call close on an environment that has already been
+ * closed (I am interpreting that to mean it's OK to call close on an
+ * environment that has never been opened).
+ */
+static int
+__db_xa_close(xa_info, rmid, flags)
+ char *xa_info;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ int ret, t_ret;
+
+ COMPQUIET(xa_info, NULL);
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /* If the environment is closed, then we're done. */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XA_OK);
+
+ /* Check if there are any pending transactions. */
+ if (env->xa_txn != NULL && env->xa_txn->txnid != TXN_INVALID)
+ return (XAER_PROTO);
+
+ /* Destroy the mapping. */
+ ret = __db_unmap_rmid(rmid);
+
+ /* Discard space held for the current transaction. */
+ if (env->xa_txn != NULL)
+ __os_free(env->xa_txn, sizeof(DB_TXN));
+
+ /* Close the environment. */
+ if ((t_ret = env->close(env, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret == 0 ? XA_OK : XAER_RMERR);
+}
+
+/*
+ * __db_xa_start --
+ * Begin a transaction for the current resource manager.
+ */
+static int
+__db_xa_start(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+ int is_known;
+
+#define OK_FLAGS (TMJOIN | TMRESUME | TMNOWAIT | TMASYNC | TMNOFLAGS)
+ if (LF_ISSET(~OK_FLAGS))
+ return (XAER_INVAL);
+
+ if (LF_ISSET(TMJOIN) && LF_ISSET(TMRESUME))
+ return (XAER_INVAL);
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ is_known = __db_xid_to_txn(env, xid, &off) == 0;
+
+ if (is_known && !LF_ISSET(TMRESUME) && !LF_ISSET(TMJOIN))
+ return (XAER_DUPID);
+
+ if (!is_known && LF_ISSET(TMRESUME | TMJOIN))
+ return (XAER_NOTA);
+
+ /*
+ * This can't block, so we can ignore TMNOWAIT.
+ *
+ * Other error conditions: RMERR, RMFAIL, OUTSIDE, PROTO, RB*
+ */
+ if (is_known) {
+ td = (TXN_DETAIL *)
+ R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_SUSPENDED &&
+ !LF_ISSET(TMRESUME | TMJOIN))
+ return (XAER_PROTO);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ /* Now, fill in the global transaction structure. */
+ __xa_txn_init(env, td, off);
+ td->xa_status = TXN_XA_STARTED;
+ } else {
+ if (__txn_xa_begin(env, env->xa_txn) != 0)
+ return (XAER_RMERR);
+ (void)__db_map_xid(env, xid, env->xa_txn->off);
+ td = (TXN_DETAIL *)
+ R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo,
+ env->xa_txn->off);
+ td->xa_status = TXN_XA_STARTED;
+ }
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_end --
+ * Disassociate the current transaction from the current process.
+ */
+static int
+__db_xa_end(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ DB_TXN *txn;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (flags != TMNOFLAGS && !LF_ISSET(TMSUSPEND | TMSUCCESS | TMFAIL))
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ txn = env->xa_txn;
+ if (off != txn->off)
+ return (XAER_PROTO);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->status == TXN_ABORTED)
+ return (XA_RBOTHER);
+
+ if (td->xa_status != TXN_XA_STARTED)
+ return (XAER_PROTO);
+
+ /* Update the shared memory last_lsn field */
+ td->last_lsn = txn->last_lsn;
+
+ /*
+ * If we ever support XA migration, we cannot keep SUSPEND/END
+ * status in the shared region; it would have to be process local.
+ */
+ if (LF_ISSET(TMSUSPEND))
+ td->xa_status = TXN_XA_SUSPENDED;
+ else
+ td->xa_status = TXN_XA_ENDED;
+
+ txn->txnid = TXN_INVALID;
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_prepare --
+ * Sync the log to disk so we can guarantee recoverability.
+ */
+static int
+__db_xa_prepare(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /*
+ * We need to know if we've ever called prepare on this.
+ * As part of the prepare, we set the xa_status field to
+ * reflect the fact that prepare has been called, and if
+ * it's ever called again, it's an error.
+ */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __xa_txn_init(env, td, off);
+
+ if (txn_prepare(env->xa_txn) != 0)
+ return (XAER_RMERR);
+
+ td->xa_status = TXN_XA_PREPARED;
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env);
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_commit --
+ * Commit the transaction
+ */
+static int
+__db_xa_commit(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+#undef OK_FLAGS
+#define OK_FLAGS (TMNOFLAGS | TMNOWAIT | TMONEPHASE)
+ if (LF_ISSET(~OK_FLAGS))
+ return (XAER_INVAL);
+
+ /*
+ * We need to know if we've ever called prepare on this.
+ * We can verify this by examining the xa_status field.
+ */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ if (LF_ISSET(TMONEPHASE) &&
+ td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED)
+ return (XAER_PROTO);
+
+ if (!LF_ISSET(TMONEPHASE) && td->xa_status != TXN_XA_PREPARED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __xa_txn_init(env, td, off);
+
+ if (txn_commit(env->xa_txn, 0) != 0)
+ return (XAER_RMERR);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env);
+ return (XA_OK);
+}
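
Taken together, start/end/prepare/commit give the usual two-phase sequence. The sketch below drives one global transaction through the switch in the order the status checks above expect; the XID contents and the helper name run_one_global_txn are made up, since in practice the TM constructs and owns the XID:

#include <string.h>

#include "xa.h"

extern const struct xa_switch_t db_xa_switch;

int
run_one_global_txn(int rmid)
{
    XID xid;
    int ret;

    /* A real TM builds the XID; these values are placeholders. */
    memset(&xid, 0, sizeof(xid));
    xid.formatID = 1;
    xid.gtrid_length = 8;
    xid.bqual_length = 4;
    memcpy(xid.data, "gtrid001bql1", 12);

    if ((ret = db_xa_switch.xa_start_entry(&xid, rmid, TMNOFLAGS)) != XA_OK)
        return (ret);

    /* ... DB calls made here run inside the XA transaction ... */

    if ((ret = db_xa_switch.xa_end_entry(&xid, rmid, TMSUCCESS)) != XA_OK)
        return (ret);
    if ((ret = db_xa_switch.xa_prepare_entry(&xid, rmid, TMNOFLAGS)) != XA_OK)
        return (ret);
    return (db_xa_switch.xa_commit_entry(&xid, rmid, TMNOFLAGS));
}
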
+
+/*
+ * __db_xa_recover --
+ * Returns a list of prepared and heuristically completed transactions.
+ *
+ * The return value is the number of xids placed into the xid array (less
+ * than or equal to the count parameter). The flags indicate whether
+ * we are starting a new scan or continuing an existing one.
+ */
+static int
+__db_xa_recover(xids, count, rmid, flags)
+ XID *xids;
+ long count, flags;
+ int rmid;
+{
+ __txn_xa_regop_args *argp;
+ DBT data;
+ DB_ENV *env;
+ DB_LOG *log;
+ XID *xidp;
+ int err, ret;
+ u_int32_t rectype, txnid;
+
+ ret = 0;
+ xidp = xids;
+
+ /* If the environment is closed, then we're done. */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ /*
+ * If we are starting a scan, then we need to figure out where
+ * to begin. If we are not starting a scan, we'll start from
+ * wherever the log cursor is. Since XA apps cannot be threaded,
+ * we don't have to worry about someone else having moved it.
+ */
+ log = env->lg_handle;
+ if (LF_ISSET(TMSTARTRSCAN)) {
+ if ((err = __log_findckp(env, &log->xa_first)) == DB_NOTFOUND) {
+ /*
+ * If there were no log files, then we have no
+ * transactions to return, so we simply return 0.
+ */
+ return (0);
+ }
+ if ((err = __db_txnlist_init(env, &log->xa_info)) != 0)
+ return (XAER_RMERR);
+ } else {
+ /*
+ * If we are not starting a scan, the log cursor had
+ * better be set.
+ */
+ if (IS_ZERO_LSN(log->xa_lsn))
+ return (XAER_PROTO);
+ }
+
+ /*
+ * At this point log->xa_first contains the point in the log
+ * to which we need to roll back. If we are starting a scan,
+ * we'll start at the last record; if we're continuing a scan,
+ * we'll have to start at log->xa_lsn.
+ */
+
+ memset(&data, 0, sizeof(data));
+ for (err = log_get(env, &log->xa_lsn, &data,
+ LF_ISSET(TMSTARTRSCAN) ? DB_LAST : DB_SET);
+ err == 0 && log_compare(&log->xa_lsn, &log->xa_first) > 0;
+ err = log_get(env, &log->xa_lsn, &data, DB_PREV)) {
+ memcpy(&rectype, data.data, sizeof(rectype));
+
+ /*
+ * The only record types we care about are DB_txn_regop and
+ * DB_txn_xa_regop. If it's a commit, we add it to a txnlist. If
+ * it's a prepare for which we haven't seen a commit, we return it.
+ * We are redoing some of what's in the xa_regop_recovery
+ * code, but we have to do it here so we can get at the xid
+ * in the record.
+ */
+ if (rectype != DB_txn_xa_regop && rectype != DB_txn_regop)
+ continue;
+
+ memcpy(&txnid, (u_int8_t *)data.data + sizeof(rectype),
+ sizeof(txnid));
+ err = __db_txnlist_find(log->xa_info, txnid);
+ switch (rectype) {
+ case DB_txn_regop:
+ if (err == DB_NOTFOUND)
+ __db_txnlist_add(env, log->xa_info, txnid, 0);
+ err = 0;
+ break;
+ case DB_txn_xa_regop:
+ /*
+ * This transaction is committed, so we needn't read
+ * the record and do anything.
+ */
+ if (err == 0)
+ break;
+ if ((err =
+ __txn_xa_regop_read(env, data.data, &argp)) != 0) {
+ ret = XAER_RMERR;
+ goto out;
+ }
+
+ xidp->formatID = argp->formatID;
+ xidp->gtrid_length = argp->gtrid;
+ xidp->bqual_length = argp->bqual;
+ memcpy(xidp->data, argp->xid.data, argp->xid.size);
+ ret++;
+ xidp++;
+ __os_free(argp, sizeof(*argp));
+ if (ret == count)
+ goto done;
+ break;
+ }
+ }
+
+ if (err != 0 && err != DB_NOTFOUND)
+ goto out;
+
+done: if (LF_ISSET(TMENDRSCAN)) {
+ ZERO_LSN(log->xa_lsn);
+ ZERO_LSN(log->xa_first);
+
+out: __db_txnlist_end(env, log->xa_info);
+ log->xa_info = NULL;
+ }
+ return (ret);
+}
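
After a crash a TM asks each resource manager for its unresolved prepared transactions and then completes them. A sketch of that scan using a single call that both starts and ends it; committing every returned XID is only for illustration, since a real TM consults its own log to decide commit versus rollback, and resolve_prepared and the array size 32 are arbitrary:

#include "xa.h"

extern const struct xa_switch_t db_xa_switch;

int
resolve_prepared(int rmid)
{
    XID xids[32];
    int i, n;

    /* One call that both starts and ends the scan. */
    n = db_xa_switch.xa_recover_entry(xids, 32L, rmid,
        TMSTARTRSCAN | TMENDRSCAN);
    if (n < 0)
        return (n);

    for (i = 0; i < n; i++)
        (void)db_xa_switch.xa_commit_entry(&xids[i], rmid, TMNOFLAGS);
    return (0);
}
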
+
+/*
+ * __db_xa_rollback --
+ * Abort an XA transaction.
+ */
+static int
+__db_xa_rollback(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ if (td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED
+ && td->xa_status != TXN_XA_PREPARED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __xa_txn_init(env, td, off);
+ if (txn_abort(env->xa_txn) != 0)
+ return (XAER_RMERR);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env);
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_forget --
+ * Forget about an XID for a transaction that was heuristically
+ * completed. Since we do not heuristically complete anything, I
+ * don't think we have to do anything here, but we should make sure
+ * that we reclaim the slots in the txnid table.
+ */
+static int
+__db_xa_forget(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ /*
+ * If the mapping is gone, then we're done.
+ */
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XA_OK);
+
+ __db_unmap_xid(env, xid, off);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_complete --
+ * Used to wait for asynchronous operations to complete. Since we're
+ * not doing asynch, this is an invalid operation.
+ */
+static int
+__db_xa_complete(handle, retval, rmid, flags)
+ int *handle, *retval, rmid;
+ long flags;
+{
+ COMPQUIET(handle, NULL);
+ COMPQUIET(retval, NULL);
+ COMPQUIET(rmid, 0);
+ COMPQUIET(flags, 0);
+
+ return (XAER_INVAL);
+}
+
+/*
+ * __xa_txn_init --
+ * Fill in the fields of the local transaction structure given
+ * the detail transaction structure.
+ */
+static void
+__xa_txn_init(env, td, off)
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+{
+ DB_TXN *txn;
+
+ txn = env->xa_txn;
+ txn->mgrp = env->tx_handle;
+ txn->parent = NULL;
+ txn->last_lsn = td->last_lsn;
+ txn->txnid = td->txnid;
+ txn->off = off;
+ txn->flags = 0;
+}
+
+/*
+ * __xa_txn_end --
+ * Invalidate a transaction structure that was generated by xa_txn_init.
+ */
+static void
+__xa_txn_end(env)
+ DB_ENV *env;
+{
+ DB_TXN *txn;
+
+ txn = env->xa_txn;
+ if (txn != NULL)
+ txn->txnid = TXN_INVALID;
+}
diff --git a/bdb/xa/xa_db.c b/bdb/xa/xa_db.c
new file mode 100644
index 00000000000..ba3dbfeddbb
--- /dev/null
+++ b/bdb/xa/xa_db.c
@@ -0,0 +1,182 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: xa_db.c,v 11.9 2000/09/06 18:57:59 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "xa.h"
+#include "xa_ext.h"
+#include "txn.h"
+
+static int __xa_close __P((DB *, u_int32_t));
+static int __xa_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+static int __xa_del __P((DB *, DB_TXN *, DBT *, u_int32_t));
+static int __xa_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+static int __xa_open __P((DB *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+static int __xa_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+
+typedef struct __xa_methods {
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*open) __P((DB *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+} XA_METHODS;
+
+/*
+ * __db_xa_create --
+ * DB XA constructor.
+ *
+ * PUBLIC: int __db_xa_create __P((DB *));
+ */
+int
+__db_xa_create(dbp)
+ DB *dbp;
+{
+ XA_METHODS *xam;
+ int ret;
+
+ /*
+ * Interpose XA routines in front of any method that takes a TXN
+ * ID as an argument.
+ */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(XA_METHODS), &xam)) != 0)
+ return (ret);
+
+ dbp->xa_internal = xam;
+ xam->open = dbp->open;
+ dbp->open = __xa_open;
+ xam->close = dbp->close;
+ dbp->close = __xa_close;
+
+ return (0);
+}
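
The constructor above uses a save-and-wrap idiom: the original method pointer is stashed in a private structure and a wrapper is installed in its place. Reduced to its essentials, with nothing DB-specific about it (struct handle, wrapped_get and interpose_get are invented names):

struct handle {
    int (*get)(struct handle *, int key, int *valuep);      /* public slot */
    int (*real_get)(struct handle *, int key, int *valuep); /* saved original */
};

static int
wrapped_get(struct handle *h, int key, int *valuep)
{
    /* A real wrapper would choose the XA transaction here. */
    return (h->real_get(h, key, valuep));
}

static void
interpose_get(struct handle *h)
{
    h->real_get = h->get;    /* remember the original method */
    h->get = wrapped_get;    /* callers now go through the wrapper */
}
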
+
+/*
+ * __xa_open --
+ * XA open wrapper.
+ */
+static int
+__xa_open(dbp, name, subdb, type, flags, mode)
+ DB *dbp;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ XA_METHODS *xam;
+ int ret;
+
+ xam = (XA_METHODS *)dbp->xa_internal;
+
+ if ((ret = xam->open(dbp, name, subdb, type, flags, mode)) != 0)
+ return (ret);
+
+ xam->cursor = dbp->cursor;
+ xam->del = dbp->del;
+ xam->get = dbp->get;
+ xam->put = dbp->put;
+ dbp->cursor = __xa_cursor;
+ dbp->del = __xa_del;
+ dbp->get = __xa_get;
+ dbp->put = __xa_put;
+
+ return (0);
+}
+
+static int
+__xa_cursor(dbp, txn, dbcp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL && txn == dbp->open_txn ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->cursor(dbp, t, dbcp, flags));
+}
+
+static int
+__xa_del(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL && txn == dbp->open_txn ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->del(dbp, t, key, flags));
+}
+
+static int
+__xa_close(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int (*real_close) __P((DB *, u_int32_t));
+
+ real_close = ((XA_METHODS *)dbp->xa_internal)->close;
+
+ __os_free(dbp->xa_internal, sizeof(XA_METHODS));
+ dbp->xa_internal = NULL;
+
+ return (real_close(dbp, flags));
+}
+
+static int
+__xa_get(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL && txn == dbp->open_txn ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->get
+ (dbp, t, key, data, flags));
+}
+
+static int
+__xa_put(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL && txn == dbp->open_txn ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->put
+ (dbp, t, key, data, flags));
+}
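
The effect of these wrappers is that, inside an XA transaction, the application keeps calling the ordinary DB methods with a NULL transaction argument and the current XA transaction is supplied automatically. A sketch under that assumption; the database name "accounts.db", the mode 0664 and the helper name xa_store are placeholders:

#include <string.h>
#include <db.h>

int
xa_store(DB_ENV *dbenv, const char *k, const char *v)
{
    DB *dbp;
    DBT key, data;
    int ret;

    if ((ret = db_create(&dbp, dbenv, 0)) != 0)
        return (ret);
    if ((ret = dbp->open(dbp,
        "accounts.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
        goto err;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    key.data = (void *)k;
    key.size = (u_int32_t)strlen(k) + 1;
    data.data = (void *)v;
    data.size = (u_int32_t)strlen(v) + 1;

    /* NULL txn: the __xa_put wrapper supplies dbenv->xa_txn. */
    ret = dbp->put(dbp, NULL, &key, &data, 0);

err:    (void)dbp->close(dbp, 0);
    return (ret);
}
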
diff --git a/bdb/xa/xa_map.c b/bdb/xa/xa_map.c
new file mode 100644
index 00000000000..1af268477db
--- /dev/null
+++ b/bdb/xa/xa_map.c
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: xa_map.c,v 11.5 2000/11/30 00:58:46 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "txn.h"
+
+/*
+ * This file contains all the mapping information that we need to support
+ * the DB/XA interface.
+ */
+
+/*
+ * __db_rmid_to_env
+ * Return the environment associated with a given XA rmid.
+ *
+ * PUBLIC: int __db_rmid_to_env __P((int rmid, DB_ENV **envp));
+ */
+int
+__db_rmid_to_env(rmid, envp)
+ int rmid;
+ DB_ENV **envp;
+{
+ DB_ENV *env;
+
+ env = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+ if (env != NULL && env->xa_rmid == rmid) {
+ *envp = env;
+ return (0);
+ }
+
+ /*
+ * When we map an rmid, move that environment to be the first one in
+ * the list of environments, so we acquire the correct environment
+ * in DB->open.
+ */
+ for (; env != NULL; env = TAILQ_NEXT(env, links))
+ if (env->xa_rmid == rmid) {
+ TAILQ_REMOVE(&DB_GLOBAL(db_envq), env, links);
+ TAILQ_INSERT_HEAD(&DB_GLOBAL(db_envq), env, links);
+ *envp = env;
+ return (0);
+ }
+
+ return (1);
+}
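
The loop above implements a move-to-front list: once an rmid is looked up, its environment is moved to the head so the next lookup hits on the first comparison. The same idiom on a generic TAILQ, with invented names (struct node, find_mru):

#include <stddef.h>
#include <sys/queue.h>

struct node {
    int key;
    TAILQ_ENTRY(node) links;
};
TAILQ_HEAD(node_list, node);

struct node *
find_mru(struct node_list *list, int key)
{
    struct node *n;

    for (n = TAILQ_FIRST(list); n != NULL; n = TAILQ_NEXT(n, links))
        if (n->key == key) {
            /* Move the hit to the front for the next lookup. */
            TAILQ_REMOVE(list, n, links);
            TAILQ_INSERT_HEAD(list, n, links);
            return (n);
        }
    return (NULL);
}
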
+
+/*
+ * __db_xid_to_txn
+ * Return the txn that corresponds to this XID.
+ *
+ * PUBLIC: int __db_xid_to_txn __P((DB_ENV *, XID *, size_t *));
+ */
+int
+__db_xid_to_txn(dbenv, xid, offp)
+ DB_ENV *dbenv;
+ XID *xid;
+ size_t *offp;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *tmr;
+ struct __txn_detail *td;
+
+ mgr = dbenv->tx_handle;
+ tmr = mgr->reginfo.primary;
+
+ /*
+ * Search the internal active transaction table to find the
+ * matching xid. If this is a performance hit, then we
+ * can create a hash table, but I doubt it's worth it.
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ td != NULL;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail))
+ if (memcmp(xid->data, td->xid, XIDDATASIZE) == 0)
+ break;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (td == NULL)
+ return (EINVAL);
+
+ *offp = R_OFFSET(&mgr->reginfo, td);
+ return (0);
+}
+
+/*
+ * __db_map_rmid
+ * Create a mapping between the specified rmid and environment.
+ *
+ * PUBLIC: int __db_map_rmid __P((int, DB_ENV *));
+ */
+int
+__db_map_rmid(rmid, env)
+ int rmid;
+ DB_ENV *env;
+{
+ env->xa_rmid = rmid;
+ TAILQ_INSERT_TAIL(&DB_GLOBAL(db_envq), env, links);
+ return (0);
+}
+
+/*
+ * __db_unmap_rmid
+ * Destroy the mapping for the given rmid.
+ *
+ * PUBLIC: int __db_unmap_rmid __P((int));
+ */
+int
+__db_unmap_rmid(rmid)
+ int rmid;
+{
+ DB_ENV *e;
+
+ for (e = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+ e != NULL && e->xa_rmid != rmid;
+ e = TAILQ_NEXT(e, links));
+
+ if (e == NULL)
+ return (EINVAL);
+
+ TAILQ_REMOVE(&DB_GLOBAL(db_envq), e, links);
+ return (0);
+}
+
+/*
+ * __db_map_xid
+ * Create a mapping between this XID and the transaction at
+ * "off" in the shared region.
+ *
+ * PUBLIC: int __db_map_xid __P((DB_ENV *, XID *, size_t));
+ */
+int
+__db_map_xid(env, xid, off)
+ DB_ENV *env;
+ XID *xid;
+ size_t off;
+{
+ REGINFO *infop;
+ TXN_DETAIL *td;
+
+ infop = &((DB_TXNMGR *)env->tx_handle)->reginfo;
+ td = (TXN_DETAIL *)R_ADDR(infop, off);
+
+ R_LOCK(env, infop);
+ memcpy(td->xid, xid->data, XIDDATASIZE);
+ td->bqual = (u_int32_t)xid->bqual_length;
+ td->gtrid = (u_int32_t)xid->gtrid_length;
+ td->format = (int32_t)xid->formatID;
+ R_UNLOCK(env, infop);
+
+ return (0);
+}
+
+/*
+ * __db_unmap_xid
+ * Destroy the mapping for the specified XID.
+ *
+ * PUBLIC: void __db_unmap_xid __P((DB_ENV *, XID *, size_t));
+ */
+void
+__db_unmap_xid(env, xid, off)
+ DB_ENV *env;
+ XID *xid;
+ size_t off;
+{
+ TXN_DETAIL *td;
+
+ COMPQUIET(xid, NULL);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ memset(td->xid, 0, sizeof(td->xid));
+}